Mirror of https://github.com/akvorado/akvorado.git (synced 2025-12-12 06:24:10 +01:00)
inlet/bmp: initial support for BMP protocol
At first, there was an attempt to use the BMP collector implementation from bio-rd. However, the current implementation uses GoBGP instead:

- BMP is very simple from a protocol point of view. The hard work is mostly around decoding. Both bio-rd and GoBGP can decode, but GoBGP can also generate messages, which is useful for testing (generation is its primary purpose; I suppose parsing was added for testing). Using only one library is always better. An alternative would be GoBMP, but it also only does parsing.
- Logging and metrics can be customized easily (but the work was already done for bio-rd, so not a real argument).
- bio-rd is an application and offers no API stability (and this project does the same).
- GoBGP supports FlowSpec, which may be useful in the future for the DDoS part. Again, one library for everything is better (but honestly, GoBGP as a library is not its best part; maybe github.com/jwhited/corebgp would be a better fit while keeping GoBGP for decoding/encoding).

There was a huge effort around having a RIB that is memory-efficient (data are interned to save memory), performant during reads, and decent during insertions. We rely on a patched version of Kentik's Patricia trees to be able to apply mutations to the tree.

There were several attempts to implement some kind of graceful restart, but ultimately, the design is kept simple: when a BMP connection goes down, routes are removed after a configurable time. If the connection comes back up, it is just considered new. It would have been ideal to rely on EoR markers, but the RFC is unclear about them, and they are likely to be per peer, making it difficult to know what to do if one peer is back but not the other.

Remaining tasks:

- [ ] Confirm support for LocRIB
- [ ] Import data into ClickHouse
- [ ] Make data available in the frontend

Fix #52
Makefile (8 lines changed)
@@ -80,7 +80,7 @@ inlet/flow/decoder/flow-ANY.pb.go: inlet/flow/decoder/flow-$(FLOW_VERSION).pb.go
|
||||
done
|
||||
inlet/flow/decoder/flow-$(FLOW_VERSION).pb.go: inlet/flow/data/schemas/flow-$(FLOW_VERSION).proto | $(PROTOC_GEN_GO) ; $(info $(M) compiling protocol buffers definition…)
|
||||
$Q $(PROTOC) -I=. --plugin=$(PROTOC_GEN_GO) --go_out=module=$(MODULE):. $<
|
||||
$Q sed -i.bkp s/FlowMessagev./FlowMessage/g $@ && rm $@.bkp
|
||||
$Q sed -i.bkp s/v$(FLOW_VERSION)//g $@ && rm $@.bkp
|
||||
|
||||
common/clickhousedb/mocks/mock_driver.go: $(MOCKGEN) ; $(info $(M) generate mocks for ClickHouse driver…)
|
||||
$Q echo '//go:build !release' > $@
|
||||
@@ -125,9 +125,8 @@ changelog.md: docs/99-changelog.md # To be used by GitHub actions only.
|
||||
|
||||
# Tests
|
||||
|
||||
TEST_TARGETS := test-bench test-short test-verbose test-race
|
||||
TEST_TARGETS := test-short test-verbose test-race
|
||||
.PHONY: $(TEST_TARGETS) check test tests
|
||||
test-bench: ARGS=-run=__absolutelynothing__ -bench=. ## Run benchmarks
|
||||
test-short: ARGS=-short ## Run only short tests
|
||||
test-verbose: ARGS=-v ## Run tests in verbose mode with coverage reporting
|
||||
test-race: CGO_ENABLED=1 ARGS=-race ## Run tests with race detector
|
||||
@@ -138,6 +137,9 @@ check test tests: fmt lint $(GENERATED) | $(GOTESTSUM) ; $(info $(M) running $(N
|
||||
$Q $(GOTESTSUM) --junitfile test/tests.xml -- \
|
||||
-timeout $(TIMEOUT)s \
|
||||
$(ARGS) $(PKGS)
|
||||
.PHONY: test-bench
|
||||
test-bench: $(GENERATED) ; $(info $(M) running benchmarks…) @ ## Run benchmarks
|
||||
$Q $(GOTESTSUM) -f standard-quiet -- --timeout $(TIMEOUT)s -run=__absolutelynothing__ -bench=. $(PKGS)
|
||||
|
||||
COVERAGE_MODE = atomic
|
||||
.PHONY: test-coverage test-coverage-xml test-coverage-lcov
|
||||
|
||||
@@ -169,6 +169,35 @@ demo-exporter:
|
||||
20: "core"
|
||||
21: "core"
|
||||
listen: 0.0.0.0:161
|
||||
bmp: &bmp
|
||||
target: akvorado-inlet:10179
|
||||
routes:
|
||||
- prefixes: 192.0.2.0/24,2a01:db8:cafe:1::/64
|
||||
aspath: 64501
|
||||
communities: 65401:10,65401:12
|
||||
- prefixes: 203.0.113.0/24,2a01:db8:cafe:2::/64
|
||||
aspath: 65401
|
||||
communities: 65401:10,65401:13
|
||||
- prefixes: 216.58.206.0/24,2a00:1450:4007:807::2000/124
|
||||
aspath: 174,1299,15169
|
||||
- prefixes: 179.60.192.0/24,2a03:2880:f130:83:face:b00c:0::/112
|
||||
aspath: 1299,1299,32934
|
||||
- prefixes: 198.38.120.0/23,2a00:86c0:115:115::/112
|
||||
aspath: 5511,1299,1299,32934
|
||||
- prefixes: 23.33.27.0/24,2a02:26f0:9100:28:0:17c0::/112
|
||||
aspath: 174,174,174,20940
|
||||
- prefixes: 52.84.175.0/24,2600:9000:218d:4a00:15:74db::/112
|
||||
aspath: 16509
|
||||
- prefixes: 199.232.178.0/29,2a04:4e42:1d::/126
|
||||
aspath: 1299,54113
|
||||
- prefixes: 52.223.202.128/27
|
||||
aspath: 16509,46489
|
||||
- prefixes: 138.231.0.0/16
|
||||
aspath: 1299,174,2269,2269
|
||||
- prefixes: 0.0.0.0/0
|
||||
aspath: 174
|
||||
- prefixes: ::/0
|
||||
aspath: 1299
|
||||
flows: &flows1
|
||||
samplingrate: 50000
|
||||
target: akvorado-inlet:2055
|
||||
@@ -351,6 +380,8 @@ demo-exporter:
|
||||
20: "core"
|
||||
21: "core"
|
||||
listen: 0.0.0.0:161
|
||||
bmp:
|
||||
<<: *bmp
|
||||
flows:
|
||||
<<: *flows1
|
||||
seed: 100
|
||||
@@ -362,6 +393,8 @@ demo-exporter:
|
||||
20: "core"
|
||||
21: "core"
|
||||
listen: 0.0.0.0:161
|
||||
bmp:
|
||||
<<: *bmp
|
||||
flows:
|
||||
<<: *flows1
|
||||
seed: 200
|
||||
@@ -373,6 +406,8 @@ demo-exporter:
|
||||
20: "core"
|
||||
21: "core"
|
||||
listen: 0.0.0.0:161
|
||||
bmp:
|
||||
<<: *bmp
|
||||
flows:
|
||||
<<: *flows1
|
||||
seed: 300
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"akvorado/common/http"
|
||||
"akvorado/common/reporter"
|
||||
"akvorado/demoexporter"
|
||||
"akvorado/demoexporter/bmp"
|
||||
"akvorado/demoexporter/flows"
|
||||
"akvorado/demoexporter/snmp"
|
||||
)
|
||||
@@ -22,6 +23,7 @@ type DemoExporterConfiguration struct {
|
||||
HTTP http.Configuration
|
||||
DemoExporter demoexporter.Configuration `mapstructure:",squash" yaml:",inline"`
|
||||
SNMP snmp.Configuration
|
||||
BMP bmp.Configuration
|
||||
Flows flows.Configuration
|
||||
}
|
||||
|
||||
@@ -31,6 +33,9 @@ func (c *DemoExporterConfiguration) Reset() {
|
||||
HTTP: http.DefaultConfiguration(),
|
||||
Reporting: reporter.DefaultConfiguration(),
|
||||
DemoExporter: demoexporter.DefaultConfiguration(),
|
||||
SNMP: snmp.DefaultConfiguration(),
|
||||
BMP: bmp.DefaultConfiguration(),
|
||||
Flows: flows.DefaultConfiguration(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,6 +94,12 @@ func demoExporterStart(r *reporter.Reporter, config DemoExporterConfiguration, c
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize SNMP component: %w", err)
|
||||
}
|
||||
bmpComponent, err := bmp.New(r, config.BMP, bmp.Dependencies{
|
||||
Daemon: daemonComponent,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize BMP component: %w", err)
|
||||
}
|
||||
flowsComponent, err := flows.New(r, config.Flows, flows.Dependencies{
|
||||
Daemon: daemonComponent,
|
||||
})
|
||||
@@ -116,6 +127,7 @@ func demoExporterStart(r *reporter.Reporter, config DemoExporterConfiguration, c
|
||||
components := []interface{}{
|
||||
httpComponent,
|
||||
snmpComponent,
|
||||
bmpComponent,
|
||||
flowsComponent,
|
||||
demoExporterComponent,
|
||||
}
|
||||
|
||||
cmd/inlet.go (13 lines changed)
@@ -11,6 +11,7 @@ import (
|
||||
"akvorado/common/daemon"
|
||||
"akvorado/common/http"
|
||||
"akvorado/common/reporter"
|
||||
"akvorado/inlet/bmp"
|
||||
"akvorado/inlet/core"
|
||||
"akvorado/inlet/flow"
|
||||
"akvorado/inlet/geoip"
|
||||
@@ -24,6 +25,7 @@ type InletConfiguration struct {
|
||||
HTTP http.Configuration
|
||||
Flow flow.Configuration
|
||||
SNMP snmp.Configuration
|
||||
BMP bmp.Configuration
|
||||
GeoIP geoip.Configuration
|
||||
Kafka kafka.Configuration
|
||||
Core core.Configuration
|
||||
@@ -36,6 +38,7 @@ func (c *InletConfiguration) Reset() {
|
||||
Reporting: reporter.DefaultConfiguration(),
|
||||
Flow: flow.DefaultConfiguration(),
|
||||
SNMP: snmp.DefaultConfiguration(),
|
||||
BMP: bmp.DefaultConfiguration(),
|
||||
GeoIP: geoip.DefaultConfiguration(),
|
||||
Kafka: kafka.DefaultConfiguration(),
|
||||
Core: core.DefaultConfiguration(),
|
||||
@@ -105,6 +108,12 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize SNMP component: %w", err)
|
||||
}
|
||||
bmpComponent, err := bmp.New(r, config.BMP, bmp.Dependencies{
|
||||
Daemon: daemonComponent,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to initialize BMP component: %w", err)
|
||||
}
|
||||
geoipComponent, err := geoip.New(r, config.GeoIP, geoip.Dependencies{
|
||||
Daemon: daemonComponent,
|
||||
})
|
||||
@@ -120,7 +129,8 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
|
||||
coreComponent, err := core.New(r, config.Core, core.Dependencies{
|
||||
Daemon: daemonComponent,
|
||||
Flow: flowComponent,
|
||||
Snmp: snmpComponent,
|
||||
SNMP: snmpComponent,
|
||||
BMP: bmpComponent,
|
||||
GeoIP: geoipComponent,
|
||||
Kafka: kafkaComponent,
|
||||
HTTP: httpComponent,
|
||||
@@ -142,6 +152,7 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
|
||||
components := []interface{}{
|
||||
httpComponent,
|
||||
snmpComponent,
|
||||
bmpComponent,
|
||||
geoipComponent,
|
||||
kafkaComponent,
|
||||
coreComponent,
|
||||
|
||||
common/helpers/intern.go (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package helpers
|
||||
|
||||
// InternValue is the interface that should be implemented by types
|
||||
// used in an intern pool. Also, it should be immutable.
|
||||
type InternValue[T any] interface {
|
||||
Hash() uint64
|
||||
Equal(T) bool
|
||||
}
|
||||
|
||||
// InternReference is a reference to an interned value. 0 is not a
|
||||
// valid reference value.
|
||||
type InternReference[T any] uint32
|
||||
|
||||
// InternPool keeps values in a pool by storing only one distinct copy
|
||||
// of each. Values will be referred to as a uint32 (implemented as an
// index).
|
||||
type InternPool[T InternValue[T]] struct {
|
||||
values []internValue[T]
|
||||
availableIndexes []InternReference[T]
|
||||
valueIndexes map[uint64]InternReference[T]
|
||||
}
|
||||
|
||||
// internValue is the value stored in an intern pool. It adds reference
// bookkeeping to the raw value.
|
||||
type internValue[T InternValue[T]] struct {
|
||||
next InternReference[T] // next value with the same hash
|
||||
previous InternReference[T] // previous value with the same hash
|
||||
refCount uint32
|
||||
|
||||
value T
|
||||
}
|
||||
|
||||
// NewInternPool creates a new intern pool.
|
||||
func NewInternPool[T InternValue[T]]() *InternPool[T] {
|
||||
return &InternPool[T]{
|
||||
values: make([]internValue[T], 1), // first slot is reserved
|
||||
availableIndexes: make([]InternReference[T], 0),
|
||||
valueIndexes: make(map[uint64]InternReference[T]),
|
||||
}
|
||||
}
|
||||
|
||||
// Get retrieves a (copy of the) value from the intern pool using its reference.
|
||||
func (p *InternPool[T]) Get(ref InternReference[T]) T {
|
||||
return p.values[ref].value
|
||||
}
|
||||
|
||||
// Take removes a value from the intern pool. If this is the last
|
||||
// used reference, it will be deleted from the pool.
|
||||
func (p *InternPool[T]) Take(ref InternReference[T]) {
|
||||
value := &p.values[ref]
|
||||
value.refCount--
|
||||
if value.refCount == 0 {
|
||||
p.availableIndexes = append(p.availableIndexes, ref)
|
||||
if value.previous > 0 {
|
||||
// Not the first one, link previous to next
|
||||
p.values[value.previous].next = value.next
|
||||
p.values[value.next].previous = value.previous
|
||||
return
|
||||
}
|
||||
hash := value.value.Hash()
|
||||
if value.next > 0 {
|
||||
// We are the first one of a chain, move the pointer to the next one
|
||||
p.valueIndexes[hash] = value.next
|
||||
p.values[value.next].previous = 0
|
||||
return
|
||||
}
|
||||
// Last case, we are the last one, let's find our hash and delete us from here
|
||||
delete(p.valueIndexes, hash)
|
||||
}
|
||||
}
|
||||
|
||||
// Put adds a value to the intern pool, returning its reference.
|
||||
func (p *InternPool[T]) Put(value T) InternReference[T] {
|
||||
v := internValue[T]{
|
||||
value: value,
|
||||
refCount: 1,
|
||||
previous: 0,
|
||||
next: 0,
|
||||
}
|
||||
|
||||
// Allocate a new index
|
||||
newIndex := func() InternReference[T] {
|
||||
availCount := len(p.availableIndexes)
|
||||
if availCount > 0 {
|
||||
index := p.availableIndexes[availCount-1]
|
||||
p.availableIndexes = p.availableIndexes[:availCount-1]
|
||||
return index
|
||||
}
|
||||
if len(p.values) == cap(p.values) {
|
||||
// We need to extend capacity first
|
||||
temp := make([]internValue[T], len(p.values), (cap(p.values)+1)*2)
|
||||
copy(temp, p.values)
|
||||
p.values = temp
|
||||
}
|
||||
index := len(p.values)
|
||||
p.values = p.values[:index+1]
|
||||
return InternReference[T](index)
|
||||
}
|
||||
|
||||
// Check if we have already something
|
||||
hash := value.Hash()
|
||||
if index := p.valueIndexes[hash]; index > 0 {
|
||||
prevIndex := index
|
||||
for index > 0 {
|
||||
if p.values[index].value.Equal(value) {
|
||||
p.values[index].refCount++
|
||||
return index
|
||||
}
|
||||
prevIndex = index
|
||||
index = p.values[index].next
|
||||
}
|
||||
|
||||
// We have a collision, add to the chain
|
||||
index = newIndex()
|
||||
v.previous = prevIndex
|
||||
p.values[prevIndex].next = index
|
||||
p.values[index] = v
|
||||
return index
|
||||
}
|
||||
|
||||
// Add a new one
|
||||
index := newIndex()
|
||||
p.values[index] = v
|
||||
p.valueIndexes[hash] = index
|
||||
return index
|
||||
}
|
||||
|
||||
// Len returns the number of elements in the pool.
|
||||
func (p *InternPool[T]) Len() int {
|
||||
return len(p.values) - len(p.availableIndexes) - 1
|
||||
}
|
||||
common/helpers/intern_test.go (new file, 149 lines)
@@ -0,0 +1,149 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package helpers
|
||||
|
||||
import "testing"
|
||||
|
||||
type likeInt int
|
||||
|
||||
func (i likeInt) Equal(j likeInt) bool { return i == j }
|
||||
func (i likeInt) Hash() uint64 { return uint64(i) % 10 }
|
||||
|
||||
func TestPut(t *testing.T) {
|
||||
p := NewInternPool[likeInt]()
|
||||
|
||||
a := p.Put(likeInt(10))
|
||||
b := p.Put(likeInt(10))
|
||||
c := p.Put(likeInt(11))
|
||||
d := p.Put(likeInt(12))
|
||||
|
||||
if a != b {
|
||||
t.Error("got two references for Put(10)")
|
||||
}
|
||||
if a == c || a == d || c == d {
|
||||
t.Error("got same reference for Put(10)/Put(11)/Put(12)")
|
||||
}
|
||||
if p.Get(a) != likeInt(10) {
|
||||
t.Errorf("Get(Put(10)) == %d != 10", p.Get(a))
|
||||
}
|
||||
if p.Get(c) != likeInt(11) {
|
||||
t.Errorf("Get(Put(11)) == %d != 10", p.Get(c))
|
||||
}
|
||||
if p.Get(d) != likeInt(12) {
|
||||
t.Errorf("Get(Put(12)) == %d != 10", p.Get(d))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutCollision(t *testing.T) {
|
||||
p := NewInternPool[likeInt]()
|
||||
|
||||
a := p.Put(likeInt(10))
|
||||
b := p.Put(likeInt(20))
|
||||
c := p.Put(likeInt(11))
|
||||
d := p.Put(likeInt(21))
|
||||
if a == b || a == c || a == d || b == c || b == d || c == d {
|
||||
t.Error("got same reference for two different values")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTake(t *testing.T) {
|
||||
p := NewInternPool[likeInt]()
|
||||
|
||||
val1 := likeInt(10)
|
||||
ref1 := p.Put(val1)
|
||||
val2 := likeInt(10)
|
||||
ref2 := p.Put(val2)
|
||||
val3 := likeInt(12)
|
||||
ref3 := p.Put(val3)
|
||||
val4 := likeInt(22) // collision
|
||||
ref4 := p.Put(val4)
|
||||
val5 := likeInt(32)
|
||||
ref5 := p.Put(val5)
|
||||
|
||||
expectedValues := []internValue[likeInt]{
|
||||
{},
|
||||
{value: 10, refCount: 2},
|
||||
{value: 12, refCount: 1, next: 3},
|
||||
{value: 22, refCount: 1, previous: 2, next: 4},
|
||||
{value: 32, refCount: 1, previous: 3},
|
||||
}
|
||||
if diff := Diff(p.values, expectedValues, DiffUnexported); diff != "" {
|
||||
t.Fatalf("p.values (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
p.Take(ref4)
|
||||
|
||||
expectedValues = []internValue[likeInt]{
|
||||
{},
|
||||
{value: 10, refCount: 2},
|
||||
{value: 12, refCount: 1, next: 4},
|
||||
{value: 22, refCount: 0, previous: 2, next: 4}, // free
|
||||
{value: 32, refCount: 1, previous: 2},
|
||||
}
|
||||
if diff := Diff(p.values, expectedValues, DiffUnexported); diff != "" {
|
||||
t.Fatalf("p.values (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
ref6 := p.Put(likeInt(42))
|
||||
if ref6 != ref4 {
|
||||
t.Fatal("p.Put() did not reuse free slot")
|
||||
}
|
||||
|
||||
expectedValues = []internValue[likeInt]{
|
||||
{},
|
||||
{value: 10, refCount: 2},
|
||||
{value: 12, refCount: 1, next: 4},
|
||||
{value: 42, refCount: 1, previous: 4},
|
||||
{value: 32, refCount: 1, previous: 2, next: 3},
|
||||
}
|
||||
if diff := Diff(p.values, expectedValues, DiffUnexported); diff != "" {
|
||||
t.Fatalf("p.values (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
p.Take(ref3)
|
||||
|
||||
expectedValues = []internValue[likeInt]{
|
||||
{},
|
||||
{value: 10, refCount: 2},
|
||||
{value: 12, refCount: 0, next: 4}, // free
|
||||
{value: 42, refCount: 1, previous: 4},
|
||||
{value: 32, refCount: 1, next: 3},
|
||||
}
|
||||
if diff := Diff(p.values, expectedValues, DiffUnexported); diff != "" {
|
||||
t.Fatalf("p.values (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
p.Take(ref5)
|
||||
|
||||
expectedValues = []internValue[likeInt]{
|
||||
{},
|
||||
{value: 10, refCount: 2},
|
||||
{value: 12, refCount: 0, next: 4}, // free
|
||||
{value: 42, refCount: 1},
|
||||
{value: 32, refCount: 0, next: 3}, // free
|
||||
}
|
||||
if diff := Diff(p.values, expectedValues, DiffUnexported); diff != "" {
|
||||
t.Fatalf("p.values (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
p.Take(ref6)
|
||||
|
||||
expectedValues = []internValue[likeInt]{
|
||||
{},
|
||||
{value: 10, refCount: 2},
|
||||
{value: 12, refCount: 0, next: 4}, // free
|
||||
{value: 42, refCount: 0}, // free
|
||||
{value: 32, refCount: 0, next: 3}, // free
|
||||
}
|
||||
if diff := Diff(p.values, expectedValues, DiffUnexported); diff != "" {
|
||||
t.Fatalf("p.values (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
p.Take(ref1)
|
||||
p.Take(ref2)
|
||||
diff := p.Len()
|
||||
if diff != 0 {
|
||||
t.Fatalf("Take() didn't free everything (%d remaining)", diff)
|
||||
}
|
||||
}
|
||||
@@ -116,6 +116,11 @@ func (r *Reporter) MetricCollector(c prometheus.Collector) {
|
||||
r.metrics.Collector(c)
|
||||
}
|
||||
|
||||
// MetricCollectorForCurrentModule registers a custom collector prefixed by the current module name.
|
||||
func (r *Reporter) MetricCollectorForCurrentModule(c prometheus.Collector) {
|
||||
r.metrics.CollectorForCurrentModule(1, c)
|
||||
}
|
||||
|
||||
// MetricDesc defines a new metric description.
|
||||
func (r *Reporter) MetricDesc(name, help string, variableLabels []string) *MetricDesc {
|
||||
return r.metrics.Desc(1, name, help, variableLabels)
|
||||
|
||||
@@ -113,3 +113,12 @@ func (m *Metrics) Desc(skipCallstack int, name, help string, variableLabels []st
|
||||
func (m *Metrics) Collector(c prometheus.Collector) {
|
||||
m.registry.MustRegister(c)
|
||||
}
|
||||
|
||||
// CollectorForCurrentModule registers a custom collector and prefixes
// everything with the module name.
|
||||
func (m *Metrics) CollectorForCurrentModule(skipCallStack int, c prometheus.Collector) {
|
||||
callStack := stack.Callers()
|
||||
call := callStack[1+skipCallStack] // Should be the same as above !
|
||||
prefix := getPrefix(call.FunctionName())
|
||||
prometheus.WrapRegistererWithPrefix(prefix, m.registry).MustRegister(c)
|
||||
}
|
||||
|
||||
@@ -68,8 +68,10 @@ from [MaxMind](https://dev.maxmind.com/geoip/geolite2-free-geolocation-data).
|
||||
*country* and the *AS number*. It applies rules to add attributes to
|
||||
exporters. Interface rules attach to each interface a *boundary*
|
||||
(external or internal), a *network provider* and a *connectivity
|
||||
type* (PNI, IX, transit). The flow is exported to *Kafka*,
|
||||
serialized using *Protobuf*.
|
||||
type* (PNI, IX, transit). Optionally, it may also receive BGP routes
|
||||
through the BMP protocol to get the *AS number*, the *AS path*, and
|
||||
the communities. The flow is exported to *Kafka*, serialized using
|
||||
*Protobuf*.
|
||||
|
||||
- The **orchestrator service** configures the internal and external
|
||||
components. It creates the *Kafka topic* and configures *ClickHouse*
|
||||
|
||||
@@ -104,8 +104,36 @@ flow:
|
||||
```
|
||||
|
||||
Without configuration, *Akvorado* will listen for incoming
|
||||
Netflow/IPFIX and sFlow flows on a random port (check the logs to know which
|
||||
one).
|
||||
Netflow/IPFIX and sFlow flows on a random port (check the logs to know
|
||||
which one).
|
||||
|
||||
### BMP
|
||||
|
||||
The BMP component handles incoming BMP connections from routers. The
|
||||
information received can be used to fetch source and destination AS
|
||||
numbers, as well as the AS paths and communities. Not all exporters
|
||||
need to send their tables with BMP. *Akvorado* will try to select the
|
||||
best route using the next hop advertised in the flow and fall back to
any next hop if not found.
|
||||
|
||||
The following keys are accepted:
|
||||
|
||||
- `listen` specifies the IP address and port to listen for incoming connections (default port is 10179)
|
||||
- `rds` specifies a list of route distinguishers to accept (0 is meant
  to accept routes without an associated route distinguisher)
|
||||
- `collect-asns` tells if origin AS numbers should be collected
|
||||
- `collect-aspaths` tells if AS paths should be collected
|
||||
- `collect-communities` tells if communities should be collected (both
|
||||
regular communities and large communities; extended communities are
|
||||
not supported)
|
||||
- `keep` tells how long the routes learned over a terminated BMP
  connection should be kept
|
||||
|
||||
If you are not interested in AS paths and communities, disabling them
|
||||
will decrease the memory usage of *Akvorado*.
|
||||
|
||||
*Akvorado* supports receiving the AdjRIB-in, with or without
|
||||
filtering. It may also work with a LocRIB.
|
||||
|
||||
### Kafka
|
||||
|
||||
@@ -165,10 +193,11 @@ The following configuration keys are accepted:
|
||||
one received in the flows. This is useful if a device lies about its
|
||||
sampling rate. This is a map from subnets to sampling rates (but it
|
||||
would also accept a single value).
|
||||
- `asn-providers` defines the source list for AS numbers. The available
|
||||
sources are `flow`, `flow-except-private` (use information from flow
|
||||
except if the ASN is private), and `geoip`. The default value is
|
||||
`flow` and `geoip`.
|
||||
- `asn-providers` defines the source list for AS numbers. The
|
||||
available sources are `flow`, `flow-except-private` (use information
|
||||
from flow except if the ASN is private), `geoip`, `bmp`, and
|
||||
`bmp-except-private`. The default value is `flow`, `bmp`, and
|
||||
`geoip`.
|
||||
|
||||
Classifier rules are written using [expr][].
|
||||
|
||||
@@ -521,8 +550,8 @@ database:
|
||||
## Demo exporter service
|
||||
|
||||
For testing purposes, it is possible to generate flows using the demo
|
||||
exporter service. It features a NetFlow generate and a simple SNMP
|
||||
agent.
|
||||
exporter service. It features a NetFlow generator, a simple SNMP
|
||||
agent and a BMP exporter.
|
||||
|
||||
```yaml
|
||||
snmp:
|
||||
@@ -533,6 +562,12 @@ snmp:
|
||||
20: "core"
|
||||
21: "core"
|
||||
listen: 0.0.0.0:161
|
||||
bmp:
|
||||
target: 127.0.0.1:10179
|
||||
routes:
|
||||
- prefixes: 192.0.2.0/24,2a01:db8:cafe:1::/64
|
||||
aspath: 64501
|
||||
communities: 65401:10,65401:12
|
||||
flows:
|
||||
samplingrate: 50000
|
||||
target: 127.0.0.1:2055
|
||||
@@ -553,10 +588,12 @@ flows:
|
||||
```
|
||||
|
||||
In the `snmp` section, all fields are mandatory. The `interfaces`
|
||||
section maps interface indexes to their descriptions. In the `flows`
|
||||
section, all fields are mandatory. Have a look at the provided
|
||||
`akvorado.yaml` configuration file for a more complete example. As
|
||||
generating many flows is quite verbose, it may be useful to rely on
|
||||
[YAML anchors][] to avoid repeating a lot of stuff.
|
||||
section maps interface indexes to their descriptions. In the `bmp`
section, for each set of prefixes, the `aspath` is mandatory, but the
|
||||
`communities` are optional. In the `flows` section, all fields are
|
||||
mandatory. Have a look at the provided `akvorado.yaml` configuration
|
||||
file for a more complete example. As generating many flows is quite
|
||||
verbose, it may be useful to rely on [YAML anchors][] to avoid
|
||||
repeating a lot of stuff.
|
||||
|
||||
[YAML anchors]: https://www.linode.com/docs/guides/yaml-anchors-aliases-overrides-extensions/
|
||||
|
||||
@@ -77,6 +77,20 @@ control-plane
|
||||
address ipv4 <akvorado-ip>
|
||||
```
|
||||
|
||||
To configure BMP, adapt the following snippet:
|
||||
|
||||
```cisco
|
||||
bmp server 1
|
||||
host <akvorado-ip> port 10179
|
||||
flapping-delay 60
|
||||
bmp server all
|
||||
route-monitoring policy post inbound
|
||||
router bgp 65400
|
||||
vrf public
|
||||
neighbor 192.0.2.100
|
||||
bmp-activate server 1
|
||||
```
|
||||
|
||||
### Juniper
|
||||
|
||||
#### IPFIX
|
||||
@@ -247,6 +261,27 @@ snmp {
|
||||
}
|
||||
```
|
||||
|
||||
#### BMP
|
||||
|
||||
If needed, you can configure BMP on one router to send all AdjRIB-in
|
||||
to Akvorado.
|
||||
|
||||
```junos
|
||||
routing-options {
|
||||
bmp {
|
||||
connection-mode active;
|
||||
station-address 203.0.113.1;
|
||||
station-port 10179;
|
||||
station collector;
|
||||
hold-down 30 flaps 10 period 30;
|
||||
route-monitoring post-policy;
|
||||
monitor enable;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
See [Juniper's documentation](https://www.juniper.net/documentation/us/en/software/junos/bgp/topics/ref/statement/bmp-edit-routing-options.html) for more details.
|
||||
|
||||
### Arista
|
||||
|
||||
#### sFlow
|
||||
|
||||
@@ -259,7 +259,7 @@ SHOW tables
|
||||
```
|
||||
|
||||
You should have a few tables, including `flows`, `flows_1m0s` (and
|
||||
others), and `flows_2_raw`. If one is missing, look at the log in the
|
||||
others), and `flows_3_raw`. If one is missing, look at the log in the
|
||||
orchestrator. This is the component creating the tables.
|
||||
|
||||
To check if ClickHouse is late, use the following SQL query through
|
||||
@@ -280,10 +280,10 @@ from Kafka's point of view:
|
||||
$ kafka-consumer-groups.sh --bootstrap-server kafka:9092 --describe --group clickhouse
|
||||
|
||||
GROUP TOPIC PARTITION CURRENT-OFFSET LOG-END-OFFSET LAG CONSUMER-ID HOST CLIENT-ID
|
||||
clickhouse flows-v2 0 5650351527 5650374314 22787 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-0-77740d0a-79b7-4bef-a501-25a819c3cee4 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-0
|
||||
clickhouse flows-v2 3 3035602619 3035628290 25671 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-3-1e4629b0-69a3-48dd-899a-20f4b16be0a2 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-3
|
||||
clickhouse flows-v2 2 1645914467 1645930257 15790 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-2-79c9bafe-fd36-42fe-921f-a802d46db684 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-2
|
||||
clickhouse flows-v2 1 889117276 889129896 12620 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-1-f0421bbe-ba13-49df-998f-83e49045be00 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_2_raw-1
|
||||
clickhouse flows-v2 0 5650351527 5650374314 22787 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-0-77740d0a-79b7-4bef-a501-25a819c3cee4 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-0
|
||||
clickhouse flows-v2 3 3035602619 3035628290 25671 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-3-1e4629b0-69a3-48dd-899a-20f4b16be0a2 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-3
|
||||
clickhouse flows-v2 2 1645914467 1645930257 15790 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-2-79c9bafe-fd36-42fe-921f-a802d46db684 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-2
|
||||
clickhouse flows-v2 1 889117276 889129896 12620 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-1-f0421bbe-ba13-49df-998f-83e49045be00 /240.0.4.8 ClickHouse-ee97b7e7e5e0-default-flows_3_raw-1
|
||||
```
|
||||
|
||||
If you still have an issue, be sure to check the errors reported by
|
||||
|
||||
@@ -76,6 +76,8 @@ fatal, or rate-limited and accounted into a metric.
|
||||
The CLI (not a component) is handled by
|
||||
[Cobra](https://github.com/spf13/cobra). The configuration file is
|
||||
handled by [mapstructure](https://github.com/mitchellh/mapstructure).
|
||||
Handling backward compatibility is done by registering hooks to
|
||||
transform the configuration.
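
As a rough illustration (the key names and the hook below are hypothetical, not the actual Akvorado hooks), such a compatibility hook can be expressed as a *mapstructure* decode hook that rewrites a deprecated key before the map is decoded into the configuration struct:

```go
package config

import (
	"reflect"

	"github.com/mitchellh/mapstructure"
)

// renameOldKeyHook moves a value from a deprecated key to its new name
// before the map is decoded into the configuration struct.
func renameOldKeyHook() mapstructure.DecodeHookFunc {
	return func(from, to reflect.Type, data interface{}) (interface{}, error) {
		m, ok := data.(map[string]interface{})
		if !ok || to.Kind() != reflect.Struct {
			return data, nil
		}
		if v, present := m["old-name"]; present {
			m["new-name"] = v
			delete(m, "old-name")
		}
		return data, nil
	}
}
```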
|
||||
|
||||
## Flow decoding
|
||||
|
||||
@@ -106,7 +108,9 @@ of the databases in order to update a local cached copy.
|
||||
The Kafka component relies on
|
||||
[Sarama](https://github.com/Shopify/sarama). It is tested using the
|
||||
mock interface provided by this package. *Sarama* uses `go-metrics` to
|
||||
store metrics. We convert them to Prometheus to keep them.
|
||||
store metrics. We convert them to Prometheus to keep them. The logger
is global and there is a hack to plug it into the reporter design
we have.
|
||||
|
||||
If a real broker is available under the DNS name `kafka` or at
|
||||
`localhost` on port 9092, it will be used for a quick functional test.
|
||||
@@ -138,6 +142,24 @@ for a minute to ensure it does not eat up all the workers' resources.
|
||||
Testing is done by another implementation of an [SNMP
|
||||
agent](https://github.com/slayercat/GoSNMPServer).
|
||||
|
||||
## BMP
|
||||
|
||||
The BMP server uses [GoBGP](http://github.com/osrg/gobgp)'s
implementation. GoBGP does not provide a BMP collector, but a BMP
collector is just a simple TCP server receiving BMP messages, and we
use GoBGP to parse them. The data we need is stored in a Patricia tree.
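
As a minimal sketch (not the actual inlet implementation; the handler function and the logging are illustrative), a collector built on GoBGP's packet parsing boils down to splitting the TCP stream into BMP messages and decoding each one:

```go
package main

import (
	"bufio"
	"log"
	"net"

	"github.com/osrg/gobgp/v3/pkg/packet/bmp"
)

// handleConnection reads BMP messages from one exporter and decodes them
// with GoBGP. In Akvorado the decoded routes are then inserted into the RIB.
func handleConnection(conn net.Conn) {
	defer conn.Close()
	scanner := bufio.NewScanner(conn)
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
	scanner.Split(bmp.SplitBMP) // delimit the TCP stream into BMP messages
	for scanner.Scan() {
		msg, err := bmp.ParseBMPMessage(scanner.Bytes())
		if err != nil {
			log.Printf("cannot parse BMP message: %v", err)
			return
		}
		switch body := msg.Body.(type) {
		case *bmp.BMPRouteMonitoring:
			// The BGP update inside carries the routes and their attributes.
			log.Printf("route monitoring from %s: %v",
				msg.PeerHeader.PeerAddress, body.BGPUpdate)
		case *bmp.BMPPeerUpNotification:
			log.Printf("peer up: %s", msg.PeerHeader.PeerAddress)
		case *bmp.BMPPeerDownNotification:
			log.Printf("peer down: %s", msg.PeerHeader.PeerAddress)
		}
	}
}

func main() {
	listener, err := net.Listen("tcp", ":10179")
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go handleConnection(conn)
	}
}
```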
|
||||
|
||||
[github.com/kentik/patricia](https://github.com/kentik/patricia)
|
||||
implements a fast Patricia tree for IP lookup in a tree of subnets. It
|
||||
leverages Go generics to make the code safe. It is used both for
|
||||
configuring subnet-dependent settings (eg SNMP communities) and for
|
||||
storing data received using BMP.
|
||||
|
||||
To save memory, *Akvorado* "interns" next-hops, origin AS, AS paths
and communities. Each unique combination is associated with a
reference-counted 32-bit integer, which is used in the RIB in place of
the original information.
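
For illustration, here is how such an intern pool (from `common/helpers`) can be used; the `routeAttributes` type below is made up for the example and is not the structure actually stored in the RIB:

```go
package main

import (
	"fmt"
	"hash/fnv"

	"akvorado/common/helpers"
)

// routeAttributes is a hypothetical set of attributes to deduplicate.
type routeAttributes struct {
	asn    uint32
	asPath string // flattened for simplicity
}

// Hash and Equal make routeAttributes usable in an intern pool.
func (a routeAttributes) Hash() uint64 {
	h := fnv.New64()
	fmt.Fprintf(h, "%d|%s", a.asn, a.asPath)
	return h.Sum64()
}
func (a routeAttributes) Equal(b routeAttributes) bool { return a == b }

func main() {
	pool := helpers.NewInternPool[routeAttributes]()
	ref1 := pool.Put(routeAttributes{asn: 15169, asPath: "174 15169"})
	ref2 := pool.Put(routeAttributes{asn: 15169, asPath: "174 15169"})
	fmt.Println(ref1 == ref2) // true: identical values share one reference
	fmt.Println(pool.Len())   // 1
	// Each Put() must be balanced by a Take() to release the slot.
	pool.Take(ref1)
	pool.Take(ref2)
	fmt.Println(pool.Len()) // 0
}
```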
|
||||
|
||||
## Web console
|
||||
|
||||
The web console is built as a REST API with a single page application
|
||||
|
||||
@@ -13,6 +13,12 @@ identified with a specific icon:
|
||||
|
||||
## Unreleased
|
||||
|
||||
This release features a BMP collector to grab BGP routes from one or
|
||||
several routers. The routes can be used to determine source and
|
||||
destination AS (instead of using GeoIP or information from the flows)
|
||||
but also the AS path and the communities.
|
||||
|
||||
- ✨ *inlet*: BMP collector to get AS numbers, AS paths, and communities from BGP
|
||||
- ✨ *inlet*: add `inlet.snmp.agents` to override exporter IP address for SNMP queries
|
||||
- 🩹 *inlet*: handle sFlow specific interface number for locally
|
||||
originated/terminated traffic, discarded traffic and traffic sent to
|
||||
|
||||
File diff suppressed because one or more lines are too long
Image updated: size 554 KiB before, 556 KiB after
demoexporter/bmp/client.go (new file, 152 lines)
@@ -0,0 +1,152 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bmp"
|
||||
)
|
||||
|
||||
// startBMPClient starts the BMP client
|
||||
func (c *Component) startBMPClient(ctx context.Context) {
|
||||
var d net.Dialer
|
||||
conn, err := d.DialContext(ctx, "tcp", c.config.Target)
|
||||
if err != nil {
|
||||
c.r.Err(err).Msg("cannot connect to target")
|
||||
c.metrics.errors.WithLabelValues(err.Error()).Inc()
|
||||
return
|
||||
}
|
||||
c.metrics.connections.Inc()
|
||||
defer conn.Close()
|
||||
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
peerHeader := bmp.NewBMPPeerHeader(
|
||||
bmp.BMP_PEER_TYPE_GLOBAL, 0, 0,
|
||||
c.config.PeerIP.Unmap().String(),
|
||||
uint32(c.config.PeerASN),
|
||||
"2.2.2.2",
|
||||
0)
|
||||
pkt, err := bmp.NewBMPInitiation([]bmp.BMPInfoTLVInterface{
|
||||
bmp.NewBMPInfoTLVString(bmp.BMP_INIT_TLV_TYPE_SYS_DESCR, "Fake exporter"),
|
||||
bmp.NewBMPInfoTLVString(bmp.BMP_INIT_TLV_TYPE_SYS_NAME, "fake.example.com"),
|
||||
}).Serialize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
buf.Write(pkt)
|
||||
pkt, err = bmp.NewBMPPeerUpNotification(*peerHeader, c.config.LocalIP.Unmap().String(), 179, 47647,
|
||||
bgp.NewBGPOpenMessage(c.config.LocalASN, 30, "1.1.1.1",
|
||||
[]bgp.OptionParameterInterface{
|
||||
bgp.NewOptionParameterCapability([]bgp.ParameterCapabilityInterface{
|
||||
bgp.NewCapMultiProtocol(bgp.RF_IPv4_UC),
|
||||
bgp.NewCapMultiProtocol(bgp.RF_IPv6_UC),
|
||||
}),
|
||||
},
|
||||
),
|
||||
bgp.NewBGPOpenMessage(c.config.PeerASN, 30, "2.2.2.2",
|
||||
[]bgp.OptionParameterInterface{
|
||||
bgp.NewOptionParameterCapability([]bgp.ParameterCapabilityInterface{
|
||||
bgp.NewCapMultiProtocol(bgp.RF_IPv4_UC),
|
||||
bgp.NewCapMultiProtocol(bgp.RF_IPv6_UC),
|
||||
}),
|
||||
},
|
||||
),
|
||||
).Serialize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
buf.Write(pkt)
|
||||
|
||||
// Send the routes
|
||||
for _, af := range []bgp.RouteFamily{bgp.RF_IPv4_UC, bgp.RF_IPv6_UC} {
|
||||
for _, route := range c.config.Routes {
|
||||
prefixes := []bgp.AddrPrefixInterface{}
|
||||
for _, prefix := range route.Prefixes {
|
||||
if af == bgp.RF_IPv4_UC && prefix.Addr().Is4() {
|
||||
prefixes = append(prefixes,
|
||||
bgp.NewIPAddrPrefix(uint8(prefix.Bits()), prefix.Addr().String()))
|
||||
} else if af == bgp.RF_IPv6_UC && prefix.Addr().Is6() {
|
||||
prefixes = append(prefixes,
|
||||
bgp.NewIPv6AddrPrefix(uint8(prefix.Bits()), prefix.Addr().String()))
|
||||
}
|
||||
}
|
||||
if len(prefixes) == 0 {
|
||||
continue
|
||||
}
|
||||
attrs := []bgp.PathAttributeInterface{
|
||||
// bgp.NewPathAttributeNextHop("192.0.2.20"),
|
||||
bgp.NewPathAttributeOrigin(1),
|
||||
bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{
|
||||
bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, route.ASPath),
|
||||
}),
|
||||
bgp.NewPathAttributeMpReachNLRI("fe80::1", prefixes),
|
||||
}
|
||||
if route.Communities != nil {
|
||||
comms := make([]uint32, len(route.Communities))
|
||||
for idx, comm := range route.Communities {
|
||||
comms[idx] = uint32(comm)
|
||||
}
|
||||
attrs = append(attrs, bgp.NewPathAttributeCommunities(comms))
|
||||
}
|
||||
pkt, err = bmp.NewBMPRouteMonitoring(*peerHeader,
|
||||
bgp.NewBGPUpdateMessage(nil, attrs, nil)).Serialize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
buf.Write(pkt)
|
||||
}
|
||||
}
|
||||
|
||||
// Send the packets on the wire
|
||||
if _, err := conn.Write(buf.Bytes()); err != nil {
|
||||
c.r.Err(err).Msg("cannot write BMP message to target")
|
||||
c.metrics.errors.WithLabelValues(err.Error()).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the connection stays up by sending stats messages
|
||||
// (we cannot read as remote end may have closed the write
|
||||
// side)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
pkt, err := bmp.NewBMPStatisticsReport(*peerHeader, []bmp.BMPStatsTLVInterface{}).
|
||||
Serialize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
buf.Write(pkt)
|
||||
if _, err := conn.Write(buf.Bytes()); err != nil && err != io.EOF && !errors.Is(err, syscall.ECONNRESET) && !errors.Is(err, syscall.EPIPE) {
|
||||
c.r.Err(err).Msg("cannot write to remote")
|
||||
c.metrics.errors.WithLabelValues(err.Error()).Inc()
|
||||
close(done)
|
||||
return
|
||||
} else if err != nil {
|
||||
c.r.Info().Msg("remote closed connection")
|
||||
c.metrics.errors.WithLabelValues("EOF").Inc()
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-time.After(c.config.StatsDelay):
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
return
|
||||
}
|
||||
demoexporter/bmp/client_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp_test
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
gobmp "github.com/osrg/gobgp/v3/pkg/packet/bmp"
|
||||
|
||||
"akvorado/common/daemon"
|
||||
"akvorado/common/helpers"
|
||||
"akvorado/common/reporter"
|
||||
"akvorado/demoexporter/bmp"
|
||||
)
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("Listen() error:\n%+v", err)
|
||||
}
|
||||
defer listener.Close()
|
||||
|
||||
config := bmp.DefaultConfiguration()
|
||||
config.Target = listener.Addr().String()
|
||||
config.RetryAfter = 0
|
||||
config.StatsDelay = 10 * time.Millisecond
|
||||
config.Routes = []bmp.RouteConfiguration{
|
||||
{
|
||||
Prefixes: []netip.Prefix{netip.MustParsePrefix("2001:db8::/64")},
|
||||
ASPath: []uint32{65001, 65002, 65002},
|
||||
Communities: []bmp.Community{500, 600, 700},
|
||||
}, {
|
||||
Prefixes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.0.2.0/24"),
|
||||
netip.MustParsePrefix("203.0.113.0/24"),
|
||||
},
|
||||
ASPath: []uint32{12322, 1299},
|
||||
}, {
|
||||
Prefixes: []netip.Prefix{
|
||||
netip.MustParsePrefix("192.0.2.0/24"),
|
||||
netip.MustParsePrefix("2001:db8::/64"),
|
||||
},
|
||||
ASPath: []uint32{65001, 65002},
|
||||
},
|
||||
}
|
||||
r := reporter.NewMock(t)
|
||||
c, err := bmp.New(r, config, bmp.Dependencies{
|
||||
Daemon: daemon.NewMock(t),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("New() error:\n%+v", err)
|
||||
}
|
||||
helpers.StartStop(t, c)
|
||||
|
||||
// Test we get a reconnect
|
||||
conn, err := listener.Accept()
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
conn.Close()
|
||||
conn, err = listener.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("Accept() error:\n%+v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
got := make([]byte, 5000)
|
||||
n, err := conn.Read(got)
|
||||
if err != nil {
|
||||
t.Fatalf("Read() error:\n%+v", err)
|
||||
}
|
||||
got = got[:n]
|
||||
|
||||
msgs := []*gobmp.BMPMessage{}
|
||||
for {
|
||||
advance, token, err := gobmp.SplitBMP(got, len(got) > 0)
|
||||
if err != nil {
|
||||
t.Fatalf("SplitBMP() error:\n%+v", err)
|
||||
}
|
||||
if token == nil {
|
||||
break
|
||||
}
|
||||
t.Logf("BMP message len: %d", len(token))
|
||||
msg, err := gobmp.ParseBMPMessage(token)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseBMPMessage() error:\n%+v", err)
|
||||
}
|
||||
msgs = append(msgs, msg)
|
||||
got = got[advance:]
|
||||
}
|
||||
|
||||
// Assume we got what we want.
|
||||
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_demoexporter_")
|
||||
expectedMetrics := map[string]string{
|
||||
`bmp_connections_total`: "2",
|
||||
`bmp_errors_total{error="EOF"}`: "1",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Fatalf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
}
|
||||
demoexporter/bmp/config.go (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Configuration describes the configuration for the BMP component. Only one peer is emulated.
|
||||
type Configuration struct {
|
||||
// Target specifies the IP address and port to send generated BMP routes to. Empty if this component is disabled.
|
||||
Target string `validate:"isdefault|hostname_port"`
|
||||
// Routes is the set of routes to announce to the collector using BMP.
|
||||
Routes []RouteConfiguration `validate:"dive"`
|
||||
// LocalASN is the local AS number
|
||||
LocalASN uint16 `validate:"required,min=1"`
|
||||
// PeerASN is the peer AS number
|
||||
PeerASN uint16 `validate:"required,min=1"`
|
||||
// LocalIP is the local IP address.
|
||||
LocalIP netip.Addr `validate:"required"`
|
||||
// PeerIP is the peer IP address.
|
||||
PeerIP netip.Addr `validate:"required"`
|
||||
// RetryAfter tells how much time to wait before retrying
|
||||
RetryAfter time.Duration `validate:"min=0s"`
|
||||
// StatsDelay tells how much time to wait between two BMP stats messages (to check connection liveness)
|
||||
StatsDelay time.Duration `validate:"min=0s"`
|
||||
}
|
||||
|
||||
// RouteConfiguration describes a route to be generated with BMP.
|
||||
type RouteConfiguration struct {
|
||||
// Prefix is the prefix to announce.
|
||||
Prefixes []netip.Prefix `validate:"min=1"`
|
||||
// ASPath is the AS path to associate with the prefix.
|
||||
ASPath []uint32 `validate:"min=1"`
|
||||
// Communities are the set of standard communities to associate with the prefix
|
||||
Communities []Community
|
||||
}
|
||||
|
||||
// DefaultConfiguration represents the default configuration for the BMP component.
|
||||
func DefaultConfiguration() Configuration {
|
||||
return Configuration{
|
||||
LocalASN: 64496,
|
||||
PeerASN: 64497,
|
||||
LocalIP: netip.MustParseAddr("2001:db8::1"),
|
||||
PeerIP: netip.MustParseAddr("2001:db8::2"),
|
||||
RetryAfter: time.Duration(5 * time.Second),
|
||||
StatsDelay: time.Duration(time.Minute),
|
||||
}
|
||||
}
|
||||
|
||||
// Community is a standard community.
|
||||
type Community uint32
|
||||
|
||||
// UnmarshalText parses a standard community.
|
||||
func (comm *Community) UnmarshalText(input []byte) error {
|
||||
text := string(input)
|
||||
elems := strings.Split(text, ":")
|
||||
if len(elems) != 2 {
|
||||
return errors.New("community should be ASN:XX")
|
||||
}
|
||||
asn, err := strconv.ParseUint(elems[0], 10, 16)
|
||||
if err != nil {
|
||||
return errors.New("community should be ASN2:XX")
|
||||
}
|
||||
local, err := strconv.ParseUint(elems[1], 10, 16)
|
||||
if err != nil {
|
||||
return errors.New("community should be ASN:XX2")
|
||||
}
|
||||
*comm = Community((asn << 16) + local)
|
||||
return nil
|
||||
}
|
||||
|
||||
// String turns a community to a string.
|
||||
func (comm Community) String() string {
|
||||
return fmt.Sprintf("%d:%d", comm>>16, comm&0xffff)
|
||||
}
|
||||
demoexporter/bmp/config_test.go (new file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"akvorado/common/helpers"
|
||||
)
|
||||
|
||||
func TestCommunity(t *testing.T) {
|
||||
cases := []struct {
|
||||
Input string
|
||||
Expected Community
|
||||
Error bool
|
||||
}{
|
||||
{"12322:10", 807534602, false},
|
||||
{"0:100", 100, false},
|
||||
{"1:0", 65536, false},
|
||||
{"65536:1", 0, true},
|
||||
{"12322:65536", 0, true},
|
||||
{"kfjgkf", 0, true},
|
||||
{"fdgj:gffg", 0, true},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
var got Community
|
||||
err := got.UnmarshalText([]byte(tc.Input))
|
||||
if err == nil && tc.Error {
|
||||
t.Errorf("UnmarshalText(%q) did not error", tc.Input)
|
||||
} else if err != nil && !tc.Error {
|
||||
t.Errorf("UnmarshalText(%q) error:\n%+v", tc.Input, err)
|
||||
} else if err == nil && got != tc.Expected {
|
||||
t.Errorf("UnmarshalText(%q) == %d, expected %d", tc.Input, got, tc.Expected)
|
||||
} else if err == nil && got.String() != tc.Input {
|
||||
t.Errorf("%q.String() == %s", tc.Input, got.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultConfiguration(t *testing.T) {
|
||||
config := DefaultConfiguration()
|
||||
if err := helpers.Validate.Struct(config); err != nil {
|
||||
t.Fatalf("validate.Struct() error:\n%+v", err)
|
||||
}
|
||||
}
|
||||
demoexporter/bmp/root.go (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
// Package bmp simulates a BMP client
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gopkg.in/tomb.v2"
|
||||
|
||||
"akvorado/common/daemon"
|
||||
"akvorado/common/reporter"
|
||||
)
|
||||
|
||||
// Component represents the BMP component.
|
||||
type Component struct {
|
||||
r *reporter.Reporter
|
||||
d *Dependencies
|
||||
t tomb.Tomb
|
||||
config Configuration
|
||||
|
||||
bmpPort int
|
||||
metrics struct {
|
||||
connections reporter.Counter
|
||||
errors *reporter.CounterVec
|
||||
}
|
||||
}
|
||||
|
||||
// Dependencies define the dependencies of the BMP component.
|
||||
type Dependencies struct {
|
||||
Daemon daemon.Component
|
||||
}
|
||||
|
||||
// New creates a new BMP component.
|
||||
func New(r *reporter.Reporter, config Configuration, dependencies Dependencies) (*Component, error) {
|
||||
c := Component{
|
||||
r: r,
|
||||
d: &dependencies,
|
||||
config: config,
|
||||
}
|
||||
|
||||
c.metrics.connections = c.r.Counter(
|
||||
reporter.CounterOpts{
|
||||
Name: "connections_total",
|
||||
Help: "Number of successful connections to target.",
|
||||
},
|
||||
)
|
||||
c.metrics.errors = c.r.CounterVec(
|
||||
reporter.CounterOpts{
|
||||
Name: "errors_total",
|
||||
Help: "Number of unsuccessful connections to target.",
|
||||
},
|
||||
[]string{"error"},
|
||||
)
|
||||
|
||||
if config.Target != "" {
|
||||
c.d.Daemon.Track(&c.t, "demo-exporter/bmp")
|
||||
}
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// Start starts the BMP component.
|
||||
func (c *Component) Start() error {
|
||||
if c.config.Target != "" {
|
||||
c.r.Info().Msg("starting BMP component")
|
||||
c.t.Go(func() error {
|
||||
for {
|
||||
ctx := c.t.Context(nil)
|
||||
c.startBMPClient(ctx)
|
||||
if !c.t.Alive() {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(c.config.RetryAfter)
|
||||
}
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the BMP component.
|
||||
func (c *Component) Stop() error {
|
||||
if c.config.Target != "" {
|
||||
defer c.r.Info().Msg("BMP component stopped")
|
||||
c.r.Info().Msg("stopping the BMP component")
|
||||
c.t.Kill(nil)
|
||||
return c.t.Wait()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -6,6 +6,7 @@ package demoexporter
|
||||
|
||||
import (
|
||||
"akvorado/common/reporter"
|
||||
"akvorado/demoexporter/bmp"
|
||||
"akvorado/demoexporter/flows"
|
||||
"akvorado/demoexporter/snmp"
|
||||
)
|
||||
@@ -20,6 +21,7 @@ type Component struct {
|
||||
// Dependencies define the dependencies of the demo exporter service.
|
||||
type Dependencies struct {
|
||||
SNMP *snmp.Component
|
||||
BMP *bmp.Component
|
||||
Flows *flows.Component
|
||||
}
|
||||
|
||||
|
||||
go.mod (9 lines changed)
@@ -26,6 +26,7 @@ require (
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/netsampler/goflow2 v1.1.0
|
||||
github.com/oschwald/maxminddb-golang v1.10.0
|
||||
github.com/osrg/gobgp/v3 v3.6.0
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
|
||||
github.com/rs/zerolog v1.28.0
|
||||
@@ -64,7 +65,7 @@ require (
|
||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||
github.com/goccy/go-json v0.9.7 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/glog v0.0.0-20210429001901-424d2337a529 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
@@ -109,7 +110,6 @@ require (
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.4.0 // indirect
|
||||
github.com/ugorji/go/codec v1.2.7 // indirect
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
go.opentelemetry.io/otel v1.9.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.9.0 // indirect
|
||||
@@ -123,3 +123,8 @@ require (
|
||||
modernc.org/memory v1.3.0 // indirect
|
||||
modernc.org/sqlite v1.18.2 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
github.com/benbjohnson/clock => github.com/vincentbernat/go-clock v0.0.0-20220922224448-739bd11b5833
|
||||
github.com/kentik/patricia => github.com/vincentbernat/patricia v0.0.0-20220923091046-b376a1167a94
|
||||
)
|
||||
|
||||
go.sum (19 lines changed)
@@ -57,8 +57,6 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
|
||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/antonmedv/expr v1.9.0 h1:j4HI3NHEdgDnN9p6oI6Ndr0G5QryMY0FNxT4ONrFDGU=
|
||||
github.com/antonmedv/expr v1.9.0/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -83,8 +81,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
|
||||
github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
|
||||
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
|
||||
github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
|
||||
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
|
||||
@@ -150,14 +148,16 @@ github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl
|
||||
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
|
||||
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
|
||||
github.com/goccy/go-json v0.9.7 h1:IcB+Aqpx/iMHu5Yooh7jEzJk1JZ7Pjtmys2ukPr7EeM=
|
||||
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v0.0.0-20210429001901-424d2337a529 h1:2voWjNECnrZRbfwXxHB1/j8wa6xdKn85B5NzgVL/pTU=
|
||||
github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -270,8 +270,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kentik/patricia v1.2.0 h1:WZcp8V8GQhsya0bMZuXktEH/Wz+aBlhiMle4tExkj6M=
github.com/kentik/patricia v1.2.0/go.mod h1:6jY40ESetsbfi04/S12iJlsiS6DYL2B2W+WAcqoDHtw=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
@@ -334,6 +332,8 @@ github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrB
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/oschwald/maxminddb-golang v1.10.0 h1:Xp1u0ZhqkSuopaKmk1WwHtjF0H9Hd9181uj2MQ5Vndg=
github.com/oschwald/maxminddb-golang v1.10.0/go.mod h1:Y2ELenReaLAZ0b400URyGwvYxHV1dLIxBuyOsyYjHK0=
github.com/osrg/gobgp/v3 v3.6.0 h1:mKJoFUt+5RaGujNfus3XELmBGntSeniJJNGfCh/5XVc=
github.com/osrg/gobgp/v3 v3.6.0/go.mod h1:fKQPuk7+4qMiDT5viZTXT/aSEn8yYDkEs5p3NjmU2bw=
github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU=
github.com/paulmach/orb v0.7.1/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
@@ -443,9 +443,12 @@ github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/vincentbernat/go-clock v0.0.0-20220922224448-739bd11b5833 h1:eeHgOFlrGNESR9TF+AJovNWOxH8AdmXWK2nGXKa6RUU=
github.com/vincentbernat/go-clock v0.0.0-20220922224448-739bd11b5833/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/vincentbernat/patricia v0.0.0-20220923091046-b376a1167a94 h1:T7+yyM6300NYIv1kqlXX53d2cjEHpgDt6cFbBYO+upk=
github.com/vincentbernat/patricia v0.0.0-20220923091046-b376a1167a94/go.mod h1:6jY40ESetsbfi04/S12iJlsiS6DYL2B2W+WAcqoDHtw=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
34 inlet/bmp/config.go Normal file
@@ -0,0 +1,34 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package bmp

import "time"

// Configuration describes the configuration for the BMP server.
type Configuration struct {
    // Listen tells on which address the BMP server should listen.
    Listen string `validate:"listen"`
    // RDs lists the RDs to keep. If none are specified, all
    // received routes are processed. 0 matches the absence of RD.
    RDs []RD
    // CollectASNs is true when we want to collect origin AS numbers
    CollectASNs bool
    // CollectASPaths is true when we want to collect AS paths
    CollectASPaths bool
    // CollectCommunities is true when we want to collect communities
    CollectCommunities bool
    // Keep tells how long to keep routes from a BMP client when it goes down
    Keep time.Duration `validate:"min=1s"`
}

// DefaultConfiguration represents the default configuration for the BMP server.
func DefaultConfiguration() Configuration {
    return Configuration{
        Listen:             "0.0.0.0:10179",
        Keep:               5 * time.Minute,
        CollectASNs:        true,
        CollectASPaths:     true,
        CollectCommunities: true,
    }
}
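As an illustration of the options above, a caller could start from DefaultConfiguration() and restrict processing to a couple of route distinguishers. This is a sketch only, not part of this commit: the RD strings and the shorter Keep value are arbitrary examples.

package bmp

import "time"

// exampleConfiguration is an illustrative sketch, not part of this
// commit: the RD strings and the 2-minute Keep are arbitrary examples.
func exampleConfiguration() (Configuration, error) {
    config := DefaultConfiguration()
    config.Keep = 2 * time.Minute // drop routes 2 minutes after a collector disconnect
    var vpn, internet RD
    if err := vpn.UnmarshalText([]byte("65017:101")); err != nil { // ASN:index form
        return config, err
    }
    if err := internet.UnmarshalText([]byte("0")); err != nil { // 0 keeps routes without RD
        return config, err
    }
    config.RDs = []RD{vpn, internet}
    return config, nil
}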
16 inlet/bmp/config_test.go Normal file
@@ -0,0 +1,16 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package bmp

import (
    "testing"

    "akvorado/common/helpers"
)

func TestDefaultConfiguration(t *testing.T) {
    if err := helpers.Validate.Struct(DefaultConfiguration()); err != nil {
        t.Fatalf("validate.Struct() error:\n%+v", err)
    }
}
420 inlet/bmp/events.go Normal file
@@ -0,0 +1,420 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bmp"
|
||||
)
|
||||
|
||||
// peerKey is the key used to identify a peer
|
||||
type peerKey struct {
|
||||
exporter netip.AddrPort // exporter IP + source port
|
||||
ip netip.Addr // peer IP
|
||||
ptype uint8 // peer type
|
||||
distinguisher RD // peer distinguisher
|
||||
asn uint32 // peer ASN
|
||||
bgpID uint32 // peer router ID
|
||||
}
|
||||
|
||||
// peerInfo contains some information attached to a peer.
|
||||
type peerInfo struct {
|
||||
reference uint32 // used as a reference in the RIB
|
||||
staleUntil time.Time // when to remove because it is stale
|
||||
marshallingOptions []*bgp.MarshallingOption // decoding option (add-path mostly)
|
||||
}
|
||||
|
||||
// peerKeyFromBMPPeerHeader computes the peer key from the BMP peer header.
|
||||
func peerKeyFromBMPPeerHeader(exporter netip.AddrPort, header *bmp.BMPPeerHeader) peerKey {
|
||||
peer, _ := netip.AddrFromSlice(header.PeerAddress.To16())
|
||||
return peerKey{
|
||||
exporter: exporter,
|
||||
ip: peer,
|
||||
ptype: header.PeerType,
|
||||
distinguisher: RD(header.PeerDistinguisher),
|
||||
asn: header.PeerAS,
|
||||
bgpID: binary.BigEndian.Uint32(header.PeerBGPID.To4()),
|
||||
}
|
||||
}
|
||||
|
||||
// scheduleStalePeersRemoval schedules the next time a peer should be
|
||||
// removed. This should be called with the lock held.
|
||||
func (c *Component) scheduleStalePeersRemoval() {
|
||||
var next time.Time
|
||||
for _, pinfo := range c.peers {
|
||||
if pinfo.staleUntil.IsZero() {
|
||||
continue
|
||||
}
|
||||
if next.IsZero() || pinfo.staleUntil.Before(next) {
|
||||
next = pinfo.staleUntil
|
||||
}
|
||||
}
|
||||
if next.IsZero() {
|
||||
c.r.Debug().Msg("no stale peer")
|
||||
c.staleTimer.Stop()
|
||||
} else {
|
||||
c.r.Debug().Msgf("next removal for stale peer scheduled on %s", next)
|
||||
c.staleTimer.Reset(c.d.Clock.Until(next))
|
||||
}
|
||||
}
|
||||
|
||||
// removeStalePeers removes the stale peers.
|
||||
func (c *Component) removeStalePeers() {
|
||||
start := c.d.Clock.Now()
|
||||
c.r.Debug().Msg("remove stale peers")
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
defer c.metrics.locked.WithLabelValues("stale").Observe(
|
||||
float64(c.d.Clock.Now().Sub(start).Nanoseconds()) / 1000 / 1000 / 1000)
|
||||
for pkey, pinfo := range c.peers {
|
||||
if pinfo.staleUntil.IsZero() || pinfo.staleUntil.After(start) {
|
||||
continue
|
||||
}
|
||||
c.removePeer(pkey, pinfo)
|
||||
}
|
||||
c.scheduleStalePeersRemoval()
|
||||
}
|
||||
|
||||
func (c *Component) addPeer(pkey peerKey) *peerInfo {
|
||||
c.lastPeerReference++
|
||||
if c.lastPeerReference == 0 {
|
||||
// This is a very unlikely event, but we don't
|
||||
// have anything better. Let's crash (and
|
||||
// hopefully be restarted).
|
||||
c.r.Fatal().Msg("too many peer up events")
|
||||
go c.Stop()
|
||||
}
|
||||
pinfo := &peerInfo{
|
||||
reference: c.lastPeerReference,
|
||||
}
|
||||
c.peers[pkey] = pinfo
|
||||
return pinfo
|
||||
}
|
||||
|
||||
// removePeer removes a peer.
|
||||
func (c *Component) removePeer(pkey peerKey, pinfo *peerInfo) {
|
||||
exporterStr := pkey.exporter.Addr().Unmap().String()
|
||||
peerStr := pkey.ip.Unmap().String()
|
||||
c.r.Info().Msgf("remove peer %s for exporter %s", peerStr, exporterStr)
|
||||
removed := c.rib.flushPeer(pinfo.reference)
|
||||
c.metrics.routes.WithLabelValues(exporterStr).Sub(float64(removed))
|
||||
c.metrics.peers.WithLabelValues(exporterStr).Dec()
|
||||
delete(c.peers, pkey)
|
||||
}
|
||||
|
||||
// markExporterAsStale marks all peers from an exporter as stale.
|
||||
func (c *Component) markExporterAsStale(exporter netip.AddrPort, until time.Time) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
for pkey, pinfo := range c.peers {
|
||||
if pkey.exporter != exporter {
|
||||
continue
|
||||
}
|
||||
pinfo.staleUntil = until
|
||||
}
|
||||
c.scheduleStalePeersRemoval()
|
||||
}
|
||||
|
||||
// handlePeerDownNotification handles a peer-down notification by
|
||||
// marking the peer as stale.
|
||||
func (c *Component) handlePeerDownNotification(pkey peerKey) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
pinfo, ok := c.peers[pkey]
|
||||
if !ok {
|
||||
c.r.Info().Msgf("received peer down from exporter %s for peer %s, but no peer up",
|
||||
pkey.exporter.Addr().Unmap().String(),
|
||||
pkey.ip.Unmap().String())
|
||||
return
|
||||
}
|
||||
c.removePeer(pkey, pinfo)
|
||||
}
|
||||
|
||||
// handleConnectionDown handles a disconnect or a session termination
|
||||
// by marking all associated peers as stale.
|
||||
func (c *Component) handleConnectionDown(exporter netip.AddrPort) {
|
||||
until := c.d.Clock.Now().Add(c.config.Keep)
|
||||
c.markExporterAsStale(exporter, until)
|
||||
}
|
||||
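// The connection-down path above only marks peers as stale; actual
// cleanup is deferred to removeStalePeers via staleTimer. The sketch
// below is not part of this commit: it is written in the style of the
// tests further down, using the NewMock helper and the default
// 5-minute Keep, to walk through that lifecycle.
//
//	func exampleStaleLifecycle(t *testing.T) {
//		r := reporter.NewMock(t)
//		c, mockClock := NewMock(t, r, DefaultConfiguration())
//		helpers.StartStop(t, c)
//
//		exporter := netip.MustParseAddrPort("192.0.2.1:17934")
//		c.handleConnectionUp(exporter)
//		// ...routes learned through handleRouteMonitoring...
//		c.handleConnectionDown(exporter) // peers only marked stale, routes kept
//		mockClock.Add(c.config.Keep)     // Keep elapsed: removeStalePeers flushes them
//	}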
|
||||
// handleConnectionUp handles the connection from a new exporter.
|
||||
func (c *Component) handleConnectionUp(exporter netip.AddrPort) {
|
||||
exporterStr := exporter.Addr().Unmap().String()
|
||||
// Do not set to 0, exporterStr may cover several exporters.
|
||||
c.metrics.peers.WithLabelValues(exporterStr).Add(0)
|
||||
c.metrics.routes.WithLabelValues(exporterStr).Add(0)
|
||||
}
|
||||
|
||||
// handlePeerUpNotification handles a new peer.
|
||||
func (c *Component) handlePeerUpNotification(pkey peerKey, body *bmp.BMPPeerUpNotification) {
|
||||
if body.ReceivedOpenMsg == nil || body.SentOpenMsg == nil {
|
||||
return
|
||||
}
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
exporterStr := pkey.exporter.Addr().Unmap().String()
|
||||
peerStr := pkey.ip.Unmap().String()
|
||||
pinfo, ok := c.peers[pkey]
|
||||
if ok {
|
||||
c.r.Info().Msgf("received extra peer up from exporter %s for peer %s",
|
||||
exporterStr, peerStr)
|
||||
} else {
|
||||
// Peer does not exist at all
|
||||
c.metrics.peers.WithLabelValues(exporterStr).Inc()
|
||||
pinfo = c.addPeer(pkey)
|
||||
}
|
||||
|
||||
// Check for ADD-PATH support.
|
||||
receivedAddPath := map[bgp.RouteFamily]bgp.BGPAddPathMode{}
|
||||
received, _ := body.ReceivedOpenMsg.Body.(*bgp.BGPOpen)
|
||||
for _, param := range received.OptParams {
|
||||
switch param := param.(type) {
|
||||
case *bgp.OptionParameterCapability:
|
||||
for _, cap := range param.Capability {
|
||||
switch cap := cap.(type) {
|
||||
case *bgp.CapAddPath:
|
||||
for _, tuple := range cap.Tuples {
|
||||
receivedAddPath[tuple.RouteFamily] = tuple.Mode
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sent, _ := body.SentOpenMsg.Body.(*bgp.BGPOpen)
|
||||
addPathOption := map[bgp.RouteFamily]bgp.BGPAddPathMode{}
|
||||
for _, param := range sent.OptParams {
|
||||
switch param := param.(type) {
|
||||
case *bgp.OptionParameterCapability:
|
||||
for _, cap := range param.Capability {
|
||||
switch cap := cap.(type) {
|
||||
case *bgp.CapAddPath:
|
||||
for _, sent := range cap.Tuples {
|
||||
receivedMode := receivedAddPath[sent.RouteFamily]
|
||||
if receivedMode == bgp.BGP_ADD_PATH_BOTH || receivedMode == bgp.BGP_ADD_PATH_SEND {
|
||||
if sent.Mode == bgp.BGP_ADD_PATH_BOTH || sent.Mode == bgp.BGP_ADD_PATH_RECEIVE {
|
||||
// We have at least the receive mode. We only do decoding.
|
||||
addPathOption[sent.RouteFamily] = bgp.BGP_ADD_PATH_RECEIVE
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
pinfo.marshallingOptions = []*bgp.MarshallingOption{{AddPath: addPathOption}}
|
||||
|
||||
c.r.Debug().
|
||||
Str("addpath", fmt.Sprintf("%s", addPathOption)).
|
||||
Msgf("new peer %s from exporter %s", peerStr, exporterStr)
|
||||
}
|
||||
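// The capability walk above reduces to a simple rule: path identifiers
// are decoded for a family only when the monitored peer may send them
// (SEND or BOTH in its OPEN) and the local router agreed to receive
// them (RECEIVE or BOTH in the sent OPEN). A standalone sketch of that
// rule, not part of this commit:
//
//	func addPathDecodeEnabled(received, sent bgp.BGPAddPathMode) bool {
//		peerMaySend := received == bgp.BGP_ADD_PATH_SEND || received == bgp.BGP_ADD_PATH_BOTH
//		localMayReceive := sent == bgp.BGP_ADD_PATH_RECEIVE || sent == bgp.BGP_ADD_PATH_BOTH
//		return peerMaySend && localMayReceive
//	}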
|
||||
func (c *Component) handleRouteMonitoring(pkey peerKey, body *bmp.BMPRouteMonitoring) {
|
||||
// We expect to have a BGP update message
|
||||
if body.BGPUpdate == nil || body.BGPUpdate.Body == nil {
|
||||
return
|
||||
}
|
||||
update, ok := body.BGPUpdate.Body.(*bgp.BGPUpdate)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Ignore this peer if this is a L3VPN and it does not have
|
||||
// the right RD.
|
||||
if pkey.ptype == bmp.BMP_PEER_TYPE_L3VPN && !c.isAcceptedRD(pkey.distinguisher) {
|
||||
return
|
||||
}
|
||||
|
||||
exporterStr := pkey.exporter.Addr().Unmap().String()
|
||||
peerStr := pkey.ip.Unmap().String()
|
||||
pinfo, ok := c.peers[pkey]
|
||||
if !ok {
|
||||
// We may have missed the peer up notification?
|
||||
c.r.Info().Msgf("received route monitoring from exporter %s for peer %s, but no peer up",
|
||||
exporterStr, peerStr)
|
||||
c.metrics.peers.WithLabelValues(exporterStr).Inc()
|
||||
pinfo = c.addPeer(pkey)
|
||||
}
|
||||
|
||||
var nh netip.Addr
|
||||
var rta routeAttributes
|
||||
for _, attr := range update.PathAttributes {
|
||||
switch attr := attr.(type) {
|
||||
case *bgp.PathAttributeNextHop:
|
||||
nh, _ = netip.AddrFromSlice(attr.Value.To16())
|
||||
case *bgp.PathAttributeAsPath:
|
||||
if c.config.CollectASNs || c.config.CollectASPaths {
|
||||
rta.asPath = asPathFlat(attr)
|
||||
}
|
||||
case *bgp.PathAttributeCommunities:
|
||||
if c.config.CollectCommunities {
|
||||
rta.communities = attr.Value
|
||||
}
|
||||
case *bgp.PathAttributeLargeCommunities:
|
||||
if c.config.CollectCommunities {
|
||||
rta.largeCommunities = make([]bgp.LargeCommunity, len(attr.Values))
|
||||
for idx, c := range attr.Values {
|
||||
rta.largeCommunities[idx] = *c
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// If no AS path, consider the peer AS as the origin AS,
|
||||
// otherwise the last AS.
|
||||
if c.config.CollectASNs {
|
||||
if path := rta.asPath; len(path) == 0 {
|
||||
rta.asn = pkey.asn
|
||||
} else {
|
||||
rta.asn = path[len(path)-1]
|
||||
}
|
||||
}
|
||||
if !c.config.CollectASPaths {
|
||||
rta.asPath = rta.asPath[:0]
|
||||
}
|
||||
|
||||
added := 0
|
||||
removed := 0
|
||||
|
||||
// Regular NLRI and withdrawn routes
|
||||
if pkey.ptype == bmp.BMP_PEER_TYPE_L3VPN || c.isAcceptedRD(0) {
|
||||
for _, ipprefix := range update.NLRI {
|
||||
prefix := ipprefix.Prefix
|
||||
plen := int(ipprefix.Length)
|
||||
if prefix.To4() != nil {
|
||||
prefix = prefix.To16()
|
||||
plen += 96
|
||||
}
|
||||
p, _ := netip.AddrFromSlice(prefix)
|
||||
added += c.rib.addPrefix(p, plen, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{
|
||||
family: bgp.RF_IPv4_UC,
|
||||
path: ipprefix.PathIdentifier(),
|
||||
rd: pkey.distinguisher,
|
||||
},
|
||||
nextHop: c.rib.nextHops.Put(nextHop(nh)),
|
||||
attributes: c.rib.rtas.Put(rta),
|
||||
})
|
||||
}
|
||||
for _, ipprefix := range update.WithdrawnRoutes {
|
||||
prefix := ipprefix.Prefix
|
||||
plen := int(ipprefix.Length)
|
||||
if prefix.To4() != nil {
|
||||
prefix = prefix.To16()
|
||||
plen += 96
|
||||
}
|
||||
p, _ := netip.AddrFromSlice(prefix)
|
||||
removed += c.rib.removePrefix(p, plen, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{
|
||||
family: bgp.RF_IPv4_UC,
|
||||
path: ipprefix.PathIdentifier(),
|
||||
rd: pkey.distinguisher,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// MP reach and unreach NLRI
|
||||
for _, attr := range update.PathAttributes {
|
||||
var p netip.Addr
|
||||
var plen int
|
||||
var rd RD
|
||||
var ipprefixes []bgp.AddrPrefixInterface
|
||||
switch attr := attr.(type) {
|
||||
case *bgp.PathAttributeMpReachNLRI:
|
||||
nh, _ = netip.AddrFromSlice(attr.Nexthop.To16())
|
||||
ipprefixes = attr.Value
|
||||
case *bgp.PathAttributeMpUnreachNLRI:
|
||||
ipprefixes = attr.Value
|
||||
}
|
||||
for _, ipprefix := range ipprefixes {
|
||||
switch ipprefix := ipprefix.(type) {
|
||||
case *bgp.IPAddrPrefix:
|
||||
p, _ = netip.AddrFromSlice(ipprefix.Prefix.To16())
|
||||
plen = int(ipprefix.Length + 96)
|
||||
rd = pkey.distinguisher
|
||||
case *bgp.IPv6AddrPrefix:
|
||||
p, _ = netip.AddrFromSlice(ipprefix.Prefix.To16())
|
||||
plen = int(ipprefix.Length)
|
||||
rd = pkey.distinguisher
|
||||
case *bgp.LabeledIPAddrPrefix:
|
||||
p, _ = netip.AddrFromSlice(ipprefix.Prefix.To16())
|
||||
plen = int(ipprefix.IPPrefixLen() + 96)
|
||||
rd = pkey.distinguisher
|
||||
case *bgp.LabeledIPv6AddrPrefix:
|
||||
p, _ = netip.AddrFromSlice(ipprefix.Prefix.To16())
|
||||
plen = int(ipprefix.IPPrefixLen())
|
||||
rd = pkey.distinguisher
|
||||
case *bgp.LabeledVPNIPAddrPrefix:
|
||||
p, _ = netip.AddrFromSlice(ipprefix.Prefix.To16())
|
||||
plen = int(ipprefix.IPPrefixLen() + 96)
|
||||
rd = RDFromRouteDistinguisherInterface(ipprefix.RD)
|
||||
case *bgp.LabeledVPNIPv6AddrPrefix:
|
||||
p, _ = netip.AddrFromSlice(ipprefix.Prefix.To16())
|
||||
plen = int(ipprefix.IPPrefixLen())
|
||||
rd = RDFromRouteDistinguisherInterface(ipprefix.RD)
|
||||
case *bgp.EVPNNLRI:
|
||||
switch route := ipprefix.RouteTypeData.(type) {
|
||||
case *bgp.EVPNIPPrefixRoute:
|
||||
prefix := route.IPPrefix
|
||||
plen = int(route.IPPrefixLength)
|
||||
if prefix.To4() != nil {
|
||||
prefix = prefix.To16()
|
||||
plen += 96
|
||||
}
|
||||
p, _ = netip.AddrFromSlice(prefix.To16())
|
||||
rd = RDFromRouteDistinguisherInterface(route.RD)
|
||||
}
|
||||
default:
|
||||
c.metrics.ignoredNlri.WithLabelValues(exporterStr,
|
||||
bgp.AfiSafiToRouteFamily(ipprefix.AFI(), ipprefix.SAFI()).String()).Inc()
|
||||
continue
|
||||
}
|
||||
if pkey.ptype != bmp.BMP_PEER_TYPE_L3VPN && !c.isAcceptedRD(rd) {
|
||||
continue
|
||||
}
|
||||
switch attr.(type) {
|
||||
case *bgp.PathAttributeMpReachNLRI:
|
||||
added += c.rib.addPrefix(p, plen, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{
|
||||
family: bgp.AfiSafiToRouteFamily(ipprefix.AFI(), ipprefix.SAFI()),
|
||||
rd: rd,
|
||||
path: ipprefix.PathIdentifier(),
|
||||
},
|
||||
nextHop: c.rib.nextHops.Put(nextHop(nh)),
|
||||
attributes: c.rib.rtas.Put(rta),
|
||||
})
|
||||
case *bgp.PathAttributeMpUnreachNLRI:
|
||||
removed += c.rib.removePrefix(p, plen, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{
|
||||
family: bgp.AfiSafiToRouteFamily(ipprefix.AFI(), ipprefix.SAFI()),
|
||||
rd: rd,
|
||||
path: ipprefix.PathIdentifier(),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.metrics.routes.WithLabelValues(exporterStr).Add(float64(added - removed))
|
||||
}
|
||||
|
||||
func (c *Component) isAcceptedRD(rd RD) bool {
|
||||
if len(c.acceptedRDs) == 0 {
|
||||
return true
|
||||
}
|
||||
_, ok := c.acceptedRDs[uint64(rd)]
|
||||
return ok
|
||||
}
|
||||
26 inlet/bmp/hash.go Normal file
@@ -0,0 +1,26 @@
// SPDX-FileCopyrightText: 2019 The Go Authors
// SPDX-License-Identifier: BSD-3-Clause
// SPDX-FileComment: This is an excerpt from src/hash/maphash/maphash.go

package bmp

import "unsafe"

//go:linkname memhash runtime.memhash
//go:noescape
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr

func rthash(ptr *byte, len int, seed uint64) uint64 {
    if len == 0 {
        return seed
    }
    // The runtime hasher only works on uintptr. For 64-bit
    // architectures, we use the hasher directly. Otherwise,
    // we use two parallel hashers on the lower and upper 32 bits.
    if unsafe.Sizeof(uintptr(0)) == 8 {
        return uint64(memhash(unsafe.Pointer(ptr), uintptr(seed), uintptr(len)))
    }
    lo := memhash(unsafe.Pointer(ptr), uintptr(seed), uintptr(len))
    hi := memhash(unsafe.Pointer(ptr), uintptr(seed>>32), uintptr(len))
    return uint64(hi)<<32 | uint64(lo)
}
67 inlet/bmp/lookup.go Normal file
@@ -0,0 +1,67 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package bmp

import (
    "net"
    "net/netip"

    "github.com/kentik/patricia"
    "github.com/osrg/gobgp/v3/pkg/packet/bgp"
)

// LookupResult is the result of the Lookup() function.
type LookupResult struct {
    ASN              uint32
    ASPath           []uint32
    Communities      []uint32
    LargeCommunities []bgp.LargeCommunity
}

// Lookup looks up a route for the provided IP address, favoring the
// provided next hop when there is one. This is somewhat approximate because
// we use the best route we have, while the exporter may not have this
// best route available. The returned result should not be modified!
func (c *Component) Lookup(addrIP net.IP, nextHopIP net.IP) LookupResult {
    if !c.config.CollectASNs && !c.config.CollectASPaths && !c.config.CollectCommunities {
        return LookupResult{}
    }
    ip, _ := netip.AddrFromSlice(addrIP.To16())
    nh, _ := netip.AddrFromSlice(nextHopIP.To16())
    v6 := patricia.NewIPv6Address(ip.AsSlice(), 128)

    c.mu.RLock()
    defer c.mu.RUnlock()

    bestFound := false
    found := false
    _, routes := c.rib.tree.FindDeepestTagsWithFilter(v6, func(route route) bool {
        if bestFound {
            // We already have the best route, skip remaining routes
            return false
        }
        if c.rib.nextHops.Get(route.nextHop) == nextHop(nh) {
            // Exact match found, use it and don't search further
            bestFound = true
            return true
        }
        // If we don't have a match already, use this one.
        if !found {
            found = true
            return true
        }
        // Otherwise, skip it
        return false
    })
    if len(routes) == 0 {
        return LookupResult{}
    }
    attributes := c.rib.rtas.Get(routes[len(routes)-1].attributes)
    return LookupResult{
        ASN:              attributes.asn,
        ASPath:           attributes.asPath,
        Communities:      attributes.communities,
        LargeCommunities: attributes.largeCommunities,
    }
}
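To show how a caller could consume this API, here is a minimal sketch, not part of this commit: the addresses are illustrative and c is an already-started *bmp.Component.

package main

import (
    "fmt"
    "net"

    "akvorado/inlet/bmp"
)

// printRoute is an illustrative sketch of flow enrichment through Lookup().
func printRoute(c *bmp.Component) {
    result := c.Lookup(
        net.ParseIP("198.51.100.10"), // flow destination
        net.ParseIP("192.0.2.7"),     // next hop seen by the exporter, used to break ties
    )
    fmt.Printf("AS%d via %v communities=%v\n", result.ASN, result.ASPath, result.Communities)
}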
86 inlet/bmp/metrics.go Normal file
@@ -0,0 +1,86 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package bmp

import "akvorado/common/reporter"

type metrics struct {
    openedConnections *reporter.CounterVec
    closedConnections *reporter.CounterVec
    peers             *reporter.GaugeVec
    routes            *reporter.GaugeVec
    ignoredNlri       *reporter.CounterVec
    messages          *reporter.CounterVec
    errors            *reporter.CounterVec
    panics            *reporter.CounterVec
    locked            *reporter.SummaryVec
}

// initMetrics initializes the metrics for the BMP component.
func (c *Component) initMetrics() {
    c.metrics.openedConnections = c.r.CounterVec(
        reporter.CounterOpts{
            Name: "opened_connections_total",
            Help: "Number of opened connections.",
        },
        []string{"exporter"},
    )
    c.metrics.closedConnections = c.r.CounterVec(
        reporter.CounterOpts{
            Name: "closed_connections_total",
            Help: "Number of closed connections.",
        },
        []string{"exporter"},
    )
    c.metrics.peers = c.r.GaugeVec(
        reporter.GaugeOpts{
            Name: "peers_total",
            Help: "Number of peers up.",
        },
        []string{"exporter"},
    )
    c.metrics.routes = c.r.GaugeVec(
        reporter.GaugeOpts{
            Name: "routes_total",
            Help: "Number of routes up.",
        },
        []string{"exporter"},
    )
    c.metrics.ignoredNlri = c.r.CounterVec(
        reporter.CounterOpts{
            Name: "ignored_nlri_total",
            Help: "Number of ignored MP NLRI received.",
        },
        []string{"exporter", "type"},
    )
    c.metrics.messages = c.r.CounterVec(
        reporter.CounterOpts{
            Name: "messages_received_total",
            Help: "Number of BMP messages received.",
        },
        []string{"exporter", "type"},
    )
    c.metrics.errors = c.r.CounterVec(
        reporter.CounterOpts{
            Name: "errors_total",
            Help: "Number of errors while processing BMP messages.",
        },
        []string{"exporter", "error"},
    )
    c.metrics.panics = c.r.CounterVec(
        reporter.CounterOpts{
            Name: "panics_total",
            Help: "Number of fatal errors while processing BMP messages.",
        },
        []string{"exporter"},
    )
    c.metrics.locked = c.r.SummaryVec(
        reporter.SummaryOpts{
            Name:       "locked_duration_seconds",
            Help:       "Duration during which the RIB is locked.",
            Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
        },
        []string{"reason"},
    )
}
127 inlet/bmp/rd.go Normal file
@@ -0,0 +1,127 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package bmp

import (
    "encoding/binary"
    "errors"
    "fmt"
    "net"
    "strconv"
    "strings"

    "github.com/osrg/gobgp/v3/pkg/packet/bgp"
)

// RD defines a route distinguisher.
type RD uint64

// UnmarshalText parses a route distinguisher.
func (rd *RD) UnmarshalText(input []byte) error {
    // We can have several formats:
    // 1. 2-byte ASN : index
    // 2. IPv4 address : index
    // 3. 4-byte ASN : index (4-byte can be in asdot format)
    // We also accept a specific type with type : X : index or just a uint64
    text := string(input)
    elems := strings.Split(text, ":")
    typ := -1
    switch len(elems) {
    case 1:
        result, err := strconv.ParseUint(text, 10, 64)
        if err != nil {
            return errors.New("cannot parse RD as a 64-bit number")
        }
        *rd = RD(result)
        return nil
    case 3:
        var err error
        typ, err = strconv.Atoi(elems[0])
        if err != nil || typ < 0 || typ > 2 {
            return errors.New("cannot parse RD type")
        }
        elems = elems[1:]
        fallthrough
    case 2:
        if typ == 1 || (typ == -1 && strings.Count(elems[0], ".") > 0) {
            // IPv4 : index
            ip := net.ParseIP(elems[0])
            if ip == nil || ip.To4() == nil {
                return errors.New("cannot parse RD as IPv4 address + index")
            }
            index, err := strconv.ParseUint(elems[1], 10, 16)
            if err != nil {
                return errors.New("cannot parse RD as IPv4 address + index")
            }
            *rd = RD((1 << 48) + // Type
                (uint64(binary.BigEndian.Uint32(ip.To4())) << 16) +
                index)
            return nil
        }
        asn, err := strconv.ParseUint(elems[0], 10, 32)
        if err != nil {
            return errors.New("cannot parse RD as ASN + index")
        }
        index, err := strconv.ParseUint(elems[1], 10, 32)
        if err != nil {
            return errors.New("cannot parse RD as ASN + index")
        }
        if typ == 0 && asn > 65535 {
            return errors.New("cannot parse RD as ASN2 + index")
        } else if asn <= 65535 && typ != 2 {
            *rd = RD((0 << 48) + // Type
                (asn << 32) +
                index)
            return nil
        } else if index > 65535 {
            return errors.New("cannot parse RD as ASN4 + index")
        }
        *rd = RD((2 << 48) + // Type
            (asn << 16) +
            index)
        return nil
    default:
        return errors.New("cannot parse RD")
    }
}

// MarshalText turns a route distinguisher into a textual representation.
func (rd RD) MarshalText() ([]byte, error) {
    return []byte(rd.String()), nil
}

// String turns a route distinguisher into a textual representation.
func (rd RD) String() string {
    typ := uint64(rd) >> 48
    remaining := uint64(rd) & 0xffffffffffff
    switch typ {
    case 0:
        return fmt.Sprintf("%d:%d", (remaining>>32)&0xffff, remaining&0xffffffff)
    case 1:
        return fmt.Sprintf("%d.%d.%d.%d:%d",
            (remaining>>40)&0xff,
            (remaining>>32)&0xff,
            (remaining>>24)&0xff,
            (remaining>>16)&0xff,
            remaining&0xffff)
    case 2:
        asn := (remaining >> 16) & 0xffffffff
        if asn <= 65535 {
            return fmt.Sprintf("2:%d:%d", asn, remaining&0xffff)
        }
        return fmt.Sprintf("%d:%d", asn, remaining&0xffff)
    }
    return ""
}

const errorRD = RD(65535 << 48)

// RDFromRouteDistinguisherInterface converts a RD from GoBGP to our representation.
func RDFromRouteDistinguisherInterface(input bgp.RouteDistinguisherInterface) RD {
    buf, err := input.Serialize()
    if err != nil || len(buf) != 8 {
        return errorRD
    }
    return RD(binary.BigEndian.Uint64(buf))
}
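The three textual formats all map onto the canonical 8-byte encoding (2-byte type followed by a 6-byte value) stored in the uint64. A few worked examples of what UnmarshalText produces; each constant matches an entry of the test table below.

// Worked examples of the 8-byte RD encoding (2-byte type + 6-byte value).
const (
    rdASN2 = (0 << 48) + (65535 << 32) + 0      // "65535:0"   -> 281470681743360
    rdIPv4 = (1 << 48) + (0x01010101 << 16) + 0 // "1.1.1.1:0" -> 282578800148480
    rdASN4 = (2 << 48) + (65536 << 16) + 3      // "65536:3"   -> 562954248388611
)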
82 inlet/bmp/rd_test.go Normal file
@@ -0,0 +1,82 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package bmp_test

import (
    "testing"

    "akvorado/common/helpers"
    "akvorado/inlet/bmp"

    "github.com/osrg/gobgp/v3/pkg/packet/bgp"
)

func TestParseRouteDistinguisher(t *testing.T) {
    cases := []struct {
        RD          string
        Expected    uint64
        CanonicalRD string // empty when same as RD
        Error       bool
    }{
        {"0", 0, "0:0", false},
        {"100", 100, "0:100", false},
        {"51324:65201", 220434901565105, "", false},
        {"51324:65536", 220434901565440, "", false},
        {"65535:0", 281470681743360, "", false},
        {"0:65535:0", 281470681743360, "65535:0", false},
        {"65536:0", 562954248388608, "", false},
        {"65536:3", 562954248388611, "", false},
        {"2:65535:0", 562954248323072, "", false},
        {"1.1.1.1:0", 282578800148480, "", false},
        {"1:1.1.1.1:0", 282578800148480, "1.1.1.1:0", false},
        {"1:1.1.1.1:0", 282578800148480, "1.1.1.1:0", false},

        {RD: "gfjkgjkf", Error: true},
        {RD: "18446744073709551616", Error: true},
        {RD: "65536:65536", Error: true},
        {RD: "0:65536:0", Error: true},
        {RD: "2:65536:65536", Error: true},
        {RD: "1:1897:0", Error: true},
        {RD: "2:1897:65536", Error: true},
        {RD: "2:1.1.1.1:0", Error: true},
        {RD: "0:1.1.1.1:0", Error: true},
    }
    for _, tc := range cases {
        if tc.CanonicalRD == "" {
            tc.CanonicalRD = tc.RD
        }
        var got bmp.RD
        err := got.UnmarshalText([]byte(tc.RD))
        if err != nil && !tc.Error {
            t.Errorf("UnmarshalText(%q) error:\n%+v", tc.RD, err)
        } else if err == nil && tc.Error {
            t.Errorf("UnmarshalText(%q) no error", tc.RD)
        } else if err != nil && tc.Error {
            continue
        } else if diff := helpers.Diff(uint64(got), tc.Expected); diff != "" {
            t.Errorf("UnmarshalText(%q) (-got, +want):\n%s", tc.RD, diff)
        } else if diff := helpers.Diff(got.String(), tc.CanonicalRD); diff != "" {
            t.Errorf("UnmarshalText(%q) (-got, +want):\n%s", tc.RD, diff)
        }
    }
}

func TestRDFromRouteDistinguisherInterface(t *testing.T) {
    cases := []struct {
        input    bgp.RouteDistinguisherInterface
        expected string
    }{
        {bgp.NewRouteDistinguisherFourOctetAS(100, 200), "2:100:200"},
        {bgp.NewRouteDistinguisherFourOctetAS(66000, 200), "66000:200"},
        {bgp.NewRouteDistinguisherTwoOctetAS(120, 200), "120:200"},
        {bgp.NewRouteDistinguisherIPAddressAS("2.2.2.2", 30), "2.2.2.2:30"},
    }
    for _, tc := range cases {
        got := bmp.RDFromRouteDistinguisherInterface(tc.input).String()
        if got != tc.expected {
            t.Errorf("RDFromRouteDistinguisherInterface(%q) == %q != %q",
                tc.input.String(), got, tc.expected)
        }
    }
}
20 inlet/bmp/release.go Normal file
@@ -0,0 +1,20 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

//go:build release

package bmp

import (
    "math/rand"
    "time"
)

const rtaHashMask = 0xffffffffffffffff

var rtaHashSeed uint64

func init() {
    rand.Seed(time.Now().UnixMicro())
    rtaHashSeed = rand.Uint64()
}
176 inlet/bmp/rib.go Normal file
@@ -0,0 +1,176 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

package bmp

import (
    "net/netip"
    "unsafe"

    "akvorado/common/helpers"

    "github.com/kentik/patricia"
    tree "github.com/kentik/patricia/generics_tree"
    "github.com/osrg/gobgp/v3/pkg/packet/bgp"
)

// rib represents the RIB.
type rib struct {
    tree     *tree.TreeV6[route]
    nextHops *helpers.InternPool[nextHop]
    rtas     *helpers.InternPool[routeAttributes]
}

// route contains the peer (external opaque value), the NLRI, the next
// hop and route attributes. The primary key is prefix (implied), peer
// and nlri.
type route struct {
    peer       uint32
    nlri       nlri
    nextHop    helpers.InternReference[nextHop]
    attributes helpers.InternReference[routeAttributes]
}

// nlri is the NLRI for the route (when combined with prefix). The
// route family is included as we may normalize NLRI across AFI/SAFI.
type nlri struct {
    family bgp.RouteFamily
    path   uint32
    rd     RD
}

// nextHop is just an IP address.
type nextHop netip.Addr

// Hash returns a hash for the next hop.
func (nh nextHop) Hash() uint64 {
    ip := netip.Addr(nh).As16()
    state := rtaHashSeed
    return rthash((*byte)(unsafe.Pointer(&ip[0])), 16, state)
}

// Equal tells if two next hops are equal.
func (nh nextHop) Equal(nh2 nextHop) bool {
    return nh == nh2
}

// routeAttributes is a set of route attributes.
type routeAttributes struct {
    asn         uint32
    asPath      []uint32
    communities []uint32
    // extendedCommunities []uint64
    largeCommunities []bgp.LargeCommunity
}

// Hash returns a hash for route attributes. This may seem like black
// magic, but this is important for performance.
func (rta routeAttributes) Hash() uint64 {
    state := rtaHashSeed
    state = rthash((*byte)(unsafe.Pointer(&rta.asn)), 4, state)
    if len(rta.asPath) > 0 {
        state = rthash((*byte)(unsafe.Pointer(&rta.asPath[0])), len(rta.asPath)*4, state)
    }
    if len(rta.communities) > 0 {
        state = rthash((*byte)(unsafe.Pointer(&rta.communities[0])), len(rta.communities)*4, state)
    }
    if len(rta.largeCommunities) > 0 {
        // There is a test to check that this computation is
        // correct (the struct is 12-byte aligned, not
        // 16-byte).
        state = rthash((*byte)(unsafe.Pointer(&rta.largeCommunities[0])), len(rta.largeCommunities)*12, state)
    }
    return state & rtaHashMask
}

// Equal tells if two route attributes are equal.
func (rta routeAttributes) Equal(orta routeAttributes) bool {
    if rta.asn != orta.asn {
        return false
    }
    if len(rta.asPath) != len(orta.asPath) {
        return false
    }
    if len(rta.communities) != len(orta.communities) {
        return false
    }
    if len(rta.largeCommunities) != len(orta.largeCommunities) {
        return false
    }
    for idx := range rta.asPath {
        if rta.asPath[idx] != orta.asPath[idx] {
            return false
        }
    }
    for idx := range rta.communities {
        if rta.communities[idx] != orta.communities[idx] {
            return false
        }
    }
    for idx := range rta.largeCommunities {
        if rta.largeCommunities[idx] != orta.largeCommunities[idx] {
            return false
        }
    }
    return true
}

// addPrefix adds a new route to the RIB. It returns the number of routes really added.
func (r *rib) addPrefix(ip netip.Addr, bits int, new route) int {
    v6 := patricia.NewIPv6Address(ip.AsSlice(), uint(bits))
    added, _ := r.tree.AddOrUpdate(v6, new,
        func(r1, r2 route) bool {
            return r1.peer == r2.peer && r1.nlri == r2.nlri
        }, func(old route) route {
            r.nextHops.Take(old.nextHop)
            r.rtas.Take(old.attributes)
            return new
        })
    if !added {
        return 0
    }
    return 1
}

// removePrefix removes a route from the RIB. It returns the number of routes really removed.
func (r *rib) removePrefix(ip netip.Addr, bits int, old route) int {
    v6 := patricia.NewIPv6Address(ip.AsSlice(), uint(bits))
    removed := r.tree.Delete(v6, func(r1, r2 route) bool {
        // This is not enforced/documented, but the route in the tree is the first one.
        if r1.peer == r2.peer && r1.nlri == r2.nlri {
            r.nextHops.Take(old.nextHop)
            r.rtas.Take(r1.attributes)
            return true
        }
        return false
    }, old)
    return removed
}

// flushPeer removes a whole peer from the RIB, returning the number
// of removed routes.
func (r *rib) flushPeer(peer uint32) int {
    removed := 0
    buf := make([]route, 0)
    iter := r.tree.Iterate()
    for iter.Next() {
        removed += iter.DeleteWithBuffer(buf, func(payload route, val route) bool {
            if payload.peer == peer {
                r.nextHops.Take(payload.nextHop)
                r.rtas.Take(payload.attributes)
                return true
            }
            return false
        }, route{})
    }
    return removed
}

// newRIB initializes a new RIB.
func newRIB() *rib {
    return &rib{
        tree:     tree.NewTreeV6[route](),
        nextHops: helpers.NewInternPool[nextHop](),
        rtas:     helpers.NewInternPool[routeAttributes](),
    }
}
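The RIB never stores next hops or attribute sets directly: it keeps interned references, and every addPrefix, removePrefix and flushPeer pairs a Put with a Take so the pools stay balanced (the leak check in the test below verifies exactly that). A sketch of the contract assumed here; only the Put, Get, Take and Len calls visible in this file are taken from akvorado/common/helpers, and the "equal values share one entry" behavior is inferred from the Hash/Equal contract above.

// exampleInterning is an illustrative sketch, not part of this commit.
func exampleInterning() {
    r := newRIB()
    ref := r.rtas.Put(routeAttributes{asn: 65001}) // intern, first reference
    dup := r.rtas.Put(routeAttributes{asn: 65001}) // equal value, expected to share the same entry
    _ = r.rtas.Get(ref).asn                        // resolve without changing the count
    r.rtas.Take(dup)                               // release one reference
    r.rtas.Take(ref)                               // release the last one: r.rtas.Len() is 0 again
}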
316 inlet/bmp/rib_test.go Normal file
@@ -0,0 +1,316 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/netip"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/kentik/patricia"
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
)
|
||||
|
||||
func TestLargeCommunitiesAlign(t *testing.T) {
|
||||
largeCommunities := []bgp.LargeCommunity{
|
||||
{ASN: 1, LocalData1: 2, LocalData2: 3},
|
||||
{ASN: 4, LocalData1: 5, LocalData2: 6},
|
||||
}
|
||||
first := unsafe.Pointer(&largeCommunities[0])
|
||||
second := unsafe.Pointer(&largeCommunities[1])
|
||||
diff := uintptr(second) - uintptr(first)
|
||||
if diff != 12 {
|
||||
t.Fatalf("Alignment error for large community slices. Got %d, expected 12",
|
||||
diff)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRTAEqual(t *testing.T) {
|
||||
cases := []struct {
|
||||
rta1 routeAttributes
|
||||
rta2 routeAttributes
|
||||
equal bool
|
||||
}{
|
||||
{routeAttributes{asn: 2038}, routeAttributes{asn: 2038}, true},
|
||||
{routeAttributes{asn: 2038}, routeAttributes{asn: 2039}, false},
|
||||
{
|
||||
routeAttributes{asn: 2038, asPath: []uint32{}},
|
||||
routeAttributes{asn: 2038},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{}},
|
||||
routeAttributes{asn: 2039},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, communities: []uint32{}},
|
||||
routeAttributes{asn: 2038},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, communities: []uint32{}},
|
||||
routeAttributes{asn: 2039},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{}},
|
||||
routeAttributes{asn: 2038},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{}},
|
||||
routeAttributes{asn: 2039},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3}},
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3}},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3}},
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 4}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3}},
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 0}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3}},
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 4}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 4}},
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 4}},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34}},
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34}},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34}},
|
||||
routeAttributes{asn: 2038, asPath: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, communities: []uint32{100, 200, 300, 400}},
|
||||
routeAttributes{asn: 2038, communities: []uint32{100, 200, 300, 400}},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, communities: []uint32{100, 200, 300, 400}},
|
||||
routeAttributes{asn: 2038, communities: []uint32{100, 200, 300, 402}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, communities: []uint32{100, 200, 300}},
|
||||
routeAttributes{asn: 2038, communities: []uint32{100, 200, 300, 400}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 3}, {ASN: 3, LocalData1: 4, LocalData2: 5}, {ASN: 5, LocalData1: 6, LocalData2: 7}}},
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 3}, {ASN: 3, LocalData1: 4, LocalData2: 5}, {ASN: 5, LocalData1: 6, LocalData2: 7}}},
|
||||
true,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 3}, {ASN: 3, LocalData1: 4, LocalData2: 5}, {ASN: 5, LocalData1: 6, LocalData2: 7}}},
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 3}, {ASN: 3, LocalData1: 4, LocalData2: 5}, {ASN: 5, LocalData1: 6, LocalData2: 8}}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 3}, {ASN: 3, LocalData1: 4, LocalData2: 5}, {ASN: 5, LocalData1: 6, LocalData2: 7}}},
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 4}, {ASN: 3, LocalData1: 4, LocalData2: 5}, {ASN: 5, LocalData1: 6, LocalData2: 7}}},
|
||||
false,
|
||||
}, {
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 3}, {ASN: 3, LocalData1: 4, LocalData2: 5}}},
|
||||
routeAttributes{asn: 2038, largeCommunities: []bgp.LargeCommunity{{ASN: 1, LocalData1: 2, LocalData2: 3}, {ASN: 3, LocalData1: 4, LocalData2: 5}, {ASN: 5, LocalData1: 6, LocalData2: 7}}},
|
||||
false,
|
||||
},
|
||||
}
|
||||
outer:
|
||||
for try := 3; try >= 0; try-- {
|
||||
// We may have to try a few times because of
|
||||
// collisions due to reduced hash efficiency during
|
||||
// tests.
|
||||
for _, tc := range cases {
|
||||
equal := tc.rta1.Equal(tc.rta2)
|
||||
if equal && !tc.equal {
|
||||
t.Errorf("%+v == %+v", tc.rta1, tc.rta2)
|
||||
} else if !equal && tc.equal {
|
||||
t.Errorf("%+v != %+v", tc.rta1, tc.rta2)
|
||||
} else {
|
||||
equal := tc.rta1.Hash() == tc.rta2.Hash()
|
||||
if equal && !tc.equal {
|
||||
if try > 0 {
|
||||
// We may have a collision, change the seed and retry
|
||||
rtaHashSeed = rand.Uint64()
|
||||
continue outer
|
||||
}
|
||||
t.Errorf("%+v.hash == %+v.hash", tc.rta1, tc.rta2)
|
||||
} else if !equal && tc.equal {
|
||||
t.Errorf("%+v.hash != %+v.hash", tc.rta1, tc.rta2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRIB(t *testing.T) {
|
||||
for i := 0; i < 5; i++ {
|
||||
t.Logf("Run %d", i+1)
|
||||
r := newRIB()
|
||||
random := rand.New(rand.NewSource(100 * int64(i)))
|
||||
type lookup struct {
|
||||
peer uint32
|
||||
prefix netip.Addr // Assume /64
|
||||
nextHop netip.Addr
|
||||
rd RD
|
||||
asn uint32
|
||||
removed bool
|
||||
}
|
||||
// We store all lookups that should succeed
|
||||
lookups := []lookup{}
|
||||
removeLookup := func(lookup lookup) {
|
||||
for idx := range lookups {
|
||||
if lookups[idx].peer != lookup.peer {
|
||||
continue
|
||||
}
|
||||
if lookups[idx].prefix != lookup.prefix || lookups[idx].rd != lookup.rd {
|
||||
continue
|
||||
}
|
||||
if lookups[idx].removed {
|
||||
continue
|
||||
}
|
||||
lookups[idx].removed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
totalExporters := 20
|
||||
peers := []uint32{}
|
||||
for i := 0; i < totalExporters; i++ {
|
||||
for j := 0; j < int(random.Uint32()%14); j++ {
|
||||
peer := uint32((i << 16) + j)
|
||||
peers = append(peers, peer)
|
||||
for k := 0; k < int(random.Uint32()%10000); k++ {
|
||||
lookup := lookup{
|
||||
peer: peer,
|
||||
prefix: netip.MustParseAddr(fmt.Sprintf("2001:db8:f:%x::",
|
||||
random.Uint32()%300)),
|
||||
nextHop: netip.MustParseAddr(
|
||||
fmt.Sprintf("2001:db8:c::%x", random.Uint32()%500)),
|
||||
rd: RD(random.Uint64() % 3),
|
||||
asn: random.Uint32() % 1000,
|
||||
}
|
||||
r.addPrefix(lookup.prefix, 64,
|
||||
route{
|
||||
peer: peer,
|
||||
nlri: nlri{rd: lookup.rd},
|
||||
nextHop: r.nextHops.Put(nextHop(lookup.nextHop)),
|
||||
attributes: r.rtas.Put(routeAttributes{
|
||||
asn: lookup.asn,
|
||||
}),
|
||||
})
|
||||
removeLookup(lookup)
|
||||
lookups = append(lookups, lookup)
|
||||
}
|
||||
for k := 0; k < int(random.Uint32()%500); k++ {
|
||||
prefix := netip.MustParseAddr(fmt.Sprintf("2001:db8:f:%x::",
|
||||
random.Uint32()%300))
|
||||
rd := RD(random.Uint64() % 4)
|
||||
r.removePrefix(prefix, 64,
|
||||
route{
|
||||
peer: peer,
|
||||
nlri: nlri{
|
||||
rd: rd,
|
||||
},
|
||||
})
|
||||
removeLookup(lookup{
|
||||
peer: peer,
|
||||
prefix: prefix,
|
||||
rd: rd,
|
||||
})
|
||||
}
|
||||
for k := 0; k < int(random.Uint32()%200); k++ {
|
||||
lookup := lookup{
|
||||
peer: peer,
|
||||
prefix: netip.MustParseAddr(fmt.Sprintf("2001:db8:f:%x::",
|
||||
random.Uint32()%300)),
|
||||
nextHop: netip.MustParseAddr(
|
||||
fmt.Sprintf("2001:db8:c::%x", random.Uint32()%500)),
|
||||
asn: random.Uint32() % 1010,
|
||||
}
|
||||
r.addPrefix(lookup.prefix, 64,
|
||||
route{
|
||||
peer: peer,
|
||||
nextHop: r.nextHops.Put(nextHop(lookup.nextHop)),
|
||||
attributes: r.rtas.Put(routeAttributes{
|
||||
asn: lookup.asn,
|
||||
}),
|
||||
})
|
||||
removeLookup(lookup)
|
||||
lookups = append(lookups, lookup)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
removed := 0
|
||||
for _, lookup := range lookups {
|
||||
if lookup.removed {
|
||||
removed++
|
||||
continue
|
||||
}
|
||||
v6 := patricia.NewIPv6Address(lookup.prefix.AsSlice(), 128)
|
||||
ok, tags := r.tree.FindDeepestTags(v6)
|
||||
if !ok {
|
||||
t.Errorf("cannot find %s/128 for %d",
|
||||
lookup.prefix, lookup.peer)
|
||||
}
|
||||
found := false
|
||||
for _, route := range tags {
|
||||
if r.nextHops.Get(route.nextHop) != nextHop(lookup.nextHop) || route.nlri.rd != lookup.rd {
|
||||
continue
|
||||
}
|
||||
if r.rtas.Get(route.attributes).asn != lookup.asn {
|
||||
continue
|
||||
}
|
||||
found = true
|
||||
break
|
||||
}
|
||||
if !found {
|
||||
for _, route := range tags {
|
||||
t.Logf("route NH: %s, RD: %s, ASN: %d",
|
||||
netip.Addr(r.nextHops.Get(route.nextHop)),
|
||||
route.nlri.rd, r.rtas.Get(route.attributes).asn)
|
||||
}
|
||||
t.Errorf("cannot find %s/128 for %d; NH: %s, RD: %s, ASN: %d",
|
||||
lookup.prefix, lookup.peer,
|
||||
lookup.nextHop, lookup.rd, lookup.asn)
|
||||
}
|
||||
}
|
||||
if removed < 5 {
|
||||
t.Error("did not remove more than 5 prefixes, suspicious...")
|
||||
}
|
||||
|
||||
// Remove everything
|
||||
for _, peer := range peers {
|
||||
r.flushPeer(peer)
|
||||
}
|
||||
|
||||
// Check for leak of route attributes
|
||||
if r.rtas.Len() > 0 {
|
||||
t.Fatalf("%d route attributes have leaked", r.rtas.Len())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRTAHash(b *testing.B) {
|
||||
rta := routeAttributes{
|
||||
asn: 2038,
|
||||
asPath: []uint32{1, 2, 3, 4, 5, 6, 7},
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
rta.Hash()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRTAEqual(b *testing.B) {
|
||||
rta := routeAttributes{
|
||||
asn: 2038,
|
||||
asPath: []uint32{1, 2, 3, 4, 5, 6, 7},
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
rta.Equal(rta)
|
||||
}
|
||||
}
|
||||
108 inlet/bmp/root.go Normal file
@@ -0,0 +1,108 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only

// Package bmp provides a BMP server to receive BGP routes from
// various exporters.
package bmp

import (
    "fmt"
    "net"
    "sync"
    "time"

    "github.com/benbjohnson/clock"
    "gopkg.in/tomb.v2"

    "akvorado/common/daemon"
    "akvorado/common/reporter"
)

// Component represents the BMP component.
type Component struct {
    r           *reporter.Reporter
    d           *Dependencies
    t           tomb.Tomb
    config      Configuration
    acceptedRDs map[uint64]struct{}

    address net.Addr
    metrics metrics

    // RIB management with peers
    rib               *rib
    peers             map[peerKey]*peerInfo
    lastPeerReference uint32
    staleTimer        *clock.Timer
    mu                sync.RWMutex
}

// Dependencies define the dependencies of the BMP component.
type Dependencies struct {
    Daemon daemon.Component
    Clock  clock.Clock
}

// New creates a new BMP component.
func New(r *reporter.Reporter, configuration Configuration, dependencies Dependencies) (*Component, error) {
    if dependencies.Clock == nil {
        dependencies.Clock = clock.New()
    }
    c := Component{
        r:      r,
        d:      &dependencies,
        config: configuration,

        rib:   newRIB(),
        peers: make(map[peerKey]*peerInfo),
    }
    if len(c.config.RDs) > 0 {
        c.acceptedRDs = make(map[uint64]struct{})
        for _, rd := range c.config.RDs {
            c.acceptedRDs[uint64(rd)] = struct{}{}
        }
    }
    c.staleTimer = c.d.Clock.AfterFunc(time.Hour, c.removeStalePeers)

    c.d.Daemon.Track(&c.t, "inlet/bmp")
    c.initMetrics()
    return &c, nil
}

// Start starts the BMP component.
func (c *Component) Start() error {
    c.r.Info().Msg("starting BMP component")
    listener, err := net.Listen("tcp", c.config.Listen)
    if err != nil {
        return fmt.Errorf("unable to listen to %v: %w", c.config.Listen, err)
    }
    c.address = listener.Addr()
    c.t.Go(func() error {
        for {
            conn, err := listener.Accept()
            if err != nil {
                if c.t.Alive() {
                    return fmt.Errorf("cannot accept new connection: %w", err)
                }
                return nil
            }
            c.t.Go(func() error {
                return c.serveConnection(conn.(*net.TCPConn))
            })
        }
    })
    c.t.Go(func() error {
        <-c.t.Dying()
        listener.Close()
        return nil
    })
    return nil
}

// Stop stops the BMP component.
func (c *Component) Stop() error {
    defer c.r.Info().Msg("BMP component stopped")
    c.r.Info().Msg("stopping BMP component")
    c.t.Kill(nil)
    return c.t.Wait()
}
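For context, a caller would wire this component through New, Start and Stop only; the sketch below is not part of this commit and deliberately does not assume how the reporter and daemon components are built elsewhere in the inlet, taking them as parameters instead.

package main

import (
    "akvorado/common/daemon"
    "akvorado/common/reporter"
    "akvorado/inlet/bmp"
)

// startBMP is an illustrative sketch using only the entry points shown above.
func startBMP(r *reporter.Reporter, d daemon.Component) (*bmp.Component, error) {
    c, err := bmp.New(r, bmp.DefaultConfiguration(), bmp.Dependencies{Daemon: d})
    if err != nil {
        return nil, err
    }
    if err := c.Start(); err != nil {
        return nil, err
    }
    return c, nil
}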
977 inlet/bmp/root_test.go Normal file
@@ -0,0 +1,977 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"akvorado/common/helpers"
|
||||
"akvorado/common/reporter"
|
||||
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
)
|
||||
|
||||
func TestBMP(t *testing.T) {
|
||||
dial := func(t *testing.T, c *Component) net.Conn {
|
||||
t.Helper()
|
||||
conn, err := net.Dial("tcp", c.LocalAddr().String())
|
||||
if err != nil {
|
||||
t.Fatalf("Dial() error:\n%+v", err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
conn.Close()
|
||||
})
|
||||
return conn
|
||||
}
|
||||
send := func(t *testing.T, conn net.Conn, pcap string) {
|
||||
t.Helper()
|
||||
_, err := conn.Write(helpers.ReadPcapPayload(t, path.Join("testdata", pcap)))
|
||||
if err != nil {
|
||||
t.Fatalf("Write() error:\n%+v", err)
|
||||
}
|
||||
}
|
||||
dumpRIB := func(t *testing.T, c *Component) map[netip.Addr][]string {
|
||||
t.Helper()
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
result := map[netip.Addr][]string{}
|
||||
iter := c.rib.tree.Iterate()
|
||||
for iter.Next() {
|
||||
addr := iter.Address()
|
||||
for _, route := range iter.Tags() {
|
||||
nh := c.rib.nextHops.Get(route.nextHop)
|
||||
attrs := c.rib.rtas.Get(route.attributes)
|
||||
var peer netip.Addr
|
||||
for pkey, pinfo := range c.peers {
|
||||
if pinfo.reference == route.peer {
|
||||
peer = pkey.ip
|
||||
break
|
||||
}
|
||||
}
|
||||
if _, ok := result[peer.Unmap()]; !ok {
|
||||
result[peer.Unmap()] = []string{}
|
||||
}
|
||||
result[peer.Unmap()] = append(result[peer.Unmap()],
|
||||
fmt.Sprintf("[%s] %s via %s %s/%d %d %v %v %v",
|
||||
route.nlri.family,
|
||||
addr, netip.Addr(nh).Unmap(),
|
||||
route.nlri.rd, route.nlri.path,
|
||||
attrs.asn, attrs.asPath,
|
||||
attrs.communities, attrs.largeCommunities))
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// The pcap files are extracted from BMP session from a Juniper vMX. See:
|
||||
// https://github.com/vincentbernat/network-lab/tree/master/lab-juniper-vmx-bmp
|
||||
|
||||
t.Run("init, terminate", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
c, mockClock := NewMock(t, r, DefaultConfiguration())
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
// Init+EOR
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "0",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
send(t, conn, "bmp-terminate.pcap")
|
||||
time.Sleep(30 * time.Millisecond)
|
||||
gotMetrics = r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics = map[string]string{
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="termination"}`: "1",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "0",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
_, err := conn.Write([]byte{1})
|
||||
if err == nil {
|
||||
t.Fatal("Write() did not error while connection should be closed")
|
||||
}
|
||||
|
||||
mockClock.Add(2 * time.Hour)
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics = r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics = map[string]string{
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="termination"}`: "1",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "0",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, eor", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
c, _ := NewMock(t, r, DefaultConfiguration())
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "8",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, eor, reach NLRI", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-reach-addpath.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "26",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "18",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("2001:db8::3"): {
|
||||
"[ipv6-unicast] 2001:db8::2/127 via 2001:db8::3 0:0/0 65013 [65013] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::3 0:0/0 174 [65013 65013 174 174 174] [4260691978 4260691988] []",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::3 0:0/0 12322 [65013 65013 1299 1299 1299 12322] [4260691998] []",
|
||||
},
|
||||
netip.MustParseAddr("2001:db8::7"): {
|
||||
"[ipv4-unicast] 192.0.2.6/31 via 192.0.2.7 0:0/0 65017 [65017] [] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/25 via 192.0.2.7 65017:102/0 64476 [65017 65017 174 3356 3356 3356 64476] [4260954122 4260954132] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/25 via 192.0.2.7 65017:101/0 64476 [65017 65017 174 1299 64476] [4260954122 4260954132] []",
|
||||
"[l2vpn-evpn] 198.51.100.0/26 via 2001:db8::7 65017:104/0 64476 [65017 65017 3356 64476] [4260955215] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/26 via 192.0.2.7 65017:103/0 64476 [65017 65017 3356 64476] [4260955215] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.128/25 via 192.0.2.7 65017:102/0 396919 [65017 65017 6453 396919] [4260954131] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.128/25 via 192.0.2.7 65017:101/0 396919 [65017 65017 174 29447 396919] [4260954124] []",
|
||||
"[ipv6-unicast] 2001:db8::6/127 via 2001:db8::7 0:0/0 65017 [65017] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::7 0:0/0 174 [65017 65013 174 174 174] [4260954122 4260954132] [{65017 300 4}]",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::7 0:0/0 12322 [65017 65017 1299 1299 1299 12322] [4260954142] [{65017 400 2}]",
|
||||
"[l3vpn-ipv6-unicast] 2001:db8:4::/64 via 2001:db8::7 65017:101/0 29447 [65017 65017 1299 1299 1299 29447] [4260954412] []",
|
||||
},
|
||||
netip.MustParseAddr("192.0.2.1"): {
|
||||
"[ipv4-unicast] 192.0.2.0/31 via 192.0.2.1 0:0/0 65011 [65011] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/25 via 192.0.2.1 0:0/0 64476 [65011 65011 174 1299 64476] [4260560906 4260560916] []",
|
||||
"[ipv4-unicast] 198.51.100.128/25 via 192.0.2.1 0:0/0 396919 [65011 65011 174 29447 396919] [4260560908] []",
|
||||
},
|
||||
netip.MustParseAddr("192.0.2.5"): {
|
||||
"[ipv4-unicast] 192.0.2.4/31 via 192.0.2.5 0:0/1 65500 [] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, no peers up, eor, reach NLRI", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
// Same metrics as previously, except the AddPath peer.
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "17",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "3",
|
||||
`routes_total{exporter="127.0.0.1"}`: "17",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, no peers up, eor, reach NLRI, peers up", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "25",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "17",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, eor, reach NLRI, 1 peer down", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-peer-down.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-down-notification"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "25",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "5",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "3",
|
||||
`routes_total{exporter="127.0.0.1"}`: "14",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("2001:db8::3"): {
|
||||
"[ipv6-unicast] 2001:db8::2/127 via 2001:db8::3 0:0/0 65013 [65013] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::3 0:0/0 174 [65013 65013 174 174 174] [4260691978 4260691988] []",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::3 0:0/0 12322 [65013 65013 1299 1299 1299 12322] [4260691998] []",
|
||||
},
|
||||
netip.MustParseAddr("2001:db8::7"): {
|
||||
"[ipv4-unicast] 192.0.2.6/31 via 192.0.2.7 0:0/0 65017 [65017] [] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/25 via 192.0.2.7 65017:102/0 64476 [65017 65017 174 3356 3356 3356 64476] [4260954122 4260954132] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/25 via 192.0.2.7 65017:101/0 64476 [65017 65017 174 1299 64476] [4260954122 4260954132] []",
|
||||
"[l2vpn-evpn] 198.51.100.0/26 via 2001:db8::7 65017:104/0 64476 [65017 65017 3356 64476] [4260955215] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/26 via 192.0.2.7 65017:103/0 64476 [65017 65017 3356 64476] [4260955215] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.128/25 via 192.0.2.7 65017:102/0 396919 [65017 65017 6453 396919] [4260954131] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.128/25 via 192.0.2.7 65017:101/0 396919 [65017 65017 174 29447 396919] [4260954124] []",
|
||||
"[ipv6-unicast] 2001:db8::6/127 via 2001:db8::7 0:0/0 65017 [65017] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::7 0:0/0 174 [65017 65013 174 174 174] [4260954122 4260954132] [{65017 300 4}]",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::7 0:0/0 12322 [65017 65017 1299 1299 1299 12322] [4260954142] [{65017 400 2}]",
|
||||
"[l3vpn-ipv6-unicast] 2001:db8:4::/64 via 2001:db8::7 65017:101/0 29447 [65017 65017 1299 1299 1299 29447] [4260954412] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("only accept RD 65017:104", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.RDs = []RD{MustParseRD("65017:104")}
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "25",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "1",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("2001:db8::7"): {
|
||||
"[l2vpn-evpn] 198.51.100.0/26 via 2001:db8::7 65017:104/0 64476 [65017 65017 3356 64476] [4260955215] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("only accept RD 0:0", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.RDs = []RD{MustParseRD("0:0")}
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "25",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "10",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("2001:db8::3"): {
|
||||
"[ipv6-unicast] 2001:db8::2/127 via 2001:db8::3 0:0/0 65013 [65013] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::3 0:0/0 174 [65013 65013 174 174 174] [4260691978 4260691988] []",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::3 0:0/0 12322 [65013 65013 1299 1299 1299 12322] [4260691998] []",
|
||||
},
|
||||
netip.MustParseAddr("2001:db8::7"): {
|
||||
"[ipv4-unicast] 192.0.2.6/31 via 192.0.2.7 0:0/0 65017 [65017] [] []",
|
||||
"[ipv6-unicast] 2001:db8::6/127 via 2001:db8::7 0:0/0 65017 [65017] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::7 0:0/0 174 [65017 65013 174 174 174] [4260954122 4260954132] [{65017 300 4}]",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::7 0:0/0 12322 [65017 65017 1299 1299 1299 12322] [4260954142] [{65017 400 2}]",
|
||||
},
|
||||
netip.MustParseAddr("192.0.2.1"): {
|
||||
"[ipv4-unicast] 192.0.2.0/31 via 192.0.2.1 0:0/0 65011 [65011] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/25 via 192.0.2.1 0:0/0 64476 [65011 65011 174 1299 64476] [4260560906 4260560916] []",
|
||||
"[ipv4-unicast] 198.51.100.128/25 via 192.0.2.1 0:0/0 396919 [65011 65011 174 29447 396919] [4260560908] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, eor, reach, unreach", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.RDs = []RD{MustParseRD("0:0")}
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-unreach.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "33",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, l3vpn peer", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-l3vpn.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "3",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "1",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "1",
|
||||
`routes_total{exporter="127.0.0.1"}`: "2",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("192.0.2.9"): {
|
||||
"[ipv4-unicast] 192.0.2.8/31 via 192.0.2.9 65500:108/0 65019 [65019] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/29 via 192.0.2.9 65500:108/0 64476 [65019 65019 64476] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, l3vpn peer, filtering on 65500:108", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.RDs = []RD{MustParseRD("65500:108")}
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-l3vpn.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "routes")
|
||||
expectedMetrics := map[string]string{
|
||||
`routes_total{exporter="127.0.0.1"}`: "2",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, l3vpn peer, filtering on 65500:110", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.RDs = []RD{MustParseRD("65500:110")}
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-l3vpn.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "routes")
|
||||
expectedMetrics := map[string]string{
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, l3vpn peer, do not collect AS paths or communities", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.CollectCommunities = false
|
||||
config.CollectASPaths = false
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-l3vpn.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "routes")
|
||||
expectedMetrics := map[string]string{
|
||||
`routes_total{exporter="127.0.0.1"}`: "2",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("192.0.2.9"): {
|
||||
"[ipv4-unicast] 192.0.2.8/31 via 192.0.2.9 65500:108/0 65019 [] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/29 via 192.0.2.9 65500:108/0 64476 [] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, l3vpn peer, do not collect ASNs", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.CollectASNs = false
|
||||
config.CollectCommunities = false
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-l3vpn.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "routes")
|
||||
expectedMetrics := map[string]string{
|
||||
`routes_total{exporter="127.0.0.1"}`: "2",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("192.0.2.9"): {
|
||||
"[ipv4-unicast] 192.0.2.8/31 via 192.0.2.9 65500:108/0 0 [65019] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/29 via 192.0.2.9 65500:108/0 0 [65019 65019 64476] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, eor, unreach", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-unreach.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "16",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, eor, reach, unreach×2", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.CollectASNs = false
|
||||
config.CollectASPaths = false
|
||||
config.CollectCommunities = false
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-unreach.pcap")
|
||||
send(t, conn, "bmp-unreach.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "41",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "1",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("2001:db8::7"): {
|
||||
// This route stays because we tweaked it in reach.pcap
|
||||
"[l2vpn-evpn] 198.51.100.0/26 via 2001:db8::7 65017:104/0 0 [] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, eor, reach×2, unreach", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-unreach.pcap")
|
||||
send(t, conn, "bmp-unreach.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "41",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "1",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, peers up, reach, eor", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.CollectASPaths = false
|
||||
config.CollectCommunities = false
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "4",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "25",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "4",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "4",
|
||||
`routes_total{exporter="127.0.0.1"}`: "17",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("2001:db8::3"): {
|
||||
"[ipv6-unicast] 2001:db8::2/127 via 2001:db8::3 0:0/0 65013 [] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::3 0:0/0 174 [] [] []",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::3 0:0/0 12322 [] [] []",
|
||||
},
|
||||
netip.MustParseAddr("2001:db8::7"): {
|
||||
"[ipv4-unicast] 192.0.2.6/31 via 192.0.2.7 0:0/0 65017 [] [] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/25 via 192.0.2.7 65017:102/0 64476 [] [] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/25 via 192.0.2.7 65017:101/0 64476 [] [] []",
|
||||
"[l2vpn-evpn] 198.51.100.0/26 via 2001:db8::7 65017:104/0 64476 [] [] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.0/26 via 192.0.2.7 65017:103/0 64476 [] [] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.128/25 via 192.0.2.7 65017:102/0 396919 [] [] []",
|
||||
"[l3vpn-ipv4-unicast] 198.51.100.128/25 via 192.0.2.7 65017:101/0 396919 [] [] []",
|
||||
"[ipv6-unicast] 2001:db8::6/127 via 2001:db8::7 0:0/0 65017 [] [] []",
|
||||
"[ipv6-unicast] 2001:db8:1::/64 via 2001:db8::7 0:0/0 174 [] [] []",
|
||||
"[ipv6-unicast] 2001:db8:2::/64 via 2001:db8::7 0:0/0 12322 [] [] []",
|
||||
"[l3vpn-ipv6-unicast] 2001:db8:4::/64 via 2001:db8::7 65017:101/0 29447 [] [] []",
|
||||
},
|
||||
netip.MustParseAddr("192.0.2.1"): {
|
||||
"[ipv4-unicast] 192.0.2.0/31 via 192.0.2.1 0:0/0 65011 [] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/25 via 192.0.2.1 0:0/0 64476 [] [] []",
|
||||
"[ipv4-unicast] 198.51.100.128/25 via 192.0.2.1 0:0/0 396919 [] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, l3vpn peer, connection down", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.CollectASPaths = false
|
||||
config.CollectCommunities = false
|
||||
c, mockClock := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-l3vpn.pcap")
|
||||
conn.Close()
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "3",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "1",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "1",
|
||||
`routes_total{exporter="127.0.0.1"}`: "2",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("192.0.2.9"): {
|
||||
"[ipv4-unicast] 192.0.2.8/31 via 192.0.2.9 65500:108/0 65019 [] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/29 via 192.0.2.9 65500:108/0 64476 [] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
mockClock.Add(2 * time.Hour)
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics = r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics = map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "3",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "1",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "0",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB = map[netip.Addr][]string{}
|
||||
gotRIB = dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("init, l3vpn peer, init, l3vpn peer, connection down, terminate", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
config.CollectASPaths = false
|
||||
config.CollectCommunities = false
|
||||
c, mockClock := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
|
||||
conn1 := dial(t, c)
|
||||
send(t, conn1, "bmp-init.pcap")
|
||||
send(t, conn1, "bmp-l3vpn.pcap")
|
||||
conn2 := dial(t, c)
|
||||
send(t, conn2, "bmp-init.pcap")
|
||||
send(t, conn2, "bmp-l3vpn.pcap")
|
||||
conn1.Close()
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics := r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics := map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "6",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "2",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "2",
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "2",
|
||||
`routes_total{exporter="127.0.0.1"}`: "4",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB := map[netip.Addr][]string{
|
||||
netip.MustParseAddr("192.0.2.9"): {
|
||||
"[ipv4-unicast] 192.0.2.8/31 via 192.0.2.9 65500:108/0 65019 [] [] []",
|
||||
"[ipv4-unicast] 192.0.2.8/31 via 192.0.2.9 65500:108/0 65019 [] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/29 via 192.0.2.9 65500:108/0 64476 [] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/29 via 192.0.2.9 65500:108/0 64476 [] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB := dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
mockClock.Add(2 * time.Hour)
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics = r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics = map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "6",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "2",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "2",
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "1",
|
||||
`peers_total{exporter="127.0.0.1"}`: "1",
|
||||
`routes_total{exporter="127.0.0.1"}`: "2",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
expectedRIB = map[netip.Addr][]string{
|
||||
netip.MustParseAddr("192.0.2.9"): {
|
||||
"[ipv4-unicast] 192.0.2.8/31 via 192.0.2.9 65500:108/0 65019 [] [] []",
|
||||
"[ipv4-unicast] 198.51.100.0/29 via 192.0.2.9 65500:108/0 64476 [] [] []",
|
||||
},
|
||||
}
|
||||
gotRIB = dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
send(t, conn2, "bmp-terminate.pcap")
|
||||
time.Sleep(30 * time.Millisecond)
|
||||
gotMetrics = r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics = map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="termination"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "6",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "2",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "2",
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "2",
|
||||
`peers_total{exporter="127.0.0.1"}`: "1",
|
||||
`routes_total{exporter="127.0.0.1"}`: "2",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
gotRIB = dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
mockClock.Add(2 * time.Hour)
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
gotMetrics = r.GetMetrics("akvorado_inlet_bmp_", "-locked_duration")
|
||||
expectedMetrics = map[string]string{
|
||||
`messages_received_total{exporter="127.0.0.1",type="initiation"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="termination"}`: "1",
|
||||
`messages_received_total{exporter="127.0.0.1",type="peer-up-notification"}`: "2",
|
||||
`messages_received_total{exporter="127.0.0.1",type="route-monitoring"}`: "6",
|
||||
`messages_received_total{exporter="127.0.0.1",type="statistics-report"}`: "2",
|
||||
`opened_connections_total{exporter="127.0.0.1"}`: "2",
|
||||
`closed_connections_total{exporter="127.0.0.1"}`: "2",
|
||||
`peers_total{exporter="127.0.0.1"}`: "0",
|
||||
`routes_total{exporter="127.0.0.1"}`: "0",
|
||||
}
|
||||
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
|
||||
t.Errorf("Metrics (-got, +want):\n%s", diff)
|
||||
}
|
||||
expectedRIB = map[netip.Addr][]string{}
|
||||
gotRIB = dumpRIB(t, c)
|
||||
if diff := helpers.Diff(gotRIB, expectedRIB); diff != "" {
|
||||
t.Errorf("RIB (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
t.Run("lookup", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
conn := dial(t, c)
|
||||
|
||||
send(t, conn, "bmp-init.pcap")
|
||||
send(t, conn, "bmp-peers-up.pcap")
|
||||
send(t, conn, "bmp-reach.pcap")
|
||||
send(t, conn, "bmp-eor.pcap")
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
lookup := c.Lookup(net.ParseIP("2001:db8:1::10"), net.ParseIP("2001:db8::a"))
|
||||
if lookup.ASN != 174 {
|
||||
t.Errorf("Lookup() == %d, expected 174", lookup.ASN)
|
||||
}
|
||||
|
||||
// Add another prefix
|
||||
c.rib.addPrefix(netip.MustParseAddr("2001:db8:1::"), 64, route{
|
||||
peer: 1,
|
||||
nlri: nlri{family: bgp.RF_FS_IPv4_UC},
|
||||
nextHop: c.rib.nextHops.Put(nextHop(netip.MustParseAddr("2001:db8::a"))),
|
||||
attributes: c.rib.rtas.Put(routeAttributes{asn: 176}),
|
||||
})
|
||||
|
||||
lookup = c.Lookup(net.ParseIP("2001:db8:1::10"), net.ParseIP("2001:db8::a"))
|
||||
if lookup.ASN != 176 {
|
||||
t.Errorf("Lookup() == %d, expected 176", lookup.ASN)
|
||||
}
|
||||
lookup = c.Lookup(net.ParseIP("2001:db8:1::10"), net.ParseIP("2001:db8::b"))
|
||||
if lookup.ASN != 174 {
|
||||
t.Errorf("Lookup() == %d, expected 174", lookup.ASN)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("populate", func(t *testing.T) {
|
||||
r := reporter.NewMock(t)
|
||||
config := DefaultConfiguration()
|
||||
c, _ := NewMock(t, r, config)
|
||||
helpers.StartStop(t, c)
|
||||
c.PopulateRIB(t)
|
||||
|
||||
lookup := c.Lookup(net.ParseIP("192.0.2.2").To16(), net.ParseIP("198.51.100.200").To16())
|
||||
if lookup.ASN != 174 {
|
||||
t.Errorf("Lookup() == %d, expected 174", lookup.ASN)
|
||||
}
|
||||
lookup = c.Lookup(net.ParseIP("192.0.2.254").To16(), net.ParseIP("198.51.100.200").To16())
|
||||
if lookup.ASN != 0 {
|
||||
t.Errorf("Lookup() == %d, expected 0", lookup.ASN)
|
||||
}
|
||||
})
|
||||
}
186 inlet/bmp/serve.go Normal file
@@ -0,0 +1,186 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bmp"
|
||||
)
|
||||
|
||||
// serveConnection handles the connection from an exporter.
func (c *Component) serveConnection(conn *net.TCPConn) error {
|
||||
remote := conn.RemoteAddr().(*net.TCPAddr)
|
||||
exporterIP, _ := netip.AddrFromSlice(remote.IP)
|
||||
exporter := netip.AddrPortFrom(exporterIP, uint16(remote.Port))
|
||||
exporterStr := exporter.Addr().Unmap().String()
|
||||
c.metrics.openedConnections.WithLabelValues(exporterStr).Inc()
|
||||
logger := c.r.With().Str("exporter", exporterStr).Logger()
|
||||
conn.SetLinger(0)
|
||||
|
||||
// Stop the connection when exiting this method or when dying
|
||||
conn.CloseWrite()
|
||||
stop := make(chan struct{})
|
||||
c.t.Go(func() error {
|
||||
select {
|
||||
case <-stop:
|
||||
logger.Info().Msgf("connection down for %s", exporterStr)
|
||||
c.handleConnectionDown(exporter)
|
||||
case <-c.t.Dying():
|
||||
// No need to clean up
|
||||
}
|
||||
conn.CloseRead()
|
||||
c.metrics.closedConnections.WithLabelValues(exporterStr).Inc()
|
||||
return nil
|
||||
})
|
||||
defer close(stop)
|
||||
|
||||
// Setup TCP keepalive
|
||||
if err := conn.SetKeepAlive(true); err != nil {
|
||||
c.r.Error().Err(err).Msg("unable to enable keepalive")
|
||||
return nil
|
||||
}
|
||||
if err := conn.SetKeepAlivePeriod(time.Minute); err != nil {
|
||||
c.r.Error().Err(err).Msg("unable to set keepalive period")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle panics
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
logger.Panic().Str("panic", fmt.Sprintf("%+v", r)).Msg("fatal error while processing BMP messages")
|
||||
c.metrics.panics.WithLabelValues(exporterStr).Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
// Reading from connection
|
||||
c.handleConnectionUp(exporter)
|
||||
init := false
|
||||
for {
|
||||
header := make([]byte, bmp.BMP_HEADER_SIZE)
|
||||
_, err := io.ReadFull(conn, header)
|
||||
if err != nil {
|
||||
if c.t.Alive() && err != io.EOF {
|
||||
logger.Err(err).Msg("cannot read BMP header")
|
||||
c.metrics.errors.WithLabelValues(exporterStr, "cannot read BMP header").Inc()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
msg := bmp.BMPMessage{}
|
||||
if err := msg.Header.DecodeFromBytes(header); err != nil {
|
||||
logger.Err(err).Msg("cannot decode BMP header")
|
||||
c.metrics.errors.WithLabelValues(exporterStr, "cannot decode BMP header").Inc()
|
||||
return nil
|
||||
}
|
||||
switch msg.Header.Type {
|
||||
case bmp.BMP_MSG_ROUTE_MONITORING:
|
||||
msg.Body = &bmp.BMPRouteMonitoring{}
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "route-monitoring").Inc()
|
||||
case bmp.BMP_MSG_STATISTICS_REPORT:
|
||||
// Ignore
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "statistics-report").Inc()
|
||||
case bmp.BMP_MSG_PEER_DOWN_NOTIFICATION:
|
||||
msg.Body = &bmp.BMPPeerDownNotification{}
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "peer-down-notification").Inc()
|
||||
case bmp.BMP_MSG_PEER_UP_NOTIFICATION:
|
||||
msg.Body = &bmp.BMPPeerUpNotification{}
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "peer-up-notification").Inc()
|
||||
case bmp.BMP_MSG_INITIATION:
|
||||
msg.Body = &bmp.BMPInitiation{}
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "initiation").Inc()
|
||||
init = true
|
||||
case bmp.BMP_MSG_TERMINATION:
|
||||
msg.Body = &bmp.BMPTermination{}
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "termination").Inc()
|
||||
case bmp.BMP_MSG_ROUTE_MIRRORING:
|
||||
// Ignore
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "route-mirroring").Inc()
|
||||
default:
|
||||
logger.Info().Msgf("unknown BMP message type %d", msg.Header.Type)
|
||||
c.metrics.messages.WithLabelValues(exporterStr, "unknown").Inc()
|
||||
}
|
||||
|
||||
// First message should be BMP_MSG_INITIATION
|
||||
if !init {
|
||||
logger.Error().Msg("first message is not `initiation'")
|
||||
c.metrics.errors.WithLabelValues(exporterStr, "first message not initiation").Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
body := make([]byte, msg.Header.Length-bmp.BMP_HEADER_SIZE)
|
||||
_, err = io.ReadFull(conn, body)
|
||||
if err != nil {
|
||||
if c.t.Alive() {
|
||||
logger.Error().Err(err).Msg("cannot read BMP body")
|
||||
c.metrics.errors.WithLabelValues(exporterStr, "cannot read BMP body").Inc()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if msg.Body == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var marshallingOptions []*bgp.MarshallingOption
|
||||
var pkey peerKey
|
||||
if msg.Header.Type != bmp.BMP_MSG_INITIATION && msg.Header.Type != bmp.BMP_MSG_TERMINATION {
|
||||
if err := msg.PeerHeader.DecodeFromBytes(body); err != nil {
|
||||
logger.Error().Err(err).Msg("cannot parse BMP peer header")
|
||||
c.metrics.errors.WithLabelValues(exporterStr, "cannot parse BMP peer header").Inc()
|
||||
return nil
|
||||
}
|
||||
body = body[bmp.BMP_PEER_HEADER_SIZE:]
|
||||
pkey = peerKeyFromBMPPeerHeader(exporter, &msg.PeerHeader)
|
||||
c.mu.RLock()
|
||||
if pinfo, ok := c.peers[pkey]; ok {
|
||||
marshallingOptions = pinfo.marshallingOptions
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
}
|
||||
|
||||
if err := msg.Body.ParseBody(&msg, body, marshallingOptions...); err != nil {
|
||||
logger.Error().Err(err).Msg("cannot parse BMP body")
|
||||
c.metrics.errors.WithLabelValues(exporterStr, "cannot parse BMP body").Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle different messages
|
||||
switch body := msg.Body.(type) {
|
||||
case *bmp.BMPInitiation:
|
||||
found := false
|
||||
for _, info := range body.Info {
|
||||
switch tlv := info.(type) {
|
||||
case *bmp.BMPInfoTLVString:
|
||||
if tlv.Type == bmp.BMP_INIT_TLV_TYPE_SYS_NAME {
|
||||
logger.Info().Str("sysname", tlv.Value).Msg("new connection")
|
||||
found = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
logger.Info().Msg("new connection")
|
||||
}
|
||||
case *bmp.BMPTermination:
|
||||
for _, info := range body.Info {
|
||||
switch tlv := info.(type) {
|
||||
case *bmp.BMPInfoTLVString:
|
||||
logger.Info().Str("reason", tlv.Value).Msg("termination message received")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
logger.Info().Msg("termination message received")
|
||||
return nil
|
||||
case *bmp.BMPPeerUpNotification:
|
||||
c.handlePeerUpNotification(pkey, body)
|
||||
case *bmp.BMPPeerDownNotification:
|
||||
c.handlePeerDownNotification(pkey)
|
||||
case *bmp.BMPRouteMonitoring:
|
||||
c.handleRouteMonitoring(pkey, body)
|
||||
}
|
||||
}
|
||||
}
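
The loop above reads a fixed-size BMP common header before each body and relies on GoBGP for the actual decoding. For orientation, the header it expects is the 6-byte layout from RFC 7854 (version, 4-byte total message length, message type). The following standalone sketch decodes that header by hand; it is only an illustration and not code from this commit.

// bmpheader_sketch.go: hypothetical standalone decoder for the 6-byte BMP
// common header (RFC 7854, section 4.1). The component itself uses
// gobgp's msg.Header.DecodeFromBytes instead; this is only illustrative.
package main

import (
	"encoding/binary"
	"fmt"
)

func decodeCommonHeader(b []byte) (version uint8, length uint32, msgType uint8, err error) {
	if len(b) < 6 {
		return 0, 0, 0, fmt.Errorf("short BMP header: %d bytes", len(b))
	}
	version = b[0]                           // always 3 for BMPv3
	length = binary.BigEndian.Uint32(b[1:5]) // total message length, header included
	msgType = b[5]                           // 0=route monitoring, 4=initiation, 5=termination, …
	return version, length, msgType, nil
}

func main() {
	// A 6-byte initiation header announcing a 6-byte (empty-body) message.
	hdr := []byte{3, 0, 0, 0, 6, 4}
	v, l, t, _ := decodeCommonHeader(hdr)
	fmt.Println(v, l, t) // 3 6 4
}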
BIN inlet/bmp/testdata/bmp-eor.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-init.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-l3vpn.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-peer-down.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-peer-up.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-peers-up.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-reach-addpath.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-reach.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-terminate.pcap vendored Normal file (binary file not shown)
BIN inlet/bmp/testdata/bmp-unreach.pcap vendored Normal file (binary file not shown)
106 inlet/bmp/tests.go Normal file
@@ -0,0 +1,106 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
//go:build !release
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"akvorado/common/daemon"
|
||||
"akvorado/common/reporter"
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bmp"
|
||||
)
|
||||
|
||||
// NewMock creates a new mock component for BMP (it's a real one
|
||||
// listening to a random port).
|
||||
func NewMock(t *testing.T, r *reporter.Reporter, conf Configuration) (*Component, *clock.Mock) {
|
||||
t.Helper()
|
||||
mockClock := clock.NewMock()
|
||||
conf.Listen = "127.0.0.1:0"
|
||||
c, err := New(r, conf, Dependencies{
|
||||
Daemon: daemon.NewMock(t),
|
||||
Clock: mockClock,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("New() error:\n%+v", err)
|
||||
}
|
||||
return c, mockClock
|
||||
}
|
||||
|
||||
// PopulateRIB populates the RIB with a few entries.
|
||||
func (c *Component) PopulateRIB(t *testing.T) {
|
||||
t.Helper()
|
||||
pinfo := c.addPeer(peerKey{
|
||||
exporter: netip.MustParseAddrPort("[::ffff:127.0.0.1]:47389"),
|
||||
ip: netip.MustParseAddr("::ffff:203.0.113.4"),
|
||||
ptype: bmp.BMP_PEER_TYPE_GLOBAL,
|
||||
asn: 64500,
|
||||
})
|
||||
c.rib.addPrefix(netip.MustParseAddr("::ffff:192.0.2.0"), 96+27, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{family: bgp.RF_FS_IPv4_UC, path: 1},
|
||||
nextHop: c.rib.nextHops.Put(nextHop(netip.MustParseAddr("::ffff:198.51.100.4"))),
|
||||
attributes: c.rib.rtas.Put(routeAttributes{
|
||||
asn: 174,
|
||||
asPath: []uint32{64200, 1299, 174},
|
||||
communities: []uint32{100, 200, 400},
|
||||
largeCommunities: []bgp.LargeCommunity{{ASN: 64200, LocalData1: 2, LocalData2: 3}},
|
||||
}),
|
||||
})
|
||||
c.rib.addPrefix(netip.MustParseAddr("::ffff:192.0.2.0"), 96+27, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{family: bgp.RF_FS_IPv4_UC, path: 2},
|
||||
nextHop: c.rib.nextHops.Put(nextHop(netip.MustParseAddr("::ffff:198.51.100.8"))),
|
||||
attributes: c.rib.rtas.Put(routeAttributes{
|
||||
asn: 174,
|
||||
asPath: []uint32{64200, 174, 174, 174},
|
||||
communities: []uint32{100},
|
||||
}),
|
||||
})
|
||||
c.rib.addPrefix(netip.MustParseAddr("::ffff:192.0.2.128"), 96+27, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{family: bgp.RF_FS_IPv4_UC},
|
||||
nextHop: c.rib.nextHops.Put(nextHop(netip.MustParseAddr("::ffff:198.51.100.8"))),
|
||||
attributes: c.rib.rtas.Put(routeAttributes{
|
||||
asn: 1299,
|
||||
asPath: []uint32{64200, 1299},
|
||||
communities: []uint32{500},
|
||||
}),
|
||||
})
|
||||
c.rib.addPrefix(netip.MustParseAddr("::ffff:1.0.0.0"), 96+24, route{
|
||||
peer: pinfo.reference,
|
||||
nlri: nlri{family: bgp.RF_FS_IPv4_UC},
|
||||
nextHop: c.rib.nextHops.Put(nextHop(netip.MustParseAddr("::ffff:198.51.100.8"))),
|
||||
attributes: c.rib.rtas.Put(routeAttributes{
|
||||
asn: 65300,
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
// LocalAddr returns the address the BMP collector is listening to.
|
||||
func (c *Component) LocalAddr() net.Addr {
|
||||
return c.address
|
||||
}
|
||||
|
||||
// Reduce hash mask to generate collisions during tests (this should
|
||||
// be optimized out by the compiler)
|
||||
const rtaHashMask = 0xff
|
||||
|
||||
// Use a predictable seed for tests.
|
||||
var rtaHashSeed = uint64(0)
|
||||
|
||||
// MustParseRD parses a route distinguisher and panics on error.
func MustParseRD(input string) RD {
|
||||
var output RD
|
||||
if err := output.UnmarshalText([]byte(input)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return output
|
||||
}
25 inlet/bmp/utils.go Normal file
@@ -0,0 +1,25 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import "github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
|
||||
// asPathFlat transforms an AS path into a flat AS path: only the first
// value of a set is kept, and a confederation sequence is treated as a
// regular sequence.
func asPathFlat(aspath *bgp.PathAttributeAsPath) []uint32 {
|
||||
s := []uint32{}
|
||||
for _, param := range aspath.Value {
|
||||
segType := param.GetType()
|
||||
asList := param.GetAS()
|
||||
|
||||
switch segType {
|
||||
case bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SET, bgp.BGP_ASPATH_ATTR_TYPE_SET:
|
||||
asList = asList[:1]
|
||||
}
|
||||
for _, as := range asList {
|
||||
s = append(s, as)
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
66 inlet/bmp/utils_test.go Normal file
@@ -0,0 +1,66 @@
|
||||
// SPDX-FileCopyrightText: 2022 Free Mobile
|
||||
// SPDX-License-Identifier: AGPL-3.0-only
|
||||
|
||||
package bmp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"akvorado/common/helpers"
|
||||
|
||||
"github.com/osrg/gobgp/v3/pkg/packet/bgp"
|
||||
)
|
||||
|
||||
func TestASPathFlat(t *testing.T) {
|
||||
cases := []struct {
|
||||
AsPath *bgp.PathAttributeAsPath
|
||||
Expected []uint32
|
||||
}{
|
||||
{
|
||||
AsPath: bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{}),
|
||||
Expected: []uint32{},
|
||||
}, {
|
||||
AsPath: bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint16{65402, 65403, 65404}),
|
||||
}),
|
||||
Expected: []uint32{65402, 65403, 65404},
|
||||
}, {
|
||||
AsPath: bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{
|
||||
bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65402, 65536, 65537}),
|
||||
}),
|
||||
Expected: []uint32{65402, 65536, 65537},
|
||||
}, {
|
||||
AsPath: bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_SET, []uint16{65402, 65403, 65404}),
|
||||
}),
|
||||
Expected: []uint32{65402},
|
||||
}, {
|
||||
AsPath: bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SEQ, []uint16{65402, 65403, 65404}),
|
||||
}),
|
||||
Expected: []uint32{65402, 65403, 65404},
|
||||
}, {
|
||||
AsPath: bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SET, []uint16{65402, 65403, 65404}),
|
||||
}),
|
||||
Expected: []uint32{65402},
|
||||
}, {
|
||||
AsPath: bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint16{65402, 65403, 65404}),
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_SET, []uint16{65405, 65406}),
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SEQ, []uint16{65407, 65408}),
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SET, []uint16{65409, 65410}),
|
||||
bgp.NewAsPathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint16{65411}),
|
||||
}),
|
||||
Expected: []uint32{65402, 65403, 65404, 65405, 65407, 65408, 65409, 65411},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.AsPath.String(), func(t *testing.T) {
|
||||
got := asPathFlat(tc.AsPath)
|
||||
if diff := helpers.Diff(got, tc.Expected); diff != "" {
|
||||
t.Fatalf("asPathFlat() (-got, +want):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -38,7 +38,7 @@ func DefaultConfiguration() Configuration {
|
||||
ExporterClassifiers: []ExporterClassifierRule{},
|
||||
InterfaceClassifiers: []InterfaceClassifierRule{},
|
||||
ClassifierCacheSize: 1000,
|
||||
ASNProviders: []ASNProvider{ProviderFlow, ProviderGeoIP},
|
||||
ASNProviders: []ASNProvider{ProviderFlow, ProviderBMP, ProviderGeoIP},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,12 +52,18 @@ const (
|
||||
ProviderFlowExceptPrivate
|
||||
// ProviderGeoIP pulls the AS number from a GeoIP database.
|
||||
ProviderGeoIP
|
||||
// ProviderBMP uses the AS number from BMP.
ProviderBMP
|
||||
// ProviderBMPExceptPrivate uses the AS number from BMP, except if this is a private AS.
|
||||
ProviderBMPExceptPrivate
|
||||
)
|
||||
|
||||
var asnProviderMap = helpers.NewBimap(map[ASNProvider]string{
|
||||
ProviderFlow: "flow",
|
||||
ProviderFlowExceptPrivate: "flow-except-private",
|
||||
ProviderGeoIP: "geoip",
|
||||
ProviderBMP: "bmp",
|
||||
ProviderBMPExceptPrivate: "bmp-except-private",
|
||||
})
|
||||
|
||||
// MarshalText turns an AS provider to text.
|
||||
|
||||
@@ -21,7 +21,7 @@ func (c *Component) hydrateFlow(exporterIP netip.Addr, exporterStr string, flow
|
||||
errLogger := c.r.Sample(reporter.BurstSampler(time.Minute, 10))
|
||||
|
||||
if flow.InIf != 0 {
|
||||
exporterName, iface, err := c.d.Snmp.Lookup(exporterIP, uint(flow.InIf))
|
||||
exporterName, iface, err := c.d.SNMP.Lookup(exporterIP, uint(flow.InIf))
|
||||
if err != nil {
|
||||
if err != snmp.ErrCacheMiss {
|
||||
errLogger.Err(err).Str("exporter", exporterStr).Msg("unable to query SNMP cache")
|
||||
@@ -37,7 +37,7 @@ func (c *Component) hydrateFlow(exporterIP netip.Addr, exporterStr string, flow
|
||||
}
|
||||
|
||||
if flow.OutIf != 0 {
|
||||
exporterName, iface, err := c.d.Snmp.Lookup(exporterIP, uint(flow.OutIf))
|
||||
exporterName, iface, err := c.d.SNMP.Lookup(exporterIP, uint(flow.OutIf))
|
||||
if err != nil {
|
||||
// Only register a cache miss if we don't have one.
|
||||
// TODO: maybe we could do one SNMP query for both interfaces.
|
||||
@@ -87,34 +87,52 @@ func (c *Component) hydrateFlow(exporterIP netip.Addr, exporterStr string, flow
|
||||
flow.InIfName, flow.InIfDescription, flow.InIfSpeed,
|
||||
&flow.InIfConnectivity, &flow.InIfProvider, &flow.InIfBoundary)
|
||||
|
||||
flow.SrcAS = c.getASNumber(flow.SrcAS, net.IP(flow.SrcAddr))
|
||||
flow.DstAS = c.getASNumber(flow.DstAS, net.IP(flow.DstAddr))
|
||||
sourceBMP := c.d.BMP.Lookup(net.IP(flow.SrcAddr), nil)
|
||||
destBMP := c.d.BMP.Lookup(net.IP(flow.DstAddr), net.IP(flow.NextHop))
|
||||
flow.SrcAS = c.getASNumber(net.IP(flow.SrcAddr), flow.SrcAS, sourceBMP.ASN)
|
||||
flow.DstAS = c.getASNumber(net.IP(flow.DstAddr), flow.DstAS, destBMP.ASN)
|
||||
flow.SrcCountry = c.d.GeoIP.LookupCountry(net.IP(flow.SrcAddr))
|
||||
flow.DstCountry = c.d.GeoIP.LookupCountry(net.IP(flow.DstAddr))
|
||||
|
||||
flow.Communities = destBMP.Communities
|
||||
flow.ASPath = destBMP.ASPath
|
||||
if len(destBMP.LargeCommunities) > 0 {
|
||||
flow.LargeCommunities = make([]*decoder.LargeCommunity, len(destBMP.LargeCommunities))
|
||||
for i := 0; i < len(destBMP.LargeCommunities); i++ {
|
||||
flow.LargeCommunities[i] = &decoder.LargeCommunity{
|
||||
ASN: destBMP.LargeCommunities[i].ASN,
|
||||
LocalData1: destBMP.LargeCommunities[i].LocalData1,
|
||||
LocalData2: destBMP.LargeCommunities[i].LocalData2,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// getASNumber retrieves the AS number for a flow, depending on user preferences.
|
||||
func (c *Component) getASNumber(flowAS uint32, flowAddr net.IP) (asn uint32) {
|
||||
func (c *Component) getASNumber(flowAddr net.IP, flowAS, bmpAS uint32) (asn uint32) {
|
||||
for _, provider := range c.config.ASNProviders {
|
||||
if asn != 0 {
|
||||
break
|
||||
}
|
||||
switch provider {
|
||||
case ProviderGeoIP:
|
||||
asn = c.d.GeoIP.LookupASN(flowAddr)
|
||||
case ProviderFlow:
|
||||
asn = flowAS
|
||||
case ProviderFlowExceptPrivate:
|
||||
// See https://www.iana.org/assignments/iana-as-numbers-special-registry/iana-as-numbers-special-registry.xhtml
|
||||
if flowAS == 0 || flowAS == 23456 {
|
||||
break
|
||||
}
|
||||
if 64496 <= flowAS && flowAS <= 65551 || 4_200_000_000 <= flowAS && flowAS <= 4_294_967_295 {
|
||||
break
|
||||
}
|
||||
asn = flowAS
|
||||
case ProviderGeoIP:
|
||||
asn = c.d.GeoIP.LookupASN(flowAddr)
|
||||
if isPrivateAS(asn) {
|
||||
asn = 0
|
||||
}
|
||||
case ProviderBMP:
|
||||
asn = bmpAS
|
||||
case ProviderBMPExceptPrivate:
|
||||
asn = bmpAS
|
||||
if isPrivateAS(asn) {
|
||||
asn = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
return asn
|
||||
@@ -214,3 +232,14 @@ func convertBoundaryToProto(from interfaceBoundary) decoder.FlowMessage_Boundary
|
||||
}
|
||||
return decoder.FlowMessage_UNDEFINED
|
||||
}
|
||||
|
||||
func isPrivateAS(as uint32) bool {
|
||||
// See https://www.iana.org/assignments/iana-as-numbers-special-registry/iana-as-numbers-special-registry.xhtml
|
||||
if as == 0 || as == 23456 {
|
||||
return true
|
||||
}
|
||||
if 64496 <= as && as <= 65551 || 4_200_000_000 <= as && as <= 4_294_967_295 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
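For reference, a few inputs and what the helper above returns for them; the function is reproduced verbatim only to keep the example self-contained:

package main

import "fmt"

// isPrivateAS is copied from the change above: AS 0, AS_TRANS (23456) and the
// IANA private/reserved 16-bit and 32-bit ranges are treated as private.
func isPrivateAS(as uint32) bool {
	if as == 0 || as == 23456 {
		return true
	}
	if 64496 <= as && as <= 65551 || 4_200_000_000 <= as && as <= 4_294_967_295 {
		return true
	}
	return false
}

func main() {
	for _, as := range []uint32{0, 23456, 64500, 65551, 65552, 4_200_000_121, 15169} {
		fmt.Printf("%d -> %v\n", as, isPrivateAS(as))
	}
	// 0, 23456, 64500, 65551 and 4200000121 are private; 65552 and 15169 are not.
}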
||||
|
||||
@@ -18,7 +18,9 @@ import (
|
||||
"akvorado/common/helpers"
|
||||
"akvorado/common/http"
|
||||
"akvorado/common/reporter"
|
||||
"akvorado/inlet/bmp"
|
||||
"akvorado/inlet/flow"
|
||||
"akvorado/inlet/flow/decoder"
|
||||
"akvorado/inlet/geoip"
|
||||
"akvorado/inlet/kafka"
|
||||
"akvorado/inlet/snmp"
|
||||
@@ -55,8 +57,7 @@ func TestHydrate(t *testing.T) {
|
||||
InIfSpeed: 1000,
|
||||
OutIfSpeed: 1000,
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "no rule, override sampling rate",
|
||||
Configuration: gin.H{"overridesamplingrate": gin.H{
|
||||
"192.0.2.0/24": 100,
|
||||
@@ -84,8 +85,7 @@ func TestHydrate(t *testing.T) {
|
||||
InIfSpeed: 1000,
|
||||
OutIfSpeed: 1000,
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "no rule, no sampling rate, default is one value",
|
||||
Configuration: gin.H{"defaultsamplingrate": 500},
|
||||
InputFlow: func() *flow.Message {
|
||||
@@ -108,8 +108,7 @@ func TestHydrate(t *testing.T) {
|
||||
InIfSpeed: 1000,
|
||||
OutIfSpeed: 1000,
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "no rule, no sampling rate, default is map",
|
||||
Configuration: gin.H{"defaultsamplingrate": gin.H{
|
||||
"192.0.2.0/24": 100,
|
||||
@@ -136,8 +135,7 @@ func TestHydrate(t *testing.T) {
|
||||
InIfSpeed: 1000,
|
||||
OutIfSpeed: 1000,
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "exporter rule",
|
||||
Configuration: gin.H{
|
||||
"exporterclassifiers": []string{
|
||||
@@ -170,8 +168,7 @@ func TestHydrate(t *testing.T) {
|
||||
InIfSpeed: 1000,
|
||||
OutIfSpeed: 1000,
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "interface rule",
|
||||
Configuration: gin.H{
|
||||
"interfaceclassifiers": []string{
|
||||
@@ -206,8 +203,7 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
|
||||
InIfBoundary: 2, // Internal
|
||||
OutIfBoundary: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "configure twice boundary",
|
||||
Configuration: gin.H{
|
||||
"interfaceclassifiers": []string{
|
||||
@@ -238,8 +234,7 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
|
||||
InIfBoundary: 2, // Internal
|
||||
OutIfBoundary: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "configure twice provider",
|
||||
Configuration: gin.H{
|
||||
"interfaceclassifiers": []string{
|
||||
@@ -270,8 +265,7 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
|
||||
InIfProvider: "telia",
|
||||
OutIfProvider: "telia",
|
||||
},
|
||||
},
|
||||
{
|
||||
}, {
|
||||
Name: "classify depending on description",
|
||||
Configuration: gin.H{
|
||||
"interfaceclassifiers": []string{
|
||||
@@ -307,6 +301,41 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
|
||||
InIfBoundary: 1, // external
|
||||
OutIfBoundary: 2, // internal
|
||||
},
|
||||
}, {
|
||||
Name: "use data from BMP",
|
||||
Configuration: gin.H{},
|
||||
InputFlow: func() *flow.Message {
|
||||
return &flow.Message{
|
||||
SamplingRate: 1000,
|
||||
ExporterAddress: net.ParseIP("192.0.2.142"),
|
||||
InIf: 100,
|
||||
OutIf: 200,
|
||||
SrcAddr: net.ParseIP("192.0.2.142"),
|
||||
DstAddr: net.ParseIP("192.0.2.10"),
|
||||
}
|
||||
},
|
||||
OutputFlow: &flow.Message{
|
||||
SamplingRate: 1000,
|
||||
ExporterAddress: net.ParseIP("192.0.2.142"),
|
||||
ExporterName: "192_0_2_142",
|
||||
InIf: 100,
|
||||
OutIf: 200,
|
||||
InIfName: "Gi0/0/100",
|
||||
OutIfName: "Gi0/0/200",
|
||||
InIfDescription: "Interface 100",
|
||||
OutIfDescription: "Interface 200",
|
||||
InIfSpeed: 1000,
|
||||
OutIfSpeed: 1000,
|
||||
SrcAddr: net.ParseIP("192.0.2.142").To16(),
|
||||
DstAddr: net.ParseIP("192.0.2.10").To16(),
|
||||
SrcAS: 1299,
|
||||
DstAS: 174,
|
||||
ASPath: []uint32{64200, 1299, 174},
|
||||
Communities: []uint32{100, 200, 400},
|
||||
LargeCommunities: []*decoder.LargeCommunity{
|
||||
{ASN: 64200, LocalData1: 2, LocalData2: 3},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
@@ -321,6 +350,8 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
|
||||
geoipComponent := geoip.NewMock(t, r)
|
||||
kafkaComponent, kafkaProducer := kafka.NewMock(t, r, kafka.DefaultConfiguration())
|
||||
httpComponent := http.NewMock(t, r)
|
||||
bmpComponent, _ := bmp.NewMock(t, r, bmp.DefaultConfiguration())
|
||||
bmpComponent.PopulateRIB(t)
|
||||
|
||||
// Prepare a configuration
|
||||
configuration := DefaultConfiguration()
|
||||
@@ -336,10 +367,11 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
|
||||
c, err := New(r, configuration, Dependencies{
|
||||
Daemon: daemonComponent,
|
||||
Flow: flowComponent,
|
||||
Snmp: snmpComponent,
|
||||
SNMP: snmpComponent,
|
||||
GeoIP: geoipComponent,
|
||||
Kafka: kafkaComponent,
|
||||
HTTP: httpComponent,
|
||||
BMP: bmpComponent,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("New() error:\n%+v", err)
|
||||
@@ -392,23 +424,31 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
|
||||
|
||||
func TestGetASNumber(t *testing.T) {
|
||||
cases := []struct {
|
||||
Flow uint32
|
||||
Addr string
|
||||
FlowAS uint32
|
||||
BMPAS uint32
|
||||
Providers []ASNProvider
|
||||
Expected uint32
|
||||
}{
|
||||
{12322, "1.0.0.1", []ASNProvider{ProviderFlow}, 12322},
|
||||
{65536, "1.0.0.1", []ASNProvider{ProviderFlow}, 65536},
|
||||
{65536, "1.0.0.1", []ASNProvider{ProviderFlowExceptPrivate}, 0},
|
||||
{4_200_000_121, "1.0.0.1", []ASNProvider{ProviderFlowExceptPrivate}, 0},
|
||||
{65536, "1.0.0.1", []ASNProvider{ProviderFlowExceptPrivate, ProviderFlow}, 65536},
|
||||
{12322, "1.0.0.1", []ASNProvider{ProviderFlowExceptPrivate}, 12322},
|
||||
{12322, "1.0.0.1", []ASNProvider{ProviderGeoIP}, 15169},
|
||||
{12322, "2.0.0.1", []ASNProvider{ProviderGeoIP}, 0},
|
||||
{12322, "1.0.0.1", []ASNProvider{ProviderGeoIP, ProviderFlow}, 15169},
|
||||
{12322, "1.0.0.1", []ASNProvider{ProviderFlow, ProviderGeoIP}, 12322},
|
||||
{12322, "2.0.0.1", []ASNProvider{ProviderFlow, ProviderGeoIP}, 12322},
|
||||
{12322, "2.0.0.1", []ASNProvider{ProviderGeoIP, ProviderFlow}, 12322},
|
||||
// 1
|
||||
{"1.0.0.1", 12322, 0, []ASNProvider{ProviderFlow}, 12322},
|
||||
{"1.0.0.1", 65536, 0, []ASNProvider{ProviderFlow}, 65536},
|
||||
{"1.0.0.1", 65536, 0, []ASNProvider{ProviderFlowExceptPrivate}, 0},
|
||||
{"1.0.0.1", 4_200_000_121, 0, []ASNProvider{ProviderFlowExceptPrivate}, 0},
|
||||
{"1.0.0.1", 65536, 0, []ASNProvider{ProviderFlowExceptPrivate, ProviderFlow}, 65536},
|
||||
{"1.0.0.1", 12322, 0, []ASNProvider{ProviderFlowExceptPrivate}, 12322},
|
||||
{"1.0.0.1", 12322, 0, []ASNProvider{ProviderGeoIP}, 15169},
|
||||
{"2.0.0.1", 12322, 0, []ASNProvider{ProviderGeoIP}, 0},
|
||||
{"1.0.0.1", 12322, 0, []ASNProvider{ProviderGeoIP, ProviderFlow}, 15169},
|
||||
// 10
|
||||
{"1.0.0.1", 12322, 0, []ASNProvider{ProviderFlow, ProviderGeoIP}, 12322},
|
||||
{"2.0.0.1", 12322, 0, []ASNProvider{ProviderFlow, ProviderGeoIP}, 12322},
|
||||
{"2.0.0.1", 12322, 0, []ASNProvider{ProviderGeoIP, ProviderFlow}, 12322},
|
||||
{"192.0.2.2", 12322, 174, []ASNProvider{ProviderBMP}, 174},
|
||||
{"192.0.2.129", 12322, 1299, []ASNProvider{ProviderBMP}, 1299},
|
||||
{"192.0.2.254", 12322, 0, []ASNProvider{ProviderBMP}, 0},
|
||||
{"1.0.0.1", 12322, 65300, []ASNProvider{ProviderBMP}, 65300},
|
||||
{"1.0.0.1", 12322, 15169, []ASNProvider{ProviderBMPExceptPrivate, ProviderGeoIP}, 15169},
|
||||
}
|
||||
for i, tc := range cases {
|
||||
i++
|
||||
@@ -418,14 +458,18 @@ func TestGetASNumber(t *testing.T) {
|
||||
// We don't need all components as we won't start the component.
|
||||
configuration := DefaultConfiguration()
|
||||
configuration.ASNProviders = tc.Providers
|
||||
bmpComponent, _ := bmp.NewMock(t, r, bmp.DefaultConfiguration())
|
||||
bmpComponent.PopulateRIB(t)
|
||||
|
||||
c, err := New(r, configuration, Dependencies{
|
||||
Daemon: daemon.NewMock(t),
|
||||
GeoIP: geoip.NewMock(t, r),
|
||||
BMP: bmpComponent,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("New() error:\n%+v", err)
|
||||
}
|
||||
got := c.getASNumber(tc.Flow, net.ParseIP(tc.Addr))
|
||||
got := c.getASNumber(net.ParseIP(tc.Addr), tc.FlowAS, tc.BMPAS)
|
||||
if diff := helpers.Diff(got, tc.Expected); diff != "" {
|
||||
t.Fatalf("getASNumber() (-got, +want):\n%s", diff)
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"akvorado/common/daemon"
|
||||
"akvorado/common/http"
|
||||
"akvorado/common/reporter"
|
||||
"akvorado/inlet/bmp"
|
||||
"akvorado/inlet/flow"
|
||||
"akvorado/inlet/geoip"
|
||||
"akvorado/inlet/kafka"
|
||||
@@ -46,7 +47,8 @@ type Component struct {
|
||||
type Dependencies struct {
|
||||
Daemon daemon.Component
|
||||
Flow *flow.Component
|
||||
Snmp *snmp.Component
|
||||
SNMP *snmp.Component
|
||||
BMP *bmp.Component
|
||||
GeoIP *geoip.Component
|
||||
Kafka *kafka.Component
|
||||
HTTP *http.Component
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"akvorado/common/helpers"
|
||||
"akvorado/common/http"
|
||||
"akvorado/common/reporter"
|
||||
"akvorado/inlet/bmp"
|
||||
"akvorado/inlet/flow"
|
||||
"akvorado/inlet/geoip"
|
||||
"akvorado/inlet/kafka"
|
||||
@@ -38,15 +39,18 @@ func TestCore(t *testing.T) {
|
||||
geoipComponent := geoip.NewMock(t, r)
|
||||
kafkaComponent, kafkaProducer := kafka.NewMock(t, r, kafka.DefaultConfiguration())
|
||||
httpComponent := http.NewMock(t, r)
|
||||
bmpComponent, _ := bmp.NewMock(t, r, bmp.DefaultConfiguration())
|
||||
bmpComponent.PopulateRIB(t)
|
||||
|
||||
// Instantiate and start core
|
||||
c, err := New(r, DefaultConfiguration(), Dependencies{
|
||||
Daemon: daemonComponent,
|
||||
Flow: flowComponent,
|
||||
Snmp: snmpComponent,
|
||||
SNMP: snmpComponent,
|
||||
GeoIP: geoipComponent,
|
||||
Kafka: kafkaComponent,
|
||||
HTTP: httpComponent,
|
||||
BMP: bmpComponent,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("New() error:\n%+v", err)
|
||||
|
||||
inlet/flow/data/schemas/flow-3.proto (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
syntax = "proto3";
|
||||
package decoder;
|
||||
option go_package = "akvorado/inlet/flow/decoder";
|
||||
|
||||
// This is a stripped-down version of the one in Goflow2, with GeoIP data added.
|
||||
|
||||
message FlowMessagev3 {
|
||||
|
||||
uint64 TimeReceived = 2;
|
||||
uint32 SequenceNum = 3;
|
||||
uint64 SamplingRate = 4;
|
||||
uint32 FlowDirection = 5;
|
||||
|
||||
// Exporter information
|
||||
bytes ExporterAddress = 6;
|
||||
string ExporterName = 99;
|
||||
string ExporterGroup = 98;
|
||||
string ExporterRole = 97;
|
||||
string ExporterSite = 96;
|
||||
string ExporterRegion = 95;
|
||||
string ExporterTenant = 94;
|
||||
|
||||
// Found inside packet
|
||||
uint64 TimeFlowStart = 7;
|
||||
uint64 TimeFlowEnd = 8;
|
||||
|
||||
// Size of the sampled packet
|
||||
uint64 Bytes = 9;
|
||||
uint64 Packets = 10;
|
||||
|
||||
// Source/destination addresses
|
||||
bytes SrcAddr = 11;
|
||||
bytes DstAddr = 12;
|
||||
|
||||
// Layer 3 protocol (IPv4/IPv6/ARP/MPLS...)
|
||||
uint32 Etype = 13;
|
||||
|
||||
// Layer 4 protocol
|
||||
uint32 Proto = 14;
|
||||
|
||||
// Ports for UDP and TCP
|
||||
uint32 SrcPort = 15;
|
||||
uint32 DstPort = 16;
|
||||
|
||||
// Interfaces
|
||||
uint32 InIf = 17;
|
||||
uint32 OutIf = 18;
|
||||
|
||||
// IP and TCP special flags
|
||||
uint32 IPTos = 19;
|
||||
uint32 ForwardingStatus = 20;
|
||||
uint32 IPTTL = 21;
|
||||
uint32 TCPFlags = 22;
|
||||
uint32 IcmpType = 23;
|
||||
uint32 IcmpCode = 24;
|
||||
uint32 IPv6FlowLabel = 25;
|
||||
uint32 FragmentId = 26;
|
||||
uint32 FragmentOffset = 27;
|
||||
uint32 BiFlowDirection = 28;
|
||||
|
||||
// Autonomous system information
|
||||
uint32 SrcAS = 29;
|
||||
uint32 DstAS = 30;
|
||||
|
||||
// Prefix size
|
||||
uint32 SrcNet = 31;
|
||||
uint32 DstNet = 32;
|
||||
|
||||
// Next hop
|
||||
bytes NextHop = 33;
|
||||
uint32 NextHopAS = 34;
|
||||
repeated uint32 ASPath = 35;
|
||||
repeated uint32 Communities = 36;
|
||||
repeated LargeCommunityv3 LargeCommunities = 37;
|
||||
|
||||
// Country
|
||||
string SrcCountry = 100;
|
||||
string DstCountry = 101;
|
||||
|
||||
// Interface names and descriptions
|
||||
enum Boundary {
|
||||
UNDEFINED = 0;
|
||||
EXTERNAL = 1;
|
||||
INTERNAL = 2;
|
||||
}
|
||||
string InIfName = 102;
|
||||
string OutIfName = 103;
|
||||
string InIfDescription = 104;
|
||||
string OutIfDescription = 105;
|
||||
uint32 InIfSpeed = 106;
|
||||
uint32 OutIfSpeed = 107;
|
||||
string InIfConnectivity = 108;
|
||||
string OutIfConnectivity = 109;
|
||||
string InIfProvider = 110;
|
||||
string OutIfProvider = 111;
|
||||
Boundary InIfBoundary = 112;
|
||||
Boundary OutIfBoundary = 113;
|
||||
}
|
||||
|
||||
message LargeCommunityv3 {
|
||||
uint32 ASN = 1;
|
||||
uint32 LocalData1 = 2;
|
||||
uint32 LocalData2 = 3;
|
||||
}
|
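To show how the new BGP-related fields end up being used from Go, here is a small sketch that builds a flow message with them filled in; it assumes the generated struct keeps the schema's field names and the flow.Message / decoder.LargeCommunity types used by the hydration code and tests in this change:

package main

import (
	"fmt"
	"net"

	"akvorado/inlet/flow"
	"akvorado/inlet/flow/decoder"
)

func main() {
	// Illustrative only: populate the attributes added by schema v3.
	msg := &flow.Message{
		SrcAddr:     net.ParseIP("192.0.2.142").To16(),
		DstAddr:     net.ParseIP("192.0.2.10").To16(),
		SrcAS:       1299,
		DstAS:       174,
		NextHop:     net.ParseIP("198.51.100.1").To16(),
		NextHopAS:   174,
		ASPath:      []uint32{64200, 1299, 174},
		Communities: []uint32{100, 200, 400},
		LargeCommunities: []*decoder.LargeCommunity{
			{ASN: 64200, LocalData1: 2, LocalData2: 3},
		},
	}
	fmt.Println(msg.ASPath, msg.Communities, len(msg.LargeCommunities))
}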
||||
@@ -44,6 +44,8 @@ func ConvertGoflowToFlowMessage(input *goflowmessage.FlowMessage) *FlowMessage {
|
||||
DstAS: input.DstAS,
|
||||
SrcNet: input.SrcNet,
|
||||
DstNet: input.DstNet,
|
||||
NextHop: ipCopy(input.NextHop),
|
||||
NextHopAS: input.NextHopAS,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -117,6 +117,7 @@ func TestDecode(t *testing.T) {
|
||||
OutIf: 450,
|
||||
ForwardingStatus: 64,
|
||||
TCPFlags: 16,
|
||||
NextHop: net.ParseIP("194.149.174.63").To16(),
|
||||
}, {
|
||||
SequenceNum: 44797001,
|
||||
ExporterAddress: net.ParseIP("127.0.0.1").To16(),
|
||||
@@ -137,6 +138,7 @@ func TestDecode(t *testing.T) {
|
||||
OutIf: 452,
|
||||
ForwardingStatus: 64,
|
||||
TCPFlags: 16,
|
||||
NextHop: net.ParseIP("194.149.174.71").To16(),
|
||||
}, {
|
||||
SequenceNum: 44797001,
|
||||
ExporterAddress: net.ParseIP("127.0.0.1").To16(),
|
||||
@@ -157,6 +159,7 @@ func TestDecode(t *testing.T) {
|
||||
OutIf: 306,
|
||||
ForwardingStatus: 64,
|
||||
TCPFlags: 16,
|
||||
NextHop: net.ParseIP("252.223.0.0").To16(),
|
||||
}, {
|
||||
SequenceNum: 44797001,
|
||||
ExporterAddress: net.ParseIP("127.0.0.1").To16(),
|
||||
@@ -177,6 +180,7 @@ func TestDecode(t *testing.T) {
|
||||
OutIf: 451,
|
||||
ForwardingStatus: 64,
|
||||
TCPFlags: 16,
|
||||
NextHop: net.ParseIP("194.149.174.61").To16(),
|
||||
},
|
||||
}
|
||||
for _, f := range got {
|
||||
|
||||
@@ -68,6 +68,7 @@ func TestDecode(t *testing.T) {
|
||||
SrcAddr: net.ParseIP("104.26.8.24").To16(),
|
||||
DstAddr: net.ParseIP("45.90.161.46").To16(),
|
||||
ExporterAddress: net.ParseIP("172.16.0.3").To16(),
|
||||
NextHop: net.ParseIP("45.90.161.46").To16(),
|
||||
}, {
|
||||
SequenceNum: 812646826,
|
||||
SamplingRate: 1024,
|
||||
@@ -111,6 +112,8 @@ func TestDecode(t *testing.T) {
|
||||
SrcAddr: net.ParseIP("45.90.161.148").To16(),
|
||||
DstAddr: net.ParseIP("191.87.91.27").To16(),
|
||||
ExporterAddress: net.ParseIP("172.16.0.3").To16(),
|
||||
NextHop: net.ParseIP("31.14.69.110").To16(),
|
||||
NextHopAS: 203698,
|
||||
}, {
|
||||
SequenceNum: 812646826,
|
||||
SamplingRate: 1024,
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// CurrentSchemaVersion is the version of the protobuf definition
|
||||
const CurrentSchemaVersion = 2
|
||||
const CurrentSchemaVersion = 3
|
||||
|
||||
var (
|
||||
// VersionedSchemas is a mapping from schema version to protobuf definitions
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
// NewMock creates a new Kafka component with a mocked Kafka. It will
|
||||
// panic if it cannot be started.
|
||||
func NewMock(t *testing.T, reporter *reporter.Reporter, configuration Configuration) (*Component, *mocks.AsyncProducer) {
|
||||
t.Helper()
|
||||
c, err := New(reporter, configuration, Dependencies{Daemon: daemon.NewMock(t)})
|
||||
if err != nil {
|
||||
t.Fatalf("New() error:\n%+v", err)
|
||||
|
||||
@@ -216,8 +216,8 @@ WHERE database=currentDatabase() AND table NOT LIKE '.%'`)
|
||||
"flows_1h0m0s_consumer",
|
||||
"flows_1m0s",
|
||||
"flows_1m0s_consumer",
|
||||
"flows_2_raw",
|
||||
"flows_2_raw_consumer",
|
||||
"flows_3_raw",
|
||||
"flows_3_raw_consumer",
|
||||
"flows_5m0s",
|
||||
"flows_5m0s_consumer",
|
||||
"networks",
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"flows_1h0m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1h0m0s_consumer TO default.flows_1h0m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(3600)) AS TimeReceived FROM default.flows"
|
||||
"flows_1m0s","CREATE TABLE default.flows_1m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(12096))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName) TTL TimeReceived + toIntervalSecond(604800) SETTINGS index_granularity = 8192"
|
||||
"flows_1m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1m0s_consumer TO default.flows_1m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(60)) AS TimeReceived FROM default.flows"
|
||||
"flows_2_raw","CREATE TABLE default.flows_2_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessage', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
|
||||
"flows_2_raw_consumer","CREATE MATERIALIZED VIEW default.flows_2_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName FROM default.flows_2_raw"
|
||||
"flows_3_raw","CREATE TABLE default.flows_3_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessage', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
|
||||
"flows_3_raw_consumer","CREATE MATERIALIZED VIEW default.flows_3_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName FROM default.flows_3_raw"
|
||||
"flows_5m0s","CREATE TABLE default.flows_5m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(155520))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName) TTL TimeReceived + toIntervalSecond(7776000) SETTINGS index_granularity = 8192"
|
||||
"flows_5m0s_consumer","CREATE MATERIALIZED VIEW default.flows_5m0s_consumer TO default.flows_5m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(300)) AS TimeReceived FROM default.flows"
|
||||
|
||||
|
@@ -7,7 +7,7 @@
|
||||
"flows_1h0m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1h0m0s_consumer TO default.flows_1h0m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(3600)) AS TimeReceived FROM default.flows"
|
||||
"flows_1m0s","CREATE TABLE default.flows_1m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(12096))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant) TTL TimeReceived + toIntervalSecond(604800) SETTINGS index_granularity = 8192"
|
||||
"flows_1m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1m0s_consumer TO default.flows_1m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(60)) AS TimeReceived FROM default.flows"
|
||||
"flows_2_raw","CREATE TABLE default.flows_2_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessage', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
|
||||
"flows_2_raw_consumer","CREATE MATERIALIZED VIEW default.flows_2_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName, dictGetOrDefault('default.networks', 'role', SrcAddr, '') AS SrcNetRole, dictGetOrDefault('default.networks', 'role', DstAddr, '') AS DstNetRole, dictGetOrDefault('default.networks', 'site', SrcAddr, '') AS SrcNetSite, dictGetOrDefault('default.networks', 'site', DstAddr, '') AS DstNetSite, dictGetOrDefault('default.networks', 'region', SrcAddr, '') AS SrcNetRegion, dictGetOrDefault('default.networks', 'region', DstAddr, '') AS DstNetRegion, dictGetOrDefault('default.networks', 'tenant', SrcAddr, '') AS SrcNetTenant, dictGetOrDefault('default.networks', 'tenant', DstAddr, '') AS DstNetTenant FROM default.flows_2_raw"
|
||||
"flows_3_raw","CREATE TABLE default.flows_3_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessage', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
|
||||
"flows_3_raw_consumer","CREATE MATERIALIZED VIEW default.flows_3_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName, dictGetOrDefault('default.networks', 'role', SrcAddr, '') AS SrcNetRole, dictGetOrDefault('default.networks', 'role', DstAddr, '') AS DstNetRole, dictGetOrDefault('default.networks', 'site', SrcAddr, '') AS SrcNetSite, dictGetOrDefault('default.networks', 'site', DstAddr, '') AS DstNetSite, dictGetOrDefault('default.networks', 'region', SrcAddr, '') AS SrcNetRegion, dictGetOrDefault('default.networks', 'region', DstAddr, '') AS DstNetRegion, dictGetOrDefault('default.networks', 'tenant', SrcAddr, '') AS SrcNetTenant, dictGetOrDefault('default.networks', 'tenant', DstAddr, '') AS DstNetTenant FROM default.flows_3_raw"
|
||||
"flows_5m0s","CREATE TABLE default.flows_5m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(155520))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant) TTL TimeReceived + toIntervalSecond(7776000) SETTINGS index_granularity = 8192"
|
||||
"flows_5m0s_consumer","CREATE MATERIALIZED VIEW default.flows_5m0s_consumer TO default.flows_5m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(300)) AS TimeReceived FROM default.flows"
|
||||
|
||||
|
@@ -7,7 +7,7 @@
|
||||
"flows_1h0m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1h0m0s_consumer TO default.flows_1h0m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(3600)) AS TimeReceived FROM default.flows"
|
||||
"flows_1m0s","CREATE TABLE default.flows_1m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(12096))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant) TTL TimeReceived + toIntervalSecond(604800) SETTINGS index_granularity = 8192"
|
||||
"flows_1m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1m0s_consumer TO default.flows_1m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(60)) AS TimeReceived FROM default.flows"
|
||||
"flows_2_raw","CREATE TABLE default.flows_2_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessagev2', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
"flows_2_raw_consumer","CREATE MATERIALIZED VIEW default.flows_2_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName, dictGetOrDefault('default.networks', 'role', SrcAddr, '') AS SrcNetRole, dictGetOrDefault('default.networks', 'role', DstAddr, '') AS DstNetRole, dictGetOrDefault('default.networks', 'site', SrcAddr, '') AS SrcNetSite, dictGetOrDefault('default.networks', 'site', DstAddr, '') AS DstNetSite, dictGetOrDefault('default.networks', 'region', SrcAddr, '') AS SrcNetRegion, dictGetOrDefault('default.networks', 'region', DstAddr, '') AS DstNetRegion, dictGetOrDefault('default.networks', 'tenant', SrcAddr, '') AS SrcNetTenant, dictGetOrDefault('default.networks', 'tenant', DstAddr, '') AS DstNetTenant FROM default.flows_2_raw"
"flows_3_raw","CREATE TABLE default.flows_3_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessagev2', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
"flows_3_raw_consumer","CREATE MATERIALIZED VIEW default.flows_3_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName, dictGetOrDefault('default.networks', 'role', SrcAddr, '') AS SrcNetRole, dictGetOrDefault('default.networks', 'role', DstAddr, '') AS DstNetRole, dictGetOrDefault('default.networks', 'site', SrcAddr, '') AS SrcNetSite, dictGetOrDefault('default.networks', 'site', DstAddr, '') AS DstNetSite, dictGetOrDefault('default.networks', 'region', SrcAddr, '') AS SrcNetRegion, dictGetOrDefault('default.networks', 'region', DstAddr, '') AS DstNetRegion, dictGetOrDefault('default.networks', 'tenant', SrcAddr, '') AS SrcNetTenant, dictGetOrDefault('default.networks', 'tenant', DstAddr, '') AS DstNetTenant FROM default.flows_3_raw"
"flows_5m0s","CREATE TABLE default.flows_5m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(155520))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant) TTL TimeReceived + toIntervalSecond(7776000) SETTINGS index_granularity = 8192"
"flows_5m0s_consumer","CREATE MATERIALIZED VIEW default.flows_5m0s_consumer TO default.flows_5m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(300)) AS TimeReceived FROM default.flows"
@@ -7,7 +7,7 @@
"flows_1h0m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1h0m0s_consumer TO default.flows_1h0m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(3600)) AS TimeReceived FROM default.flows"
"flows_1m0s","CREATE TABLE default.flows_1m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(12096))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry) TTL TimeReceived + toIntervalSecond(604800) SETTINGS index_granularity = 8192"
"flows_1m0s_consumer","CREATE MATERIALIZED VIEW default.flows_1m0s_consumer TO default.flows_1m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(60)) AS TimeReceived FROM default.flows"
"flows_2_raw","CREATE TABLE default.flows_2_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessagev2', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
"flows_2_raw_consumer","CREATE MATERIALIZED VIEW default.flows_2_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName, dictGetOrDefault('default.networks', 'role', SrcAddr, '') AS SrcNetRole, dictGetOrDefault('default.networks', 'role', DstAddr, '') AS DstNetRole, dictGetOrDefault('default.networks', 'site', SrcAddr, '') AS SrcNetSite, dictGetOrDefault('default.networks', 'site', DstAddr, '') AS DstNetSite, dictGetOrDefault('default.networks', 'region', SrcAddr, '') AS SrcNetRegion, dictGetOrDefault('default.networks', 'region', DstAddr, '') AS DstNetRegion, dictGetOrDefault('default.networks', 'tenant', SrcAddr, '') AS SrcNetTenant, dictGetOrDefault('default.networks', 'tenant', DstAddr, '') AS DstNetTenant FROM default.flows_2_raw"
"flows_3_raw","CREATE TABLE default.flows_3_raw (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) ENGINE = Kafka SETTINGS kafka_broker_list = '127.0.0.1:9092', kafka_topic_list = 'flows-v2', kafka_group_name = 'clickhouse', kafka_format = 'Protobuf', kafka_schema = 'flow-2.proto:FlowMessagev2', kafka_num_consumers = 1, kafka_thread_per_consumer = 1"
"flows_3_raw_consumer","CREATE MATERIALIZED VIEW default.flows_3_raw_consumer TO default.flows (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAddr` IPv6, `DstAddr` IPv6, `SrcAS` UInt32, `DstAS` UInt32, `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `SrcPort` UInt32, `DstPort` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32, `SrcNetName` String, `DstNetName` String, `SrcNetRole` String, `DstNetRole` String, `SrcNetSite` String, `DstNetSite` String, `SrcNetRegion` String, `DstNetRegion` String, `SrcNetTenant` String, `DstNetTenant` String) AS SELECT *, dictGetOrDefault('default.networks', 'name', SrcAddr, '') AS SrcNetName, dictGetOrDefault('default.networks', 'name', DstAddr, '') AS DstNetName, dictGetOrDefault('default.networks', 'role', SrcAddr, '') AS SrcNetRole, dictGetOrDefault('default.networks', 'role', DstAddr, '') AS DstNetRole, dictGetOrDefault('default.networks', 'site', SrcAddr, '') AS SrcNetSite, dictGetOrDefault('default.networks', 'site', DstAddr, '') AS DstNetSite, dictGetOrDefault('default.networks', 'region', SrcAddr, '') AS SrcNetRegion, dictGetOrDefault('default.networks', 'region', DstAddr, '') AS DstNetRegion, dictGetOrDefault('default.networks', 'tenant', SrcAddr, '') AS SrcNetTenant, dictGetOrDefault('default.networks', 'tenant', DstAddr, '') AS DstNetTenant FROM default.flows_3_raw"
"flows_5m0s","CREATE TABLE default.flows_5m0s (`TimeReceived` DateTime CODEC(DoubleDelta, LZ4), `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `PacketSize` UInt64 ALIAS intDiv(Bytes, Packets), `PacketSizeBucket` LowCardinality(String) ALIAS multiIf(PacketSize < 64, '0-63', PacketSize < 128, '64-127', PacketSize < 256, '128-255', PacketSize < 512, '256-511', PacketSize < 768, '512-767', PacketSize < 1024, '768-1023', PacketSize < 1280, '1024-1279', PacketSize < 1501, '1280-1500', PacketSize < 2048, '1501-2047', PacketSize < 3072, '2048-3071', PacketSize < 4096, '3072-4095', PacketSize < 8192, '4096-8191', PacketSize < 10240, '8192-10239', PacketSize < 16384, '10240-16383', PacketSize < 32768, '16384-32767', PacketSize < 65536, '32768-65535', '65536-Inf'), `ForwardingStatus` UInt32) ENGINE = SummingMergeTree((Bytes, Packets)) PARTITION BY toYYYYMMDDhhmmss(toStartOfInterval(TimeReceived, toIntervalSecond(155520))) PRIMARY KEY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate) ORDER BY (TimeReceived, ExporterAddress, EType, Proto, InIfName, SrcAS, ForwardingStatus, OutIfName, DstAS, SamplingRate, SrcNetName, DstNetName, SrcNetRole, DstNetRole, SrcNetSite, DstNetSite, SrcNetRegion, DstNetRegion, SrcNetTenant, DstNetTenant, SrcCountry, DstCountry) TTL TimeReceived + toIntervalSecond(7776000) SETTINGS index_granularity = 8192"
"flows_5m0s_consumer","CREATE MATERIALIZED VIEW default.flows_5m0s_consumer TO default.flows_5m0s (`TimeReceived` DateTime, `SamplingRate` UInt64, `ExporterAddress` LowCardinality(IPv6), `ExporterName` LowCardinality(String), `ExporterGroup` LowCardinality(String), `ExporterRole` LowCardinality(String), `ExporterSite` LowCardinality(String), `ExporterRegion` LowCardinality(String), `ExporterTenant` LowCardinality(String), `SrcAS` UInt32, `DstAS` UInt32, `SrcNetName` LowCardinality(String), `DstNetName` LowCardinality(String), `SrcNetRole` LowCardinality(String), `DstNetRole` LowCardinality(String), `SrcNetSite` LowCardinality(String), `DstNetSite` LowCardinality(String), `SrcNetRegion` LowCardinality(String), `DstNetRegion` LowCardinality(String), `SrcNetTenant` LowCardinality(String), `DstNetTenant` LowCardinality(String), `SrcCountry` FixedString(2), `DstCountry` FixedString(2), `InIfName` LowCardinality(String), `OutIfName` LowCardinality(String), `InIfDescription` String, `OutIfDescription` String, `InIfSpeed` UInt32, `OutIfSpeed` UInt32, `InIfConnectivity` LowCardinality(String), `OutIfConnectivity` LowCardinality(String), `InIfProvider` LowCardinality(String), `OutIfProvider` LowCardinality(String), `InIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `OutIfBoundary` Enum8('undefined' = 0, 'external' = 1, 'internal' = 2), `EType` UInt32, `Proto` UInt32, `Bytes` UInt64, `Packets` UInt64, `ForwardingStatus` UInt32) AS SELECT * EXCEPT (SrcAddr, DstAddr, SrcPort, DstPort) REPLACE toStartOfInterval(TimeReceived, toIntervalSecond(300)) AS TimeReceived FROM default.flows"