inlet/metadata: introduce metadata component with pluggable providers

SNMP is the first (and default) provider. Further commits should add:

- [ ] SNMP coalescing (or at the metadata level?)
- [ ] Configuration conversion
- [ ] At least one other provider (static one?)
This commit is contained in:
Vincent Bernat
2023-05-27 21:34:39 +02:00
parent 2980eb491c
commit 7e3ca930ad
26 changed files with 1162 additions and 1063 deletions

View File

@@ -17,7 +17,8 @@ import (
"akvorado/inlet/flow"
"akvorado/inlet/geoip"
"akvorado/inlet/kafka"
"akvorado/inlet/snmp"
"akvorado/inlet/metadata"
"akvorado/inlet/metadata/provider/snmp"
)
// InletConfiguration represents the configuration file for the inlet command.
@@ -25,7 +26,7 @@ type InletConfiguration struct {
Reporting reporter.Configuration
HTTP httpserver.Configuration
Flow flow.Configuration
SNMP snmp.Configuration
Metadata metadata.Configuration
BMP bmp.Configuration
GeoIP geoip.Configuration
Kafka kafka.Configuration
@@ -39,13 +40,14 @@ func (c *InletConfiguration) Reset() {
HTTP: httpserver.DefaultConfiguration(),
Reporting: reporter.DefaultConfiguration(),
Flow: flow.DefaultConfiguration(),
SNMP: snmp.DefaultConfiguration(),
Metadata: metadata.DefaultConfiguration(),
BMP: bmp.DefaultConfiguration(),
GeoIP: geoip.DefaultConfiguration(),
Kafka: kafka.DefaultConfiguration(),
Core: core.DefaultConfiguration(),
Schema: schema.DefaultConfiguration(),
}
c.Metadata.Provider.Config = snmp.DefaultConfiguration()
}
type inletOptions struct {
@@ -110,11 +112,11 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
if err != nil {
return fmt.Errorf("unable to initialize flow component: %w", err)
}
snmpComponent, err := snmp.New(r, config.SNMP, snmp.Dependencies{
metadataComponent, err := metadata.New(r, config.Metadata, metadata.Dependencies{
Daemon: daemonComponent,
})
if err != nil {
return fmt.Errorf("unable to initialize SNMP component: %w", err)
return fmt.Errorf("unable to initialize METADATA component: %w", err)
}
bmpComponent, err := bmp.New(r, config.BMP, bmp.Dependencies{
Daemon: daemonComponent,
@@ -138,7 +140,7 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
coreComponent, err := core.New(r, config.Core, core.Dependencies{
Daemon: daemonComponent,
Flow: flowComponent,
SNMP: snmpComponent,
Metadata: metadataComponent,
BMP: bmpComponent,
GeoIP: geoipComponent,
Kafka: kafkaComponent,
@@ -161,7 +163,7 @@ func inletStart(r *reporter.Reporter, config InletConfiguration, checkOnly bool)
// Start all the components.
components := []interface{}{
httpComponent,
snmpComponent,
metadataComponent,
bmpComponent,
geoipComponent,
kafkaComponent,

View File

@@ -1,14 +1,8 @@
---
paths:
inlet.0.snmp:
cacheduration: 30m0s
cacherefresh: 1h0m0s
cachecheckinterval: 2m0s
cachepersistfile: ""
inlet.0.metadata.provider.config:
pollerretries: 1
pollertimeout: 1s
pollercoalesce: 10
workers: 1
communities:
::/0: yopla
203.0.113.0/24: yopli

View File

@@ -1,6 +1,8 @@
---
inlet:
snmp:
metadata:
provider:
type: snmp
default-community: yopla
communities:
203.0.113.0/24: yopli

View File

@@ -7,8 +7,10 @@ geoip:
# Check docker-compose.yml for details.
asn-database: /usr/share/GeoIP/GeoLite2-ASN.mmdb
geo-database: /usr/share/GeoIP/GeoLite2-Country.mmdb
snmp:
metadata:
workers: 10
provider:
type: snmp
communities:
::/0: public
flow:

View File

@@ -170,7 +170,7 @@ The topic name is suffixed by a hash of the schema.
### Core
The core component queries the `geoip` and the `snmp` component to
The core component queries the `geoip` and the `metadata` component to
enrich the flows with additional information. It also classifies
exporters and interfaces into groups with a set of classification
rules.
@@ -309,12 +309,11 @@ is provided, the component is inactive. It accepts the following keys:
If the files are updated while *Akvorado* is running, they are
automatically refreshed.
### SNMP
### Metadata
Flows only include interface indexes. To associate them with an
interface name and description, SNMP is used to poll the exporter
sending each flows. A cache is maintained to avoid polling
continuously the exporters. The following keys are accepted:
Flows only include interface indexes. To associate them with an interface name
and description, metadata are polled. A cache is maintained. There are several
providers available to poll metadata. The following keys are accepted:
- `cache-duration` tells how much time to keep data in the cache
- `cache-refresh` tells how much time to wait before updating an entry
@@ -323,6 +322,19 @@ continuously the exporters. The following keys are accepted:
about to expire or need an update
- `cache-persist-file` tells where to store cached data on shutdown and
read them back on startup
- `poller-retries` is the number of retries on unsuccessful SNMP requests.
- `poller-timeout` tells how much time should the poller wait for an answer.
- `workers` tell how many workers to spawn to handle SNMP polling.
- `provider` defines the provider configuration
As flows missing interface information are discarded, persisting the
cache is useful to quickly be able to handle incoming flows. By
default, no persistent cache is configured.
The `provider` key contains the configuration of the provider. The provider type
is defined by the `type` key. Currently, only `snmp` is accepted. It accepts the
following configuration keys:
- `communities` is a map from subnets to the SNMPv2 community to use
for exporters in the provided subnet. Use `::/0` to set the default
value. Alternatively, it also accepts a string to use for all
@@ -341,13 +353,17 @@ continuously the exporters. The following keys are accepted:
match, the exporter IP is used)
- `ports` is a map from subnets to the SNMP port to use to poll
agents in the provided subnet.
- `poller-retries` is the number of retries on unsuccessful SNMP requests.
- `poller-timeout` tells how much time should the poller wait for an answer.
- `workers` tells how many workers to spawn to handle SNMP polling.
As flows missing interface information are discarded, persisting the
cache is useful to quickly be able to handle incoming flows. By
default, no persistent cache is configured.
For example:
```yaml
metadata:
workers: 10
provider:
type: snmp
communities:
::/0: private
```
*Akvorado* will use SNMPv3 if there is a match for the
`security-parameters` configuration option. Otherwise, it will use

View File

@@ -84,7 +84,7 @@ exported. In this case, the logs contain information such as:
- `exporter:172.19.162.244 poller breaker open`
- `exporter:172.19.162.244 unable to GET`
The `akvorado_inlet_snmp_poller_error_requests` metric would also
The `akvorado_inlet_metadata_provider_snmp_error_requests` metric would also
increase for the affected exporter. If your routers are in
`172.16.0.0/12` and you are using Docker, Docker subnets may overlap
with your routers'. To avoid this, you can put that in
@@ -211,7 +211,7 @@ There are several ways to fix that:
To process a flow, the inlet service needs the interface name and
description. This information is provided by the `metadata` submodule.
When all workers of the SNMP pollers are busy, new requests are
dropped. In this case, the `akvorado_inlet_snmp_poller_busy_count`
dropped. In this case, the `akvorado_inlet_metadata_provider_busy_count`
counter is increased. To mitigate this issue, the inlet service tries
to skip exporters with too many errors to avoid blocking SNMP requests
for other exporters. However, ensuring the exporters accept to answer

View File

@@ -27,7 +27,7 @@ func (c *Component) enrichFlow(exporterIP netip.Addr, exporterStr string, flow *
t := time.Now() // only call it once
if flow.InIf != 0 {
exporterName, iface, ok := c.d.SNMP.Lookup(t, exporterIP, uint(flow.InIf))
exporterName, iface, ok := c.d.Metadata.Lookup(t, exporterIP, uint(flow.InIf))
if !ok {
c.metrics.flowsErrors.WithLabelValues(exporterStr, "SNMP cache miss").Inc()
skip = true
@@ -42,7 +42,7 @@ func (c *Component) enrichFlow(exporterIP netip.Addr, exporterStr string, flow *
}
if flow.OutIf != 0 {
exporterName, iface, ok := c.d.SNMP.Lookup(t, exporterIP, uint(flow.OutIf))
exporterName, iface, ok := c.d.Metadata.Lookup(t, exporterIP, uint(flow.OutIf))
if !ok {
// Only register a cache miss if we don't have one.
// TODO: maybe we could do one SNMP query for both interfaces.

View File

@@ -22,7 +22,7 @@ import (
"akvorado/inlet/flow"
"akvorado/inlet/geoip"
"akvorado/inlet/kafka"
"akvorado/inlet/snmp"
"akvorado/inlet/metadata"
)
func TestEnrich(t *testing.T) {
@@ -497,8 +497,8 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
// Prepare all components.
daemonComponent := daemon.NewMock(t)
snmpComponent := snmp.NewMock(t, r, snmp.DefaultConfiguration(),
snmp.Dependencies{Daemon: daemonComponent})
metadataComponent := metadata.NewMock(t, r, metadata.DefaultConfiguration(),
metadata.Dependencies{Daemon: daemonComponent})
flowComponent := flow.NewMock(t, r, flow.DefaultConfiguration())
geoipComponent := geoip.NewMock(t, r)
kafkaComponent, kafkaProducer := kafka.NewMock(t, r, kafka.DefaultConfiguration())
@@ -520,7 +520,7 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
c, err := New(r, configuration, Dependencies{
Daemon: daemonComponent,
Flow: flowComponent,
SNMP: snmpComponent,
Metadata: metadataComponent,
GeoIP: geoipComponent,
Kafka: kafkaComponent,
HTTP: httpComponent,

View File

@@ -20,7 +20,7 @@ import (
"akvorado/inlet/flow"
"akvorado/inlet/geoip"
"akvorado/inlet/kafka"
"akvorado/inlet/snmp"
"akvorado/inlet/metadata"
)
// Component represents the HTTP compomenent.
@@ -46,7 +46,7 @@ type Component struct {
type Dependencies struct {
Daemon daemon.Component
Flow *flow.Component
SNMP *snmp.Component
Metadata *metadata.Component
BMP *bmp.Component
GeoIP *geoip.Component
Kafka *kafka.Component

View File

@@ -26,7 +26,7 @@ import (
"akvorado/inlet/flow"
"akvorado/inlet/geoip"
"akvorado/inlet/kafka"
"akvorado/inlet/snmp"
"akvorado/inlet/metadata"
)
func TestCore(t *testing.T) {
@@ -34,8 +34,8 @@ func TestCore(t *testing.T) {
// Prepare all components.
daemonComponent := daemon.NewMock(t)
snmpComponent := snmp.NewMock(t, r, snmp.DefaultConfiguration(),
snmp.Dependencies{Daemon: daemonComponent})
metadataComponent := metadata.NewMock(t, r, metadata.DefaultConfiguration(),
metadata.Dependencies{Daemon: daemonComponent})
flowComponent := flow.NewMock(t, r, flow.DefaultConfiguration())
geoipComponent := geoip.NewMock(t, r)
kafkaComponent, kafkaProducer := kafka.NewMock(t, r, kafka.DefaultConfiguration())
@@ -48,7 +48,7 @@ func TestCore(t *testing.T) {
c, err := New(r, DefaultConfiguration(), Dependencies{
Daemon: daemonComponent,
Flow: flowComponent,
SNMP: snmpComponent,
Metadata: metadataComponent,
GeoIP: geoipComponent,
Kafka: kafkaComponent,
HTTP: httpComponent,

View File

@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package snmp
package metadata
import (
"net/netip"
@@ -9,12 +9,16 @@ import (
"akvorado/common/helpers/cache"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
// snmpCache represents the SNMP cache.
type snmpCache struct {
// Interface describes an interface.
type Interface = provider.Interface
// metadataCache represents the metadata cache.
type metadataCache struct {
r *reporter.Reporter
cache *cache.Cache[key, value]
cache *cache.Cache[provider.Query, provider.Answer]
metrics struct {
cacheHit reporter.Counter
@@ -24,26 +28,10 @@ type snmpCache struct {
}
}
// Interface contains the information about an interface.
type Interface struct {
Name string
Description string
Speed uint
}
type key struct {
IP netip.Addr
Index uint
}
type value struct {
ExporterName string
Interface
}
func newSNMPCache(r *reporter.Reporter) *snmpCache {
sc := &snmpCache{
func newMetadataCache(r *reporter.Reporter) *metadataCache {
sc := &metadataCache{
r: r,
cache: cache.New[key, value](),
cache: cache.New[provider.Query, provider.Answer](),
}
sc.metrics.cacheHit = r.Counter(
reporter.CounterOpts{
@@ -72,26 +60,23 @@ func newSNMPCache(r *reporter.Reporter) *snmpCache {
// Lookup will perform a lookup of the cache. It returns the exporter
// name as well as the requested interface.
func (sc *snmpCache) Lookup(t time.Time, ip netip.Addr, index uint) (string, Interface, bool) {
result, ok := sc.cache.Get(t, key{ip, index})
func (sc *metadataCache) Lookup(t time.Time, query provider.Query) (provider.Answer, bool) {
result, ok := sc.cache.Get(t, query)
if !ok {
sc.metrics.cacheMiss.Inc()
return "", Interface{}, false
return provider.Answer{}, false
}
sc.metrics.cacheHit.Inc()
return result.ExporterName, result.Interface, true
return result, true
}
// Put a new entry in the cache.
func (sc *snmpCache) Put(t time.Time, ip netip.Addr, exporterName string, index uint, iface Interface) {
sc.cache.Put(t, key{ip, index}, value{
ExporterName: exporterName,
Interface: iface,
})
func (sc *metadataCache) Put(t time.Time, query provider.Query, answer provider.Answer) {
sc.cache.Put(t, query, answer)
}
// Expire expire entries whose last access is before the provided time
func (sc *snmpCache) Expire(before time.Time) int {
func (sc *metadataCache) Expire(before time.Time) int {
expired := sc.cache.DeleteLastAccessedBefore(before)
sc.metrics.cacheExpired.Add(float64(expired))
return expired
@@ -99,25 +84,25 @@ func (sc *snmpCache) Expire(before time.Time) int {
// NeedUpdates returns a map of interface entries that would need to
// be updated. It relies on last update.
func (sc *snmpCache) NeedUpdates(before time.Time) map[netip.Addr]map[uint]Interface {
func (sc *metadataCache) NeedUpdates(before time.Time) map[netip.Addr]map[uint]Interface {
result := map[netip.Addr]map[uint]Interface{}
for k, v := range sc.cache.ItemsLastUpdatedBefore(before) {
interfaces, ok := result[k.IP]
interfaces, ok := result[k.ExporterIP]
if !ok {
interfaces = map[uint]Interface{}
result[k.IP] = interfaces
result[k.ExporterIP] = interfaces
}
interfaces[k.Index] = v.Interface
interfaces[k.IfIndex] = v.Interface
}
return result
}
// Save stores the cache to the provided location.
func (sc *snmpCache) Save(cacheFile string) error {
func (sc *metadataCache) Save(cacheFile string) error {
return sc.cache.Save(cacheFile)
}
// Load loads the cache from the provided location.
func (sc *snmpCache) Load(cacheFile string) error {
func (sc *metadataCache) Load(cacheFile string) error {
return sc.cache.Load(cacheFile)
}

View File

@@ -1,7 +1,7 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package snmp
package metadata
import (
"errors"
@@ -18,27 +18,29 @@ import (
"akvorado/common/helpers"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
func setupTestCache(t *testing.T) (*reporter.Reporter, *snmpCache) {
func setupTestCache(t *testing.T) (*reporter.Reporter, *metadataCache) {
t.Helper()
r := reporter.NewMock(t)
sc := newSNMPCache(r)
sc := newMetadataCache(r)
return r, sc
}
type answer struct {
ExporterName string
Interface Interface
NOk bool
}
func expectCacheLookup(t *testing.T, sc *snmpCache, exporterIP string, ifIndex uint, expected answer) {
func expectCacheLookup(t *testing.T, sc *metadataCache, exporterIP string, ifIndex uint, expected provider.Answer) {
t.Helper()
ip := netip.MustParseAddr(exporterIP)
ip = netip.AddrFrom16(ip.As16())
gotExporterName, gotInterface, ok := sc.Lookup(time.Time{}, ip, ifIndex)
got := answer{gotExporterName, gotInterface, !ok}
got, ok := sc.Lookup(time.Time{}, provider.Query{
ExporterIP: ip,
IfIndex: ifIndex,
})
if ok && (got == provider.Answer{}) {
t.Error("Lookup() returned an empty result")
} else if !ok && (got != provider.Answer{}) {
t.Error("Lookup() returned a non-empty result")
}
if diff := helpers.Diff(got, expected); diff != "" {
t.Errorf("Lookup() (-got, +want):\n%s", diff)
}
@@ -46,9 +48,9 @@ func expectCacheLookup(t *testing.T, sc *snmpCache, exporterIP string, ifIndex u
func TestGetEmpty(t *testing.T) {
r, sc := setupTestCache(t)
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{})
gotMetrics := r.GetMetrics("akvorado_inlet_snmp_cache_")
gotMetrics := r.GetMetrics("akvorado_inlet_metadata_cache_")
expectedMetrics := map[string]string{
`expired`: "0",
`hit`: "0",
@@ -62,15 +64,23 @@ func TestGetEmpty(t *testing.T) {
func TestSimpleLookup(t *testing.T) {
r, sc := setupTestCache(t)
sc.Put(time.Now(), netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 676, Interface{Name: "Gi0/0/0/1", Description: "Transit", Speed: 1000})
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{
sc.Put(time.Now(),
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit", Speed: 1000},
})
expectCacheLookup(t, sc, "127.0.0.1", 787, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.2", 676, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit", Speed: 1000},
})
expectCacheLookup(t, sc, "127.0.0.1", 787, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.2", 676, provider.Answer{})
gotMetrics := r.GetMetrics("akvorado_inlet_snmp_cache_")
gotMetrics := r.GetMetrics("akvorado_inlet_metadata_cache_")
expectedMetrics := map[string]string{
`expired`: "0",
`hit`: "1",
@@ -85,55 +95,87 @@ func TestSimpleLookup(t *testing.T) {
func TestExpire(t *testing.T) {
r, sc := setupTestCache(t)
now := time.Now()
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 676, Interface{Name: "Gi0/0/0/1", Description: "Transit"})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost2", 678, Interface{Name: "Gi0/0/0/2", Description: "Peering"})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.2"), "localhost3", 678, Interface{Name: "Gi0/0/0/1", Description: "IX"})
now = now.Add(10 * time.Minute)
sc.Expire(now.Add(-time.Hour))
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
expectCacheLookup(t, sc, "127.0.0.1", 678, answer{
now = now.Add(10 * time.Minute)
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/2", Description: "Peering"},
})
expectCacheLookup(t, sc, "127.0.0.2", 678, answer{
now = now.Add(10 * time.Minute)
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.2"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost3",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX"},
})
now = now.Add(10 * time.Minute)
sc.Expire(now.Add(-time.Hour))
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
expectCacheLookup(t, sc, "127.0.0.1", 678, provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/2", Description: "Peering"},
})
expectCacheLookup(t, sc, "127.0.0.2", 678, provider.Answer{
ExporterName: "localhost3",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX"},
})
sc.Expire(now.Add(-29 * time.Minute))
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.1", 678, answer{
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.1", 678, provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/2", Description: "Peering"},
})
expectCacheLookup(t, sc, "127.0.0.2", 678, answer{
expectCacheLookup(t, sc, "127.0.0.2", 678, provider.Answer{
ExporterName: "localhost3",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX"},
})
sc.Expire(now.Add(-19 * time.Minute))
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.1", 678, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.2", 678, answer{
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.1", 678, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.2", 678, provider.Answer{
ExporterName: "localhost3",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX"},
})
sc.Expire(now.Add(-9 * time.Minute))
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.1", 678, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.2", 678, answer{NOk: true})
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 676, Interface{Name: "Gi0/0/0/1", Description: "Transit"})
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.1", 678, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.2", 678, provider.Answer{})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
now = now.Add(10 * time.Minute)
sc.Expire(now.Add(-19 * time.Minute))
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
gotMetrics := r.GetMetrics("akvorado_inlet_snmp_cache_")
gotMetrics := r.GetMetrics("akvorado_inlet_metadata_cache_")
expectedMetrics := map[string]string{
`expired`: "3",
`hit`: "7",
@@ -148,24 +190,51 @@ func TestExpire(t *testing.T) {
func TestExpireRefresh(t *testing.T) {
_, sc := setupTestCache(t)
now := time.Now()
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 676, Interface{Name: "Gi0/0/0/1", Description: "Transit"})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 678, Interface{Name: "Gi0/0/0/2", Description: "Peering"})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.2"), "localhost2", 678, Interface{Name: "Gi0/0/0/1", Description: "IX"})
now = now.Add(10 * time.Minute)
// Refresh first entry
sc.Lookup(now, netip.MustParseAddr("::ffff:127.0.0.1"), 676)
now = now.Add(10 * time.Minute)
sc.Expire(now.Add(-29 * time.Minute))
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
expectCacheLookup(t, sc, "127.0.0.1", 678, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.2", 678, answer{
now = now.Add(10 * time.Minute)
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/2", Description: "Peering"},
})
now = now.Add(10 * time.Minute)
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.2"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX"},
})
now = now.Add(10 * time.Minute)
// Refresh first entry
sc.Lookup(now, provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
})
now = now.Add(10 * time.Minute)
sc.Expire(now.Add(-29 * time.Minute))
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
expectCacheLookup(t, sc, "127.0.0.1", 678, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.2", 678, provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX"},
})
@@ -174,14 +243,46 @@ func TestExpireRefresh(t *testing.T) {
func TestNeedUpdates(t *testing.T) {
_, sc := setupTestCache(t)
now := time.Now()
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 676, Interface{Name: "Gi0/0/0/1", Description: "Transit"})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 678, Interface{Name: "Gi0/0/0/2", Description: "Peering"})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/2", Description: "Peering"},
})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.2"), "localhost2", 678, Interface{Name: "Gi0/0/0/1", Description: "IX"})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.2"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX"},
})
now = now.Add(10 * time.Minute)
// Refresh
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost1", 676, Interface{Name: "Gi0/0/0/1", Description: "Transit"})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
},
provider.Answer{
ExporterName: "localhost1",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
now = now.Add(10 * time.Minute)
cases := []struct {
@@ -233,11 +334,35 @@ func TestLoadNotExist(t *testing.T) {
func TestSaveLoad(t *testing.T) {
_, sc := setupTestCache(t)
now := time.Now()
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 676, Interface{Name: "Gi0/0/0/1", Description: "Transit"})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 676,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.1"), "localhost", 678, Interface{Name: "Gi0/0/0/2", Description: "Peering"})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.1"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/2", Description: "Peering"},
})
now = now.Add(10 * time.Minute)
sc.Put(now, netip.MustParseAddr("::ffff:127.0.0.2"), "localhost2", 678, Interface{Name: "Gi0/0/0/1", Description: "IX", Speed: 1000})
sc.Put(now,
provider.Query{
ExporterIP: netip.MustParseAddr("::ffff:127.0.0.2"),
IfIndex: 678,
},
provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX", Speed: 1000},
})
target := filepath.Join(t.TempDir(), "cache")
if err := sc.Save(target); err != nil {
@@ -251,12 +376,12 @@ func TestSaveLoad(t *testing.T) {
}
sc.Expire(now.Add(-29 * time.Minute))
expectCacheLookup(t, sc, "127.0.0.1", 676, answer{NOk: true})
expectCacheLookup(t, sc, "127.0.0.1", 678, answer{
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{})
expectCacheLookup(t, sc, "127.0.0.1", 678, provider.Answer{
ExporterName: "localhost",
Interface: Interface{Name: "Gi0/0/0/2", Description: "Peering"},
})
expectCacheLookup(t, sc, "127.0.0.2", 678, answer{
expectCacheLookup(t, sc, "127.0.0.2", 678, provider.Answer{
ExporterName: "localhost2",
Interface: Interface{Name: "Gi0/0/0/1", Description: "IX", Speed: 1000},
})
@@ -294,9 +419,13 @@ func TestConcurrentOperations(t *testing.T) {
nowLock.RUnlock()
ip := rand.Intn(10)
iface := rand.Intn(100)
sc.Put(now, netip.MustParseAddr(fmt.Sprintf("::ffff:127.0.0.%d", ip)),
fmt.Sprintf("localhost%d", ip),
uint(iface), Interface{Name: "Gi0/0/0/1", Description: "Transit"})
sc.Put(now, provider.Query{
ExporterIP: netip.MustParseAddr(fmt.Sprintf("::ffff:127.0.0.%d", ip)),
IfIndex: uint(iface),
}, provider.Answer{
ExporterName: fmt.Sprintf("localhost%d", ip),
Interface: Interface{Name: "Gi0/0/0/1", Description: "Transit"},
})
select {
case <-done:
return
@@ -316,8 +445,10 @@ func TestConcurrentOperations(t *testing.T) {
nowLock.RUnlock()
ip := rand.Intn(10)
iface := rand.Intn(100)
sc.Lookup(now, netip.MustParseAddr(fmt.Sprintf("::ffff:127.0.0.%d", ip)),
uint(iface))
sc.Lookup(now, provider.Query{
ExporterIP: netip.MustParseAddr(fmt.Sprintf("::ffff:127.0.0.%d", ip)),
IfIndex: uint(iface),
})
atomic.AddInt64(&lookups, 1)
select {
case <-done:
@@ -335,7 +466,7 @@ func TestConcurrentOperations(t *testing.T) {
close(done)
wg.Wait()
gotMetrics := r.GetMetrics("akvorado_inlet_snmp_cache_")
gotMetrics := r.GetMetrics("akvorado_inlet_metadata_cache_")
hits, _ := strconv.Atoi(gotMetrics["hit"])
misses, _ := strconv.Atoi(gotMetrics["miss"])
if int64(hits+misses) != atomic.LoadInt64(&lookups) {

58
inlet/metadata/config.go Normal file
View File

@@ -0,0 +1,58 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package metadata
import (
"time"
"akvorado/common/helpers"
"akvorado/inlet/metadata/provider"
"akvorado/inlet/metadata/provider/snmp"
)
// Configuration describes the configuration for the metadata component.
type Configuration struct {
// CacheDuration defines how long to keep cached entries without access
CacheDuration time.Duration `validate:"min=1m"`
// CacheRefresh defines how soon to refresh an existing cached entry
CacheRefresh time.Duration `validate:"eq=0|min=1m,eq=0|gtefield=CacheDuration"`
// CacheCheckInterval defines the interval to check for expiration/refresh
CacheCheckInterval time.Duration `validate:"ltefield=CacheRefresh,min=1s"`
// CachePersistFile defines a file to store the cache on shutdown and to
// restore it from on startup. Empty means no persistence.
CachePersistFile string
// Provider defines the configuration of the provider to use
Provider ProviderConfiguration `validate:"dive"`
// Workers defines the number of workers used to poll metadata
Workers int `validate:"min=1"`
}
// DefaultConfiguration represents the default configuration for the metadata
// component. Note it does not select a default provider: Provider.Config is
// left unset and must be provided by the caller (the inlet command defaults
// it to the SNMP provider configuration).
func DefaultConfiguration() Configuration {
return Configuration{
CacheDuration: 30 * time.Minute,
CacheRefresh: time.Hour,
CacheCheckInterval: 2 * time.Minute,
CachePersistFile: "",
Workers: 1,
}
}
// ProviderConfiguration represents the configuration for a metadata provider.
type ProviderConfiguration struct {
// Type is the type of the provider (one of the keys of the providers map, eg "snmp")
Type string
// Config is the actual configuration for the selected provider.
Config provider.Configuration
}
// providers maps a provider type name to the function returning its default
// configuration. New providers should be registered here.
var providers = map[string](func() provider.Configuration){
"snmp": snmp.DefaultConfiguration,
}

func init() {
// Register a mapstructure hook so that a ProviderConfiguration can be
// decoded from the configuration file: the "type" key selects the provider
// and the remaining keys are decoded into its own configuration.
helpers.RegisterMapstructureUnmarshallerHook(
helpers.ParametrizedConfigurationUnmarshallerHook(ProviderConfiguration{}, providers))
}

View File

@@ -0,0 +1,16 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package metadata
import (
"testing"
"akvorado/common/helpers"
)
func TestDefaultConfiguration(t *testing.T) {
if err := helpers.Validate.Struct(DefaultConfiguration()); err != nil {
t.Fatalf("validate.Struct() error:\n%+v", err)
}
}

View File

@@ -0,0 +1,50 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
// Package provider defines the interface of a provider module for metadata.
package provider
import (
"context"
"net/netip"
"akvorado/common/reporter"
)
// Interface contains the information about an interface.
type Interface struct {
Name string // interface name (eg "Gi0/0/0/1")
Description string // interface description
Speed uint // interface speed (units depend on the provider — TODO confirm)
}

// Query is the query sent to a provider.
type Query struct {
// ExporterIP is the IP address of the exporter owning the interface.
ExporterIP netip.Addr
// IfIndex is the interface index on the exporter.
IfIndex uint
}

// Answer is the answer received from a provider.
type Answer struct {
// ExporterName is the name of the exporter.
ExporterName string
// Interface describes the queried interface.
Interface
}
// Update is an update received from a provider: the answer together with the
// query it answers.
type Update struct {
Query
Answer
}
// Provider is the interface any provider should meet.
type Provider interface {
// Query asks the provider to query metadata for an exporter and interface.
// The update will be returned by calling the provided callback (possibly
// asynchronously — confirm with each implementation).
Query(ctx context.Context, query Query, put func(Update)) error
}

// Configuration defines an interface to configure a provider.
type Configuration interface {
// New instantiates a new provider from its configuration.
New(r *reporter.Reporter) (Provider, error)
}

View File

@@ -14,26 +14,15 @@ import (
"github.com/mitchellh/mapstructure"
"akvorado/common/helpers"
"akvorado/inlet/metadata/provider"
)
// Configuration describes the configuration for the SNMP client
type Configuration struct {
// CacheDuration defines how long to keep cached entries without access
CacheDuration time.Duration `validate:"min=1m"`
// CacheRefresh defines how soon to refresh an existing cached entry
CacheRefresh time.Duration `validate:"eq=0|min=1m,eq=0|gtefield=CacheDuration"`
// CacheRefreshInterval defines the interval to check for expiration/refresh
CacheCheckInterval time.Duration `validate:"ltefield=CacheRefresh,min=1s"`
// CachePersist defines a file to store cache and survive restarts
CachePersistFile string
// PollerRetries tell how many time a poller should retry before giving up
PollerRetries int `validate:"min=0"`
// PollerTimeout tell how much time a poller should wait for an answer
PollerTimeout time.Duration `validate:"min=100ms"`
// PollerCoalesce tells how many requests can be contained inside a single SNMP PDU
PollerCoalesce int `validate:"min=0"`
// Workers define the number of workers used to poll SNMP
Workers int `validate:"min=1"`
// Communities is a mapping from exporter IPs to SNMPv2 communities
Communities *helpers.SubnetMap[string]
@@ -56,16 +45,10 @@ type SecurityParameters struct {
}
// DefaultConfiguration represents the default configuration for the SNMP client.
func DefaultConfiguration() Configuration {
func DefaultConfiguration() provider.Configuration {
return Configuration{
CacheDuration: 30 * time.Minute,
CacheRefresh: time.Hour,
CacheCheckInterval: 2 * time.Minute,
CachePersistFile: "",
PollerRetries: 1,
PollerTimeout: time.Second,
PollerCoalesce: 10,
Workers: 1,
Communities: helpers.MustNewSubnetMap(map[string]string{
"::/0": "public",

View File

@@ -5,7 +5,6 @@ package snmp
import (
"testing"
"time"
"akvorado/common/helpers"
@@ -40,12 +39,10 @@ func TestConfigurationUnmarshallerHook(t *testing.T) {
Initial: func() interface{} { return Configuration{} },
Configuration: func() interface{} {
return gin.H{
"cache-refresh": "10s",
"poller-retries": 10,
}
},
Expected: Configuration{
CacheRefresh: 10 * time.Second,
PollerRetries: 10,
Communities: helpers.MustNewSubnetMap(map[string]string{
"::/0": "public",

View File

@@ -8,88 +8,16 @@ import (
"errors"
"fmt"
"net/netip"
"sync"
"time"
"github.com/gosnmp/gosnmp"
"akvorado/common/helpers"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
type poller interface {
Poll(ctx context.Context, exporterIP, agentIP netip.Addr, port uint16, ifIndexes []uint) error
}
// realPoller will poll exporters using real SNMP requests.
type realPoller struct {
r *reporter.Reporter
config pollerConfig
pendingRequests map[string]struct{}
pendingRequestsLock sync.Mutex
errLogger reporter.Logger
put func(exporterIP netip.Addr, exporterName string, ifIndex uint, iface Interface)
metrics struct {
pendingRequests reporter.GaugeFunc
successes *reporter.CounterVec
errors *reporter.CounterVec
retries *reporter.CounterVec
times *reporter.SummaryVec
}
}
type pollerConfig struct {
Retries int
Timeout time.Duration
Communities *helpers.SubnetMap[string]
SecurityParameters *helpers.SubnetMap[SecurityParameters]
}
// newPoller creates a new SNMP poller.
func newPoller(r *reporter.Reporter, config pollerConfig, put func(netip.Addr, string, uint, Interface)) *realPoller {
p := &realPoller{
r: r,
config: config,
pendingRequests: make(map[string]struct{}),
errLogger: r.Sample(reporter.BurstSampler(10*time.Second, 3)),
put: put,
}
p.metrics.pendingRequests = r.GaugeFunc(
reporter.GaugeOpts{
Name: "poller_pending_requests",
Help: "Number of pending requests in pollers.",
}, func() float64 {
p.pendingRequestsLock.Lock()
defer p.pendingRequestsLock.Unlock()
return float64(len(p.pendingRequests))
})
p.metrics.successes = r.CounterVec(
reporter.CounterOpts{
Name: "poller_success_requests",
Help: "Number of successful requests.",
}, []string{"exporter"})
p.metrics.errors = r.CounterVec(
reporter.CounterOpts{
Name: "poller_error_requests",
Help: "Number of failed requests.",
}, []string{"exporter", "error"})
p.metrics.retries = r.CounterVec(
reporter.CounterOpts{
Name: "poller_retry_requests",
Help: "Number of retried requests.",
}, []string{"exporter"})
p.metrics.times = r.SummaryVec(
reporter.SummaryOpts{
Name: "poller_seconds",
Help: "Time to successfully poll for values.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, []string{"exporter"})
return p
}
func (p *realPoller) Poll(ctx context.Context, exporter, agent netip.Addr, port uint16, ifIndexes []uint) error {
// Poll polls the SNMP provider for the requested interface indexes.
func (p *Provider) Poll(ctx context.Context, exporter, agent netip.Addr, port uint16, ifIndexes []uint, put func(provider.Update)) error {
// Check if already have a request running
exporterStr := exporter.Unmap().String()
filteredIfIndexes := make([]uint, 0, len(ifIndexes))
@@ -122,8 +50,8 @@ func (p *realPoller) Poll(ctx context.Context, exporter, agent netip.Addr, port
Context: ctx,
Target: agent.Unmap().String(),
Port: port,
Retries: p.config.Retries,
Timeout: p.config.Timeout,
Retries: p.config.PollerRetries,
Timeout: p.config.PollerTimeout,
UseUnconnectedUDPSocket: true,
Logger: gosnmp.NewLogger(&goSNMPLogger{p.r}),
OnRetry: func(*gosnmp.GoSNMP) {
@@ -246,17 +174,25 @@ func (p *realPoller) Poll(ctx context.Context, exporter, agent netip.Addr, port
if ifIndex > 0 && !processUint(idx+2, "ifspeed", &ifSpeedVal) {
ok = false
}
if !ok {
// Negative cache
p.put(exporter, sysNameVal, ifIndex, Interface{})
} else {
p.put(exporter, sysNameVal, ifIndex, Interface{
var iface provider.Interface
if ok {
iface = provider.Interface{
Name: ifDescrVal,
Description: ifAliasVal,
Speed: ifSpeedVal,
})
}
p.metrics.successes.WithLabelValues(exporterStr).Inc()
}
put(provider.Update{
Query: provider.Query{
ExporterIP: exporter,
IfIndex: ifIndex,
},
Answer: provider.Answer{
ExporterName: sysNameVal,
Interface: iface,
},
})
}
p.metrics.times.WithLabelValues(exporterStr).Observe(time.Now().Sub(start).Seconds())

View File

@@ -17,28 +17,47 @@ import (
"akvorado/common/helpers"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
func TestPoller(t *testing.T) {
lo := netip.MustParseAddr("::ffff:127.0.0.1")
cases := []struct {
Description string
Skip string
Config pollerConfig
Config Configuration
ExporterIP netip.Addr
}{
{
Description: "SNMPv2",
Config: pollerConfig{
Retries: 2,
Timeout: 100 * time.Millisecond,
Config: Configuration{
PollerRetries: 2,
PollerTimeout: 100 * time.Millisecond,
Communities: helpers.MustNewSubnetMap(map[string]string{
"::/0": "private",
}),
Agents: map[netip.Addr]netip.Addr{
netip.MustParseAddr("192.0.2.1"): lo,
},
},
}, {
Description: "SNMPv2 with agent mapping",
Config: Configuration{
PollerRetries: 2,
PollerTimeout: 100 * time.Millisecond,
Communities: helpers.MustNewSubnetMap(map[string]string{
"::/0": "private",
}),
Agents: map[netip.Addr]netip.Addr{
netip.MustParseAddr("192.0.2.1"): lo,
},
},
ExporterIP: netip.MustParseAddr("::ffff:192.0.2.1"),
}, {
Description: "SNMPv3",
Config: pollerConfig{
Retries: 2,
Timeout: 100 * time.Millisecond,
Config: Configuration{
PollerRetries: 2,
PollerTimeout: 100 * time.Millisecond,
Communities: helpers.MustNewSubnetMap(map[string]string{
"::/0": "public",
}),
@@ -56,9 +75,9 @@ func TestPoller(t *testing.T) {
}, {
Description: "SNMPv3 no priv",
Skip: "GoSNMPServer is broken with this configuration",
Config: pollerConfig{
Retries: 2,
Timeout: 100 * time.Millisecond,
Config: Configuration{
PollerRetries: 2,
PollerTimeout: 100 * time.Millisecond,
Communities: helpers.MustNewSubnetMap(map[string]string{
"::/0": "public",
}),
@@ -79,14 +98,10 @@ func TestPoller(t *testing.T) {
if tc.Skip != "" {
t.Skip(tc.Skip)
}
got := []string{}
if !tc.ExporterIP.IsValid() {
tc.ExporterIP = lo
}
r := reporter.NewMock(t)
config := tc.Config
p := newPoller(r, config, func(exporterIP netip.Addr, exporterName string, ifIndex uint, iface Interface) {
got = append(got, fmt.Sprintf("%s %s %d %s %s %d",
exporterIP.Unmap().String(), exporterName,
ifIndex, iface.Name, iface.Description, iface.Speed))
})
// Start a new SNMP server
master := GoSNMPServer.MasterAgent{
@@ -172,8 +187,7 @@ func TestPoller(t *testing.T) {
},
}
server := GoSNMPServer.NewSNMPServer(master)
err := server.ListenUDP("udp", "127.0.0.1:0")
if err != nil {
if err := server.ListenUDP("udp", "127.0.0.1:0"); err != nil {
t.Fatalf("ListenUDP() err:\n%+v", err)
}
_, portStr, err := net.SplitHostPort(server.Address().String())
@@ -184,34 +198,49 @@ func TestPoller(t *testing.T) {
if err != nil {
panic(err)
}
p.r.Debug().Int("port", port).Msg("SNMP server listening")
r.Debug().Int("port", port).Msg("SNMP server listening")
go server.ServeForever()
defer server.Shutdown()
lo := netip.MustParseAddr("::ffff:127.0.0.1")
p.Poll(context.Background(), lo, lo, uint16(port), []uint{641})
p.Poll(context.Background(), lo, lo, uint16(port), []uint{642})
p.Poll(context.Background(), lo, lo, uint16(port), []uint{643})
p.Poll(context.Background(), lo, lo, uint16(port), []uint{644})
p.Poll(context.Background(), lo, lo, uint16(port), []uint{0})
got := []string{}
config := tc.Config
config.Ports = helpers.MustNewSubnetMap(map[string]uint16{
"::/0": uint16(port),
})
p, err := config.New(r)
if err != nil {
t.Fatalf("New() error:\n%+v", err)
}
put := func(update provider.Update) {
got = append(got, fmt.Sprintf("%s %s %d %s %s %d",
update.ExporterIP.Unmap().String(), update.ExporterName,
update.IfIndex, update.Name, update.Description, update.Speed))
}
p.Query(context.Background(), provider.Query{ExporterIP: tc.ExporterIP, IfIndex: 641}, put)
p.Query(context.Background(), provider.Query{ExporterIP: tc.ExporterIP, IfIndex: 642}, put)
p.Query(context.Background(), provider.Query{ExporterIP: tc.ExporterIP, IfIndex: 643}, put)
p.Query(context.Background(), provider.Query{ExporterIP: tc.ExporterIP, IfIndex: 644}, put)
p.Query(context.Background(), provider.Query{ExporterIP: tc.ExporterIP, IfIndex: 0}, put)
exporterStr := tc.ExporterIP.Unmap().String()
time.Sleep(50 * time.Millisecond)
if diff := helpers.Diff(got, []string{
`127.0.0.1 exporter62 641 Gi0/0/0/0 Transit 10000`,
`127.0.0.1 exporter62 642 Gi0/0/0/1 Peering 20000`,
`127.0.0.1 exporter62 643 0`, // negative cache
`127.0.0.1 exporter62 644 0`, // negative cache
`127.0.0.1 exporter62 0 0`,
fmt.Sprintf(`%s exporter62 641 Gi0/0/0/0 Transit 10000`, exporterStr),
fmt.Sprintf(`%s exporter62 642 Gi0/0/0/1 Peering 20000`, exporterStr),
fmt.Sprintf(`%s exporter62 643 0`, exporterStr), // negative cache
fmt.Sprintf(`%s exporter62 644 0`, exporterStr), // negative cache
fmt.Sprintf(`%s exporter62 0 0`, exporterStr),
}); diff != "" {
t.Fatalf("Poll() (-got, +want):\n%s", diff)
}
gotMetrics := r.GetMetrics("akvorado_inlet_snmp_poller_", "error_", "pending_", "success_")
gotMetrics := r.GetMetrics("akvorado_inlet_metadata_provider_snmp_poller_", "error_", "pending_", "success_")
expectedMetrics := map[string]string{
`error_requests{error="ifalias missing",exporter="127.0.0.1"}`: "2", // 643+644
`error_requests{error="ifdescr missing",exporter="127.0.0.1"}`: "1", // 644
`error_requests{error="ifspeed missing",exporter="127.0.0.1"}`: "1", // 644
fmt.Sprintf(`error_requests{error="ifalias missing",exporter="%s"}`, exporterStr): "2", // 643+644
fmt.Sprintf(`error_requests{error="ifdescr missing",exporter="%s"}`, exporterStr): "1", // 644
fmt.Sprintf(`error_requests{error="ifspeed missing",exporter="%s"}`, exporterStr): "1", // 644
`pending_requests`: "0",
`success_requests{exporter="127.0.0.1"}`: "3", // 641+642+0
fmt.Sprintf(`success_requests{exporter="%s"}`, exporterStr): "3", // 641+642+0
}
if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
t.Fatalf("Metrics (-got, +want):\n%s", diff)

View File

@@ -0,0 +1,99 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
// Package snmp handles SNMP polling to get interface names and
// descriptions. It keeps a cache of retrieved entries and refresh
// them.
package snmp
import (
"context"
"net/netip"
"sync"
"time"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
// Provider represents the SNMP provider.
type Provider struct {
	r      *reporter.Reporter
	config *Configuration
	// pendingRequests tracks in-flight polls so the same target is not
	// queried concurrently; guarded by pendingRequestsLock.
	pendingRequests     map[string]struct{}
	pendingRequestsLock sync.Mutex
	// errLogger is rate-limited to avoid flooding logs on repeated poll errors.
	errLogger reporter.Logger
	metrics   struct {
		pendingRequests reporter.GaugeFunc
		successes       *reporter.CounterVec
		errors          *reporter.CounterVec
		retries         *reporter.CounterVec
		times           *reporter.SummaryVec
	}
}
// New creates a new SNMP provider from configuration.
//
// IPv4 exporter/agent addresses in the Agents mapping are normalized to
// their 16-byte (IPv4-mapped) form so that lookups with the netip.Addr keys
// used by the metadata component match. The normalization works on a copy:
// the caller's map is never mutated (the original code deleted and inserted
// entries while ranging over the caller's map, mutating shared
// configuration and relying on unspecified iteration behavior).
func (configuration Configuration) New(r *reporter.Reporter) (provider.Provider, error) {
	// AddrFrom16(As16()) is the identity for addresses already in 16-byte form.
	agents := make(map[netip.Addr]netip.Addr, len(configuration.Agents))
	for exporterIP, agentIP := range configuration.Agents {
		agents[netip.AddrFrom16(exporterIP.As16())] = netip.AddrFrom16(agentIP.As16())
	}
	configuration.Agents = agents

	p := Provider{
		r:               r,
		config:          &configuration,
		pendingRequests: make(map[string]struct{}),
		// Rate-limit error logging: at most 3 messages per 10 seconds.
		errLogger: r.Sample(reporter.BurstSampler(10*time.Second, 3)),
	}
	p.metrics.pendingRequests = r.GaugeFunc(
		reporter.GaugeOpts{
			Name: "poller_pending_requests",
			Help: "Number of pending requests in pollers.",
		}, func() float64 {
			p.pendingRequestsLock.Lock()
			defer p.pendingRequestsLock.Unlock()
			return float64(len(p.pendingRequests))
		})
	p.metrics.successes = r.CounterVec(
		reporter.CounterOpts{
			Name: "poller_success_requests",
			Help: "Number of successful requests.",
		}, []string{"exporter"})
	p.metrics.errors = r.CounterVec(
		reporter.CounterOpts{
			Name: "poller_error_requests",
			Help: "Number of failed requests.",
		}, []string{"exporter", "error"})
	p.metrics.retries = r.CounterVec(
		reporter.CounterOpts{
			Name: "poller_retry_requests",
			Help: "Number of retried requests.",
		}, []string{"exporter"})
	p.metrics.times = r.SummaryVec(
		reporter.SummaryOpts{
			Name:       "poller_seconds",
			Help:       "Time to successfully poll for values.",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		}, []string{"exporter"})
	return &p, nil
}
// Query queries exporter to get information through SNMP. The poll is done
// synchronously; results are delivered through the put callback.
func (p *Provider) Query(ctx context.Context, query provider.Query, put func(provider.Update)) error {
	// Resolve the SNMP agent to contact: use the configured mapping when
	// present, otherwise poll the exporter itself.
	agentIP, ok := p.config.Agents[query.ExporterIP]
	if !ok {
		agentIP = query.ExporterIP
	}
	// Per-agent port, defaulting to the standard SNMP port 161.
	agentPort := p.config.Ports.LookupOrDefault(agentIP, 161)
	return p.Poll(ctx, query.ExporterIP, agentIP, agentPort, []uint{query.IfIndex}, put)
}

283
inlet/metadata/root.go Normal file
View File

@@ -0,0 +1,283 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
// Package metadata handles metadata polling to get interface names and
// descriptions. It keeps a cache of retrieved entries and refresh them. It is
// modular and accepts several kind of providers (including SNMP).
package metadata
import (
"errors"
"fmt"
"net/netip"
"strconv"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/eapache/go-resiliency/breaker"
"gopkg.in/tomb.v2"
"akvorado/common/daemon"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
// Component represents the metadata component.
type Component struct {
	r      *reporter.Reporter
	d      *Dependencies
	t      tomb.Tomb
	config Configuration
	// sc caches provider answers so lookups do not hit the provider each time.
	sc *metadataCache
	// healthyWorkers receives healthcheck callbacks for worker goroutines.
	healthyWorkers chan reporter.ChannelHealthcheckFunc
	// providerChannel carries queries from the dispatcher to the workers.
	providerChannel chan provider.Query
	// dispatcherChannel buffers incoming lookup requests (100 per worker).
	dispatcherChannel chan provider.Query
	// One circuit breaker per exporter to stop querying non-responsive
	// exporters; breaker loggers are rate-limited per exporter.
	providerBreakersLock   sync.Mutex
	providerBreakerLoggers map[netip.Addr]reporter.Logger
	providerBreakers       map[netip.Addr]*breaker.Breaker
	// provider is the configured metadata provider (SNMP by default).
	provider provider.Provider
	metrics  struct {
		cacheRefreshRuns         reporter.Counter
		cacheRefresh             reporter.Counter
		providerBusyCount        *reporter.CounterVec
		providerBreakerOpenCount *reporter.CounterVec
	}
}
// Dependencies define the dependencies of the metadata component.
type Dependencies struct {
	Daemon daemon.Component
	// Clock drives cache timestamps and the expiration ticker. When nil,
	// New substitutes the real clock; tests inject a mock.
	Clock clock.Clock
}
// New creates a new metadata component.
//
// It validates the cache settings (when enabled, the refresh interval must
// not be shorter than the cache duration, and the check interval must not
// exceed the cache duration), instantiates the configured provider, and
// registers the component metrics. The returned component still has to be
// started with Start().
func New(r *reporter.Reporter, configuration Configuration, dependencies Dependencies) (*Component, error) {
	if configuration.CacheRefresh > 0 && configuration.CacheRefresh < configuration.CacheDuration {
		return nil, errors.New("cache refresh must be greater than cache duration")
	}
	if configuration.CacheDuration < configuration.CacheCheckInterval {
		return nil, errors.New("cache duration must be greater than cache check interval")
	}
	if dependencies.Clock == nil {
		dependencies.Clock = clock.New()
	}
	sc := newMetadataCache(r)
	c := Component{
		r:      r,
		d:      &dependencies,
		config: configuration,
		sc:     sc,

		providerChannel:        make(chan provider.Query),
		dispatcherChannel:      make(chan provider.Query, 100*configuration.Workers),
		providerBreakers:       make(map[netip.Addr]*breaker.Breaker),
		providerBreakerLoggers: make(map[netip.Addr]reporter.Logger),
	}
	c.d.Daemon.Track(&c.t, "inlet/metadata")

	// Initialize the provider. Wrap the error so the caller knows which
	// step of component creation failed.
	selectedProvider, err := c.config.Provider.Config.New(r)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize provider: %w", err)
	}
	c.provider = selectedProvider

	c.metrics.cacheRefreshRuns = r.Counter(
		reporter.CounterOpts{
			Name: "cache_refresh_runs",
			Help: "Number of times the cache refresh was triggered.",
		})
	c.metrics.cacheRefresh = r.Counter(
		reporter.CounterOpts{
			Name: "cache_refresh",
			Help: "Number of entries refreshed in cache.",
		})
	c.metrics.providerBusyCount = r.CounterVec(
		reporter.CounterOpts{
			Name: "provider_busy_count",
			Help: "Providers were too busy and dropped requests.",
		},
		[]string{"exporter"})
	c.metrics.providerBreakerOpenCount = r.CounterVec(
		reporter.CounterOpts{
			Name: "provider_breaker_open_count",
			Help: "Provider breaker was opened due to too many errors.",
		},
		[]string{"exporter"})
	return &c, nil
}
// Start starts the metadata component: it loads the persisted cache (if
// configured), then launches the cache-expiration ticker, the dispatcher
// goroutine, and the configured number of worker goroutines. Each goroutine
// registers a healthcheck and is tracked by the component tomb.
func (c *Component) Start() error {
	c.r.Info().Msg("starting metadata component")

	// Load cache from the persist file; a failure is logged, not fatal.
	if c.config.CachePersistFile != "" {
		if err := c.sc.Load(c.config.CachePersistFile); err != nil {
			c.r.Err(err).Msg("cannot load cache, ignoring")
		}
	}

	// Goroutine to refresh the cache on each tick.
	healthyTicker := make(chan reporter.ChannelHealthcheckFunc)
	c.r.RegisterHealthcheck("metadata/ticker", reporter.ChannelHealthcheck(c.t.Context(nil), healthyTicker))
	c.t.Go(func() error {
		c.r.Debug().Msg("starting metadata ticker")
		ticker := c.d.Clock.Ticker(c.config.CacheCheckInterval)
		defer ticker.Stop()
		defer close(healthyTicker)
		for {
			select {
			case <-c.t.Dying():
				c.r.Debug().Msg("shutting down metadata ticker")
				return nil
			case cb, ok := <-healthyTicker:
				if ok {
					cb(reporter.HealthcheckOK, "ok")
				}
			case <-ticker.C:
				c.expireCache()
			}
		}
	})

	// Goroutine to fetch incoming requests and dispatch them to workers.
	healthyDispatcher := make(chan reporter.ChannelHealthcheckFunc)
	c.r.RegisterHealthcheck("metadata/dispatcher", reporter.ChannelHealthcheck(c.t.Context(nil), healthyDispatcher))
	c.t.Go(func() error {
		for {
			select {
			case <-c.t.Dying():
				c.r.Debug().Msg("stopping metadata dispatcher")
				return nil
			case cb, ok := <-healthyDispatcher:
				if ok {
					cb(reporter.HealthcheckOK, "ok")
				}
			case request := <-c.dispatcherChannel:
				c.providerChannel <- request
			}
		}
	})

	// Worker goroutines forwarding queries to the provider.
	c.healthyWorkers = make(chan reporter.ChannelHealthcheckFunc)
	c.r.RegisterHealthcheck("metadata/worker", reporter.ChannelHealthcheck(c.t.Context(nil), c.healthyWorkers))
	for i := 0; i < c.config.Workers; i++ {
		workerIDStr := strconv.Itoa(i)
		c.t.Go(func() error {
			c.r.Debug().Str("worker", workerIDStr).Msg("starting metadata provider")
			for {
				select {
				case <-c.t.Dying():
					c.r.Debug().Str("worker", workerIDStr).Msg("stopping metadata provider")
					return nil
				case cb, ok := <-c.healthyWorkers:
					if ok {
						cb(reporter.HealthcheckOK, fmt.Sprintf("worker %s ok", workerIDStr))
					}
				case request := <-c.providerChannel:
					c.providerIncomingRequest(request)
				}
			}
		})
	}
	return nil
}
// Stop stops the metadata component
func (c *Component) Stop() error {
	defer func() {
		// At this point the tomb has been waited for, so no goroutine is
		// using these channels anymore and closing them is safe.
		close(c.dispatcherChannel)
		close(c.providerChannel)
		close(c.healthyWorkers)
		// Persist the cache for the next start, if configured.
		if c.config.CachePersistFile != "" {
			if err := c.sc.Save(c.config.CachePersistFile); err != nil {
				c.r.Err(err).Msg("cannot save cache")
			}
		}
		c.r.Info().Msg("metadata component stopped")
	}()
	c.r.Info().Msg("stopping metadata component")
	c.t.Kill(nil)
	return c.t.Wait()
}
// Lookup for interface information for the provided exporter and ifIndex.
// If the information is not in the cache, it will be polled, but
// won't be returned immediately: the caller gets ok=false and zero values,
// and a later call is expected to hit the refreshed cache.
func (c *Component) Lookup(t time.Time, exporterIP netip.Addr, ifIndex uint) (string, Interface, bool) {
	query := provider.Query{ExporterIP: exporterIP, IfIndex: ifIndex}
	answer, ok := c.sc.Lookup(t, query)
	if !ok {
		// Queue a poll without blocking: if the dispatcher queue is full,
		// drop the request and account for it in the busy counter.
		select {
		case c.dispatcherChannel <- query:
		default:
			c.metrics.providerBusyCount.WithLabelValues(exporterIP.Unmap().String()).Inc()
		}
	}
	return answer.ExporterName, answer.Interface, ok
}
// providerIncomingRequest handles an incoming request to the provider. It
// uses a per-exporter circuit breaker to avoid wasting work on
// non-responsive exporters.
func (c *Component) providerIncomingRequest(request provider.Query) {
	// Get (or lazily create) the breaker for this exporter.
	c.providerBreakersLock.Lock()
	providerBreaker, ok := c.providerBreakers[request.ExporterIP]
	if !ok {
		// breaker.New(errorThreshold=20, successThreshold=1, timeout=1m).
		providerBreaker = breaker.New(20, 1, time.Minute)
		c.providerBreakers[request.ExporterIP] = providerBreaker
	}
	c.providerBreakersLock.Unlock()

	if err := providerBreaker.Run(func() error {
		// Store every update the provider delivers into the cache.
		return c.provider.Query(c.t.Context(nil), request, func(update provider.Update) {
			c.sc.Put(c.d.Clock.Now(), update.Query, update.Answer)
		})
	}); err == breaker.ErrBreakerOpen {
		c.metrics.providerBreakerOpenCount.WithLabelValues(request.ExporterIP.Unmap().String()).Inc()
		// Log the open breaker at most once per minute per exporter.
		c.providerBreakersLock.Lock()
		l, ok := c.providerBreakerLoggers[request.ExporterIP]
		if !ok {
			l = c.r.Sample(reporter.BurstSampler(time.Minute, 1)).
				With().
				Str("exporter", request.ExporterIP.Unmap().String()).
				Logger()
			c.providerBreakerLoggers[request.ExporterIP] = l
		}
		l.Warn().Msg("provider breaker open")
		c.providerBreakersLock.Unlock()
	}
}
// expireCache handles cache expiration and refresh. It evicts entries not
// accessed for CacheDuration and, when refresh is enabled, queues a new
// query for entries older than CacheRefresh.
func (c *Component) expireCache() {
	c.sc.Expire(c.d.Clock.Now().Add(-c.config.CacheDuration))
	if c.config.CacheRefresh > 0 {
		c.r.Debug().Msg("refresh metadata cache")
		c.metrics.cacheRefreshRuns.Inc()
		count := 0
		toRefresh := c.sc.NeedUpdates(c.d.Clock.Now().Add(-c.config.CacheRefresh))
		for exporter, ifaces := range toRefresh {
			for ifIndex := range ifaces {
				// Non-blocking enqueue: drop (and account) refreshes that
				// do not fit in the dispatcher queue.
				select {
				case c.dispatcherChannel <- provider.Query{
					ExporterIP: exporter,
					IfIndex:    ifIndex,
				}:
					count++
				default:
					c.metrics.providerBusyCount.WithLabelValues(exporter.Unmap().String()).Inc()
				}
			}
		}
		c.r.Debug().Int("count", count).Msg("refreshed metadata cache")
		c.metrics.cacheRefresh.Add(float64(count))
	}
}

203
inlet/metadata/root_test.go Normal file
View File

@@ -0,0 +1,203 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package metadata
import (
"context"
"errors"
"net/netip"
"path/filepath"
"testing"
"time"
"github.com/benbjohnson/clock"
"akvorado/common/daemon"
"akvorado/common/helpers"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
// expectMockLookup checks that Lookup for the given exporter and ifIndex
// returns the expected answer. The exporter IP is normalized to its 16-byte
// form, matching how the component stores addresses.
func expectMockLookup(t *testing.T, c *Component, exporter string, ifIndex uint, expected provider.Answer) {
	t.Helper()
	ip := netip.AddrFrom16(netip.MustParseAddr(exporter).As16())
	gotExporterName, gotInterface, _ := c.Lookup(time.Now(), ip, ifIndex)
	got := provider.Answer{ExporterName: gotExporterName, Interface: gotInterface}
	if diff := helpers.Diff(got, expected); diff != "" {
		t.Fatalf("Lookup() (-got, +want):\n%s", diff)
	}
}
// TestLookup checks the miss-then-hit behavior of Lookup: the first calls
// miss and trigger an asynchronous poll; after a short wait, the mock
// provider's answers are served from the cache. ifIndex 999 is answered by
// the mock provider with an empty interface.
func TestLookup(t *testing.T) {
	r := reporter.NewMock(t)
	c := NewMock(t, r, DefaultConfiguration(), Dependencies{Daemon: daemon.NewMock(t)})

	// First lookups miss and queue a poll.
	expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{})
	expectMockLookup(t, c, "127.0.0.1", 999, provider.Answer{})
	// Leave some time for the poll to complete.
	time.Sleep(30 * time.Millisecond)
	expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{
		ExporterName: "127_0_0_1",
		Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
	})
	expectMockLookup(t, c, "127.0.0.1", 999, provider.Answer{
		ExporterName: "127_0_0_1",
	})
}
// TestComponentSaveLoad checks that the cache survives a restart through the
// persist file: the "save" subtest populates the cache (persisted when the
// component stops), then the "load" subtest expects an immediate cache hit
// on a fresh component sharing the same persist file.
func TestComponentSaveLoad(t *testing.T) {
	configuration := DefaultConfiguration()
	configuration.CachePersistFile = filepath.Join(t.TempDir(), "cache")

	t.Run("save", func(t *testing.T) {
		r := reporter.NewMock(t)
		c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
		expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{})
		time.Sleep(30 * time.Millisecond)
		expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{
			ExporterName: "127_0_0_1",
			Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
		})
	})

	t.Run("load", func(t *testing.T) {
		r := reporter.NewMock(t)
		c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
		// No wait needed: the entry comes from the loaded cache.
		expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{
			ExporterName: "127_0_0_1",
			Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
		})
	})
}
// TestAutoRefresh checks that a cached entry kept alive by periodic lookups
// is refreshed in the background once it gets older than CacheRefresh, and
// verifies the cache metrics after the sequence.
func TestAutoRefresh(t *testing.T) {
	r := reporter.NewMock(t)
	configuration := DefaultConfiguration()
	mockClock := clock.NewMock()
	c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t), Clock: mockClock})

	// Fetch a value
	expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{})
	time.Sleep(30 * time.Millisecond)
	expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{
		ExporterName: "127_0_0_1",
		Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
	})

	// Keep it in the cache!
	mockClock.Add(25 * time.Minute)
	c.Lookup(mockClock.Now(), netip.MustParseAddr("::ffff:127.0.0.1"), 765)
	mockClock.Add(25 * time.Minute)
	c.Lookup(mockClock.Now(), netip.MustParseAddr("::ffff:127.0.0.1"), 765)

	// Go forward, we expect the entry to have been refreshed and be still present
	mockClock.Add(11 * time.Minute)
	time.Sleep(30 * time.Millisecond)
	mockClock.Add(2 * time.Minute)
	time.Sleep(30 * time.Millisecond)
	expectMockLookup(t, c, "127.0.0.1", 765, provider.Answer{
		ExporterName: "127_0_0_1",
		Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
	})

	gotMetrics := r.GetMetrics("akvorado_inlet_metadata_cache_")
	expectedMetrics := map[string]string{
		`expired`:      "0",
		`hit`:          "4",
		`miss`:         "1",
		`size`:         "1",
		`refresh_runs`: "31", // 63/2
		`refresh`:      "1",
	}
	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
		t.Fatalf("Metrics (-got, +want):\n%s", diff)
	}
}
// TestConfigCheck checks the configuration validation done by New: a refresh
// interval shorter than the cache duration is rejected, a check interval
// larger than the cache duration is rejected, and a disabled refresh
// (CacheRefresh == 0) is accepted.
func TestConfigCheck(t *testing.T) {
	t.Run("refresh", func(t *testing.T) {
		configuration := DefaultConfiguration()
		configuration.CacheDuration = 10 * time.Minute
		configuration.CacheRefresh = 5 * time.Minute
		configuration.CacheCheckInterval = time.Minute
		configuration.Provider.Config = mockProviderConfiguration{}
		if _, err := New(reporter.NewMock(t), configuration, Dependencies{Daemon: daemon.NewMock(t)}); err == nil {
			t.Fatal("New() should trigger an error")
		}
	})
	t.Run("interval", func(t *testing.T) {
		configuration := DefaultConfiguration()
		configuration.CacheDuration = 10 * time.Minute
		configuration.CacheRefresh = 15 * time.Minute
		configuration.CacheCheckInterval = 12 * time.Minute
		configuration.Provider.Config = mockProviderConfiguration{}
		if _, err := New(reporter.NewMock(t), configuration, Dependencies{Daemon: daemon.NewMock(t)}); err == nil {
			t.Fatal("New() should trigger an error")
		}
	})
	t.Run("refresh disabled", func(t *testing.T) {
		configuration := DefaultConfiguration()
		configuration.CacheDuration = 10 * time.Minute
		configuration.CacheRefresh = 0
		configuration.CacheCheckInterval = 2 * time.Minute
		configuration.Provider.Config = mockProviderConfiguration{}
		if _, err := New(reporter.NewMock(t), configuration, Dependencies{Daemon: daemon.NewMock(t)}); err != nil {
			t.Fatalf("New() error:\n%+v", err)
		}
	})
}
// TestStartStopWithMultipleWorkers checks that the component starts and
// stops cleanly with several workers (NewMock registers the stop via
// helpers.StartStop).
func TestStartStopWithMultipleWorkers(t *testing.T) {
	r := reporter.NewMock(t)
	configuration := DefaultConfiguration()
	configuration.Workers = 5
	NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
}
// errorProvider is a provider whose queries always fail, used to exercise
// the circuit breaker.
type errorProvider struct{}

// Query always returns an error without producing any update.
func (ep errorProvider) Query(_ context.Context, _ provider.Query, _ func(provider.Update)) error {
	return errors.New("noooo")
}

// errorProviderConfiguration instantiates errorProvider.
type errorProviderConfiguration struct{}

// New returns a new errorProvider.
func (epc errorProviderConfiguration) New(_ *reporter.Reporter) (provider.Provider, error) {
	return errorProvider{}, nil
}
// TestProviderBreaker checks the per-exporter circuit breaker: with a
// provider that always fails, repeated queries for the same exporter open
// the breaker and increment the breaker-open counter, while a successful
// provider never trips it. Queries to a second exporter use a distinct
// breaker and do not affect the first one's counter.
func TestProviderBreaker(t *testing.T) {
	cases := []struct {
		Name                  string
		ProviderConfiguration provider.Configuration
		ExpectedCount         string
	}{
		{"always successful provider", mockProviderConfiguration{}, "0"},
		{"never successful provider", errorProviderConfiguration{}, "10"},
	}
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			r := reporter.NewMock(t)
			configuration := DefaultConfiguration()
			configuration.Provider.Config = tc.ProviderConfiguration
			c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
			// Pre-create the metric so it shows up even when it stays at 0.
			c.metrics.providerBreakerOpenCount.WithLabelValues("127.0.0.1").Add(0)
			for i := 0; i < 30; i++ {
				c.Lookup(c.d.Clock.Now(), netip.MustParseAddr("::ffff:127.0.0.1"), 765)
			}
			for i := 0; i < 5; i++ {
				c.Lookup(c.d.Clock.Now(), netip.MustParseAddr("::ffff:127.0.0.2"), 765)
			}
			time.Sleep(50 * time.Millisecond)
			gotMetrics := r.GetMetrics("akvorado_inlet_metadata_provider_", "breaker_open_count")
			expectedMetrics := map[string]string{
				`breaker_open_count{exporter="127.0.0.1"}`: tc.ExpectedCount,
			}
			if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
				t.Errorf("Metrics (-got, +want):\n%s", diff)
			}
		})
	}
}

58
inlet/metadata/tests.go Normal file
View File

@@ -0,0 +1,58 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
//go:build !release
package metadata
import (
"context"
"fmt"
"strings"
"testing"
"akvorado/common/helpers"
"akvorado/common/reporter"
"akvorado/inlet/metadata/provider"
)
// mockProvider represents a mock provider.
type mockProvider struct{}

// Query queries the mock provider for a value. The exporter name is derived
// from the exporter IP (dots replaced by underscores). ifIndex 999 yields an
// empty interface to simulate a negative answer; any other index yields
// synthetic interface data. The callback is invoked synchronously.
func (mp mockProvider) Query(_ context.Context, query provider.Query, put func(provider.Update)) error {
	answer := provider.Answer{
		ExporterName: strings.ReplaceAll(query.ExporterIP.Unmap().String(), ".", "_"),
	}
	if query.IfIndex != 999 {
		answer.Interface = Interface{
			Name:        fmt.Sprintf("Gi0/0/%d", query.IfIndex),
			Description: fmt.Sprintf("Interface %d", query.IfIndex),
			Speed:       1000,
		}
	}
	put(provider.Update{Query: query, Answer: answer})
	return nil
}
// mockProviderConfiguration is the configuration for the mock provider. It
// carries no settings.
type mockProviderConfiguration struct{}

// New returns a new mock provider.
func (mpc mockProviderConfiguration) New(_ *reporter.Reporter) (provider.Provider, error) {
	return mockProvider{}, nil
}
// NewMock creates a new metadata component building synthetic values. It is
// already started (and stopped automatically at the end of the test through
// helpers.StartStop). Unless the configuration sets a provider, the mock
// provider is used.
func NewMock(t *testing.T, reporter *reporter.Reporter, configuration Configuration, dependencies Dependencies) *Component {
	t.Helper()
	if configuration.Provider.Config == nil {
		configuration.Provider.Config = mockProviderConfiguration{}
	}
	c, err := New(reporter, configuration, dependencies)
	if err != nil {
		t.Fatalf("New() error:\n%+v", err)
	}
	helpers.StartStop(t, c)
	return c
}

View File

@@ -1,358 +0,0 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
// Package snmp handles SNMP polling to get interface names and
// descriptions. It keeps a cache of retrieved entries and refresh
// them.
package snmp
import (
"errors"
"fmt"
"net/netip"
"strconv"
"sync"
"time"
"github.com/benbjohnson/clock"
"github.com/eapache/go-resiliency/breaker"
"gopkg.in/tomb.v2"
"akvorado/common/daemon"
"akvorado/common/reporter"
)
// Component represents the SNMP component.
type Component struct {
	r      *reporter.Reporter
	d      *Dependencies
	t      tomb.Tomb // tracks the lifecycle of the component goroutines
	config Configuration

	sc *snmpCache // cache of polled interface data

	healthyWorkers     chan reporter.ChannelHealthcheckFunc
	pollerChannel      chan lookupRequest // dispatcher → poller workers
	dispatcherChannel  chan lookupRequest // Lookup()/expireCache() → dispatcher
	dispatcherBChannel chan (<-chan bool) // block channel for testing

	pollerBreakersLock   sync.Mutex                      // guards the two maps below
	pollerBreakerLoggers map[netip.Addr]reporter.Logger  // rate-limited loggers, one per exporter
	pollerBreakers       map[netip.Addr]*breaker.Breaker // circuit breakers, one per exporter
	poller               poller

	metrics struct {
		cacheRefreshRuns     reporter.Counter
		cacheRefresh         reporter.Counter
		pollerBusyCount      *reporter.CounterVec
		pollerCoalescedCount reporter.Counter
		pollerBreakerOpenCount *reporter.CounterVec
	}
}
// Dependencies define the dependencies of the SNMP component.
type Dependencies struct {
	Daemon daemon.Component
	Clock  clock.Clock // when nil, New() substitutes the real clock
}
// New creates a new SNMP component. It validates the cache-related settings,
// normalizes the agent map to IPv6-mapped addresses, and registers the
// component metrics. The component is not started.
func New(r *reporter.Reporter, configuration Configuration, dependencies Dependencies) (*Component, error) {
	if configuration.CacheRefresh > 0 && configuration.CacheRefresh < configuration.CacheDuration {
		return nil, errors.New("cache refresh must be greater than cache duration")
	}
	if configuration.CacheDuration < configuration.CacheCheckInterval {
		return nil, errors.New("cache duration must be greater than cache check interval")
	}
	// Normalize agent entries to IPv6-mapped form. Collect the entries to
	// rewrite first: the Go spec leaves it unspecified whether keys
	// inserted during a range over a map are visited, so we do not mutate
	// the map while iterating over it.
	type agentPair struct {
		exporter netip.Addr
		agent    netip.Addr
	}
	var toConvert []agentPair
	for exporterIP, agentIP := range configuration.Agents {
		if exporterIP.Is4() || agentIP.Is4() {
			toConvert = append(toConvert, agentPair{exporterIP, agentIP})
		}
	}
	for _, pair := range toConvert {
		delete(configuration.Agents, pair.exporter)
		exporterIP := netip.AddrFrom16(pair.exporter.As16())
		agentIP := netip.AddrFrom16(pair.agent.As16())
		configuration.Agents[exporterIP] = agentIP
	}
	if dependencies.Clock == nil {
		dependencies.Clock = clock.New()
	}
	sc := newSNMPCache(r)
	c := Component{
		r:                    r,
		d:                    &dependencies,
		config:               configuration,
		sc:                   sc,
		pollerChannel:        make(chan lookupRequest),
		dispatcherChannel:    make(chan lookupRequest, 100*configuration.Workers),
		dispatcherBChannel:   make(chan (<-chan bool)),
		pollerBreakers:       make(map[netip.Addr]*breaker.Breaker),
		pollerBreakerLoggers: make(map[netip.Addr]reporter.Logger),
		poller: newPoller(r, pollerConfig{
			Retries:            configuration.PollerRetries,
			Timeout:            configuration.PollerTimeout,
			Communities:        configuration.Communities,
			SecurityParameters: configuration.SecurityParameters,
		}, func(ip netip.Addr, exporterName string, index uint, iface Interface) {
			// Poll results are stored into the cache.
			sc.Put(dependencies.Clock.Now(), ip, exporterName, index, iface)
		}),
	}
	c.d.Daemon.Track(&c.t, "inlet/snmp")
	c.metrics.cacheRefreshRuns = r.Counter(
		reporter.CounterOpts{
			Name: "cache_refresh_runs",
			Help: "Number of times the cache refresh was triggered.",
		})
	c.metrics.cacheRefresh = r.Counter(
		reporter.CounterOpts{
			Name: "cache_refresh",
			Help: "Number of entries refreshed in cache.",
		})
	c.metrics.pollerBusyCount = r.CounterVec(
		reporter.CounterOpts{
			Name: "poller_busy_count",
			Help: "Pollers where too busy and dropped requests.",
		},
		[]string{"exporter"})
	c.metrics.pollerCoalescedCount = r.Counter(
		reporter.CounterOpts{
			Name: "poller_coalesced_count",
			Help: "Poller was able to coalesce several requests in one.",
		})
	c.metrics.pollerBreakerOpenCount = r.CounterVec(
		reporter.CounterOpts{
			Name: "poller_breaker_open_count",
			Help: "Poller breaker was opened due to too many errors.",
		},
		[]string{"exporter"})
	return &c, nil
}
// Start starts the SNMP component: it optionally loads the persisted cache,
// then spawns the cache-expiration ticker, the request dispatcher, and the
// configured number of poller workers. All goroutines are tracked by the
// component tomb and expose a healthcheck channel.
func (c *Component) Start() error {
	c.r.Info().Msg("starting SNMP component")
	// Load cache
	if c.config.CachePersistFile != "" {
		if err := c.sc.Load(c.config.CachePersistFile); err != nil {
			// Best effort: starting with a cold cache is acceptable.
			c.r.Err(err).Msg("cannot load cache, ignoring")
		}
	}
	// Goroutine to refresh the cache
	healthyTicker := make(chan reporter.ChannelHealthcheckFunc)
	c.r.RegisterHealthcheck("snmp/ticker", reporter.ChannelHealthcheck(c.t.Context(nil), healthyTicker))
	c.t.Go(func() error {
		c.r.Debug().Msg("starting SNMP ticker")
		ticker := c.d.Clock.Ticker(c.config.CacheCheckInterval)
		defer ticker.Stop()
		defer close(healthyTicker)
		for {
			select {
			case <-c.t.Dying():
				c.r.Debug().Msg("shutting down SNMP ticker")
				return nil
			case cb, ok := <-healthyTicker:
				// Answer healthcheck probes.
				if ok {
					cb(reporter.HealthcheckOK, "ok")
				}
			case <-ticker.C:
				// Periodically expire/refresh cache entries.
				c.expireCache()
			}
		}
	})
	// Goroutine to fetch incoming requests and dispatch them to workers
	healthyDispatcher := make(chan reporter.ChannelHealthcheckFunc)
	c.r.RegisterHealthcheck("snmp/dispatcher", reporter.ChannelHealthcheck(c.t.Context(nil), healthyDispatcher))
	c.t.Go(func() error {
		for {
			select {
			case <-c.t.Dying():
				c.r.Debug().Msg("stopping SNMP dispatcher")
				return nil
			case cb, ok := <-healthyDispatcher:
				if ok {
					cb(reporter.HealthcheckOK, "ok")
				}
			case ch := <-c.dispatcherBChannel:
				// This is for test coalescing: block the dispatcher
				// until the received channel is closed.
				<-ch
			case request := <-c.dispatcherChannel:
				c.dispatchIncomingRequest(request)
			}
		}
	})
	// Goroutines to poll exporters
	c.healthyWorkers = make(chan reporter.ChannelHealthcheckFunc)
	c.r.RegisterHealthcheck("snmp/worker", reporter.ChannelHealthcheck(c.t.Context(nil), c.healthyWorkers))
	for i := 0; i < c.config.Workers; i++ {
		// Capture the worker ID for the closure below.
		workerIDStr := strconv.Itoa(i)
		c.t.Go(func() error {
			c.r.Debug().Str("worker", workerIDStr).Msg("starting SNMP poller")
			for {
				select {
				case <-c.t.Dying():
					c.r.Debug().Str("worker", workerIDStr).Msg("stopping SNMP poller")
					return nil
				case cb, ok := <-c.healthyWorkers:
					if ok {
						cb(reporter.HealthcheckOK, fmt.Sprintf("worker %s ok", workerIDStr))
					}
				case request := <-c.pollerChannel:
					c.pollerIncomingRequest(request)
				}
			}
		})
	}
	return nil
}
// Stop stops the SNMP component. It kills the tomb, waits for all tracked
// goroutines to terminate, then closes the channels and persists the cache.
func (c *Component) Stop() error {
	defer func() {
		// This runs after c.t.Wait() has returned: all goroutines are
		// gone, so nobody can send on these channels anymore and it is
		// safe to close them.
		close(c.dispatcherChannel)
		close(c.pollerChannel)
		close(c.healthyWorkers)
		// Persist the cache for the next start, if configured.
		if c.config.CachePersistFile != "" {
			if err := c.sc.Save(c.config.CachePersistFile); err != nil {
				c.r.Err(err).Msg("cannot save cache")
			}
		}
		c.r.Info().Msg("SNMP component stopped")
	}()
	c.r.Info().Msg("stopping SNMP component")
	c.t.Kill(nil)
	return c.t.Wait()
}
// lookupRequest is used internally to queue a polling request.
type lookupRequest struct {
	ExporterIP netip.Addr // exporter to poll
	IfIndexes  []uint     // interface indexes to query on this exporter
}
// Lookup returns interface information for the provided exporter and
// ifIndex. On a cache hit, the exporter name and interface are returned with
// ok set to true. On a miss, a poll request is queued (best effort, never
// blocking) and ok is false; the information won't be returned immediately.
func (c *Component) Lookup(t time.Time, exporterIP netip.Addr, ifIndex uint) (string, Interface, bool) {
	exporterName, iface, ok := c.sc.Lookup(t, exporterIP, ifIndex)
	if ok {
		return exporterName, iface, true
	}
	// Queue a polling request without blocking the caller: when the
	// dispatcher queue is full, account for the drop instead of stalling.
	request := lookupRequest{
		ExporterIP: exporterIP,
		IfIndexes:  []uint{ifIndex},
	}
	select {
	case c.dispatcherChannel <- request:
	default:
		c.metrics.pollerBusyCount.WithLabelValues(exporterIP.Unmap().String()).Inc()
	}
	return exporterName, iface, false
}
// Dispatch an incoming request to workers. May handle more than the
// provided request if it can: while coalescing is enabled, it keeps draining
// the dispatcher channel and merges requests per exporter until a limit is
// reached or the queue is empty.
func (c *Component) dispatchIncomingRequest(request lookupRequest) {
	// Accumulate requested ifindexes per exporter.
	requestsMap := map[netip.Addr][]uint{
		request.ExporterIP: request.IfIndexes,
	}
	// Coalescing loop (skipped entirely when PollerCoalesce is 0).
	for c.config.PollerCoalesce > 0 {
		select {
		case request := <-c.dispatcherChannel:
			indexes, ok := requestsMap[request.ExporterIP]
			if !ok {
				indexes = request.IfIndexes
			} else {
				indexes = append(indexes, request.IfIndexes...)
			}
			requestsMap[request.ExporterIP] = indexes
			// We don't want to exceed the configured
			// limit but also there is no point of
			// coalescing requests of too many exporters.
			if len(indexes) < c.config.PollerCoalesce && len(requestsMap) < 4 {
				continue
			}
		case <-c.t.Dying():
			return
		default:
			// No more requests in queue
		}
		// Reached when a limit is hit or the queue is empty: stop
		// coalescing and forward what we have.
		break
	}
	// Forward the (possibly coalesced) requests to the poller workers.
	for exporterIP, ifIndexes := range requestsMap {
		if len(ifIndexes) > 1 {
			c.metrics.pollerCoalescedCount.Add(float64(len(ifIndexes)))
		}
		select {
		case <-c.t.Dying():
			return
		case c.pollerChannel <- lookupRequest{exporterIP, ifIndexes}:
		}
	}
}
// pollerIncomingRequest handles an incoming request to the poller. It
// uses a breaker to avoid pushing working on non-responsive exporters.
func (c *Component) pollerIncomingRequest(request lookupRequest) {
	// Avoid querying too much exporters with errors: fetch or lazily
	// create the per-exporter circuit breaker.
	c.pollerBreakersLock.Lock()
	pollerBreaker, ok := c.pollerBreakers[request.ExporterIP]
	if !ok {
		// breaker.New(errorThreshold, successThreshold, timeout):
		// 20 consecutive errors open the breaker for one minute.
		pollerBreaker = breaker.New(20, 1, time.Minute)
		c.pollerBreakers[request.ExporterIP] = pollerBreaker
	}
	c.pollerBreakersLock.Unlock()
	// Resolve the SNMP agent for this exporter, defaulting to the
	// exporter address itself when no mapping is configured.
	agentIP, ok := c.config.Agents[request.ExporterIP]
	if !ok {
		agentIP = request.ExporterIP
	}
	agentPort := c.config.Ports.LookupOrDefault(agentIP, 161) // 161 = default SNMP port
	if err := pollerBreaker.Run(func() error {
		return c.poller.Poll(
			c.t.Context(nil),
			request.ExporterIP, agentIP, agentPort,
			request.IfIndexes)
	}); err == breaker.ErrBreakerOpen {
		c.metrics.pollerBreakerOpenCount.WithLabelValues(request.ExporterIP.Unmap().String()).Inc()
		// Warn at most once per minute per exporter, through a
		// rate-limited logger cached per exporter.
		c.pollerBreakersLock.Lock()
		l, ok := c.pollerBreakerLoggers[request.ExporterIP]
		if !ok {
			l = c.r.Sample(reporter.BurstSampler(time.Minute, 1)).
				With().
				Str("exporter", request.ExporterIP.Unmap().String()).
				Logger()
			c.pollerBreakerLoggers[request.ExporterIP] = l
		}
		l.Warn().Msg("poller breaker open")
		c.pollerBreakersLock.Unlock()
	}
}
// expireCache removes entries older than the cache duration and, when
// refresh is enabled, queues polling requests for entries needing an update.
func (c *Component) expireCache() {
	c.sc.Expire(c.d.Clock.Now().Add(-c.config.CacheDuration))
	if c.config.CacheRefresh <= 0 {
		return
	}
	c.r.Debug().Msg("refresh SNMP cache")
	c.metrics.cacheRefreshRuns.Inc()
	refreshed := 0
	toRefresh := c.sc.NeedUpdates(c.d.Clock.Now().Add(-c.config.CacheRefresh))
	for exporter, ifaces := range toRefresh {
		for ifIndex := range ifaces {
			request := lookupRequest{
				ExporterIP: exporter,
				IfIndexes:  []uint{ifIndex},
			}
			// Non-blocking send: drop (and account) when the
			// dispatcher is busy.
			select {
			case c.dispatcherChannel <- request:
				refreshed++
			default:
				c.metrics.pollerBusyCount.WithLabelValues(exporter.Unmap().String()).Inc()
			}
		}
	}
	c.r.Debug().Int("count", refreshed).Msg("refreshed SNMP cache")
	c.metrics.cacheRefresh.Add(float64(refreshed))
}

View File

@@ -1,323 +0,0 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
package snmp
import (
"context"
"errors"
"net/netip"
"path/filepath"
"sync"
"testing"
"time"
"github.com/benbjohnson/clock"
"akvorado/common/daemon"
"akvorado/common/helpers"
"akvorado/common/reporter"
)
// expectSNMPLookup performs a lookup for the given exporter/ifindex and
// fails the test when the result does not match the expectation.
func expectSNMPLookup(t *testing.T, c *Component, exporter string, ifIndex uint, expected answer) {
	t.Helper()
	exporterIP := netip.AddrFrom16(netip.MustParseAddr(exporter).As16())
	name, iface, ok := c.Lookup(time.Now(), exporterIP, ifIndex)
	got := answer{name, iface, !ok}
	if diff := helpers.Diff(got, expected); diff != "" {
		t.Fatalf("Lookup() (-got, +want):\n%s", diff)
	}
}
// TestLookup checks the basic lookup flow: a cache miss queues a poll
// request, and a later lookup returns the polled data. Ifindex 999 is
// special-cased by the mock poller and yields an empty interface.
func TestLookup(t *testing.T) {
	r := reporter.NewMock(t)
	c := NewMock(t, r, DefaultConfiguration(), Dependencies{Daemon: daemon.NewMock(t)})
	// First lookups miss the cache and only trigger polling.
	expectSNMPLookup(t, c, "127.0.0.1", 765, answer{NOk: true})
	expectSNMPLookup(t, c, "127.0.0.1", 999, answer{NOk: true})
	// Leave some time for the mock poller to fill the cache.
	time.Sleep(30 * time.Millisecond)
	expectSNMPLookup(t, c, "127.0.0.1", 765, answer{
		ExporterName: "127_0_0_1",
		Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
	})
	expectSNMPLookup(t, c, "127.0.0.1", 999, answer{
		ExporterName: "127_0_0_1",
	})
}
// TestSNMPCommunities checks that the community for an exporter is resolved
// from the configured subnet map. The mock poller only answers when the
// community resolves to "public".
func TestSNMPCommunities(t *testing.T) {
	r := reporter.NewMock(t)
	configuration := DefaultConfiguration()
	// The error is ignored: the input is static and known valid.
	configuration.Communities, _ = helpers.NewSubnetMap(map[string]string{
		"::/0":                 "notpublic",
		"::ffff:127.0.0.1/128": "public",
		"::ffff:127.0.0.2/128": "private",
	})
	c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
	// Use "public" as a community. Should work.
	expectSNMPLookup(t, c, "127.0.0.1", 765, answer{NOk: true})
	time.Sleep(30 * time.Millisecond)
	expectSNMPLookup(t, c, "127.0.0.1", 765, answer{
		ExporterName: "127_0_0_1",
		Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
	})
	// Use "private", should not work
	expectSNMPLookup(t, c, "127.0.0.2", 765, answer{NOk: true})
	time.Sleep(30 * time.Millisecond)
	expectSNMPLookup(t, c, "127.0.0.2", 765, answer{NOk: true})
	// Use default community, should not work
	expectSNMPLookup(t, c, "127.0.0.3", 765, answer{NOk: true})
	time.Sleep(30 * time.Millisecond)
	expectSNMPLookup(t, c, "127.0.0.3", 765, answer{NOk: true})
}
// TestComponentSaveLoad checks cache persistence: the "save" subtest
// populates the cache (written to CachePersistFile when the component
// stops), and the "load" subtest expects the entry to be available
// immediately after restart, without polling. Both subtests share the same
// persistence file.
func TestComponentSaveLoad(t *testing.T) {
	configuration := DefaultConfiguration()
	configuration.CachePersistFile = filepath.Join(t.TempDir(), "cache")
	t.Run("save", func(t *testing.T) {
		r := reporter.NewMock(t)
		c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
		// Populate the cache through a miss, then a hit.
		expectSNMPLookup(t, c, "127.0.0.1", 765, answer{NOk: true})
		time.Sleep(30 * time.Millisecond)
		expectSNMPLookup(t, c, "127.0.0.1", 765, answer{
			ExporterName: "127_0_0_1",
			Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
		})
	})
	t.Run("load", func(t *testing.T) {
		r := reporter.NewMock(t)
		c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
		// The entry should come from the persisted cache right away.
		expectSNMPLookup(t, c, "127.0.0.1", 765, answer{
			ExporterName: "127_0_0_1",
			Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
		})
	})
}
// TestAutoRefresh checks that entries still in use are refreshed in the
// background before expiring. The component clock is mocked to fast-forward
// time while real sleeps let the background goroutines run.
func TestAutoRefresh(t *testing.T) {
	r := reporter.NewMock(t)
	configuration := DefaultConfiguration()
	mockClock := clock.NewMock()
	c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t), Clock: mockClock})
	// Fetch a value
	expectSNMPLookup(t, c, "127.0.0.1", 765, answer{NOk: true})
	time.Sleep(30 * time.Millisecond)
	expectSNMPLookup(t, c, "127.0.0.1", 765, answer{
		ExporterName: "127_0_0_1",
		Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
	})
	// Keep it in the cache!
	mockClock.Add(25 * time.Minute)
	c.Lookup(mockClock.Now(), netip.MustParseAddr("::ffff:127.0.0.1"), 765)
	mockClock.Add(25 * time.Minute)
	c.Lookup(mockClock.Now(), netip.MustParseAddr("::ffff:127.0.0.1"), 765)
	// Go forward, we expect the entry to have been refreshed and be still present
	mockClock.Add(11 * time.Minute)
	time.Sleep(30 * time.Millisecond)
	mockClock.Add(2 * time.Minute)
	time.Sleep(30 * time.Millisecond)
	expectSNMPLookup(t, c, "127.0.0.1", 765, answer{
		ExporterName: "127_0_0_1",
		Interface:    Interface{Name: "Gi0/0/765", Description: "Interface 765", Speed: 1000},
	})
	gotMetrics := r.GetMetrics("akvorado_inlet_snmp_cache_")
	expectedMetrics := map[string]string{
		`expired`:      "0",
		`hit`:          "4",
		`miss`:         "1",
		`size`:         "1",
		`refresh_runs`: "31", // 63 minutes of mock time / check interval (63/2)
		`refresh`:      "1",
	}
	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
		t.Fatalf("Metrics (-got, +want):\n%s", diff)
	}
}
// TestConfigCheck ensures New() validates the cache-related settings:
// refresh must not be smaller than duration (unless disabled) and duration
// must not be smaller than the check interval. Rewritten as a table-driven
// test to remove the copy-pasted subtests.
func TestConfigCheck(t *testing.T) {
	cases := []struct {
		Name               string
		CacheDuration      time.Duration
		CacheRefresh       time.Duration
		CacheCheckInterval time.Duration
		ExpectError        bool
	}{
		// Refresh smaller than duration: rejected.
		{"refresh", 10 * time.Minute, 5 * time.Minute, time.Minute, true},
		// Check interval greater than duration: rejected.
		{"interval", 10 * time.Minute, 15 * time.Minute, 12 * time.Minute, true},
		// Refresh set to 0 disables the refresh check: accepted.
		{"refresh disabled", 10 * time.Minute, 0, 2 * time.Minute, false},
	}
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			configuration := DefaultConfiguration()
			configuration.CacheDuration = tc.CacheDuration
			configuration.CacheRefresh = tc.CacheRefresh
			configuration.CacheCheckInterval = tc.CacheCheckInterval
			_, err := New(reporter.NewMock(t), configuration, Dependencies{Daemon: daemon.NewMock(t)})
			if tc.ExpectError && err == nil {
				t.Fatal("New() should trigger an error")
			}
			if !tc.ExpectError && err != nil {
				t.Fatalf("New() error:\n%+v", err)
			}
		})
	}
}
func TestStartStopWithMultipleWorkers(t *testing.T) {
r := reporter.NewMock(t)
configuration := DefaultConfiguration()
configuration.Workers = 5
NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
}
// logCoalescePoller is a fake poller recording the requests it receives, so
// tests can check how requests were coalesced.
type logCoalescePoller struct {
	received []lookupRequest
}

// Poll records the polling request instead of doing any SNMP work.
// The receiver is named lcp to match the type (the previous fcp was a
// copy-paste leftover from another poller type).
func (lcp *logCoalescePoller) Poll(_ context.Context, exporterIP, _ netip.Addr, _ uint16, ifIndexes []uint) error {
	lcp.received = append(lcp.received, lookupRequest{exporterIP, ifIndexes})
	return nil
}
// TestCoalescing blocks the dispatcher, queues several lookups for the same
// exporter, then unblocks the dispatcher and checks that all requests were
// coalesced into a single polling request.
func TestCoalescing(t *testing.T) {
	lcp := &logCoalescePoller{
		received: []lookupRequest{},
	}
	r := reporter.NewMock(t)
	t.Run("run", func(t *testing.T) {
		c := NewMock(t, r, DefaultConfiguration(), Dependencies{Daemon: daemon.NewMock(t)})
		// Record polling requests instead of performing them.
		c.poller = lcp
		// Block dispatcher
		blocker := make(chan bool)
		c.dispatcherBChannel <- blocker
		defer func() {
			// Unblock the dispatcher and let it drain the queue
			// before the component is stopped.
			time.Sleep(20 * time.Millisecond)
			close(blocker)
			time.Sleep(20 * time.Millisecond)
		}()
		// Queue requests while the dispatcher is blocked.
		expectSNMPLookup(t, c, "127.0.0.1", 766, answer{NOk: true})
		expectSNMPLookup(t, c, "127.0.0.1", 767, answer{NOk: true})
		expectSNMPLookup(t, c, "127.0.0.1", 768, answer{NOk: true})
		expectSNMPLookup(t, c, "127.0.0.1", 769, answer{NOk: true})
	})
	gotMetrics := r.GetMetrics("akvorado_inlet_snmp_poller_", "coalesced_count")
	expectedMetrics := map[string]string{
		`coalesced_count`: "4",
	}
	if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
		t.Errorf("Metrics (-got, +want):\n%s", diff)
	}
	// All four ifindexes should have been polled in one single request.
	expectedAccepted := []lookupRequest{
		{netip.MustParseAddr("::ffff:127.0.0.1"), []uint{766, 767, 768, 769}},
	}
	if diff := helpers.Diff(lcp.received, expectedAccepted); diff != "" {
		t.Errorf("Accepted requests (-got, +want):\n%s", diff)
	}
}
// errorPoller is a fake poller that always fails, used to trip the circuit
// breaker in tests.
type errorPoller struct{}

// Poll always returns an error. The receiver is named ep to match the type
// (the previous fcp was a copy-paste leftover from another poller type).
func (ep *errorPoller) Poll(_ context.Context, _, _ netip.Addr, _ uint16, _ []uint) error {
	return errors.New("noooo")
}
// TestPollerBreaker checks the per-exporter circuit breaker: with a poller
// that always fails, the breaker opens after enough consecutive errors (20,
// see pollerIncomingRequest) and the remaining requests are rejected, while
// a healthy poller never opens it.
func TestPollerBreaker(t *testing.T) {
	cases := []struct {
		Name          string
		Poller        poller
		ExpectedCount string
	}{
		// nil keeps the mock poller installed by NewMock: no errors.
		{"always successful poller", nil, "0"},
		// 30 polls, the last 10 hit an already-open breaker.
		{"never successful poller", &errorPoller{}, "10"},
	}
	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			r := reporter.NewMock(t)
			configuration := DefaultConfiguration()
			// Disable coalescing so each lookup maps to one poll.
			configuration.PollerCoalesce = 0
			c := NewMock(t, r, configuration, Dependencies{Daemon: daemon.NewMock(t)})
			if tc.Poller != nil {
				c.poller = tc.Poller
			}
			// Pre-create the metric so it is reported even at 0.
			c.metrics.pollerBreakerOpenCount.WithLabelValues("127.0.0.1").Add(0)
			for i := 0; i < 30; i++ {
				c.Lookup(c.d.Clock.Now(), netip.MustParseAddr("::ffff:127.0.0.1"), 765)
			}
			// A second exporter has its own breaker and is unaffected.
			for i := 0; i < 5; i++ {
				c.Lookup(c.d.Clock.Now(), netip.MustParseAddr("::ffff:127.0.0.2"), 765)
			}
			time.Sleep(50 * time.Millisecond)
			gotMetrics := r.GetMetrics("akvorado_inlet_snmp_poller_", "breaker_open_count", "coalesced_count")
			expectedMetrics := map[string]string{
				`coalesced_count`: "0",
				`breaker_open_count{exporter="127.0.0.1"}`: tc.ExpectedCount,
			}
			if diff := helpers.Diff(gotMetrics, expectedMetrics); diff != "" {
				t.Errorf("Metrics (-got, +want):\n%s", diff)
			}
		})
	}
}
// agentLogPoller is a fake poller remembering the last exporter and agent
// addresses it was asked to poll. Access to the fields is guarded by mu.
type agentLogPoller struct {
	lastExporter string
	lastAgent    string
	mu           sync.Mutex
}

// Poll records the exporter and agent addresses instead of polling.
func (alp *agentLogPoller) Poll(_ context.Context, exporterIP, agentIP netip.Addr, _ uint16, _ []uint) error {
	exporter := exporterIP.Unmap().String()
	agent := agentIP.Unmap().String()
	alp.mu.Lock()
	defer alp.mu.Unlock()
	alp.lastExporter = exporter
	alp.lastAgent = agent
	return nil
}
// TestAgentMapping checks that the agent configured for an exporter is used
// when polling and that unmapped exporters are polled directly. The
// duplicated lock/check/unlock sequences are factored into a helper closure
// (t.Fatalf runs deferred calls via runtime.Goexit, so the deferred Unlock
// is safe).
func TestAgentMapping(t *testing.T) {
	alp := &agentLogPoller{}
	r := reporter.NewMock(t)
	config := DefaultConfiguration()
	config.Agents = map[netip.Addr]netip.Addr{
		// Rely on IPv4 → IPv6 conversion in New()
		netip.MustParseAddr("192.0.2.1"): netip.MustParseAddr("192.0.2.10"),
	}
	c := NewMock(t, r, config, Dependencies{Daemon: daemon.NewMock(t)})
	c.poller = alp
	// expectAgent checks, under the poller lock, which agent was polled last.
	expectAgent := func(expected string) {
		t.Helper()
		alp.mu.Lock()
		defer alp.mu.Unlock()
		if alp.lastAgent != expected {
			t.Fatalf("last agent should have been %s, not %s", expected, alp.lastAgent)
		}
	}
	// 192.0.2.1 is mapped to agent 192.0.2.10.
	expectSNMPLookup(t, c, "192.0.2.1", 766, answer{NOk: true})
	time.Sleep(20 * time.Millisecond)
	expectAgent("192.0.2.10")
	// 192.0.2.2 has no mapping: the exporter IP itself is polled.
	expectSNMPLookup(t, c, "192.0.2.2", 766, answer{NOk: true})
	time.Sleep(20 * time.Millisecond)
	expectAgent("192.0.2.2")
}

View File

@@ -1,64 +0,0 @@
// SPDX-FileCopyrightText: 2022 Free Mobile
// SPDX-License-Identifier: AGPL-3.0-only
//go:build !release
package snmp
import (
"context"
"fmt"
"net/netip"
"strings"
"testing"
"akvorado/common/helpers"
"akvorado/common/reporter"
)
// mockPoller is a fake SNMP poller answering with static data.
type mockPoller struct {
	config Configuration // used to look up the community for an exporter
	put    func(netip.Addr, string, uint, Interface) // callback receiving synthetic answers
}
// newMockPoller creates a fake SNMP poller using the provided configuration
// and answer callback.
func newMockPoller(configuration Configuration, put func(netip.Addr, string, uint, Interface)) *mockPoller {
	p := mockPoller{
		config: configuration,
		put:    put,
	}
	return &p
}
// Poll builds synthetic interface data for each requested ifindex. Answers
// are only produced when the exporter's community resolves to "public".
func (p *mockPoller) Poll(_ context.Context, exporter, _ netip.Addr, _ uint16, ifIndexes []uint) error {
	// The community is the same for every ifindex: check it once.
	if p.config.Communities.LookupOrDefault(exporter, "public") != "public" {
		return nil
	}
	exporterName := strings.ReplaceAll(exporter.Unmap().String(), ".", "_")
	for _, ifIndex := range ifIndexes {
		p.put(exporter, exporterName, ifIndex, Interface{
			Name:        fmt.Sprintf("Gi0/0/%d", ifIndex),
			Description: fmt.Sprintf("Interface %d", ifIndex),
			Speed:       1000,
		})
	}
	return nil
}
// NewMock creates a new SNMP component building synthetic values. It is
// already started (and stopped on test cleanup).
//
// The reporter parameter is named r to avoid shadowing the reporter package
// inside the function body.
func NewMock(t *testing.T, r *reporter.Reporter, configuration Configuration, dependencies Dependencies) *Component {
	t.Helper()
	c, err := New(r, configuration, dependencies)
	if err != nil {
		t.Fatalf("New() error:\n%+v", err)
	}
	// Change the poller to a fake one. Ifindex 999 simulates an interface
	// answering with empty data.
	c.poller = newMockPoller(configuration, func(ip netip.Addr, exporterName string, index uint, iface Interface) {
		if index != 999 {
			c.sc.Put(c.d.Clock.Now(), ip, exporterName, index, iface)
		} else {
			c.sc.Put(c.d.Clock.Now(), ip, exporterName, index, Interface{})
		}
	})
	helpers.StartStop(t, c)
	return c
}