chore: fix many staticcheck warnings

The most important ones were fixed in the two previous commits.
This commit is contained in:
Vincent Bernat
2025-08-02 20:54:49 +02:00
parent 7a268d271e
commit 03b947e3c5
20 changed files with 26 additions and 61 deletions

View File

@@ -20,7 +20,7 @@ func (c *Component) ExecOnCluster(ctx context.Context, query string, args ...any
} }
var ( var (
spacesRegexp = regexp.MustCompile("\\s+") spacesRegexp = regexp.MustCompile(`\s+`)
statementBeforeOnClusterRegexp = regexp.MustCompile(fmt.Sprintf("^((?i)%s)", strings.Join([]string{ statementBeforeOnClusterRegexp = regexp.MustCompile(fmt.Sprintf("^((?i)%s)", strings.Join([]string{
`ALTER TABLE \S+`, `ALTER TABLE \S+`,
`ATTACH DICTIONARY \S+`, `ATTACH DICTIONARY \S+`,

View File

@@ -72,7 +72,7 @@ func isListen(fl validator.FieldLevel) bool {
// (both fields should be a slice) // (both fields should be a slice)
func noIntersectField(fl validator.FieldLevel) bool { func noIntersectField(fl validator.FieldLevel) bool {
field := fl.Field() field := fl.Field()
currentField, _, ok := fl.GetStructFieldOK() currentField, _, _, ok := fl.GetStructFieldOK2()
if !ok { if !ok {
return false return false
} }

View File

@@ -13,11 +13,6 @@ import (
"golang.org/x/oauth2/clientcredentials" "golang.org/x/oauth2/clientcredentials"
) )
// tokenProvider implements OAuth token provider for franz-go.
type tokenProvider struct {
tokenSource oauth2.TokenSource
}
// newOAuthTokenProvider returns a token provider function using OAuth credentials. // newOAuthTokenProvider returns a token provider function using OAuth credentials.
func newOAuthTokenProvider(tlsConfig *tls.Config, oauthConfig clientcredentials.Config) func(context.Context) (oauth.Auth, error) { func newOAuthTokenProvider(tlsConfig *tls.Config, oauthConfig clientcredentials.Config) func(context.Context) (oauth.Auth, error) {
return func(ctx context.Context) (oauth.Auth, error) { return func(ctx context.Context) (oauth.Auth, error) {

View File

@@ -164,8 +164,6 @@ func TestMetrics(t *testing.T) {
type customMetrics struct { type customMetrics struct {
metric1 *reporter.MetricDesc metric1 *reporter.MetricDesc
metric2 *reporter.MetricDesc metric2 *reporter.MetricDesc
count int
} }
func (m customMetrics) Describe(ch chan<- *prometheus.Desc) { func (m customMetrics) Describe(ch chan<- *prometheus.Desc) {
@@ -174,7 +172,6 @@ func (m customMetrics) Describe(ch chan<- *prometheus.Desc) {
} }
func (m customMetrics) Collect(ch chan<- prometheus.Metric) { func (m customMetrics) Collect(ch chan<- prometheus.Metric) {
m.count++
ch <- prometheus.MustNewConstMetric(m.metric1, prometheus.GaugeValue, 18) ch <- prometheus.MustNewConstMetric(m.metric1, prometheus.GaugeValue, 18)
ch <- prometheus.MustNewConstMetric(m.metric2, prometheus.GaugeValue, 30) ch <- prometheus.MustNewConstMetric(m.metric2, prometheus.GaugeValue, 30)
} }

View File

@@ -130,8 +130,8 @@ func (c *Component) Start() error {
return fmt.Errorf("error while watching for Docker events: %w", err) return fmt.Errorf("error while watching for Docker events: %w", err)
case msg := <-msgs: case msg := <-msgs:
c.r.Info(). c.r.Info().
Str("id", msg.ID). Str("id", msg.Actor.ID).
Str("from", msg.From). Str("from", msg.Actor.Attributes["image"]).
Msg("new container started") Msg("new container started")
c.metrics.runs.WithLabelValues("new container").Inc() c.metrics.runs.WithLabelValues("new container").Inc()
trigger() trigger()

View File

@@ -12,6 +12,7 @@ import (
"time" "time"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/events"
"github.com/docker/go-connections/nat" "github.com/docker/go-connections/nat"
_ "github.com/opencontainers/image-spec/specs-go/v1" // used by mock _ "github.com/opencontainers/image-spec/specs-go/v1" // used by mock
@@ -59,12 +60,12 @@ func TestRoot(t *testing.T) {
// Initial trigger // Initial trigger
dockerClientMock.EXPECT(). dockerClientMock.EXPECT().
ContainerList(gomock.Any(), gomock.Any()). ContainerList(gomock.Any(), gomock.Any()).
Return([]types.Container{{ID: "initial"}}, nil) Return([]container.Summary{{ID: "initial"}}, nil)
dockerClientMock.EXPECT(). dockerClientMock.EXPECT().
ContainerInspect(gomock.Any(), "initial"). ContainerInspect(gomock.Any(), "initial").
Return(types.ContainerJSON{ Return(container.InspectResponse{
NetworkSettings: &types.NetworkSettings{ NetworkSettings: &container.NetworkSettings{
NetworkSettingsBase: types.NetworkSettingsBase{ NetworkSettingsBase: container.NetworkSettingsBase{
Ports: map[nat.Port][]nat.PortBinding{ Ports: map[nat.Port][]nat.PortBinding{
"2055/udp": { "2055/udp": {
nat.PortBinding{ nat.PortBinding{
@@ -134,12 +135,12 @@ func TestRoot(t *testing.T) {
t.Run("new container", func(_ *testing.T) { t.Run("new container", func(_ *testing.T) {
dockerClientMock.EXPECT(). dockerClientMock.EXPECT().
ContainerList(gomock.Any(), gomock.Any()). ContainerList(gomock.Any(), gomock.Any()).
Return([]types.Container{{ID: "new one"}}, nil) Return([]container.Summary{{ID: "new one"}}, nil)
dockerClientMock.EXPECT(). dockerClientMock.EXPECT().
ContainerInspect(gomock.Any(), "new one"). ContainerInspect(gomock.Any(), "new one").
Return(types.ContainerJSON{ Return(container.InspectResponse{
NetworkSettings: &types.NetworkSettings{ NetworkSettings: &container.NetworkSettings{
NetworkSettingsBase: types.NetworkSettingsBase{ NetworkSettingsBase: container.NetworkSettingsBase{
Ports: map[nat.Port][]nat.PortBinding{ Ports: map[nat.Port][]nat.PortBinding{
"2055/udp": { "2055/udp": {
nat.PortBinding{ nat.PortBinding{

View File

@@ -155,5 +155,4 @@ func (c *Component) startBMPClient(ctx context.Context) {
case <-done: case <-done:
case <-ctx.Done(): case <-ctx.Done():
} }
return
} }

View File

@@ -62,6 +62,9 @@ func TestClient(t *testing.T) {
// Test we get a reconnect // Test we get a reconnect
conn, err := listener.Accept() conn, err := listener.Accept()
if err != nil {
t.Fatalf("Accept() error:\n%+v", err)
}
time.Sleep(20 * time.Millisecond) time.Sleep(20 * time.Millisecond)
conn.Close() conn.Close()
conn, err = listener.Accept() conn, err = listener.Accept()
@@ -96,6 +99,7 @@ func TestClient(t *testing.T) {
} }
// Assume we got what we want. // Assume we got what we want.
_ = msgs
time.Sleep(20 * time.Millisecond) time.Sleep(20 * time.Millisecond)
gotMetrics := r.GetMetrics("akvorado_demoexporter_") gotMetrics := r.GetMetrics("akvorado_demoexporter_")

View File

@@ -20,7 +20,6 @@ type Component struct {
t tomb.Tomb t tomb.Tomb
config Configuration config Configuration
bmpPort int
metrics struct { metrics struct {
connections reporter.Counter connections reporter.Counter
errors *reporter.CounterVec errors *reporter.CounterVec

View File

@@ -35,7 +35,7 @@ type CompressionCodec kgo.CompressionCodec
// UnmarshalText produces a compression codec // UnmarshalText produces a compression codec
func (cc *CompressionCodec) UnmarshalText(text []byte) error { func (cc *CompressionCodec) UnmarshalText(text []byte) error {
codec := kgo.CompressionCodec{} var codec kgo.CompressionCodec
switch string(text) { switch string(text) {
case "none": case "none":
codec = kgo.NoCompression() codec = kgo.NoCompression()

View File

@@ -10,28 +10,9 @@ import (
"net" "net"
"strings" "strings"
"github.com/ClickHouse/clickhouse-go/v2"
"akvorado/common/reporter"
"akvorado/common/schema" "akvorado/common/schema"
) )
type migrationStep struct {
// CheckQuery to execute to check if the step is needed.
CheckQuery string
// Arguments to use for the query
Args []any
// Function to execute if the query returns no row or returns `0'.
Do func() error
}
type migrationStepFunc func(context.Context, reporter.Logger, clickhouse.Conn) migrationStep
type migrationStepWithDescription struct {
Description string
Step migrationStepFunc
}
// migrateDatabase execute database migration // migrateDatabase execute database migration
func (c *Component) migrateDatabase() error { func (c *Component) migrateDatabase() error {
ctx := c.t.Context(nil) ctx := c.t.Context(nil)

View File

@@ -63,7 +63,7 @@ func quoteString(s string) string {
func (c *Component) tableAlreadyExists(ctx context.Context, table, column, target string) (bool, error) { func (c *Component) tableAlreadyExists(ctx context.Context, table, column, target string) (bool, error) {
// Normalize a bit the target. This is far from perfect, but we test that // Normalize a bit the target. This is far from perfect, but we test that
// and we hope this does not differ between ClickHouse versions! // and we hope this does not differ between ClickHouse versions!
target = strings.TrimSpace(regexp.MustCompile("\\s+").ReplaceAllString(target, " ")) target = strings.TrimSpace(regexp.MustCompile(`\s+`).ReplaceAllString(target, " "))
// Fetch the existing one // Fetch the existing one
row := c.d.ClickHouse.QueryRow(ctx, row := c.d.ClickHouse.QueryRow(ctx,

View File

@@ -23,8 +23,6 @@ var (
regexCache = make(map[string]*regexp.Regexp) regexCache = make(map[string]*regexp.Regexp)
) )
type classifierContextKey string
// ExporterClassifierRule defines a classification rule for a exporter. // ExporterClassifierRule defines a classification rule for a exporter.
type ExporterClassifierRule struct { type ExporterClassifierRule struct {
program *vm.Program program *vm.Program

View File

@@ -30,8 +30,6 @@ type Configuration struct {
ASNProviders []ASNProvider `validate:"dive"` ASNProviders []ASNProvider `validate:"dive"`
// NetProviders defines the source used to get Prefix/Network Information // NetProviders defines the source used to get Prefix/Network Information
NetProviders []NetProvider `validate:"dive"` NetProviders []NetProvider `validate:"dive"`
// Old configuration settings
classifierCacheSize uint
} }
// DefaultConfiguration represents the default configuration for the core component. // DefaultConfiguration represents the default configuration for the core component.
@@ -130,7 +128,7 @@ func ConfigurationUnmarshallerHook() mapstructure.DecodeHookFunc {
} }
if oldKey != nil { if oldKey != nil {
oldValue := helpers.ElemOrIdentity(from.MapIndex(*oldKey)) oldValue := helpers.ElemOrIdentity(from.MapIndex(*oldKey))
if oldValue.Kind() == reflect.Bool && oldValue.Bool() == true { if oldValue.Kind() == reflect.Bool && oldValue.Bool() {
newASNProviders := []ASNProvider{} newASNProviders := []ASNProvider{}
for _, p := range DefaultConfiguration().ASNProviders { for _, p := range DefaultConfiguration().ASNProviders {
if p != ASNProviderFlow && p != ASNProviderFlowExceptPrivate { if p != ASNProviderFlow && p != ASNProviderFlowExceptPrivate {
@@ -151,5 +149,5 @@ func init() {
helpers.RegisterMapstructureUnmarshallerHook(ASNProviderUnmarshallerHook()) helpers.RegisterMapstructureUnmarshallerHook(ASNProviderUnmarshallerHook())
helpers.RegisterMapstructureUnmarshallerHook(NetProviderUnmarshallerHook()) helpers.RegisterMapstructureUnmarshallerHook(NetProviderUnmarshallerHook())
helpers.RegisterMapstructureUnmarshallerHook(helpers.SubnetMapUnmarshallerHook[uint]()) helpers.RegisterMapstructureUnmarshallerHook(helpers.SubnetMapUnmarshallerHook[uint]())
helpers.RegisterMapstructureDeprecatedFields[Configuration]("Workers") helpers.RegisterMapstructureDeprecatedFields[Configuration]("Workers", "ClassifierCacheSize")
} }

View File

@@ -352,7 +352,7 @@ func isPrivateAS(as uint32) bool {
if as == 0 || as == 23456 { if as == 0 || as == 23456 {
return true return true
} }
if 64496 <= as && as <= 65551 || 4_200_000_000 <= as && as <= 4_294_967_295 { if 64496 <= as && as <= 65551 || 4_200_000_000 <= as {
return true return true
} }
return false return false

View File

@@ -147,7 +147,7 @@ type samplingRateSystem struct {
func (s *samplingRateSystem) GetSamplingRate(version uint16, obsDomainID uint32, samplerID uint64) uint32 { func (s *samplingRateSystem) GetSamplingRate(version uint16, obsDomainID uint32, samplerID uint64) uint32 {
s.lock.RLock() s.lock.RLock()
defer s.lock.RUnlock() defer s.lock.RUnlock()
rate, _ := s.rates[samplingRateKey{ rate := s.rates[samplingRateKey{
version: version, version: version,
obsDomainID: obsDomainID, obsDomainID: obsDomainID,
samplerID: samplerID, samplerID: samplerID,

View File

@@ -4,7 +4,6 @@
package netflow package netflow
import ( import (
"fmt"
"net/netip" "net/netip"
"path/filepath" "path/filepath"
"testing" "testing"
@@ -603,7 +602,7 @@ func TestDecodeNFv5(t *testing.T) {
pb.RawFlow_TS_NETFLOW_PACKET, pb.RawFlow_TS_NETFLOW_PACKET,
pb.RawFlow_TS_NETFLOW_FIRST_SWITCHED, pb.RawFlow_TS_NETFLOW_FIRST_SWITCHED,
} { } {
t.Run(fmt.Sprintf("%s", tsSource), func(t *testing.T) { t.Run(tsSource.String(), func(t *testing.T) {
_, nfdecoder, bf, got, finalize := setup(t, false) _, nfdecoder, bf, got, finalize := setup(t, false)
options := decoder.Option{TimestampSource: tsSource} options := decoder.Option{TimestampSource: tsSource}

View File

@@ -12,7 +12,6 @@ import (
"net/netip" "net/netip"
"path/filepath" "path/filepath"
"slices" "slices"
"sort"
"strconv" "strconv"
"sync" "sync"
"sync/atomic" "sync/atomic"
@@ -49,11 +48,6 @@ func expectCacheLookup(t *testing.T, sc *metadataCache, exporterIP string, ifInd
} }
} }
func sortResults(t *testing.T, ifindexes []uint) {
t.Helper()
sort.Slice(ifindexes, func(i, j int) bool { return ifindexes[i] < ifindexes[j] })
}
func TestGetEmpty(t *testing.T) { func TestGetEmpty(t *testing.T) {
r, sc := setupTestCache(t) r, sc := setupTestCache(t)
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{}) expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{})

View File

@@ -125,7 +125,7 @@ func (p *Provider) Dial(config RISInstance) (*RISInstanceRuntime, error) {
securityOption = grpc.WithTransportCredentials(credentials.NewTLS(config)) securityOption = grpc.WithTransportCredentials(credentials.NewTLS(config))
} }
backoff := backoff.DefaultConfig backoff := backoff.DefaultConfig
conn, err := grpc.Dial(config.GRPCAddr, securityOption, conn, err := grpc.NewClient(config.GRPCAddr, securityOption,
grpc.WithUnaryInterceptor(p.clientMetrics.UnaryClientInterceptor()), grpc.WithUnaryInterceptor(p.clientMetrics.UnaryClientInterceptor()),
grpc.WithStreamInterceptor(p.clientMetrics.StreamClientInterceptor()), grpc.WithStreamInterceptor(p.clientMetrics.StreamClientInterceptor()),
grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoff}), grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoff}),

View File

@@ -392,7 +392,7 @@ func TestBioRIS(t *testing.T) {
// Check BioRIS server // Check BioRIS server
{ {
risConn, err := grpc.Dial(rpcListener.Addr().String(), risConn, err := grpc.NewClient(rpcListener.Addr().String(),
grpc.WithTransportCredentials(insecure.NewCredentials())) grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil { if err != nil {
t.Fatalf("Dial() error:\n%+v", err) t.Fatalf("Dial() error:\n%+v", err)