chore: fix many staticcheck warnings

The most important ones were fixed in the two previous commits.
This commit is contained in:
Vincent Bernat
2025-08-02 20:54:49 +02:00
parent 7a268d271e
commit 03b947e3c5
20 changed files with 26 additions and 61 deletions

View File

@@ -20,7 +20,7 @@ func (c *Component) ExecOnCluster(ctx context.Context, query string, args ...any
}
var (
spacesRegexp = regexp.MustCompile("\\s+")
spacesRegexp = regexp.MustCompile(`\s+`)
statementBeforeOnClusterRegexp = regexp.MustCompile(fmt.Sprintf("^((?i)%s)", strings.Join([]string{
`ALTER TABLE \S+`,
`ATTACH DICTIONARY \S+`,

View File

@@ -72,7 +72,7 @@ func isListen(fl validator.FieldLevel) bool {
// (both fields should be a slice)
func noIntersectField(fl validator.FieldLevel) bool {
field := fl.Field()
currentField, _, ok := fl.GetStructFieldOK()
currentField, _, _, ok := fl.GetStructFieldOK2()
if !ok {
return false
}

View File

@@ -13,11 +13,6 @@ import (
"golang.org/x/oauth2/clientcredentials"
)
// tokenProvider implements OAuth token provider for franz-go.
type tokenProvider struct {
tokenSource oauth2.TokenSource
}
// newOAuthTokenProvider returns a token provider function using OAuth credentials.
func newOAuthTokenProvider(tlsConfig *tls.Config, oauthConfig clientcredentials.Config) func(context.Context) (oauth.Auth, error) {
return func(ctx context.Context) (oauth.Auth, error) {

View File

@@ -164,8 +164,6 @@ func TestMetrics(t *testing.T) {
type customMetrics struct {
metric1 *reporter.MetricDesc
metric2 *reporter.MetricDesc
count int
}
func (m customMetrics) Describe(ch chan<- *prometheus.Desc) {
@@ -174,7 +172,6 @@ func (m customMetrics) Describe(ch chan<- *prometheus.Desc) {
}
func (m customMetrics) Collect(ch chan<- prometheus.Metric) {
m.count++
ch <- prometheus.MustNewConstMetric(m.metric1, prometheus.GaugeValue, 18)
ch <- prometheus.MustNewConstMetric(m.metric2, prometheus.GaugeValue, 30)
}

View File

@@ -130,8 +130,8 @@ func (c *Component) Start() error {
return fmt.Errorf("error while watching for Docker events: %w", err)
case msg := <-msgs:
c.r.Info().
Str("id", msg.ID).
Str("from", msg.From).
Str("id", msg.Actor.ID).
Str("from", msg.Actor.Attributes["image"]).
Msg("new container started")
c.metrics.runs.WithLabelValues("new container").Inc()
trigger()

View File

@@ -12,6 +12,7 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/go-connections/nat"
_ "github.com/opencontainers/image-spec/specs-go/v1" // used by mock
@@ -59,12 +60,12 @@ func TestRoot(t *testing.T) {
// Initial trigger
dockerClientMock.EXPECT().
ContainerList(gomock.Any(), gomock.Any()).
Return([]types.Container{{ID: "initial"}}, nil)
Return([]container.Summary{{ID: "initial"}}, nil)
dockerClientMock.EXPECT().
ContainerInspect(gomock.Any(), "initial").
Return(types.ContainerJSON{
NetworkSettings: &types.NetworkSettings{
NetworkSettingsBase: types.NetworkSettingsBase{
Return(container.InspectResponse{
NetworkSettings: &container.NetworkSettings{
NetworkSettingsBase: container.NetworkSettingsBase{
Ports: map[nat.Port][]nat.PortBinding{
"2055/udp": {
nat.PortBinding{
@@ -134,12 +135,12 @@ func TestRoot(t *testing.T) {
t.Run("new container", func(_ *testing.T) {
dockerClientMock.EXPECT().
ContainerList(gomock.Any(), gomock.Any()).
Return([]types.Container{{ID: "new one"}}, nil)
Return([]container.Summary{{ID: "new one"}}, nil)
dockerClientMock.EXPECT().
ContainerInspect(gomock.Any(), "new one").
Return(types.ContainerJSON{
NetworkSettings: &types.NetworkSettings{
NetworkSettingsBase: types.NetworkSettingsBase{
Return(container.InspectResponse{
NetworkSettings: &container.NetworkSettings{
NetworkSettingsBase: container.NetworkSettingsBase{
Ports: map[nat.Port][]nat.PortBinding{
"2055/udp": {
nat.PortBinding{

View File

@@ -155,5 +155,4 @@ func (c *Component) startBMPClient(ctx context.Context) {
case <-done:
case <-ctx.Done():
}
return
}

View File

@@ -62,6 +62,9 @@ func TestClient(t *testing.T) {
// Test we get a reconnect
conn, err := listener.Accept()
if err != nil {
t.Fatalf("Accept() error:\n%+v", err)
}
time.Sleep(20 * time.Millisecond)
conn.Close()
conn, err = listener.Accept()
@@ -96,6 +99,7 @@ func TestClient(t *testing.T) {
}
// Assume we got what we want.
_ = msgs
time.Sleep(20 * time.Millisecond)
gotMetrics := r.GetMetrics("akvorado_demoexporter_")

View File

@@ -20,7 +20,6 @@ type Component struct {
t tomb.Tomb
config Configuration
bmpPort int
metrics struct {
connections reporter.Counter
errors *reporter.CounterVec

View File

@@ -35,7 +35,7 @@ type CompressionCodec kgo.CompressionCodec
// UnmarshalText produces a compression codec
func (cc *CompressionCodec) UnmarshalText(text []byte) error {
codec := kgo.CompressionCodec{}
var codec kgo.CompressionCodec
switch string(text) {
case "none":
codec = kgo.NoCompression()

View File

@@ -10,28 +10,9 @@ import (
"net"
"strings"
"github.com/ClickHouse/clickhouse-go/v2"
"akvorado/common/reporter"
"akvorado/common/schema"
)
type migrationStep struct {
// CheckQuery to execute to check if the step is needed.
CheckQuery string
// Arguments to use for the query
Args []any
// Function to execute if the query returns no row or returns `0'.
Do func() error
}
type migrationStepFunc func(context.Context, reporter.Logger, clickhouse.Conn) migrationStep
type migrationStepWithDescription struct {
Description string
Step migrationStepFunc
}
// migrateDatabase execute database migration
func (c *Component) migrateDatabase() error {
ctx := c.t.Context(nil)

View File

@@ -63,7 +63,7 @@ func quoteString(s string) string {
func (c *Component) tableAlreadyExists(ctx context.Context, table, column, target string) (bool, error) {
// Normalize a bit the target. This is far from perfect, but we test that
// and we hope this does not differ between ClickHouse versions!
target = strings.TrimSpace(regexp.MustCompile("\\s+").ReplaceAllString(target, " "))
target = strings.TrimSpace(regexp.MustCompile(`\s+`).ReplaceAllString(target, " "))
// Fetch the existing one
row := c.d.ClickHouse.QueryRow(ctx,

View File

@@ -23,8 +23,6 @@ var (
regexCache = make(map[string]*regexp.Regexp)
)
type classifierContextKey string
// ExporterClassifierRule defines a classification rule for a exporter.
type ExporterClassifierRule struct {
program *vm.Program

View File

@@ -30,8 +30,6 @@ type Configuration struct {
ASNProviders []ASNProvider `validate:"dive"`
// NetProviders defines the source used to get Prefix/Network Information
NetProviders []NetProvider `validate:"dive"`
// Old configuration settings
classifierCacheSize uint
}
// DefaultConfiguration represents the default configuration for the core component.
@@ -130,7 +128,7 @@ func ConfigurationUnmarshallerHook() mapstructure.DecodeHookFunc {
}
if oldKey != nil {
oldValue := helpers.ElemOrIdentity(from.MapIndex(*oldKey))
if oldValue.Kind() == reflect.Bool && oldValue.Bool() == true {
if oldValue.Kind() == reflect.Bool && oldValue.Bool() {
newASNProviders := []ASNProvider{}
for _, p := range DefaultConfiguration().ASNProviders {
if p != ASNProviderFlow && p != ASNProviderFlowExceptPrivate {
@@ -151,5 +149,5 @@ func init() {
helpers.RegisterMapstructureUnmarshallerHook(ASNProviderUnmarshallerHook())
helpers.RegisterMapstructureUnmarshallerHook(NetProviderUnmarshallerHook())
helpers.RegisterMapstructureUnmarshallerHook(helpers.SubnetMapUnmarshallerHook[uint]())
helpers.RegisterMapstructureDeprecatedFields[Configuration]("Workers")
helpers.RegisterMapstructureDeprecatedFields[Configuration]("Workers", "ClassifierCacheSize")
}

View File

@@ -352,7 +352,7 @@ func isPrivateAS(as uint32) bool {
if as == 0 || as == 23456 {
return true
}
if 64496 <= as && as <= 65551 || 4_200_000_000 <= as && as <= 4_294_967_295 {
if 64496 <= as && as <= 65551 || 4_200_000_000 <= as {
return true
}
return false

View File

@@ -147,7 +147,7 @@ type samplingRateSystem struct {
func (s *samplingRateSystem) GetSamplingRate(version uint16, obsDomainID uint32, samplerID uint64) uint32 {
s.lock.RLock()
defer s.lock.RUnlock()
rate, _ := s.rates[samplingRateKey{
rate := s.rates[samplingRateKey{
version: version,
obsDomainID: obsDomainID,
samplerID: samplerID,

View File

@@ -4,7 +4,6 @@
package netflow
import (
"fmt"
"net/netip"
"path/filepath"
"testing"
@@ -603,7 +602,7 @@ func TestDecodeNFv5(t *testing.T) {
pb.RawFlow_TS_NETFLOW_PACKET,
pb.RawFlow_TS_NETFLOW_FIRST_SWITCHED,
} {
t.Run(fmt.Sprintf("%s", tsSource), func(t *testing.T) {
t.Run(tsSource.String(), func(t *testing.T) {
_, nfdecoder, bf, got, finalize := setup(t, false)
options := decoder.Option{TimestampSource: tsSource}

View File

@@ -12,7 +12,6 @@ import (
"net/netip"
"path/filepath"
"slices"
"sort"
"strconv"
"sync"
"sync/atomic"
@@ -49,11 +48,6 @@ func expectCacheLookup(t *testing.T, sc *metadataCache, exporterIP string, ifInd
}
}
func sortResults(t *testing.T, ifindexes []uint) {
t.Helper()
sort.Slice(ifindexes, func(i, j int) bool { return ifindexes[i] < ifindexes[j] })
}
func TestGetEmpty(t *testing.T) {
r, sc := setupTestCache(t)
expectCacheLookup(t, sc, "127.0.0.1", 676, provider.Answer{})

View File

@@ -125,7 +125,7 @@ func (p *Provider) Dial(config RISInstance) (*RISInstanceRuntime, error) {
securityOption = grpc.WithTransportCredentials(credentials.NewTLS(config))
}
backoff := backoff.DefaultConfig
conn, err := grpc.Dial(config.GRPCAddr, securityOption,
conn, err := grpc.NewClient(config.GRPCAddr, securityOption,
grpc.WithUnaryInterceptor(p.clientMetrics.UnaryClientInterceptor()),
grpc.WithStreamInterceptor(p.clientMetrics.StreamClientInterceptor()),
grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoff}),

View File

@@ -392,7 +392,7 @@ func TestBioRIS(t *testing.T) {
// Check BioRIS server
{
risConn, err := grpc.Dial(rpcListener.Addr().String(),
risConn, err := grpc.NewClient(rpcListener.Addr().String(),
grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
t.Fatalf("Dial() error:\n%+v", err)