build: add more linting rules with revive

Vincent Bernat
2025-11-12 22:37:44 +01:00
parent 3f47ccd714
commit 7f5950f89c
10 changed files with 96 additions and 56 deletions


@@ -239,8 +239,8 @@ test-coverage-js: ; $(info $(M) running JS coverage tests…) @ ## Run JS covera
.PHONY: lint
lint: .lint-go~ .lint-js~ ## Run linting
.lint-go~: $(shell $(LSFILES) '*.go' 2> /dev/null) ; $(info $(M) running golint)
$Q $(REVIVE) -formatter stylish -set_exit_status ./...
.lint-go~: revive.toml $(shell $(LSFILES) '*.go' 2> /dev/null) ; $(info $(M) running golint)
$Q $(REVIVE) -config $(PWD)/revive.toml -formatter stylish -set_exit_status ./...
$Q touch $@
.lint-js~: $(shell $(LSFILES) '*.js' '*.ts' '*.vue' '*.html' 2> /dev/null)
.lint-js~: $(GENERATED_JS) ; $(info $(M) running jslint)


@@ -147,7 +147,8 @@ var subnetLookAlikeRegex = regexp.MustCompile("^([a-fA-F:.0-9]*[:.][a-fA-F:.0-9]
// LooksLikeSubnetMap returns true iff the provided value could be a SubnetMap
// (but not 100% sure).
func LooksLikeSubnetMap(v reflect.Value) (result bool) {
func LooksLikeSubnetMap(v reflect.Value) bool {
var result bool
if v.Kind() == reflect.Map {
// When we have a map, we check if all keys look like a subnet.
result = true
@@ -163,7 +164,7 @@ func LooksLikeSubnetMap(v reflect.Value) (result bool) {
}
}
}
return
return result
}
// SubnetMapUnmarshallerHook decodes SubnetMap and notably check that valid
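The signature change above is driven by revive's bare-return rule: a named result combined with a bare "return" forces the reader back to the signature to see what the function actually yields. A minimal sketch of the pattern the rule flags and of the explicit form this commit converts to (illustrative names, not code from the repository):

package demo

// Flagged by bare-return: what does the bare "return" return?
func hasEvenOld(xs []int) (ok bool) {
	for _, x := range xs {
		if x%2 == 0 {
			ok = true
			break
		}
	}
	return
}

// Explicit form: a local variable and a visible return value.
func hasEven(xs []int) bool {
	ok := false
	for _, x := range xs {
		if x%2 == 0 {
			ok = true
			break
		}
	}
	return ok
}

The same rewrite is applied to getPrefix and enrichFlow further down.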


@@ -55,7 +55,8 @@ func (m *Metrics) HTTPHandler() http.Handler {
})
}
func getPrefix(module string) (moduleName string) {
func getPrefix(module string) string {
var moduleName string
if !strings.HasPrefix(module, stack.ModuleName) {
moduleName = stack.ModuleName
} else {
@@ -64,7 +65,7 @@ func getPrefix(module string) (moduleName string) {
moduleName = strings.ReplaceAll(moduleName, "/", "_")
moduleName = strings.ReplaceAll(moduleName, ".", "_")
moduleName = fmt.Sprintf("%s_", moduleName)
return
return moduleName
}
// Factory returns a factory to register new metrics with promauto. It


@@ -188,41 +188,43 @@ func TestMultipleServers(t *testing.T) {
for range 10 {
r := reporter.NewMock(t)
sch := schema.NewMock(t)
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
defer cancel()
ctx = clickhousego.Context(ctx, clickhousego.WithSettings(clickhousego.Settings{
"allow_suspicious_low_cardinality_types": 1,
}))
func() {
ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
defer cancel()
ctx = clickhousego.Context(ctx, clickhousego.WithSettings(clickhousego.Settings{
"allow_suspicious_low_cardinality_types": 1,
}))
// Create components
dbConf := clickhousedb.DefaultConfiguration()
dbConf.Servers = servers
dbConf.DialTimeout = 100 * time.Millisecond
chdb, err := clickhousedb.New(r, dbConf, clickhousedb.Dependencies{
Daemon: daemon.NewMock(t),
})
if err != nil {
t.Fatalf("clickhousedb.New() error:\n%+v", err)
}
helpers.StartStop(t, chdb)
conf := clickhouse.DefaultConfiguration()
conf.MaximumBatchSize = 10
ch, err := clickhouse.New(r, conf, clickhouse.Dependencies{
ClickHouse: chdb,
Schema: sch,
})
if err != nil {
t.Fatalf("clickhouse.New() error:\n%+v", err)
}
helpers.StartStop(t, ch)
// Create components
dbConf := clickhousedb.DefaultConfiguration()
dbConf.Servers = servers
dbConf.DialTimeout = 100 * time.Millisecond
chdb, err := clickhousedb.New(r, dbConf, clickhousedb.Dependencies{
Daemon: daemon.NewMock(t),
})
if err != nil {
t.Fatalf("clickhousedb.New() error:\n%+v", err)
}
helpers.StartStop(t, chdb)
conf := clickhouse.DefaultConfiguration()
conf.MaximumBatchSize = 10
ch, err := clickhouse.New(r, conf, clickhouse.Dependencies{
ClickHouse: chdb,
Schema: sch,
})
if err != nil {
t.Fatalf("clickhouse.New() error:\n%+v", err)
}
helpers.StartStop(t, ch)
// Trigger an empty send
bf := sch.NewFlowMessage()
bf.AppendUint(schema.ColumnDstAS, 65000)
bf.AppendUint(schema.ColumnBytes, 200)
bf.Finalize()
w := ch.NewWorker(1, bf)
w.Flush(ctx)
// Trigger an empty send
bf := sch.NewFlowMessage()
bf.AppendUint(schema.ColumnDstAS, 65000)
bf.AppendUint(schema.ColumnBytes, 200)
bf.Finalize()
w := ch.NewWorker(1, bf)
w.Flush(ctx)
}()
// Check metrics
gotMetrics := r.GetMetrics("akvorado_outlet_clickhouse_", "errors_total")
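Wrapping the body of each iteration in an immediately invoked func() { ... }() is what revive's defer rule nudges toward here: a defer placed directly in a loop only runs when the enclosing function returns, so the ten cancel() calls (and anything else deferred per iteration) would otherwise accumulate until TestMultipleServers exits. A small sketch of the idea, with placeholder names:

package demo

import "os"

func processAll(paths []string) {
	for _, p := range paths {
		func() {
			f, err := os.Open(p)
			if err != nil {
				return
			}
			// Deferred inside the closure, so it runs at the end of
			// this iteration rather than piling up until processAll
			// returns.
			defer f.Close()
			// ... read from f ...
		}()
	}
}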


@@ -19,11 +19,14 @@ type exporterAndInterfaceInfo struct {
}
// enrichFlow adds more data to a flow.
func (w *worker) enrichFlow(exporterIP netip.Addr, exporterStr string) (skip bool) {
var flowExporterName string
var flowInIfName, flowInIfDescription, flowOutIfName, flowOutIfDescription string
var flowInIfSpeed, flowOutIfSpeed, flowInIfIndex, flowOutIfIndex uint32
var flowInIfVlan, flowOutIfVlan uint16
func (w *worker) enrichFlow(exporterIP netip.Addr, exporterStr string) bool {
var (
flowExporterName string
flowInIfName, flowInIfDescription, flowOutIfName, flowOutIfDescription string
flowInIfSpeed, flowOutIfSpeed, flowInIfIndex, flowOutIfIndex uint32
flowInIfVlan, flowOutIfVlan uint16
)
var skip bool
t := time.Now() // only call it once
expClassification := exporterClassification{}
@@ -102,7 +105,7 @@ func (w *worker) enrichFlow(exporterIP netip.Addr, exporterStr string) (skip boo
}
if skip {
return
return true
}
// Classification
@@ -148,7 +151,7 @@ func (w *worker) enrichFlow(exporterIP netip.Addr, exporterStr string) (skip boo
flow.AppendUint(schema.ColumnInIfSpeed, uint64(flowInIfSpeed))
flow.AppendUint(schema.ColumnOutIfSpeed, uint64(flowOutIfSpeed))
return
return skip
}
// getASNumber retrieves the AS number for a flow, depending on user preferences.


@@ -46,6 +46,7 @@ func TestMock(t *testing.T) {
}
c.Stop()
time.Sleep(10 * time.Millisecond)
if !shutdownCalled {
t.Error("Stop() should have triggered shutdown function")
}


@@ -72,9 +72,7 @@ func (s *scalerState) nextWorkerCount(request ScaleRequest, currentWorkers, minW
func scaleWhileDraining(ctx context.Context, ch <-chan ScaleRequest, scaleFn func()) {
var wg sync.WaitGroup
done := make(chan struct{})
wg.Add(2)
go func() {
defer wg.Done()
wg.Go(func() {
for {
select {
case <-ctx.Done():
@@ -85,12 +83,11 @@ func scaleWhileDraining(ctx context.Context, ch <-chan ScaleRequest, scaleFn fun
// Discard signal
}
}
}()
go func() {
defer wg.Done()
})
wg.Go(func() {
scaleFn()
close(done)
}()
})
wg.Wait()
}
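This rewrite uses sync.WaitGroup.Go, added in Go 1.25 and suggested by revive's use-waitgroup-go rule: the method performs the Add, the go statement and the Done itself, so the counter can no longer drift out of sync with the number of goroutines. A rough before/after sketch (not taken from the repository):

package demo

import "sync"

// Manual bookkeeping: Add before the goroutine, Done inside it.
func runAllManual(jobs []func()) {
	var wg sync.WaitGroup
	for _, job := range jobs {
		wg.Add(1)
		go func() {
			defer wg.Done()
			job()
		}()
	}
	wg.Wait()
}

// With sync.WaitGroup.Go (Go 1.25+), the same thing in one call.
func runAll(jobs []func()) {
	var wg sync.WaitGroup
	for _, job := range jobs {
		wg.Go(job)
	}
	wg.Wait()
}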


@@ -36,8 +36,8 @@ func (c *mockComponent) StartWorkers(workerBuilder WorkerBuilderFunc) error {
}()
for i := range c.config.MinWorkers {
callback, shutdown := workerBuilder(i, ch)
defer shutdown()
go func() {
defer shutdown()
for {
message, ok := <-c.incoming
if !ok {
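Moving defer shutdown() from the loop body into the goroutine satisfies the same defer rule (which warns about defer inside a loop) and changes when it fires: deferred in the loop, every shutdown would sit on StartWorkers' stack until StartWorkers returns; deferred inside the goroutine, each shutdown runs when its own worker exits. A hedged sketch of the pattern, with made-up names:

package demo

func startWorkers(n int, build func(i int) (work, stop func())) {
	for i := range n {
		work, stop := build(i)
		go func() {
			// Tied to this worker's lifetime instead of being queued
			// until startWorkers returns.
			defer stop()
			work()
		}()
	}
}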


@@ -4,7 +4,7 @@
package gnmi
import (
"fmt"
"errors"
"net/netip"
"reflect"
"time"
@@ -197,7 +197,7 @@ func AuthenticationParameterUnmarshallerHook() mapstructure.DecodeHookFunc {
// If we have new TLS key and any old keys, that's an error
if tlsKey != nil && (insecureKey != nil || skipVerifyKey != nil || tlsCAKey != nil || tlsCertKey != nil || tlsKeyKey != nil) {
return nil, fmt.Errorf("cannot mix old TLS configuration (Insecure, SkipVerify, TLSCA, TLSCert, TLSKey) with new TLS configuration")
return nil, errors.New("cannot mix old TLS configuration (Insecure, SkipVerify, TLSCA, TLSCert, TLSKey) with new TLS configuration")
}
// If no old keys, we need to set default TLS.Enable
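Swapping fmt.Errorf for errors.New follows revive's use-errors-new rule: fmt.Errorf only earns its keep when there is something to format or wrap; for a constant message, errors.New is simpler and skips the formatting machinery. Roughly, as an assumed example:

package demo

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found") // constant message: errors.New

func lookup(name string) error {
	if name == "" {
		// use-errors-new flags fmt.Errorf("empty name"): no verbs, no wrapping.
		return errors.New("empty name")
	}
	// fmt.Errorf is still the right tool when formatting or wrapping:
	return fmt.Errorf("looking up %q: %w", name, errNotFound)
}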

revive.toml Normal file

@@ -0,0 +1,35 @@
enableDefaultRules = true
ignoreGeneratedHeader = false
confidence = 0.8
[rule.atomic]
[rule.bare-return]
[rule.blank-imports]
[rule.bool-literal-in-expr]
[rule.context-as-argument]
[rule.context-keys-type]
[rule.defer]
[rule.dot-imports]
[rule.empty-block]
[rule.error-naming]
[rule.error-return]
[rule.error-strings]
[rule.errorf]
[rule.exported]
[rule.increment-decrement]
[rule.indent-error-flow]
[rule.package-comments]
[rule.range]
[rule.receiver-naming]
[rule.redefines-builtin-id]
[rule.superfluous-else]
[rule.time-equal]
[rule.time-naming]
[rule.unexported-return]
[rule.unreachable-code]
[rule.unused-parameter]
[rule.use-any]
[rule.use-errors-new]
[rule.use-waitgroup-go]
[rule.var-declaration]
[rule.var-naming]
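enableDefaultRules turns on revive's default rule set; explicitly listing a rule as a [rule.*] table enables it whether or not it belongs to the defaults, and this list adds opt-in rules such as bare-return, defer, use-any, use-errors-new and use-waitgroup-go, which drove most of the code changes in this commit. confidence = 0.8 drops findings revive is less sure about. As a quick, assumed illustration of two of the smaller rules:

package demo

import "fmt"

// use-any: prefer the "any" alias, so "func dump(v interface{})" would be flagged.
func dump(v any) {
	fmt.Println(v)
}

// increment-decrement: prefer n++ / n-- over n += 1 / n -= 1.
func count(items []string) int {
	n := 0
	for range items {
		n++
	}
	return n
}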