build: fix various lint issues

revive's default configuration has changed in 1.3.0. Some of the rules
are a bit silly (like the one about empty blocks), but I find it easier
to follow them than to try to tweak the configuration.
Vincent Bernat
2023-03-21 00:01:13 +01:00
parent 1408634bad
commit 1ec89aac1f
17 changed files with 41 additions and 53 deletions
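
Most of the hunks below follow the same few patterns flagged by revive's default rule set (rules such as unused-parameter, redefines-builtin-id and empty-block). As a minimal, hypothetical sketch, not code from this repository (populate, startBefore and startAfter are made-up names):

package example

import "errors"

func populate() error { return errors.New("not implemented") }

// Before: "who" is unused (unused-parameter), "new" shadows a predeclared
// identifier (redefines-builtin-id), the first if body is empty (empty-block),
// and the trailing error check can simply return the call.
func startBefore(who string) error {
	new := len("something")
	if new == 0 {
		// nothing to do
	}
	if err := populate(); err != nil {
		return err
	}
	return nil
}

// After: the unused parameter becomes "_", the builtin is not shadowed,
// the empty block is dropped, and the call is returned directly.
func startAfter(_ string) error {
	length := len("something")
	_ = length
	return populate()
}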

View File

@@ -189,9 +189,9 @@ func DefaultHook() (mapstructure.DecodeHookFunc, func()) {
 			}
 		}
 		if to.IsNil() {
-			new := reflect.New(to.Type().Elem())
-			method.Func.Call([]reflect.Value{new})
-			to.Set(new)
+			newV := reflect.New(to.Type().Elem())
+			method.Func.Call([]reflect.Value{newV})
+			to.Set(newV)
 			return from.Interface(), nil
 		}
 		method.Func.Call([]reflect.Value{to})

View File

@@ -39,5 +39,5 @@ func (c *MockComponent) Stop() error {
 }

 // Track does nothing
-func (c *MockComponent) Track(t *tomb.Tomb, who string) {
+func (c *MockComponent) Track(_ *tomb.Tomb, _ string) {
 }

View File

@@ -148,9 +148,9 @@ func ParametrizedConfigurationUnmarshallerHook[OuterConfiguration any, InnerConf
 			// Use the value we already have instead of default.
 			original = reflect.Indirect(configField.Elem())
 		}
-		copy := reflect.New(original.Type())
-		copy.Elem().Set(reflect.ValueOf(original.Interface()))
-		configField.Set(copy)
+		copied := reflect.New(original.Type())
+		copied.Elem().Set(reflect.ValueOf(original.Interface()))
+		configField.Set(copied)
 		// Resume decoding
 		return from.Interface(), nil

View File

@@ -20,13 +20,13 @@ func Enable() {
 }

 // Acquire has the same semantics as runtime.Acquire.
-func Acquire(addr unsafe.Pointer) {
+func Acquire(_ unsafe.Pointer) {
 }

 // Release has the same semantics as runtime.Release.
-func Release(addr unsafe.Pointer) {
+func Release(_ unsafe.Pointer) {
 }

 // ReleaseMerge has the same semantics as runtime.ReleaseMerge.
-func ReleaseMerge(addr unsafe.Pointer) {
+func ReleaseMerge(_ unsafe.Pointer) {
 }

View File

@@ -28,7 +28,7 @@ type Logger struct {
 }

 // New creates a new logger
-func New(config Configuration) (Logger, error) {
+func New(_ Configuration) (Logger, error) {
 	// Initialize the logger
 	logger := log.Logger.Hook(contextHook{})
 	return Logger{logger}, nil
@@ -37,7 +37,7 @@ func New(config Configuration) (Logger, error) {
 type contextHook struct{}

 // Run adds more context to an event, including "module" and "caller".
-func (h contextHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+func (h contextHook) Run(e *zerolog.Event, _ zerolog.Level, _ string) {
 	callStack := stack.Callers()
 	callStack = callStack[3:] // Trial and error, there is a test to check it works
 	caller := callStack[0].SourceFile(true)

View File

@@ -44,9 +44,9 @@ func TestColumnIndex(t *testing.T) {
 func TestFinalizeTwice(t *testing.T) {
 	c := NewMock(t)
-	old := c.Schema
-	new := c.finalize()
-	if diff := helpers.Diff(old, new, helpers.DiffUnexported); diff != "" {
+	oldSchema := c.Schema
+	newSchema := c.finalize()
+	if diff := helpers.Diff(oldSchema, newSchema, helpers.DiffUnexported); diff != "" {
 		t.Fatalf("finalize() (-old, +new):\n%s", diff)
 	}
 }

View File

@@ -22,19 +22,19 @@ func (l *logger) LogMode(gormlogger.LogLevel) gormlogger.Interface {
 	return l
 }

-func (l *logger) Info(ctx context.Context, s string, args ...interface{}) {
+func (l *logger) Info(_ context.Context, s string, args ...interface{}) {
 	l.r.Info().Msgf(s, args...)
 }

-func (l *logger) Warn(ctx context.Context, s string, args ...interface{}) {
+func (l *logger) Warn(_ context.Context, s string, args ...interface{}) {
 	l.r.Warn().Msgf(s, args...)
 }

-func (l *logger) Error(ctx context.Context, s string, args ...interface{}) {
+func (l *logger) Error(_ context.Context, s string, args ...interface{}) {
 	l.r.Error().Msgf(s, args...)
 }

-func (l *logger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
+func (l *logger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
 	elapsed := time.Since(begin)
 	sql, _ := fc()
 	fields := gin.H{

View File

@@ -49,10 +49,7 @@ func (c *Component) Start() error {
 	if err := c.db.AutoMigrate(&SavedFilter{}); err != nil {
 		return fmt.Errorf("cannot migrate database: %w", err)
 	}
-	if err := c.populate(); err != nil {
-		return err
-	}
-	return nil
+	return c.populate()
 }

 // Stop stops the database component.

View File

@@ -135,7 +135,7 @@ func (c *Component) docsHandlerFunc(gc *gin.Context) {
 type internalLinkTransformer struct{}

-func (r *internalLinkTransformer) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
+func (r *internalLinkTransformer) Transform(node *ast.Document, _ text.Reader, _ parser.Context) {
 	replaceLinks := func(n ast.Node, entering bool) (ast.WalkStatus, error) {
 		if !entering {
 			return ast.WalkContinue, nil
@@ -156,7 +156,7 @@ type imageEmbedder struct {
 	root fs.FS
 }

-func (r *imageEmbedder) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
+func (r *imageEmbedder) Transform(node *ast.Document, _ text.Reader, _ parser.Context) {
 	replaceLinks := func(n ast.Node, entering bool) (ast.WalkStatus, error) {
 		if !entering {
 			return ast.WalkContinue, nil
@@ -187,7 +187,7 @@ type tocLogger struct {
 	headers []Header
 }

-func (r *tocLogger) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
+func (r *tocLogger) Transform(node *ast.Document, reader text.Reader, _ parser.Context) {
 	r.headers = []Header{}
 	logHeaders := func(n ast.Node, entering bool) (ast.WalkStatus, error) {
 		if !entering {

View File

@@ -183,10 +183,10 @@ func (c *Component) handlePeerUpNotification(pkey peerKey, body *bmp.BMPPeerUpNo
 	for _, param := range received.OptParams {
 		switch param := param.(type) {
 		case *bgp.OptionParameterCapability:
-			for _, cap := range param.Capability {
-				switch cap := cap.(type) {
+			for _, capability := range param.Capability {
+				switch capability := capability.(type) {
 				case *bgp.CapAddPath:
-					for _, tuple := range cap.Tuples {
+					for _, tuple := range capability.Tuples {
 						receivedAddPath[tuple.RouteFamily] = tuple.Mode
 					}
 				}
@@ -198,10 +198,10 @@ func (c *Component) handlePeerUpNotification(pkey peerKey, body *bmp.BMPPeerUpNo
 	for _, param := range sent.OptParams {
 		switch param := param.(type) {
 		case *bgp.OptionParameterCapability:
-			for _, cap := range param.Capability {
-				switch cap := cap.(type) {
+			for _, capability := range param.Capability {
+				switch capability := capability.(type) {
 				case *bgp.CapAddPath:
-					for _, sent := range cap.Tuples {
+					for _, sent := range capability.Tuples {
 						receivedMode := receivedAddPath[sent.RouteFamily]
 						if receivedMode == bgp.BGP_ADD_PATH_BOTH || receivedMode == bgp.BGP_ADD_PATH_SEND {
 							if sent.Mode == bgp.BGP_ADD_PATH_BOTH || sent.Mode == bgp.BGP_ADD_PATH_RECEIVE {

View File

@@ -547,12 +547,11 @@ ClassifyProviderRegex(Interface.Description, "^Transit: ([^ ]+)", "$1")`,
 				}
 				return nil
 			})
-		} else {
-			// We should not get a message, but that's not possible to test.
 		}
-		flowComponent.Inject(t, tc.InputFlow())
+		// Else, we should not get a message, but that's not possible to test.
+		flowComponent.Inject(tc.InputFlow())
 		time.Sleep(50 * time.Millisecond) // Needed to let poller does its job
-		flowComponent.Inject(t, tc.InputFlow())
+		flowComponent.Inject(tc.InputFlow())
 		if tc.OutputFlow != nil {
 			select {
 			case <-received:

View File

@@ -20,16 +20,13 @@ func (nd *Decoder) decode(msgDec interface{}, samplingRateSys producer.SamplingR
 	var obsDomainID uint32
 	var dataFlowSet []netflow.DataFlowSet
 	var optionsDataFlowSet []netflow.OptionsDataFlowSet
-	var version int
 	switch msgDecConv := msgDec.(type) {
 	case netflow.NFv9Packet:
 		dataFlowSet, _, _, optionsDataFlowSet = producer.SplitNetFlowSets(msgDecConv)
 		obsDomainID = msgDecConv.SourceId
-		version = 9
 	case netflow.IPFIXPacket:
 		dataFlowSet, _, _, optionsDataFlowSet = producer.SplitIPFIXSets(msgDecConv)
 		obsDomainID = msgDecConv.ObservationDomainId
-		version = 10
 	default:
 		return nil
 	}
@@ -47,7 +44,7 @@ func (nd *Decoder) decode(msgDec interface{}, samplingRateSys producer.SamplingR
 	// Parse fields
 	for _, dataFlowSetItem := range dataFlowSet {
 		for _, record := range dataFlowSetItem.Records {
-			flow := nd.decodeRecord(version, record.Values)
+			flow := nd.decodeRecord(record.Values)
 			if flow != nil {
 				flow.SamplingRate = samplingRate
 				flowMessageSet = append(flowMessageSet, flow)
@@ -58,7 +55,7 @@ func (nd *Decoder) decode(msgDec interface{}, samplingRateSys producer.SamplingR
 	return flowMessageSet
 }

-func (nd *Decoder) decodeRecord(version int, fields []netflow.DataField) *schema.FlowMessage {
+func (nd *Decoder) decodeRecord(fields []netflow.DataField) *schema.FlowMessage {
 	var etype uint16
 	bf := &schema.FlowMessage{}
 	for _, field := range fields {

View File

@@ -44,6 +44,6 @@ func NewMock(t *testing.T, r *reporter.Reporter, config Configuration) *Componen
 }

 // Inject inject the provided flow message, as if it was received.
-func (c *Component) Inject(t *testing.T, fmsg *schema.FlowMessage) {
+func (c *Component) Inject(fmsg *schema.FlowMessage) {
 	c.outgoingFlows <- fmsg
 }

View File

@@ -117,9 +117,7 @@ func ConfigurationUnmarshallerHook() mapstructure.DecodeHookFunc {
 		// communities should contain ::/0
 		if mapKey == nil {
 			from.SetMapIndex(reflect.ValueOf("communities"), reflect.ValueOf("public"))
-		} else if communities.Kind() == reflect.String {
-			// Do nothing
-		} else if !communities.MapIndex(reflect.ValueOf("::/0")).IsValid() {
+		} else if communities.Kind() != reflect.String && !communities.MapIndex(reflect.ValueOf("::/0")).IsValid() {
 			communities.SetMapIndex(reflect.ValueOf("::/0"), reflect.ValueOf("public"))
 		}
 	}

View File

@@ -235,10 +235,7 @@ func (p *realPoller) Poll(ctx context.Context, exporter, agent netip.Addr, port
 		ifIndex := ifIndexes[(idx-1)/3]
 		ok := true
 		// We do not process results when index is 0 (this can happen for local
-		// traffic, we only care for exporter name).
-		if ifIndex > 0 && !processStr(idx, "ifdescr", &ifDescrVal) {
-			// This is not mandatory.
-		}
+		// traffic, we only care for exporter name). ifDescr is not mandatory.
 		if ifIndex > 0 && !processStr(idx+1, "ifalias", &ifAliasVal) {
 			ok = false
 		}

View File

@@ -185,7 +185,7 @@ type logCoalescePoller struct {
 	received []lookupRequest
 }

-func (fcp *logCoalescePoller) Poll(ctx context.Context, exporterIP, agentIP netip.Addr, port uint16, ifIndexes []uint) error {
+func (fcp *logCoalescePoller) Poll(_ context.Context, exporterIP, _ netip.Addr, _ uint16, ifIndexes []uint) error {
 	fcp.received = append(fcp.received, lookupRequest{exporterIP, ifIndexes})
 	return nil
 }
@@ -235,7 +235,7 @@ func TestCoalescing(t *testing.T) {
 type errorPoller struct{}

-func (fcp *errorPoller) Poll(ctx context.Context, exporterIP, agentIP netip.Addr, port uint16, ifIndexes []uint) error {
+func (fcp *errorPoller) Poll(_ context.Context, _, _ netip.Addr, _ uint16, _ []uint) error {
 	return errors.New("noooo")
 }
@@ -285,7 +285,7 @@ type agentLogPoller struct {
 	mu sync.Mutex
 }

-func (alp *agentLogPoller) Poll(ctx context.Context, exporterIP, agentIP netip.Addr, port uint16, ifIndexes []uint) error {
+func (alp *agentLogPoller) Poll(_ context.Context, exporterIP, agentIP netip.Addr, _ uint16, _ []uint) error {
 	alp.mu.Lock()
 	defer alp.mu.Unlock()
 	alp.lastExporter = exporterIP.Unmap().String()

View File

@@ -31,7 +31,7 @@ func newMockPoller(configuration Configuration, put func(netip.Addr, string, uin
 }

 // Poll just builds synthetic data.
-func (p *mockPoller) Poll(ctx context.Context, exporter, agent netip.Addr, port uint16, ifIndexes []uint) error {
+func (p *mockPoller) Poll(_ context.Context, exporter, _ netip.Addr, _ uint16, ifIndexes []uint) error {
 	for _, ifIndex := range ifIndexes {
 		if p.config.Communities.LookupOrDefault(exporter, "public") == "public" {
 			p.put(exporter, strings.ReplaceAll(exporter.Unmap().String(), ".", "_"), ifIndex, Interface{