Fix linting issues
Signed-off-by: Arve Knudsen <[email protected]>
aknuds1 committed Apr 3, 2024
1 parent 5f63986 commit 0492b48
Showing 95 changed files with 341 additions and 342 deletions.
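
Almost every hunk in this commit applies the same fix: function parameters that are declared but never read are either renamed to the blank identifier "_" or left unnamed, the usual way to satisfy an unused-parameter lint rule (presumably revive's unused-parameter or similar; the commit message only says "Fix linting issues", so the exact rule is an assumption). Go requires the names in a parameter list to be either all present or all absent, which explains the two forms seen below: "_" when another parameter in the same signature is still used, and fully unnamed parameters when none are. A minimal, hypothetical sketch of both forms, not taken from the diff:

package example

import "net/http"

// Before: "r" is never read, so an unused-parameter linter flags it.
var flagged http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

// After, form 1: "w" is still used, so only the unused parameter becomes "_".
var fixed http.HandlerFunc = func(w http.ResponseWriter, _ *http.Request) {
	w.WriteHeader(http.StatusOK)
}

// After, form 2: no parameter is used, so all names are dropped.
var noop = func(http.ResponseWriter, *http.Request) {}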
2 changes: 1 addition & 1 deletion cmd/mimirtool/main.go
@@ -49,7 +49,7 @@ func main() {
remoteReadCommand.Register(app, envVars)
ruleCommand.Register(app, envVars, prometheus.DefaultRegisterer)

- app.Command("version", "Get the version of the mimirtool CLI").Action(func(k *kingpin.ParseContext) error {
+ app.Command("version", "Get the version of the mimirtool CLI").Action(func(*kingpin.ParseContext) error {
fmt.Fprintln(os.Stdout, mimirversion.Print("Mimirtool"))
version.CheckLatest(mimirversion.Version)
return nil
2 changes: 1 addition & 1 deletion integration/e2emimir/services.go
@@ -407,7 +407,7 @@ func WithConfigFile(configFile string) Option {
}

// WithNoopOption returns an option that doesn't change anything.
- func WithNoopOption() Option { return func(options *Options) {} }
+ func WithNoopOption() Option { return func(*Options) {} }

// FlagMapper is the type of function that maps flags, just to reduce some verbosity.
type FlagMapper func(flags map[string]string) map[string]string
10 changes: 5 additions & 5 deletions integration/kv_test.go
@@ -31,7 +31,7 @@ func TestKVList(t *testing.T) {
// Create keys to list back
keysToCreate := []string{"key-a", "key-b", "key-c"}
for _, key := range keysToCreate {
- err := client.CAS(context.Background(), key, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := client.CAS(context.Background(), key, func(interface{}) (out interface{}, retry bool, err error) {
return key, false, nil
})
require.NoError(t, err, "could not create key")
@@ -53,7 +53,7 @@
func TestKVDelete(t *testing.T) {
testKVs(t, func(t *testing.T, client kv.Client, reg *prometheus.Registry) {
// Create a key
- err := client.CAS(context.Background(), "key-to-delete", func(in interface{}) (out interface{}, retry bool, err error) {
+ err := client.CAS(context.Background(), "key-to-delete", func(interface{}) (out interface{}, retry bool, err error) {
return "key-to-delete", false, nil
})
require.NoError(t, err, "object could not be created")
@@ -76,11 +76,11 @@ func TestKVDelete(t *testing.T) {
}

func TestKVWatchAndDelete(t *testing.T) {
- testKVs(t, func(t *testing.T, client kv.Client, reg *prometheus.Registry) {
+ testKVs(t, func(t *testing.T, client kv.Client, _ *prometheus.Registry) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

- err := client.CAS(context.Background(), "key-before-watch", func(in interface{}) (out interface{}, retry bool, err error) {
+ err := client.CAS(context.Background(), "key-before-watch", func(interface{}) (out interface{}, retry bool, err error) {
return "value-before-watch", false, nil
})
require.NoError(t, err)
@@ -93,7 +93,7 @@ func TestKVWatchAndDelete(t *testing.T) {
w.watch(ctx, client)
}()

- err = client.CAS(context.Background(), "key-to-delete", func(in interface{}) (out interface{}, retry bool, err error) {
+ err = client.CAS(context.Background(), "key-to-delete", func(interface{}) (out interface{}, retry bool, err error) {
return "value-to-delete", false, nil
})
require.NoError(t, err, "object could not be created")
(file name not shown in this view)
@@ -114,7 +114,7 @@ func (c *ConfigExtractor) ResolveConfigs() ([]*yaml.RNode, error) {
return nil, err
}

- err = concurrency.ForEachJob(context.Background(), len(c.allItems), runtime.NumCPU(), func(ctx context.Context, idx int) error {
+ err = concurrency.ForEachJob(context.Background(), len(c.allItems), runtime.NumCPU(), func(_ context.Context, idx int) error {
pod, ok, err := extractPodSpec(c.allItems[idx])
if err != nil {
return err
10 changes: 5 additions & 5 deletions pkg/alertmanager/multitenant_test.go
@@ -132,7 +132,7 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) {
expected error
}{
"should pass with default config": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {},
+ setup: func(*testing.T, *MultitenantAlertmanagerConfig) {},
expected: nil,
},
"should fail with empty external URL": {
@@ -142,13 +142,13 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) {
expected: errEmptyExternalURL,
},
"should fail if persistent interval is 0": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {
+ setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) {
cfg.Persister.Interval = 0
},
expected: errInvalidPersistInterval,
},
"should fail if persistent interval is negative": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {
+ setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) {
cfg.Persister.Interval = -1
},
expected: errInvalidPersistInterval,
@@ -178,7 +178,7 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) {
expected: errInvalidExternalURLMissingHostname,
},
"should fail if zone aware is enabled but zone is not set": {
- setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {
+ setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) {
cfg.ShardingRing.ZoneAwarenessEnabled = true
},
expected: errZoneAwarenessEnabledWithoutZoneInfo,
@@ -624,7 +624,7 @@ receivers:
serverInvoked := atomic.NewBool(false)

// Create a local HTTP server to test whether the request is received.
- server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
+ server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, _ *http.Request) {
serverInvoked.Store(true)
writer.WriteHeader(http.StatusOK)
}))
2 changes: 1 addition & 1 deletion pkg/api/api_test.go
@@ -189,7 +189,7 @@ func TestApiGzip(t *testing.T) {
})
}

t.Run("compressed with gzip", func(t *testing.T) {
t.Run("compressed with gzip", func(*testing.T) {
})
}

6 changes: 3 additions & 3 deletions pkg/api/handlers.go
@@ -125,7 +125,7 @@ func indexHandler(httpPathPrefix string, content *IndexPageContent) http.Handler
})
template.Must(templ.Parse(indexPageHTML))

- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
err := templ.Execute(w, indexPageContents{LinkGroups: content.GetContent()})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -180,7 +180,7 @@ type configResponse struct {
}

func (cfg *Config) statusConfigHandler() http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
response := configResponse{
Status: "success",
Config: map[string]string{},
@@ -195,7 +195,7 @@ type flagsResponse struct {
}

func (cfg *Config) statusFlagsHandler() http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
response := flagsResponse{
Status: "success",
Flags: map[string]string{},
2 changes: 1 addition & 1 deletion pkg/api/handlers_test.go
@@ -175,7 +175,7 @@ func TestConfigDiffHandler(t *testing.T) {
func TestConfigOverrideHandler(t *testing.T) {
cfg := &Config{
CustomConfigHandler: func(_ interface{}, _ interface{}) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, _ *http.Request) {
_, err := w.Write([]byte("config"))
assert.NoError(t, err)
}
10 changes: 5 additions & 5 deletions pkg/api/tenant.go
@@ -29,14 +29,14 @@ func newTenantValidationMiddleware(federation bool, maxTenants int) middleware.I
return
}

- numIds := len(ids)
- if !federation && numIds > 1 {
- http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, 1, numIds), http.StatusUnprocessableEntity)
+ numIDs := len(ids)
+ if !federation && numIDs > 1 {
+ http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, 1, numIDs), http.StatusUnprocessableEntity)
return
}

- if federation && maxTenants > 0 && numIds > maxTenants {
- http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, maxTenants, numIds), http.StatusUnprocessableEntity)
+ if federation && maxTenants > 0 && numIDs > maxTenants {
+ http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, maxTenants, numIDs), http.StatusUnprocessableEntity)
return
}
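
The pkg/api/tenant.go hunk just above is the one change of a different kind: the local variable numIds is renamed to numIDs, following the Go convention that initialisms such as ID keep a consistent case (the sort of issue a var-naming style check reports; the specific rule is not named in the commit, so that is an assumption). A tiny hypothetical illustration of the convention:

package example

// countTenants returns how many tenant IDs were supplied.
// numIDs follows the initialism convention; numIds would typically be flagged.
func countTenants(ids []string) int {
	numIDs := len(ids)
	return numIDs
}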

2 changes: 1 addition & 1 deletion pkg/api/tenant_test.go
@@ -90,7 +90,7 @@ func TestNewTenantValidationMiddleware(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
- nop := http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {})
+ nop := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
// Note that we add the authentication middleware since the tenant validation middleware relies
// on tenant ID being set in the context associated with the request.
handler := middleware.Merge(middleware.AuthenticateUser, newTenantValidationMiddleware(tc.federation, tc.maxTenants)).Wrap(nop)
4 changes: 2 additions & 2 deletions pkg/compactor/block_upload_test.go
@@ -1769,7 +1769,7 @@ func TestMultitenantCompactor_PeriodicValidationUpdater(t *testing.T) {
},
{
name: "updating validation file succeeds",
- assertions: func(t *testing.T, ctx context.Context, bkt objstore.Bucket) {
+ assertions: func(t *testing.T, _ context.Context, bkt objstore.Bucket) {
test.Poll(t, heartbeatInterval*2, true, func() interface{} {
return validationExists(t, bkt)
})
@@ -1787,7 +1787,7 @@
{
name: "context cancelled before update",
cancelContext: true,
- assertions: func(t *testing.T, ctx context.Context, bkt objstore.Bucket) {
+ assertions: func(t *testing.T, _ context.Context, bkt objstore.Bucket) {
require.False(t, validationExists(t, bkt))
},
},
2 changes: 1 addition & 1 deletion pkg/compactor/bucket_compactor.go
@@ -711,7 +711,7 @@ func NewBucketCompactorMetrics(blocksMarkedForDeletion prometheus.Counter, reg p
type ownCompactionJobFunc func(job *Job) (bool, error)

// ownAllJobs is a ownCompactionJobFunc that always return true.
- var ownAllJobs = func(job *Job) (bool, error) {
+ var ownAllJobs = func(*Job) (bool, error) {
return true, nil
}

6 changes: 3 additions & 3 deletions pkg/compactor/bucket_compactor_test.go
@@ -94,21 +94,21 @@ func TestBucketCompactor_FilterOwnJobs(t *testing.T) {
expectedJobs int
}{
"should return all planned jobs if the compactor instance owns all of them": {
- ownJob: func(job *Job) (bool, error) {
+ ownJob: func(*Job) (bool, error) {
return true, nil
},
expectedJobs: 4,
},
"should return no jobs if the compactor instance owns none of them": {
- ownJob: func(job *Job) (bool, error) {
+ ownJob: func(*Job) (bool, error) {
return false, nil
},
expectedJobs: 0,
},
"should return some jobs if the compactor instance owns some of them": {
ownJob: func() ownCompactionJobFunc {
count := 0
- return func(job *Job) (bool, error) {
+ return func(*Job) (bool, error) {
count++
return count%2 == 0, nil
}
8 changes: 4 additions & 4 deletions pkg/compactor/compactor_test.go
@@ -95,7 +95,7 @@ func TestConfig_Validate(t *testing.T) {
expected string
}{
"should pass with the default config": {
- setup: func(cfg *Config) {},
+ setup: func(*Config) {},
expected: "",
},
"should pass with only 1 block range period": {
@@ -1352,7 +1352,7 @@ func TestMultitenantCompactor_ShouldSkipCompactionForJobsNoMoreOwnedAfterPlannin
c, _, tsdbPlanner, logs, registry := prepareWithConfigProvider(t, cfg, bucketClient, limits)

// Mock the planner as if there's no compaction to do, in order to simplify tests.
- tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*block.Meta{}, nil).Run(func(args mock.Arguments) {
+ tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*block.Meta{}, nil).Run(func(mock.Arguments) {
// As soon as the first Plan() is called by the compactor, we do switch
// the instance to LEAVING state. This way, after this call, we expect the compactor
// to skip next compaction job because not owned anymore by this instance.
@@ -1783,11 +1783,11 @@ func prepareWithConfigProvider(t *testing.T, compactorCfg Config, bucketClient o
logger := &componentLogger{component: "compactor", log: log.NewLogfmtLogger(logs)}
registry := prometheus.NewRegistry()

- bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) {
+ bucketClientFactory := func(context.Context) (objstore.Bucket, error) {
return bucketClient, nil
}

- blocksCompactorFactory := func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (Compactor, Planner, error) {
+ blocksCompactorFactory := func(context.Context, Config, log.Logger, prometheus.Registerer) (Compactor, Planner, error) {
return tsdbCompactor, tsdbPlanner, nil
}

18 changes: 9 additions & 9 deletions pkg/distributor/distributor_test.go
@@ -1885,7 +1885,7 @@ func BenchmarkDistributor_Push(b *testing.B) {
expectedErr string
}{
"all samples successfully pushed": {
- prepareConfig: func(limits *validation.Limits) {},
+ prepareConfig: func(*validation.Limits) {},
prepareSeries: func() ([][]mimirpb.LabelAdapter, []mimirpb.Sample) {
metrics := make([][]mimirpb.LabelAdapter, numSeriesPerRequest)
samples := make([]mimirpb.Sample, numSeriesPerRequest)
@@ -2075,7 +2075,7 @@ func BenchmarkDistributor_Push(b *testing.B) {
limits.IngestionRate = float64(rate.Inf) // Unlimited.
testData.prepareConfig(&limits)

- distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(inst ring.InstanceDesc) (ring_client.PoolClient, error) {
+ distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(ring.InstanceDesc) (ring_client.PoolClient, error) {
return &noopIngester{}, nil
})

@@ -4397,7 +4397,7 @@ func TestHaDedupeMiddleware(t *testing.T) {

nextCallCount := 0
var gotReqs []*mimirpb.WriteRequest
- next := func(ctx context.Context, pushReq *Request) error {
+ next := func(_ context.Context, pushReq *Request) error {
nextCallCount++
req, err := pushReq.WriteRequest()
require.NoError(t, err)
@@ -4463,7 +4463,7 @@ func TestInstanceLimitsBeforeHaDedupe(t *testing.T) {

// Capture the submitted write requests which the middlewares pass into the mock push function.
var submittedWriteReqs []*mimirpb.WriteRequest
- mockPush := func(ctx context.Context, pushReq *Request) error {
+ mockPush := func(_ context.Context, pushReq *Request) error {
defer pushReq.CleanUp()
writeReq, err := pushReq.WriteRequest()
require.NoError(t, err)
@@ -4646,7 +4646,7 @@ func TestRelabelMiddleware(t *testing.T) {
}

var gotReqs []*mimirpb.WriteRequest
- next := func(ctx context.Context, pushReq *Request) error {
+ next := func(_ context.Context, pushReq *Request) error {
req, err := pushReq.WriteRequest()
require.NoError(t, err)
gotReqs = append(gotReqs, req)
@@ -4724,7 +4724,7 @@ func TestSortAndFilterMiddleware(t *testing.T) {
}

var gotReqs []*mimirpb.WriteRequest
- next := func(ctx context.Context, pushReq *Request) error {
+ next := func(_ context.Context, pushReq *Request) error {
req, err := pushReq.WriteRequest()
require.NoError(t, err)
gotReqs = append(gotReqs, req)
@@ -6685,7 +6685,7 @@ func TestDistributor_MetricsWithRequestModifications(t *testing.T) {
exemplarLabelGen := func(sampleIdx int) []mimirpb.LabelAdapter {
return []mimirpb.LabelAdapter{{Name: "exemplarLabel", Value: fmt.Sprintf("value_%d", sampleIdx)}}
}
- metaDataGen := func(metricIdx int, metricName string) *mimirpb.MetricMetadata {
+ metaDataGen := func(_ int, metricName string) *mimirpb.MetricMetadata {
return &mimirpb.MetricMetadata{
Type: mimirpb.COUNTER,
MetricFamilyName: metricName,
@@ -7039,7 +7039,7 @@ func TestSeriesAreShardedToCorrectIngesters(t *testing.T) {
exemplarLabelGen := func(sampleIdx int) []mimirpb.LabelAdapter {
return []mimirpb.LabelAdapter{{Name: "exemplarLabel", Value: fmt.Sprintf("value_%d", sampleIdx)}}
}
- metaDataGen := func(metricIdx int, metricName string) *mimirpb.MetricMetadata {
+ metaDataGen := func(_ int, metricName string) *mimirpb.MetricMetadata {
return &mimirpb.MetricMetadata{
Type: mimirpb.COUNTER,
MetricFamilyName: metricName,
@@ -7430,7 +7430,7 @@ func TestSendMessageMetadata(t *testing.T) {
require.NoError(t, err)

mock := &mockInstanceClient{}
- distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(inst ring.InstanceDesc) (ring_client.PoolClient, error) {
+ distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(ring.InstanceDesc) (ring_client.PoolClient, error) {
return mock, nil
})

2 changes: 1 addition & 1 deletion pkg/distributor/otel_test.go
@@ -54,7 +54,7 @@ func BenchmarkOTLPHandler(b *testing.B) {
}
exportReq := TimeseriesToOTLPRequest(sampleSeries, sampleMetadata)

- pushFunc := func(ctx context.Context, pushReq *Request) error {
+ pushFunc := func(_ context.Context, pushReq *Request) error {
if _, err := pushReq.WriteRequest(); err != nil {
return err
}
Some of the 95 changed files are not shown in this view.