From 0492b484ee21c91590196bdff29b2fb198082857 Mon Sep 17 00:00:00 2001
From: Arve Knudsen
Date: Tue, 2 Apr 2024 09:42:31 +0200
Subject: [PATCH] Fix linting issues

Signed-off-by: Arve Knudsen
---
 cmd/mimirtool/main.go | 2 +-
 integration/e2emimir/services.go | 2 +-
 integration/kv_test.go | 10 ++--
 .../plugins/resolve-config/main.go | 2 +-
 pkg/alertmanager/multitenant_test.go | 10 ++--
 pkg/api/api_test.go | 2 +-
 pkg/api/handlers.go | 6 +--
 pkg/api/handlers_test.go | 2 +-
 pkg/api/tenant.go | 10 ++--
 pkg/api/tenant_test.go | 2 +-
 pkg/compactor/block_upload_test.go | 4 +-
 pkg/compactor/bucket_compactor.go | 2 +-
 pkg/compactor/bucket_compactor_test.go | 6 +--
 pkg/compactor/compactor_test.go | 8 ++--
 pkg/distributor/distributor_test.go | 18 +++----
 pkg/distributor/otel_test.go | 2 +-
 pkg/distributor/push_test.go | 32 ++++++-------
 pkg/frontend/frontend_test.go | 4 +-
 .../astmapper/subtree_folder_test.go | 4 +-
 .../cardinality_query_cache_test.go | 2 +-
 .../querymiddleware/cardinality_test.go | 2 +-
 .../generic_query_cache_test.go | 2 +-
 .../querymiddleware/querysharding_test.go | 18 +++----
 pkg/frontend/querymiddleware/retry_test.go | 14 +++---
 .../querymiddleware/roundtrip_test.go | 2 +-
 pkg/frontend/querymiddleware/running_test.go | 6 +--
 .../querymiddleware/sharded_queryable_test.go | 4 +-
 .../querymiddleware/split_and_cache_test.go | 6 +--
 .../split_by_instant_interval.go | 10 ++--
 pkg/frontend/transport/handler_test.go | 4 +-
 pkg/frontend/v1/frontend_test.go | 8 ++--
 pkg/frontend/v2/frontend_test.go | 8 ++--
 pkg/ingester/client/buffering_client_test.go | 2 +-
 pkg/ingester/client/circuitbreaker.go | 6 +--
 pkg/ingester/client/circuitbreaker_test.go | 4 +-
 pkg/ingester/ingester.go | 6 +--
 pkg/ingester/ingester_ingest_storage_test.go | 4 +-
 pkg/ingester/ingester_test.go | 38 +++++++--------
 pkg/ingester/label_names_and_values_test.go | 6 +--
 pkg/ingester/owned_series_test.go | 34 ++++++-------
 pkg/mimir/mimir_test.go | 2 +-
 pkg/mimir/modules.go | 2 +-
 pkg/mimir/runtime_config_test.go | 4 +-
 pkg/mimir/sanity_check_test.go | 8 ++--
 pkg/mimirtool/analyze/grafana.go | 2 +-
 pkg/mimirtool/analyze/ruler.go | 2 +-
 pkg/mimirtool/commands/rules.go | 2 +-
 pkg/mimirtool/config/convert_test.go | 20 ++++----
 pkg/mimirtool/config/cortex.go | 2 +-
 pkg/mimirtool/config/mapping.go | 2 +-
 pkg/mimirtool/rules/rules.go | 2 +-
 pkg/mimirtool/rules/rules_test.go | 2 +-
 pkg/querier/block_test.go | 2 +-
 pkg/querier/blocks_store_queryable_test.go | 6 +--
 .../blocks_store_replicated_set_test.go | 4 +-
 pkg/querier/querier_test.go | 4 +-
 pkg/querier/remote_read_test.go | 8 ++--
 .../merge_exemplar_queryable.go | 12 ++---
 pkg/querier/worker/frontend_processor_test.go | 4 +-
 .../worker/scheduler_processor_test.go | 4 +-
 pkg/querier/worker/worker_test.go | 2 +-
 pkg/ruler/api_test.go | 6 +--
 pkg/ruler/compat_test.go | 4 +-
 pkg/ruler/remotequerier_test.go | 18 +++----
 pkg/ruler/ruler.go | 2 +-
 pkg/ruler/ruler_test.go | 22 ++++-----
 pkg/ruler/rulestore/config_test.go | 2 +-
 .../schedulerdiscovery/config_test.go | 2 +-
 .../ingest/partition_offset_reader_test.go | 8 ++--
 pkg/storage/ingest/pusher_test.go | 2 +-
 pkg/storage/ingest/reader_test.go | 48 +++++++++----------
 pkg/storage/ingest/writer_test.go | 6 +--
 pkg/storage/tsdb/block/block_test.go | 6 +--
 pkg/storage/tsdb/block/index_test.go | 2 +-
 pkg/storage/tsdb/config_test.go | 34 ++++++-------
 pkg/storage/tsdb/users_scanner_test.go | 2 +-
 pkg/storegateway/bucket_chunk_reader_test.go | 8 ++--
 pkg/storegateway/bucket_stores_test.go | 2 +-
 pkg/storegateway/bucket_test.go | 8 ++--
 pkg/storegateway/gateway_test.go | 8 ++--
 pkg/storegateway/indexcache/remote_test.go | 14 +++---
 .../indexheader/lazy_binary_reader_test.go | 2 +-
 .../indexheader/reader_benchmarks_test.go | 4 +-
 pkg/storegateway/series_refs_test.go | 6 +--
 pkg/storegateway/sharding_strategy_test.go | 2 +-
 pkg/usagestats/seed_test.go | 6 +--
 pkg/util/flags_test.go | 2 +-
 pkg/util/gziphandler/gzip_test.go | 24 +++++-----
 .../instrumentation/tracer_transport_test.go | 4 +-
 pkg/util/noauth/no_auth.go | 2 +-
 pkg/util/pool/fast_releasing_pool_test.go | 2 +-
 pkg/util/validation/exporter/exporter_test.go | 2 +-
 pkg/util/validation/exporter/ring_test.go | 4 +-
 pkg/util/version/info_handler.go | 3 +-
 tools/querytee/proxy_endpoint_test.go | 6 +--
 95 files changed, 341 insertions(+), 342 deletions(-)

diff --git a/cmd/mimirtool/main.go b/cmd/mimirtool/main.go
index 416dcef20c5..4beff2a5f58 100644
--- a/cmd/mimirtool/main.go
+++ b/cmd/mimirtool/main.go
@@ -49,7 +49,7 @@ func main() {
 	remoteReadCommand.Register(app, envVars)
 	ruleCommand.Register(app, envVars, prometheus.DefaultRegisterer)
 
-	app.Command("version", "Get the version of the mimirtool CLI").Action(func(k *kingpin.ParseContext) error {
+	app.Command("version", "Get the version of the mimirtool CLI").Action(func(*kingpin.ParseContext) error {
 		fmt.Fprintln(os.Stdout, mimirversion.Print("Mimirtool"))
 		version.CheckLatest(mimirversion.Version)
 		return nil
diff --git a/integration/e2emimir/services.go b/integration/e2emimir/services.go
index 945edaa9948..0184b0c362d 100644
--- a/integration/e2emimir/services.go
+++ b/integration/e2emimir/services.go
@@ -407,7 +407,7 @@ func WithConfigFile(configFile string) Option {
 }
 
 // WithNoopOption returns an option that doesn't change anything.
-func WithNoopOption() Option { return func(options *Options) {} }
+func WithNoopOption() Option { return func(*Options) {} }
 
 // FlagMapper is the type of function that maps flags, just to reduce some verbosity.
type FlagMapper func(flags map[string]string) map[string]string diff --git a/integration/kv_test.go b/integration/kv_test.go index beedaf559ed..cf9b725919e 100644 --- a/integration/kv_test.go +++ b/integration/kv_test.go @@ -31,7 +31,7 @@ func TestKVList(t *testing.T) { // Create keys to list back keysToCreate := []string{"key-a", "key-b", "key-c"} for _, key := range keysToCreate { - err := client.CAS(context.Background(), key, func(in interface{}) (out interface{}, retry bool, err error) { + err := client.CAS(context.Background(), key, func(interface{}) (out interface{}, retry bool, err error) { return key, false, nil }) require.NoError(t, err, "could not create key") @@ -53,7 +53,7 @@ func TestKVList(t *testing.T) { func TestKVDelete(t *testing.T) { testKVs(t, func(t *testing.T, client kv.Client, reg *prometheus.Registry) { // Create a key - err := client.CAS(context.Background(), "key-to-delete", func(in interface{}) (out interface{}, retry bool, err error) { + err := client.CAS(context.Background(), "key-to-delete", func(interface{}) (out interface{}, retry bool, err error) { return "key-to-delete", false, nil }) require.NoError(t, err, "object could not be created") @@ -76,11 +76,11 @@ func TestKVDelete(t *testing.T) { } func TestKVWatchAndDelete(t *testing.T) { - testKVs(t, func(t *testing.T, client kv.Client, reg *prometheus.Registry) { + testKVs(t, func(t *testing.T, client kv.Client, _ *prometheus.Registry) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - err := client.CAS(context.Background(), "key-before-watch", func(in interface{}) (out interface{}, retry bool, err error) { + err := client.CAS(context.Background(), "key-before-watch", func(interface{}) (out interface{}, retry bool, err error) { return "value-before-watch", false, nil }) require.NoError(t, err) @@ -93,7 +93,7 @@ func TestKVWatchAndDelete(t *testing.T) { w.watch(ctx, client) }() - err = client.CAS(context.Background(), "key-to-delete", func(in interface{}) (out interface{}, retry bool, err error) { + err = client.CAS(context.Background(), "key-to-delete", func(interface{}) (out interface{}, retry bool, err error) { return "value-to-delete", false, nil }) require.NoError(t, err, "object could not be created") diff --git a/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go b/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go index da0a6257500..43dee755be7 100644 --- a/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go +++ b/operations/compare-helm-with-jsonnet/plugins/resolve-config/main.go @@ -114,7 +114,7 @@ func (c *ConfigExtractor) ResolveConfigs() ([]*yaml.RNode, error) { return nil, err } - err = concurrency.ForEachJob(context.Background(), len(c.allItems), runtime.NumCPU(), func(ctx context.Context, idx int) error { + err = concurrency.ForEachJob(context.Background(), len(c.allItems), runtime.NumCPU(), func(_ context.Context, idx int) error { pod, ok, err := extractPodSpec(c.allItems[idx]) if err != nil { return err diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index 4036334c3b3..43129e4c771 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -132,7 +132,7 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) { expected error }{ "should pass with default config": { - setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) {}, + setup: func(*testing.T, *MultitenantAlertmanagerConfig) {}, expected: nil, }, "should fail with empty 
external URL": { @@ -142,13 +142,13 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) { expected: errEmptyExternalURL, }, "should fail if persistent interval is 0": { - setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) { + setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) { cfg.Persister.Interval = 0 }, expected: errInvalidPersistInterval, }, "should fail if persistent interval is negative": { - setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) { + setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) { cfg.Persister.Interval = -1 }, expected: errInvalidPersistInterval, @@ -178,7 +178,7 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) { expected: errInvalidExternalURLMissingHostname, }, "should fail if zone aware is enabled but zone is not set": { - setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig) { + setup: func(_ *testing.T, cfg *MultitenantAlertmanagerConfig) { cfg.ShardingRing.ZoneAwarenessEnabled = true }, expected: errZoneAwarenessEnabledWithoutZoneInfo, @@ -624,7 +624,7 @@ receivers: serverInvoked := atomic.NewBool(false) // Create a local HTTP server to test whether the request is received. - server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + server := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, _ *http.Request) { serverInvoked.Store(true) writer.WriteHeader(http.StatusOK) })) diff --git a/pkg/api/api_test.go b/pkg/api/api_test.go index 919ba65d2ae..27f5906c97b 100644 --- a/pkg/api/api_test.go +++ b/pkg/api/api_test.go @@ -189,7 +189,7 @@ func TestApiGzip(t *testing.T) { }) } - t.Run("compressed with gzip", func(t *testing.T) { + t.Run("compressed with gzip", func(*testing.T) { }) } diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 4677d0ee0db..45a0b941d65 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -125,7 +125,7 @@ func indexHandler(httpPathPrefix string, content *IndexPageContent) http.Handler }) template.Must(templ.Parse(indexPageHTML)) - return func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, _ *http.Request) { err := templ.Execute(w, indexPageContents{LinkGroups: content.GetContent()}) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -180,7 +180,7 @@ type configResponse struct { } func (cfg *Config) statusConfigHandler() http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, _ *http.Request) { response := configResponse{ Status: "success", Config: map[string]string{}, @@ -195,7 +195,7 @@ type flagsResponse struct { } func (cfg *Config) statusFlagsHandler() http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, _ *http.Request) { response := flagsResponse{ Status: "success", Flags: map[string]string{}, diff --git a/pkg/api/handlers_test.go b/pkg/api/handlers_test.go index 8aafb96ac89..1fbe8555208 100644 --- a/pkg/api/handlers_test.go +++ b/pkg/api/handlers_test.go @@ -175,7 +175,7 @@ func TestConfigDiffHandler(t *testing.T) { func TestConfigOverrideHandler(t *testing.T) { cfg := &Config{ CustomConfigHandler: func(_ interface{}, _ interface{}) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, _ *http.Request) { _, err := w.Write([]byte("config")) assert.NoError(t, err) } diff --git a/pkg/api/tenant.go b/pkg/api/tenant.go index 
227c072241a..16f84c5de3c 100644 --- a/pkg/api/tenant.go +++ b/pkg/api/tenant.go @@ -29,14 +29,14 @@ func newTenantValidationMiddleware(federation bool, maxTenants int) middleware.I return } - numIds := len(ids) - if !federation && numIds > 1 { - http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, 1, numIds), http.StatusUnprocessableEntity) + numIDs := len(ids) + if !federation && numIDs > 1 { + http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, 1, numIDs), http.StatusUnprocessableEntity) return } - if federation && maxTenants > 0 && numIds > maxTenants { - http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, maxTenants, numIds), http.StatusUnprocessableEntity) + if federation && maxTenants > 0 && numIDs > maxTenants { + http.Error(w, fmt.Sprintf(tooManyTenantsTemplate, maxTenants, numIDs), http.StatusUnprocessableEntity) return } diff --git a/pkg/api/tenant_test.go b/pkg/api/tenant_test.go index 034b4661f22..431c37faec9 100644 --- a/pkg/api/tenant_test.go +++ b/pkg/api/tenant_test.go @@ -90,7 +90,7 @@ func TestNewTenantValidationMiddleware(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - nop := http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {}) + nop := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}) // Note that we add the authentication middleware since the tenant validation middleware relies // on tenant ID being set in the context associated with the request. handler := middleware.Merge(middleware.AuthenticateUser, newTenantValidationMiddleware(tc.federation, tc.maxTenants)).Wrap(nop) diff --git a/pkg/compactor/block_upload_test.go b/pkg/compactor/block_upload_test.go index 1b8071f6cfe..7bb124cb90b 100644 --- a/pkg/compactor/block_upload_test.go +++ b/pkg/compactor/block_upload_test.go @@ -1769,7 +1769,7 @@ func TestMultitenantCompactor_PeriodicValidationUpdater(t *testing.T) { }, { name: "updating validation file succeeds", - assertions: func(t *testing.T, ctx context.Context, bkt objstore.Bucket) { + assertions: func(t *testing.T, _ context.Context, bkt objstore.Bucket) { test.Poll(t, heartbeatInterval*2, true, func() interface{} { return validationExists(t, bkt) }) @@ -1787,7 +1787,7 @@ func TestMultitenantCompactor_PeriodicValidationUpdater(t *testing.T) { { name: "context cancelled before update", cancelContext: true, - assertions: func(t *testing.T, ctx context.Context, bkt objstore.Bucket) { + assertions: func(t *testing.T, _ context.Context, bkt objstore.Bucket) { require.False(t, validationExists(t, bkt)) }, }, diff --git a/pkg/compactor/bucket_compactor.go b/pkg/compactor/bucket_compactor.go index ea6ab82c017..8c245d4b2c8 100644 --- a/pkg/compactor/bucket_compactor.go +++ b/pkg/compactor/bucket_compactor.go @@ -711,7 +711,7 @@ func NewBucketCompactorMetrics(blocksMarkedForDeletion prometheus.Counter, reg p type ownCompactionJobFunc func(job *Job) (bool, error) // ownAllJobs is a ownCompactionJobFunc that always return true. 
-var ownAllJobs = func(job *Job) (bool, error) { +var ownAllJobs = func(*Job) (bool, error) { return true, nil } diff --git a/pkg/compactor/bucket_compactor_test.go b/pkg/compactor/bucket_compactor_test.go index 67e550d0642..0f999a694bd 100644 --- a/pkg/compactor/bucket_compactor_test.go +++ b/pkg/compactor/bucket_compactor_test.go @@ -94,13 +94,13 @@ func TestBucketCompactor_FilterOwnJobs(t *testing.T) { expectedJobs int }{ "should return all planned jobs if the compactor instance owns all of them": { - ownJob: func(job *Job) (bool, error) { + ownJob: func(*Job) (bool, error) { return true, nil }, expectedJobs: 4, }, "should return no jobs if the compactor instance owns none of them": { - ownJob: func(job *Job) (bool, error) { + ownJob: func(*Job) (bool, error) { return false, nil }, expectedJobs: 0, @@ -108,7 +108,7 @@ func TestBucketCompactor_FilterOwnJobs(t *testing.T) { "should return some jobs if the compactor instance owns some of them": { ownJob: func() ownCompactionJobFunc { count := 0 - return func(job *Job) (bool, error) { + return func(*Job) (bool, error) { count++ return count%2 == 0, nil } diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index b1984487c56..208446650b8 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -95,7 +95,7 @@ func TestConfig_Validate(t *testing.T) { expected string }{ "should pass with the default config": { - setup: func(cfg *Config) {}, + setup: func(*Config) {}, expected: "", }, "should pass with only 1 block range period": { @@ -1352,7 +1352,7 @@ func TestMultitenantCompactor_ShouldSkipCompactionForJobsNoMoreOwnedAfterPlannin c, _, tsdbPlanner, logs, registry := prepareWithConfigProvider(t, cfg, bucketClient, limits) // Mock the planner as if there's no compaction to do, in order to simplify tests. - tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*block.Meta{}, nil).Run(func(args mock.Arguments) { + tsdbPlanner.On("Plan", mock.Anything, mock.Anything).Return([]*block.Meta{}, nil).Run(func(mock.Arguments) { // As soon as the first Plan() is called by the compactor, we do switch // the instance to LEAVING state. This way, after this call, we expect the compactor // to skip next compaction job because not owned anymore by this instance. 
@@ -1783,11 +1783,11 @@ func prepareWithConfigProvider(t *testing.T, compactorCfg Config, bucketClient o logger := &componentLogger{component: "compactor", log: log.NewLogfmtLogger(logs)} registry := prometheus.NewRegistry() - bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) { + bucketClientFactory := func(context.Context) (objstore.Bucket, error) { return bucketClient, nil } - blocksCompactorFactory := func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (Compactor, Planner, error) { + blocksCompactorFactory := func(context.Context, Config, log.Logger, prometheus.Registerer) (Compactor, Planner, error) { return tsdbCompactor, tsdbPlanner, nil } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 8632a5129ac..59505e98479 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -1885,7 +1885,7 @@ func BenchmarkDistributor_Push(b *testing.B) { expectedErr string }{ "all samples successfully pushed": { - prepareConfig: func(limits *validation.Limits) {}, + prepareConfig: func(*validation.Limits) {}, prepareSeries: func() ([][]mimirpb.LabelAdapter, []mimirpb.Sample) { metrics := make([][]mimirpb.LabelAdapter, numSeriesPerRequest) samples := make([]mimirpb.Sample, numSeriesPerRequest) @@ -2075,7 +2075,7 @@ func BenchmarkDistributor_Push(b *testing.B) { limits.IngestionRate = float64(rate.Inf) // Unlimited. testData.prepareConfig(&limits) - distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(inst ring.InstanceDesc) (ring_client.PoolClient, error) { + distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(ring.InstanceDesc) (ring_client.PoolClient, error) { return &noopIngester{}, nil }) @@ -4397,7 +4397,7 @@ func TestHaDedupeMiddleware(t *testing.T) { nextCallCount := 0 var gotReqs []*mimirpb.WriteRequest - next := func(ctx context.Context, pushReq *Request) error { + next := func(_ context.Context, pushReq *Request) error { nextCallCount++ req, err := pushReq.WriteRequest() require.NoError(t, err) @@ -4463,7 +4463,7 @@ func TestInstanceLimitsBeforeHaDedupe(t *testing.T) { // Capture the submitted write requests which the middlewares pass into the mock push function. 
var submittedWriteReqs []*mimirpb.WriteRequest - mockPush := func(ctx context.Context, pushReq *Request) error { + mockPush := func(_ context.Context, pushReq *Request) error { defer pushReq.CleanUp() writeReq, err := pushReq.WriteRequest() require.NoError(t, err) @@ -4646,7 +4646,7 @@ func TestRelabelMiddleware(t *testing.T) { } var gotReqs []*mimirpb.WriteRequest - next := func(ctx context.Context, pushReq *Request) error { + next := func(_ context.Context, pushReq *Request) error { req, err := pushReq.WriteRequest() require.NoError(t, err) gotReqs = append(gotReqs, req) @@ -4724,7 +4724,7 @@ func TestSortAndFilterMiddleware(t *testing.T) { } var gotReqs []*mimirpb.WriteRequest - next := func(ctx context.Context, pushReq *Request) error { + next := func(_ context.Context, pushReq *Request) error { req, err := pushReq.WriteRequest() require.NoError(t, err) gotReqs = append(gotReqs, req) @@ -6685,7 +6685,7 @@ func TestDistributor_MetricsWithRequestModifications(t *testing.T) { exemplarLabelGen := func(sampleIdx int) []mimirpb.LabelAdapter { return []mimirpb.LabelAdapter{{Name: "exemplarLabel", Value: fmt.Sprintf("value_%d", sampleIdx)}} } - metaDataGen := func(metricIdx int, metricName string) *mimirpb.MetricMetadata { + metaDataGen := func(_ int, metricName string) *mimirpb.MetricMetadata { return &mimirpb.MetricMetadata{ Type: mimirpb.COUNTER, MetricFamilyName: metricName, @@ -7039,7 +7039,7 @@ func TestSeriesAreShardedToCorrectIngesters(t *testing.T) { exemplarLabelGen := func(sampleIdx int) []mimirpb.LabelAdapter { return []mimirpb.LabelAdapter{{Name: "exemplarLabel", Value: fmt.Sprintf("value_%d", sampleIdx)}} } - metaDataGen := func(metricIdx int, metricName string) *mimirpb.MetricMetadata { + metaDataGen := func(_ int, metricName string) *mimirpb.MetricMetadata { return &mimirpb.MetricMetadata{ Type: mimirpb.COUNTER, MetricFamilyName: metricName, @@ -7430,7 +7430,7 @@ func TestSendMessageMetadata(t *testing.T) { require.NoError(t, err) mock := &mockInstanceClient{} - distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(inst ring.InstanceDesc) (ring_client.PoolClient, error) { + distributorCfg.IngesterClientFactory = ring_client.PoolInstFunc(func(ring.InstanceDesc) (ring_client.PoolClient, error) { return mock, nil }) diff --git a/pkg/distributor/otel_test.go b/pkg/distributor/otel_test.go index f7039f454a0..41860df6411 100644 --- a/pkg/distributor/otel_test.go +++ b/pkg/distributor/otel_test.go @@ -54,7 +54,7 @@ func BenchmarkOTLPHandler(b *testing.B) { } exportReq := TimeseriesToOTLPRequest(sampleSeries, sampleMetadata) - pushFunc := func(ctx context.Context, pushReq *Request) error { + pushFunc := func(_ context.Context, pushReq *Request) error { if _, err := pushReq.WriteRequest(); err != nil { return err } diff --git a/pkg/distributor/push_test.go b/pkg/distributor/push_test.go index dcbca4a3dbb..0354c657cd5 100644 --- a/pkg/distributor/push_test.go +++ b/pkg/distributor/push_test.go @@ -219,7 +219,7 @@ func TestHandlerOTLPPush(t *testing.T) { maxMsgSize: 30, series: sampleSeries, metadata: sampleMetadata, - verifyFunc: func(t *testing.T, pushReq *Request) error { + verifyFunc: func(_ *testing.T, pushReq *Request) error { _, err := pushReq.WriteRequest() return err }, @@ -232,7 +232,7 @@ func TestHandlerOTLPPush(t *testing.T) { maxMsgSize: 100000, series: sampleSeries, metadata: sampleMetadata, - verifyFunc: func(t *testing.T, pushReq *Request) error { + verifyFunc: func(_ *testing.T, pushReq *Request) error { _, err := pushReq.WriteRequest() return err }, @@ 
-295,7 +295,7 @@ func TestHandlerOTLPPush(t *testing.T) { validation.NewMockTenantLimits(map[string]*validation.Limits{}), ) require.NoError(t, err) - pusher := func(ctx context.Context, pushReq *Request) error { + pusher := func(_ context.Context, pushReq *Request) error { t.Helper() t.Cleanup(pushReq.CleanUp) return tt.verifyFunc(t, pushReq) @@ -361,7 +361,7 @@ func TestHandler_otlpDroppedMetricsPanic(t *testing.T) { req := createOTLPProtoRequest(t, pmetricotlp.NewExportRequestFromMetrics(md), false) resp := httptest.NewRecorder() - handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(ctx context.Context, pushReq *Request) error { + handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() assert.NoError(t, err) assert.Len(t, request.Timeseries, 3) @@ -407,7 +407,7 @@ func TestHandler_otlpDroppedMetricsPanic2(t *testing.T) { req := createOTLPProtoRequest(t, pmetricotlp.NewExportRequestFromMetrics(md), false) resp := httptest.NewRecorder() - handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(ctx context.Context, pushReq *Request) error { + handler := OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() assert.NoError(t, err) assert.Len(t, request.Timeseries, 2) @@ -433,7 +433,7 @@ func TestHandler_otlpDroppedMetricsPanic2(t *testing.T) { req = createOTLPProtoRequest(t, pmetricotlp.NewExportRequestFromMetrics(md), false) resp = httptest.NewRecorder() - handler = OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(ctx context.Context, pushReq *Request) error { + handler = OTLPHandler(100000, nil, false, true, limits, RetryConfig{}, nil, func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() assert.NoError(t, err) assert.Len(t, request.Timeseries, 10) // 6 buckets (including +Inf) + 2 sum/count + 2 from the first case @@ -503,7 +503,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) { name: "config flag set to false means SkipLabelNameValidation is false", allowSkipLabelNameValidation: false, req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, false)), - verifyReqHandler: func(ctx context.Context, pushReq *Request) error { + verifyReqHandler: func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() assert.NoError(t, err) assert.Len(t, request.Timeseries, 1) @@ -521,7 +521,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) { name: "config flag set to false means SkipLabelNameValidation is always false even if write requests sets it to true", allowSkipLabelNameValidation: false, req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, true)), - verifyReqHandler: func(ctx context.Context, pushReq *Request) error { + verifyReqHandler: func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() require.NoError(t, err) t.Cleanup(pushReq.CleanUp) @@ -539,7 +539,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) { name: "config flag set to true but write request set to false means SkipLabelNameValidation is false", allowSkipLabelNameValidation: true, req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, false)), - verifyReqHandler: func(ctx context.Context, pushReq 
*Request) error { + verifyReqHandler: func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() assert.NoError(t, err) assert.Len(t, request.Timeseries, 1) @@ -556,7 +556,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) { name: "config flag set to true and write request set to true means SkipLabelNameValidation is true", allowSkipLabelNameValidation: true, req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, true)), - verifyReqHandler: func(ctx context.Context, pushReq *Request) error { + verifyReqHandler: func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() assert.NoError(t, err) assert.Len(t, request.Timeseries, 1) @@ -573,7 +573,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) { name: "config flag set to true and write request set to true but header not sent means SkipLabelNameValidation is false", allowSkipLabelNameValidation: true, req: createRequest(t, createMimirWriteRequestProtobufWithNonSupportedLabelNames(t, true)), - verifyReqHandler: func(ctx context.Context, pushReq *Request) error { + verifyReqHandler: func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() assert.NoError(t, err) assert.Len(t, request.Timeseries, 1) @@ -603,7 +603,7 @@ func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) { func verifyWritePushFunc(t *testing.T, expectSource mimirpb.WriteRequest_SourceEnum) PushFunc { t.Helper() - return func(ctx context.Context, pushReq *Request) error { + return func(_ context.Context, pushReq *Request) error { request, err := pushReq.WriteRequest() require.NoError(t, err) t.Cleanup(pushReq.CleanUp) @@ -618,7 +618,7 @@ func verifyWritePushFunc(t *testing.T, expectSource mimirpb.WriteRequest_SourceE func readBodyPushFunc(t *testing.T) PushFunc { t.Helper() - return func(ctx context.Context, req *Request) error { + return func(_ context.Context, req *Request) error { _, err := req.WriteRequest() return err } @@ -706,7 +706,7 @@ func BenchmarkPushHandler(b *testing.B) { protobuf := createPrometheusRemoteWriteProtobuf(b) buf := bytes.NewBuffer(snappy.Encode(nil, protobuf)) req := createRequest(b, protobuf) - pushFunc := func(ctx context.Context, pushReq *Request) error { + pushFunc := func(_ context.Context, pushReq *Request) error { if _, err := pushReq.WriteRequest(); err != nil { return err } @@ -764,7 +764,7 @@ func TestHandler_ErrorTranslation(t *testing.T) { parserFunc := func(context.Context, *http.Request, int, *util.RequestBuffers, *mimirpb.PreallocWriteRequest, log.Logger) error { return tc.err } - pushFunc := func(ctx context.Context, req *Request) error { + pushFunc := func(_ context.Context, req *Request) error { _, err := req.WriteRequest() // just read the body so we can trigger the parser return err } @@ -831,7 +831,7 @@ func TestHandler_ErrorTranslation(t *testing.T) { parserFunc := func(context.Context, *http.Request, int, *util.RequestBuffers, *mimirpb.PreallocWriteRequest, log.Logger) error { return nil } - pushFunc := func(ctx context.Context, req *Request) error { + pushFunc := func(_ context.Context, req *Request) error { _, err := req.WriteRequest() // just read the body so we can trigger the parser if err != nil { return err diff --git a/pkg/frontend/frontend_test.go b/pkg/frontend/frontend_test.go index 4f2a9fccc01..44ac58f58be 100644 --- a/pkg/frontend/frontend_test.go +++ b/pkg/frontend/frontend_test.go @@ -105,7 +105,7 @@ func 
TestFrontend_LogsSlowQueriesFormValues(t *testing.T) { require.NoError(t, err) downstreamServer := http.Server{ - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, err := w.Write([]byte(responseBody)) require.NoError(t, err) }), @@ -167,7 +167,7 @@ func TestFrontend_ReturnsRequestBodyTooLargeError(t *testing.T) { require.NoError(t, err) downstreamServer := http.Server{ - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, err := w.Write([]byte(responseBody)) require.NoError(t, err) }), diff --git a/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go b/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go index 3d7d8fab8ff..383dff3a081 100644 --- a/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go +++ b/pkg/frontend/querymiddleware/astmapper/subtree_folder_test.go @@ -22,7 +22,7 @@ func TestEvalPredicate(t *testing.T) { }{ "should return error if the predicate returns error": { input: "selector1{} or selector2{}", - fn: func(node parser.Node) (bool, error) { + fn: func(parser.Node) (bool, error) { return false, errors.New("some err") }, expectedRes: false, @@ -30,7 +30,7 @@ func TestEvalPredicate(t *testing.T) { }, "should return false if the predicate returns false for all nodes in the subtree": { input: "selector1{} or selector2{}", - fn: func(node parser.Node) (bool, error) { + fn: func(parser.Node) (bool, error) { return false, nil }, expectedRes: false, diff --git a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go index 1af331906f0..89c5f2d6401 100644 --- a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go +++ b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go @@ -46,7 +46,7 @@ func TestCardinalityQueryCache_RoundTrip_WithTenantFederation(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { // Mock the downstream. - downstream := RoundTripFunc(func(request *http.Request) (*http.Response, error) { + downstream := RoundTripFunc(func(*http.Request) (*http.Response, error) { return &http.Response{ StatusCode: 200, Body: io.NopCloser(strings.NewReader("{}")), diff --git a/pkg/frontend/querymiddleware/cardinality_test.go b/pkg/frontend/querymiddleware/cardinality_test.go index 40918fe75dd..ff1738de83b 100644 --- a/pkg/frontend/querymiddleware/cardinality_test.go +++ b/pkg/frontend/querymiddleware/cardinality_test.go @@ -205,7 +205,7 @@ func Test_cardinalityEstimation_Do(t *testing.T) { { name: "with empty cache", tenantID: "1", - downstreamHandler: func(ctx context.Context, request MetricsQueryRequest) (Response, error) { + downstreamHandler: func(ctx context.Context, _ MetricsQueryRequest) (Response, error) { queryStats := stats.FromContext(ctx) queryStats.AddFetchedSeries(numSeries) return &PrometheusResponse{}, nil diff --git a/pkg/frontend/querymiddleware/generic_query_cache_test.go b/pkg/frontend/querymiddleware/generic_query_cache_test.go index b535a61836d..9cf1fd1d71b 100644 --- a/pkg/frontend/querymiddleware/generic_query_cache_test.go +++ b/pkg/frontend/querymiddleware/generic_query_cache_test.go @@ -131,7 +131,7 @@ func testGenericQueryCacheRoundTrip(t *testing.T, newRoundTripper newGenericQuer expectedStoredToCache: false, // Should not store anything to the cache. 
}, "should fetch the response from the downstream and overwrite the cached response if corrupted": { - init: func(t *testing.T, c cache.Cache, _, reqHashedCacheKey string) { + init: func(_ *testing.T, c cache.Cache, _, reqHashedCacheKey string) { c.StoreAsync(map[string][]byte{reqHashedCacheKey: []byte("corrupted")}, time.Minute) }, cacheTTL: time.Minute, diff --git a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go index fa1c2ff3146..7231c493be5 100644 --- a/pkg/frontend/querymiddleware/querysharding_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test.go @@ -51,7 +51,7 @@ var ( ) func mockHandlerWith(resp *PrometheusResponse, err error) MetricsQueryHandler { - return HandlerFunc(func(ctx context.Context, req MetricsQueryRequest) (Response, error) { + return HandlerFunc(func(ctx context.Context, _ MetricsQueryRequest) (Response, error) { if expired := ctx.Err(); expired != nil { return nil, expired } @@ -1499,7 +1499,7 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) { LookbackDelta: lookbackDelta, EnableAtModifier: true, EnableNegativeOffset: true, - NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 { + NoStepSubqueryIntervalFn: func(int64) int64 { return int64(1 * time.Minute / (time.Millisecond / time.Nanosecond)) }, }) @@ -1512,14 +1512,14 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) { LookbackDelta: lookbackDelta, EnableAtModifier: true, EnableNegativeOffset: true, - NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 { + NoStepSubqueryIntervalFn: func(int64) int64 { return int64(1 * time.Minute / (time.Millisecond / time.Nanosecond)) }, }) - queryableInternalErr = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + queryableInternalErr = storage.QueryableFunc(func(int64, int64) (storage.Querier, error) { return nil, apierror.New(apierror.TypeInternal, "some internal error") }) - queryablePrometheusExecErr = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + queryablePrometheusExecErr = storage.QueryableFunc(func(int64, int64) (storage.Querier, error) { return nil, apierror.Newf(apierror.TypeExec, "expanding series: %s", querier.NewMaxQueryLengthError(744*time.Hour, 720*time.Hour)) }) queryable = storageSeriesQueryable([]*promql.StorageSeries{ @@ -1633,7 +1633,7 @@ func TestQuerySharding_EngineErrorMapping(t *testing.T) { series = append(series, newSeries(newTestCounterLabels(i), start.Add(-lookbackDelta), end, step, factor(float64(i)*0.1))) } - queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + queryable := storage.QueryableFunc(func(int64, int64) (storage.Querier, error) { return &querierMock{series: series}, nil }) @@ -2174,7 +2174,7 @@ func (h *downstreamHandler) Do(ctx context.Context, r MetricsQueryRequest) (Resp } func storageSeriesQueryable(series []*promql.StorageSeries) storage.Queryable { - return storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + return storage.QueryableFunc(func(int64, int64) (storage.Querier, error) { return &querierMock{series: series}, nil }) } @@ -2391,7 +2391,7 @@ func stale(from, to time.Time, wrap generator) generator { // constant returns a generator that generates a constant value func constant(value float64) generator { - return func(ts int64) float64 { + return func(int64) float64 { return value } } @@ -2440,7 +2440,7 @@ func newEngine() *promql.Engine { LookbackDelta: lookbackDelta, EnableAtModifier: true, 
EnableNegativeOffset: true, - NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 { + NoStepSubqueryIntervalFn: func(int64) int64 { return int64(1 * time.Minute / (time.Millisecond / time.Nanosecond)) }, }) diff --git a/pkg/frontend/querymiddleware/retry_test.go b/pkg/frontend/querymiddleware/retry_test.go index 23e7c9bfbce..af2e9b11846 100644 --- a/pkg/frontend/querymiddleware/retry_test.go +++ b/pkg/frontend/querymiddleware/retry_test.go @@ -43,7 +43,7 @@ func TestRetry(t *testing.T) { { name: "retry failures", expectedRetries: 4, - handler: HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { if try.Inc() == 5 { return &PrometheusResponse{Status: "Hello World"}, nil } @@ -54,7 +54,7 @@ func TestRetry(t *testing.T) { { name: "don't retry 400s", expectedRetries: 0, - handler: HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { return nil, errBadRequest }), err: errBadRequest, @@ -62,7 +62,7 @@ func TestRetry(t *testing.T) { { name: "don't retry bad-data", expectedRetries: 0, - handler: HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { return nil, errUnprocessable }), err: errUnprocessable, @@ -70,7 +70,7 @@ func TestRetry(t *testing.T) { { name: "retry 500s", expectedRetries: 5, - handler: HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { return nil, errInternal }), err: errInternal, @@ -78,7 +78,7 @@ func TestRetry(t *testing.T) { { name: "last error", expectedRetries: 4, - handler: HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + handler: HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { if try.Inc() == 5 { return nil, errBadRequest } @@ -112,7 +112,7 @@ func Test_RetryMiddlewareCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := newRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap( - HandlerFunc(func(c context.Context, r MetricsQueryRequest) (Response, error) { + HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { try.Inc() return nil, ctx.Err() }), @@ -122,7 +122,7 @@ func Test_RetryMiddlewareCancel(t *testing.T) { ctx, cancel = context.WithCancel(context.Background()) _, err = newRetryMiddleware(log.NewNopLogger(), 5, nil).Wrap( - HandlerFunc(func(c context.Context, r MetricsQueryRequest) (Response, error) { + HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { try.Inc() cancel() return nil, errors.New("failed") diff --git a/pkg/frontend/querymiddleware/roundtrip_test.go b/pkg/frontend/querymiddleware/roundtrip_test.go index dbea50cf822..dc4fdcb81c4 100644 --- a/pkg/frontend/querymiddleware/roundtrip_test.go +++ b/pkg/frontend/querymiddleware/roundtrip_test.go @@ -367,7 +367,7 @@ func TestTripperware_Metrics(t *testing.T) { s := httptest.NewServer( middleware.AuthenticateUser.Wrap( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", jsonMimeType) _, err := w.Write([]byte("{}")) require.NoError(t, err) diff --git a/pkg/frontend/querymiddleware/running_test.go 
b/pkg/frontend/querymiddleware/running_test.go index cce833240bc..0d11f03e729 100644 --- a/pkg/frontend/querymiddleware/running_test.go +++ b/pkg/frontend/querymiddleware/running_test.go @@ -28,7 +28,7 @@ func TestAwaitQueryFrontendServiceRunning_ServiceIsReady(t *testing.T) { func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyWaitDisabled(t *testing.T) { startChan := make(chan struct{}) - start := func(ctx context.Context) error { + start := func(context.Context) error { <-startChan return nil } @@ -45,7 +45,7 @@ func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyWaitDisabled(t *testi func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyInitially(t *testing.T) { startChan := make(chan struct{}) - start := func(ctx context.Context) error { + start := func(context.Context) error { <-startChan return nil } @@ -69,7 +69,7 @@ func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyInitially(t *testing. func TestAwaitQueryFrontendServiceRunning_ServiceIsNotReadyAfterTimeout(t *testing.T) { serviceChan := make(chan struct{}) - start := func(ctx context.Context) error { + start := func(context.Context) error { <-serviceChan return nil } diff --git a/pkg/frontend/querymiddleware/sharded_queryable_test.go b/pkg/frontend/querymiddleware/sharded_queryable_test.go index a2f1bd8858d..e72a60b7932 100644 --- a/pkg/frontend/querymiddleware/sharded_queryable_test.go +++ b/pkg/frontend/querymiddleware/sharded_queryable_test.go @@ -59,7 +59,7 @@ func TestShardedQuerier_Select(t *testing.T) { // override handler func to assert new query has been substituted q.handler = HandlerFunc( - func(ctx context.Context, req MetricsQueryRequest) (Response, error) { + func(_ context.Context, req MetricsQueryRequest) (Response, error) { require.Equal(t, `http_requests_total{cluster="prod"}`, req.GetQuery()) return expected, nil }, @@ -218,7 +218,7 @@ func TestShardedQuerier_Select_ShouldConcurrentlyRunEmbeddedQueries(t *testing.T downstreamWg := sync.WaitGroup{} downstreamWg.Add(len(embeddedQueries)) - querier := mkShardedQuerier(HandlerFunc(func(ctx context.Context, req MetricsQueryRequest) (Response, error) { + querier := mkShardedQuerier(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { // Wait until the downstream handler has been concurrently called for each embedded query. 
downstreamWg.Done() downstreamWg.Wait() diff --git a/pkg/frontend/querymiddleware/split_and_cache_test.go b/pkg/frontend/querymiddleware/split_and_cache_test.go index dba5581cb8b..ccc7a660c7d 100644 --- a/pkg/frontend/querymiddleware/split_and_cache_test.go +++ b/pkg/frontend/querymiddleware/split_and_cache_test.go @@ -293,7 +293,7 @@ func TestSplitAndCacheMiddleware_ResultsCache(t *testing.T) { } downstreamReqs := 0 - rc := mw.Wrap(HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + rc := mw.Wrap(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { downstreamReqs++ return expectedResponse, nil })) @@ -425,7 +425,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_ShouldNotLookupCacheIfStepIsNotAli } downstreamReqs := 0 - rc := mw.Wrap(HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + rc := mw.Wrap(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { downstreamReqs++ return expectedResponse, nil })) @@ -518,7 +518,7 @@ func TestSplitAndCacheMiddleware_ResultsCache_EnabledCachingOfStepUnalignedReque } downstreamReqs := 0 - rc := mw.Wrap(HandlerFunc(func(_ context.Context, req MetricsQueryRequest) (Response, error) { + rc := mw.Wrap(HandlerFunc(func(context.Context, MetricsQueryRequest) (Response, error) { downstreamReqs++ return expectedResponse, nil })) diff --git a/pkg/frontend/querymiddleware/split_by_instant_interval.go b/pkg/frontend/querymiddleware/split_by_instant_interval.go index 78a24d54695..fde1598defe 100644 --- a/pkg/frontend/querymiddleware/split_by_instant_interval.go +++ b/pkg/frontend/querymiddleware/split_by_instant_interval.go @@ -107,12 +107,12 @@ func (s *splitInstantQueryByIntervalMiddleware) Do(ctx context.Context, req Metr spanLog, ctx := spanlogger.NewWithLogger(ctx, logger, "splitInstantQueryByIntervalMiddleware.Do") defer spanLog.Span.Finish() - tenantsIds, err := tenant.TenantIDs(ctx) + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, apierror.New(apierror.TypeBadData, err.Error()) } - splitInterval := s.getSplitIntervalForQuery(tenantsIds, req, spanLog) + splitInterval := s.getSplitIntervalForQuery(tenantIDs, req, spanLog) if splitInterval <= 0 { spanLog.DebugLog("msg", "query splitting is disabled for this query or tenant") return s.next.Do(ctx, req) @@ -201,13 +201,13 @@ func (s *splitInstantQueryByIntervalMiddleware) Do(ctx context.Context, req Metr } // getSplitIntervalForQuery calculates and return the split interval that should be used to run the instant query. -func (s *splitInstantQueryByIntervalMiddleware) getSplitIntervalForQuery(tenantsIds []string, r MetricsQueryRequest, spanLog *spanlogger.SpanLogger) time.Duration { +func (s *splitInstantQueryByIntervalMiddleware) getSplitIntervalForQuery(tenantIDs []string, r MetricsQueryRequest, spanLog *spanlogger.SpanLogger) time.Duration { // Check if splitting is disabled for the given request. 
if r.GetOptions().InstantSplitDisabled { return 0 } - splitInterval := validation.SmallestPositiveNonZeroDurationPerTenant(tenantsIds, s.limits.SplitInstantQueriesByInterval) + splitInterval := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, s.limits.SplitInstantQueriesByInterval) if splitInterval <= 0 { return 0 } @@ -217,7 +217,7 @@ func (s *splitInstantQueryByIntervalMiddleware) getSplitIntervalForQuery(tenants splitInterval = time.Duration(r.GetOptions().InstantSplitInterval) } - spanLog.DebugLog("msg", "getting split instant query interval", "tenantsIds", tenantsIds, "split interval", splitInterval) + spanLog.DebugLog("msg", "getting split instant query interval", "tenantIDs", tenantIDs, "split interval", splitInterval) return splitInterval } diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go index 2823d27229e..0a0e8ca2432 100644 --- a/pkg/frontend/transport/handler_test.go +++ b/pkg/frontend/transport/handler_test.go @@ -334,7 +334,7 @@ func TestHandler_Stop(t *testing.T) { ) inProgress := make(chan int32) var reqID atomic.Int32 - roundTripper := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + roundTripper := roundTripperFunc(func(*http.Request) (*http.Response, error) { id := reqID.Inc() t.Logf("request %d sending its ID", id) inProgress <- id @@ -464,7 +464,7 @@ func TestHandler_LogsFormattedQueryDetails(t *testing.T) { // the details aren't set by the query stats middleware if the request isn't a query name: "not a query request", requestFormFields: []string{}, - setQueryDetails: func(d *querymiddleware.QueryDetails) {}, + setQueryDetails: func(*querymiddleware.QueryDetails) {}, expectedLoggedFields: map[string]string{}, expectedApproximateDurations: map[string]time.Duration{}, expectedMissingFields: []string{"length", "param_time", "time_since_param_start", "time_since_param_end"}, diff --git a/pkg/frontend/v1/frontend_test.go b/pkg/frontend/v1/frontend_test.go index b380dc67a00..d4ade4357cd 100644 --- a/pkg/frontend/v1/frontend_test.go +++ b/pkg/frontend/v1/frontend_test.go @@ -47,7 +47,7 @@ const ( ) func TestFrontend(t *testing.T) { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, err := w.Write([]byte("Hello World")) require.NoError(t, err) }) @@ -161,7 +161,7 @@ func TestFrontendCheckReady(t *testing.T) { // the underlying query is correctly cancelled _and not retried_. 
func TestFrontendCancel(t *testing.T) { var tries atomic.Int32 - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { <-r.Context().Done() tries.Inc() }) @@ -189,7 +189,7 @@ func TestFrontendCancel(t *testing.T) { } func TestFrontendMetricsCleanup(t *testing.T) { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, err := w.Write([]byte("Hello World")) require.NoError(t, err) }) @@ -238,7 +238,7 @@ func TestFrontendStats(t *testing.T) { tl := testLogger{} - test := func(addr string, fr *Frontend) { + test := func(addr string, _ *Frontend) { req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/", addr), nil) require.NoError(t, err) err = user.InjectOrgIDIntoHTTPRequest(user.InjectOrgID(context.Background(), "1"), req) diff --git a/pkg/frontend/v2/frontend_test.go b/pkg/frontend/v2/frontend_test.go index 10de96a47af..238306893b2 100644 --- a/pkg/frontend/v2/frontend_test.go +++ b/pkg/frontend/v2/frontend_test.go @@ -227,7 +227,7 @@ func TestFrontendRetryEnqueue(t *testing.T) { } func TestFrontendTooManyRequests(t *testing.T) { - f, _ := setupFrontend(t, nil, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + f, _ := setupFrontend(t, nil, func(*Frontend, *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.TOO_MANY_REQUESTS_PER_TENANT} }) @@ -240,7 +240,7 @@ func TestFrontendTooManyRequests(t *testing.T) { } func TestFrontendEnqueueFailure(t *testing.T) { - f, _ := setupFrontend(t, nil, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + f, _ := setupFrontend(t, nil, func(*Frontend, *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN} }) @@ -630,7 +630,7 @@ func TestConfig_Validate(t *testing.T) { expectedErr string }{ "should pass with default config": { - setup: func(cfg *Config) {}, + setup: func(*Config) {}, }, "should pass if scheduler address is configured, and query-scheduler discovery mode is the default one": { setup: func(cfg *Config) { @@ -668,7 +668,7 @@ func TestWithClosingGrpcServer(t *testing.T) { const frontendConcurrency = 1 const userID = "test" - f, _ := setupFrontendWithConcurrencyAndServerOptions(t, nil, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + f, _ := setupFrontendWithConcurrencyAndServerOptions(t, nil, func(*Frontend, *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.TOO_MANY_REQUESTS_PER_TENANT} }, frontendConcurrency, grpc.KeepaliveParams(keepalive.ServerParameters{ MaxConnectionIdle: 100 * time.Millisecond, diff --git a/pkg/ingester/client/buffering_client_test.go b/pkg/ingester/client/buffering_client_test.go index 0878f15bd04..743065bfc17 100644 --- a/pkg/ingester/client/buffering_client_test.go +++ b/pkg/ingester/client/buffering_client_test.go @@ -157,7 +157,7 @@ func TestWriteRequestBufferingClient_Push_WithMultipleMarshalCalls(t *testing.T) func BenchmarkWriteRequestBufferingClient_Push(b *testing.B) { bufferingClient := newBufferPoolingIngesterClient(&dummyIngesterClient{}, nil) - bufferingClient.pushRawFn = func(ctx context.Context, conn *grpc.ClientConn, msg interface{}, opts 
...grpc.CallOption) (*mimirpb.WriteResponse, error) { + bufferingClient.pushRawFn = func(_ context.Context, _ *grpc.ClientConn, msg interface{}, _ ...grpc.CallOption) (*mimirpb.WriteResponse, error) { _, err := msg.(proto.Marshaler).Marshal() return nil, err } diff --git a/pkg/ingester/client/circuitbreaker.go b/pkg/ingester/client/circuitbreaker.go index 7a418f06beb..31ad338b40e 100644 --- a/pkg/ingester/client/circuitbreaker.go +++ b/pkg/ingester/client/circuitbreaker.go @@ -70,10 +70,10 @@ func NewCircuitBreaker(inst ring.InstanceDesc, cfg CircuitBreakerConfig, metrics breaker := circuitbreaker.Builder[any](). WithFailureRateThreshold(cfg.FailureThreshold, cfg.FailureExecutionThreshold, cfg.ThresholdingPeriod). WithDelay(cfg.CooldownPeriod). - OnFailure(func(event failsafe.ExecutionEvent[any]) { + OnFailure(func(failsafe.ExecutionEvent[any]) { countError.Inc() }). - OnSuccess(func(event failsafe.ExecutionEvent[any]) { + OnSuccess(func(failsafe.ExecutionEvent[any]) { countSuccess.Inc() }). OnClose(func(event circuitbreaker.StateChangedEvent) { @@ -88,7 +88,7 @@ func NewCircuitBreaker(inst ring.InstanceDesc, cfg CircuitBreakerConfig, metrics transitionHalfOpen.Inc() level.Info(logger).Log("msg", "circuit breaker is half-open", "ingester", inst.Id, "previous", event.OldState, "current", event.NewState) }). - HandleIf(func(r any, err error) bool { return isFailure(err) }). + HandleIf(func(_ any, err error) bool { return isFailure(err) }). Build() executor := failsafe.NewExecutor[any](breaker) diff --git a/pkg/ingester/client/circuitbreaker_test.go b/pkg/ingester/client/circuitbreaker_test.go index cee718073dd..747d7877062 100644 --- a/pkg/ingester/client/circuitbreaker_test.go +++ b/pkg/ingester/client/circuitbreaker_test.go @@ -76,12 +76,12 @@ func perInstanceLimitError(t *testing.T) error { func TestNewCircuitBreaker(t *testing.T) { // gRPC invoker that does not return an error - success := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { + success := func(context.Context, string, interface{}, interface{}, *grpc.ClientConn, ...grpc.CallOption) error { return nil } // gRPC invoker that returns an error that will be treated as an error by the circuit breaker - failure := func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { + failure := func(context.Context, string, interface{}, interface{}, *grpc.ClientConn, ...grpc.CallOption) error { return perInstanceLimitError(t) } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 9067bbc01d1..db929597fa7 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -487,7 +487,7 @@ func New(cfg Config, limits *validation.Overrides, ingestersRing ring.ReadRing, i.subservicesWatcher.WatchService(i.metricsUpdaterService) // Init metadata purger service, responsible to periodically delete metrics metadata past their retention period. 
- i.metadataPurgerService = services.NewTimerService(metadataPurgePeriod, nil, func(ctx context.Context) error { + i.metadataPurgerService = services.NewTimerService(metadataPurgePeriod, nil, func(context.Context) error { i.purgeUserMetricsMetadata() return nil }, nil) @@ -1852,7 +1852,7 @@ func (i *Ingester) LabelNamesAndValues(request *client.LabelNamesAndValuesReques var valueFilter func(name, value string) (bool, error) switch request.GetCountMethod() { case client.IN_MEMORY: - valueFilter = func(name, value string) (bool, error) { + valueFilter = func(string, string) (bool, error) { return true, nil } case client.ACTIVE: @@ -3029,7 +3029,7 @@ func (i *Ingester) compactionServiceInterval() (firstInterval, standardInterval // Compacts all compactable blocks. Force flag will force compaction even if head is not compactable yet. func (i *Ingester) compactBlocks(ctx context.Context, force bool, forcedCompactionMaxTime int64, allowed *util.AllowedTenants) { - _ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(ctx context.Context, userID string) error { + _ = concurrency.ForEachUser(ctx, i.getTSDBUsers(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(_ context.Context, userID string) error { if !allowed.IsAllowed(userID) { return nil } diff --git a/pkg/ingester/ingester_ingest_storage_test.go b/pkg/ingester/ingester_ingest_storage_test.go index 2c52e5b4948..8c55a6e9d21 100644 --- a/pkg/ingester/ingester_ingest_storage_test.go +++ b/pkg/ingester/ingester_ingest_storage_test.go @@ -86,7 +86,7 @@ func TestIngester_Start(t *testing.T) { // - Count the Fetch requests. // - Mock the ListOffsets response, returning the offset expected once the ingester can be // considered having successfully caught up. - kafkaCluster.ControlKey(int16(kmsg.Fetch), func(kreq kmsg.Request) (kmsg.Response, error, bool) { + kafkaCluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { kafkaCluster.KeepControl() fetchRequestsCount.Inc() @@ -266,7 +266,7 @@ func TestIngester_QueryStream_IngestStorageReadConsistency(t *testing.T) { // to "strong" then a query shouldn't succeed until the Fetch requests succeed. failFetch := atomic.NewBool(true) - kafkaCluster.ControlKey(int16(kmsg.Fetch), func(kreq kmsg.Request) (kmsg.Response, error, bool) { + kafkaCluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { kafkaCluster.KeepControl() if failFetch.Load() { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index eb8a8fc4425..df72a1258c3 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -2624,8 +2624,8 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { runBenchmark func(b *testing.B, ingester *Ingester, metrics [][]mimirpb.LabelAdapter, samples []mimirpb.Sample) }{ "out of bound samples": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true }, - beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { + prepareConfig: func(*validation.Limits, *InstanceLimits) bool { return true }, + beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) { // Push a single time series to set the TSDB min time. 
currTimeReq := mimirpb.ToWriteRequest( [][]mimirpb.LabelAdapter{{{Name: labels.MetricName, Value: metricName}}}, @@ -2649,7 +2649,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, }, "out-of-order samples": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true }, + prepareConfig: func(*validation.Limits, *InstanceLimits) bool { return true }, beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // For each series, push a single sample with a timestamp greater than next pushes. for i := 0; i < numSeriesPerRequest; i++ { @@ -2676,11 +2676,11 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, }, "per-user series limit reached": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { + prepareConfig: func(limits *validation.Limits, _ *InstanceLimits) bool { limits.MaxGlobalSeriesPerUser = 1 return true }, - beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { + beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) { // Push a series with a metric name different than the one used during the benchmark. currTimeReq := mimirpb.ToWriteRequest( [][]mimirpb.LabelAdapter{{{Name: labels.MetricName, Value: "another"}}}, @@ -2701,11 +2701,11 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, }, "per-metric series limit reached": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { + prepareConfig: func(limits *validation.Limits, _ *InstanceLimits) bool { limits.MaxGlobalSeriesPerMetric = 1 return true }, - beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { + beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) { // Push a series with the same metric name but different labels than the one used during the benchmark. 
currTimeReq := mimirpb.ToWriteRequest( [][]mimirpb.LabelAdapter{{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: "another"}}}, @@ -2726,14 +2726,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, }, "very low ingestion rate limit": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { + prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool { if instanceLimits == nil { return false } instanceLimits.MaxIngestionRate = 0.00001 // very low return true }, - beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { + beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) { // Send a lot of samples _, err := ingester.Push(ctx, generateSamplesForLabel(labels.FromStrings(labels.MetricName, "test"), 1, 10000)) require.NoError(b, err) @@ -2749,14 +2749,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, }, "max number of tenants reached": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { + prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool { if instanceLimits == nil { return false } instanceLimits.MaxInMemoryTenants = 1 return true }, - beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { + beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) { // Send some samples for one tenant (not the same that is used during the test) ctx := user.InjectOrgID(context.Background(), "different_tenant") _, err := ingester.Push(ctx, generateSamplesForLabel(labels.FromStrings(labels.MetricName, "test"), 1, 10000)) @@ -2771,14 +2771,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, }, "max number of series reached": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { + prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool { if instanceLimits == nil { return false } instanceLimits.MaxInMemorySeries = 1 return true }, - beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { + beforeBenchmark: func(b *testing.B, ingester *Ingester, _ int) { _, err := ingester.Push(ctx, generateSamplesForLabel(labels.FromStrings(labels.MetricName, "test"), 1, 10000)) require.NoError(b, err) }, @@ -2790,14 +2790,14 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { }, }, "max inflight requests reached": { - prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { + prepareConfig: func(_ *validation.Limits, instanceLimits *InstanceLimits) bool { if instanceLimits == nil { return false } instanceLimits.MaxInflightPushRequests = 1 return true }, - beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { + beforeBenchmark: func(_ *testing.B, ingester *Ingester, _ int) { ingester.inflightPushRequests.Inc() }, runBenchmark: func(b *testing.B, ingester *Ingester, metrics [][]mimirpb.LabelAdapter, samples []mimirpb.Sample) { @@ -5066,7 +5066,7 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { }, "should not load any TSDB if the root directory is empty": { walReplayConcurrency: 10, - setup: func(t *testing.T, dir string) {}, + setup: func(*testing.T, string) {}, check: func(t *testing.T, i *Ingester) { require.Zero(t, len(i.tsdbs)) }, @@ -5400,7 +5400,7 @@ func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInP // Mock the shipper to slow down Sync() execution. 
s := mockUserShipper(t, i) - s.On("Sync", mock.Anything).Run(func(args mock.Arguments) { + s.On("Sync", mock.Anything).Run(func(mock.Arguments) { time.Sleep(3 * time.Second) }).Return(0, nil) @@ -5865,7 +5865,7 @@ func TestIngester_flushing(t *testing.T) { cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown = false }, - action: func(t *testing.T, i *Ingester, reg *prometheus.Registry) { + action: func(t *testing.T, i *Ingester, _ *prometheus.Registry) { // Stop the ingester. require.NoError(t, services.StopAndAwaitTerminated(context.Background(), i)) @@ -8362,7 +8362,7 @@ func TestIngesterActiveSeries(t *testing.T) { }, }, "active series for cardinality API": { - test: func(t *testing.T, ingester *Ingester, gatherer prometheus.Gatherer) { + test: func(t *testing.T, ingester *Ingester, _ prometheus.Gatherer) { pushWithUser(t, ingester, labelsToPush, userID, req) pushWithUser(t, ingester, labelsToPush, userID2, req) pushWithUser(t, ingester, labelsToPushHist, userID, reqHist) diff --git a/pkg/ingester/label_names_and_values_test.go b/pkg/ingester/label_names_and_values_test.go index 2f0f710d81c..8d9f9badbf1 100644 --- a/pkg/ingester/label_names_and_values_test.go +++ b/pkg/ingester/label_names_and_values_test.go @@ -56,7 +56,7 @@ func TestLabelNamesAndValuesAreSentInBatches(t *testing.T) { } mockServer := mockLabelNamesAndValuesServer{context: context.Background()} var stream client.Ingester_LabelNamesAndValuesServer = &mockServer - var valueFilter = func(name, value string) (bool, error) { + var valueFilter = func(string, string) (bool, error) { return true, nil } require.NoError(t, labelNamesAndValues(&mockIndex{existingLabels: existingLabels}, []*labels.Matcher{}, 32, stream, valueFilter)) @@ -95,7 +95,7 @@ func TestLabelNamesAndValues_FilteredValues(t *testing.T) { } mockServer := mockLabelNamesAndValuesServer{context: context.Background()} var stream client.Ingester_LabelNamesAndValuesServer = &mockServer - var valueFilter = func(name, value string) (bool, error) { + var valueFilter = func(_, value string) (bool, error) { return strings.Contains(value, "0"), nil } require.NoError(t, labelNamesAndValues(&mockIndex{existingLabels: existingLabels}, []*labels.Matcher{}, 32, stream, valueFilter)) @@ -286,7 +286,7 @@ func TestLabelNamesAndValues_ContextCancellation(t *testing.T) { opDelay: idxOpDelay, } - var valueFilter = func(name, value string) (bool, error) { + var valueFilter = func(string, string) (bool, error) { return true, nil } doneCh := make(chan error, 1) diff --git a/pkg/ingester/owned_series_test.go b/pkg/ingester/owned_series_test.go index a399a481a85..22d2a4bba9a 100644 --- a/pkg/ingester/owned_series_test.go +++ b/pkg/ingester/owned_series_test.go @@ -153,7 +153,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { testFunc func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) }{ "empty ingester": { - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) { require.Equal(t, 0, c.ownedSeries.updateAllTenants(context.Background(), false)) }, }, @@ -164,7 +164,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { IngestionTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ 
map[string]*validation.Limits) { c.pushUserSeries(t) // first ingester owns all the series, even without any ownedSeries run. this is because each created series is automatically counted as "owned". @@ -192,7 +192,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { IngestionTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -223,7 +223,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { IngestionTenantShardSize: 1, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -272,7 +272,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { IngestionTenantShardSize: 1, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -320,7 +320,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { IngestionTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -550,7 +550,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { IngestionTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -578,7 +578,7 @@ func TestOwnedSeriesServiceWithIngesterRing(t *testing.T) { IngestionTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithIngesterRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -806,7 +806,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { testFunc func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) }{ "empty ingester": { - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { require.Equal(t, 0, 
c.ownedSeries.updateAllTenants(context.Background(), false)) }, }, @@ -817,7 +817,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { IngestionPartitionsTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) // first ingester owns all the series, even without any ownedSeries run. this is because each created series is automatically counted as "owned". @@ -845,7 +845,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { IngestionPartitionsTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -876,7 +876,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { IngestionPartitionsTenantShardSize: 1, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -926,7 +926,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { IngestionPartitionsTenantShardSize: 1, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -974,7 +974,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { IngestionPartitionsTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -1209,7 +1209,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { IngestionPartitionsTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -1237,7 +1237,7 @@ func TestOwnedSeriesServiceWithPartitionsRing(t *testing.T) { IngestionPartitionsTenantShardSize: 0, }, }, - testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, limits map[string]*validation.Limits) { + testFunc: func(t *testing.T, c *ownedSeriesWithPartitionsRingTestContext, _ map[string]*validation.Limits) { c.pushUserSeries(t) c.updateOwnedSeriesAndCheckResult(t, false, 1, recomputeOwnedSeriesReasonNewUser) c.checkUpdateReasonForUser(t, "") @@ -1462,7 +1462,7 @@ 
func TestOwnedSeriesPartitionsRingStrategyRingChanged(t *testing.T) { partitionRing.AddPartition(1, ring.PartitionActive, time.Now()) }) - t.Run("first call with active partition in the ring reports change", func(t *testing.T) { + t.Run("first call with active partition in the ring reports change", func(*testing.T) { // State of the ring: 1: Active checkExpectedRingChange(true) // second call reports no change diff --git a/pkg/mimir/mimir_test.go b/pkg/mimir/mimir_test.go index 7ff85d2c99a..dde93f51e01 100644 --- a/pkg/mimir/mimir_test.go +++ b/pkg/mimir/mimir_test.go @@ -521,7 +521,7 @@ func TestConfig_validateFilesystemPaths(t *testing.T) { expectedErr string }{ "should succeed with the default configuration": { - setup: func(cfg *Config) {}, + setup: func(*Config) {}, }, "should fail if alertmanager data directory contains bucket store sync directory when running mimir-backend": { setup: func(cfg *Config) { diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 5985688589e..fb55498fd91 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -621,7 +621,7 @@ func (t *Mimir) initQuerier() (serv services.Service, err error) { } // Add a middleware to extract the trace context and add a header. - internalQuerierRouter = nethttp.MiddlewareFunc(opentracing.GlobalTracer(), internalQuerierRouter.ServeHTTP, nethttp.OperationNameFunc(func(r *http.Request) string { + internalQuerierRouter = nethttp.MiddlewareFunc(opentracing.GlobalTracer(), internalQuerierRouter.ServeHTTP, nethttp.OperationNameFunc(func(*http.Request) string { return "internalQuerier" })) diff --git a/pkg/mimir/runtime_config_test.go b/pkg/mimir/runtime_config_test.go index 81145988c33..5224becb5d6 100644 --- a/pkg/mimir/runtime_config_test.go +++ b/pkg/mimir/runtime_config_test.go @@ -132,7 +132,7 @@ func TestRuntimeConfigLoader_RunsValidation(t *testing.T) { }{ { name: "successful validate doesn't return error", - validate: func(limits validation.Limits) error { + validate: func(validation.Limits) error { return nil }, }, @@ -141,7 +141,7 @@ func TestRuntimeConfigLoader_RunsValidation(t *testing.T) { }, { name: "unsuccessful validate returns error", - validate: func(limits validation.Limits) error { + validate: func(validation.Limits) error { return errors.New("validation failed") }, hasError: true, diff --git a/pkg/mimir/sanity_check_test.go b/pkg/mimir/sanity_check_test.go index 23554ccea2f..74a7da2d4ac 100644 --- a/pkg/mimir/sanity_check_test.go +++ b/pkg/mimir/sanity_check_test.go @@ -216,19 +216,19 @@ func TestCheckDirectoryReadWriteAccess(t *testing.T) { expected string }{ "should fail on directory without write access": { - dirExistsFn: func(dir string) (bool, error) { + dirExistsFn: func(string) (bool, error) { return true, nil }, - isDirReadWritable: func(dir string) error { + isDirReadWritable: func(string) error { return errors.New("read only") }, expected: fmt.Sprintf("failed to access directory %s: read only", configuredPath), }, "should pass on directory with read-write access": { - dirExistsFn: func(dir string) (bool, error) { + dirExistsFn: func(string) (bool, error) { return true, nil }, - isDirReadWritable: func(dir string) error { + isDirReadWritable: func(string) error { return nil }, expected: "", diff --git a/pkg/mimirtool/analyze/grafana.go b/pkg/mimirtool/analyze/grafana.go index ed7816a8126..0e8176df70c 100644 --- a/pkg/mimirtool/analyze/grafana.go +++ b/pkg/mimirtool/analyze/grafana.go @@ -217,7 +217,7 @@ func parseQuery(query string, metrics map[string]struct{}) error { return err } - 
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { // VectorSelector has .Name when it's explicitly set as `name{...}`. // Otherwise we need to look into the matchers. diff --git a/pkg/mimirtool/analyze/ruler.go b/pkg/mimirtool/analyze/ruler.go index 58f7a4da6b2..a33c83fe01c 100644 --- a/pkg/mimirtool/analyze/ruler.go +++ b/pkg/mimirtool/analyze/ruler.go @@ -48,7 +48,7 @@ func ParseMetricsInRuleGroup(mir *MetricsInRuler, group rwrulefmt.RuleGroup, ns continue } - parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error { if n, ok := node.(*parser.VectorSelector); ok { refMetrics[n.Name] = struct{}{} } diff --git a/pkg/mimirtool/commands/rules.go b/pkg/mimirtool/commands/rules.go index 824c0982864..7690804b11f 100644 --- a/pkg/mimirtool/commands/rules.go +++ b/pkg/mimirtool/commands/rules.go @@ -680,7 +680,7 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error { } // Do not apply the aggregation label to excluded rule groups. - applyTo := func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool { + applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool { _, excluded := r.aggregationLabelExcludedRuleGroupsList[group.Name] return !excluded } diff --git a/pkg/mimirtool/config/convert_test.go b/pkg/mimirtool/config/convert_test.go index 34db4cf2d07..52585f8b931 100644 --- a/pkg/mimirtool/config/convert_test.go +++ b/pkg/mimirtool/config/convert_test.go @@ -323,7 +323,7 @@ func TestConvert_Cortex(t *testing.T) { flags: loadFlags(t, tc.inFlagsFile), } - assertion := func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + assertion := func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) { assert.NoError(t, err) assert.ElementsMatch(t, expectedOutFlags, outFlags) assert.YAMLEq(t, string(expectedOut), string(outYAML)) @@ -396,7 +396,7 @@ func TestConvert_GEM(t *testing.T) { flags: loadFlags(t, tc.inFlagsFile), } - testConvertGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + testConvertGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) { assert.NoError(t, err) assert.ElementsMatch(t, expectedOutFlags, outFlags) assert.YAMLEq(t, string(expectedOut), string(outYAML)) @@ -434,7 +434,7 @@ func TestConvert_InvalidConfigs(t *testing.T) { yaml: loadFile(t, tc.inFile), dontLoadCommonOpts: true, } - testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + testConvertCortexAndGEM(t, in, func(t *testing.T, _ []byte, _ []string, _ ConversionNotices, err error) { assert.EqualError(t, err, tc.expectedErr) }) }) @@ -604,7 +604,7 @@ func TestChangedCortexDefaults(t *testing.T) { yaml: config, } - testConvertCortex(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + testConvertCortex(t, in, func(t *testing.T, _ []byte, _ []string, notices ConversionNotices, err error) { require.NoError(t, err) assert.ElementsMatch(t, changedCortexDefaults, notices.ChangedDefaults) }) @@ -670,7 +670,7 @@ func TestChangedGEMDefaults(t *testing.T) { yaml: config, } - testConvertGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + 
testConvertGEM(t, in, func(t *testing.T, _ []byte, _ []string, notices ConversionNotices, err error) { require.NoError(t, err) assert.ElementsMatch(t, expectedChangedDefaults, notices.ChangedDefaults) }) @@ -726,7 +726,7 @@ func TestConvert_UseNewDefaults(t *testing.T) { useNewDefaults: tc.useNewDefaults, } - testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, _ []string, notices ConversionNotices, err error) { require.NoError(t, err) assert.YAMLEq(t, string(tc.expectedYAML), string(outYAML)) @@ -753,7 +753,7 @@ func TestConvert_NotInYAMLIsNotPrinted(t *testing.T) { outputDefaults: showDefaults, dontLoadCommonOpts: true, } - testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, _ []string, _ ConversionNotices, err error) { assert.NoError(t, err) assert.NotContains(t, string(outYAML), notInYaml) }) @@ -770,7 +770,7 @@ func TestConvert_PassingOnlyYAMLReturnsOnlyYAML(t *testing.T) { yaml: inYAML, } - testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) { assert.NoError(t, err) assert.YAMLEq(t, string(expectedOutYAML), string(outYAML)) assert.Empty(t, outFlags) @@ -785,7 +785,7 @@ func TestConvert_PassingOnlyFlagsReturnsOnlyFlags(t *testing.T) { flags: inFlags, } - testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, notices ConversionNotices, err error) { + testConvertCortexAndGEM(t, in, func(t *testing.T, outYAML []byte, outFlags []string, _ ConversionNotices, err error) { assert.NoError(t, err) assert.Empty(t, outYAML) assert.ElementsMatch(t, expectedOutFlags, outFlags) @@ -809,7 +809,7 @@ func TestRemovedParamsAndFlagsAreCorrect(t *testing.T) { allCLIFlagsNames := func(p Parameters) map[string]bool { flags := map[string]bool{} - assert.NoError(t, p.Walk(func(path string, v Value) error { + assert.NoError(t, p.Walk(func(path string, _ Value) error { flagName, err := p.GetFlag(path) assert.NoError(t, err) flags[flagName] = true diff --git a/pkg/mimirtool/config/cortex.go b/pkg/mimirtool/config/cortex.go index d259fc9c6ec..0387d0136ad 100644 --- a/pkg/mimirtool/config/cortex.go +++ b/pkg/mimirtool/config/cortex.go @@ -405,7 +405,7 @@ func mapRulerAlertmanagerS3URL(dotStoragePath, storagePath string) MapperFunc { if s3URL.User != nil { username := s3URL.User.Username() password, _ := s3URL.User.Password() - setIfNonEmpty := func(p Parameters, path, val string) error { + setIfNonEmpty := func(_ Parameters, path, val string) error { currentVal, _ := target.GetValue(path) currentStr := currentVal.AsString() if val == "" || currentStr != "" { diff --git a/pkg/mimirtool/config/mapping.go b/pkg/mimirtool/config/mapping.go index b69aa6533bf..25332c117a1 100644 --- a/pkg/mimirtool/config/mapping.go +++ b/pkg/mimirtool/config/mapping.go @@ -69,7 +69,7 @@ func (m MapperFunc) DoMap(source, target Parameters) error { } func RenameMapping(to string) Mapping { - return func(oldPath string, oldVal Value) (newPath string, newVal Value) { + return func(_ string, oldVal Value) (newPath string, newVal Value) { newPath = to newVal = oldVal return diff --git a/pkg/mimirtool/rules/rules.go b/pkg/mimirtool/rules/rules.go 
index 7f0b7da1f2f..d2146cd1d52 100644 --- a/pkg/mimirtool/rules/rules.go +++ b/pkg/mimirtool/rules/rules.go @@ -157,7 +157,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru // exprNodeInspectorFunc returns a PromQL inspector. // It modifies most PromQL expressions to include a given label. func exprNodeInspectorFunc(rule rulefmt.RuleNode, label string) func(node parser.Node, path []parser.Node) error { - return func(node parser.Node, path []parser.Node) error { + return func(node parser.Node, _ []parser.Node) error { var err error switch n := node.(type) { case *parser.AggregateExpr: diff --git a/pkg/mimirtool/rules/rules_test.go b/pkg/mimirtool/rules/rules_test.go index 74294602be0..28613490f86 100644 --- a/pkg/mimirtool/rules/rules_test.go +++ b/pkg/mimirtool/rules/rules_test.go @@ -181,7 +181,7 @@ func TestAggregateBy(t *testing.T) { }, }, }, - applyTo: func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool { + applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool { return group.Name != "CountSkipped" }, expectedExpr: []string{`count by (namespace, cluster) (test_series) > 1`, `count by (namespace) (test_series) > 1`}, diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go index 483e919012b..5036bd3de67 100644 --- a/pkg/querier/block_test.go +++ b/pkg/querier/block_test.go @@ -304,7 +304,7 @@ func TestBlockQuerierSeriesSet(t *testing.T) { t.Run(fmt.Sprintf("consume with .Next() method, perform .At() after every %dth call to .Next()", callAtEvery), func(t *testing.T) { t.Parallel() - advance := func(it chunkenc.Iterator, wantTs int64) chunkenc.ValueType { return it.Next() } + advance := func(it chunkenc.Iterator, _ int64) chunkenc.ValueType { return it.Next() } ss := getSeriesSet() verifyNextSeries(t, ss, labels.FromStrings("__name__", "first", "a", "a"), 3*time.Millisecond, []timeRange{ diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index f9dfa3a3858..1358ccd0925 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -1046,7 +1046,7 @@ func TestBlocksStoreQuerier_ShouldReturnContextCanceledIfContextWasCanceledWhile srv, q := prepareTestCase(t) - srv.onSeries = func(req *storepb.SeriesRequest, srv storegatewaypb.StoreGateway_SeriesServer) error { + srv.onSeries = func(*storepb.SeriesRequest, storegatewaypb.StoreGateway_SeriesServer) error { if numExecutions.Inc() == 1 { close(waitExecution) <-continueExecution @@ -1082,7 +1082,7 @@ func TestBlocksStoreQuerier_ShouldReturnContextCanceledIfContextWasCanceledWhile srv, q := prepareTestCase(t) - srv.onLabelNames = func(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { + srv.onLabelNames = func(context.Context, *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { if numExecutions.Inc() == 1 { close(waitExecution) <-continueExecution @@ -1117,7 +1117,7 @@ func TestBlocksStoreQuerier_ShouldReturnContextCanceledIfContextWasCanceledWhile srv, q := prepareTestCase(t) - srv.onLabelValues = func(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + srv.onLabelValues = func(context.Context, *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { if numExecutions.Inc() == 1 { close(waitExecution) <-continueExecution diff --git a/pkg/querier/blocks_store_replicated_set_test.go b/pkg/querier/blocks_store_replicated_set_test.go index c69f79a381b..5b6ec13ab09 100644 --- 
a/pkg/querier/blocks_store_replicated_set_test.go +++ b/pkg/querier/blocks_store_replicated_set_test.go @@ -322,7 +322,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(interface{}) (interface{}, bool, error) { d := ring.NewDesc() testData.setup(d) return d, true, nil @@ -389,7 +389,7 @@ func TestBlocksStoreReplicationSet_GetClientsFor_ShouldSupportRandomLoadBalancin ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - require.NoError(t, ringStore.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, ringStore.CAS(ctx, "test", func(interface{}) (interface{}, bool, error) { d := ring.NewDesc() for n := 1; n <= numInstances; n++ { d.AddIngester(fmt.Sprintf("instance-%d", n), fmt.Sprintf("127.0.0.%d", n), "", []uint32{uint32(n)}, ring.ACTIVE, registeredAt) diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index b4e2d85ad57..df95d36e0e7 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1284,10 +1284,10 @@ func TestConfig_ValidateLimits(t *testing.T) { expected error }{ "should pass with default config": { - setup: func(cfg *Config, limits *validation.Limits) {}, + setup: func(*Config, *validation.Limits) {}, }, "should pass if 'query store after' is enabled and shuffle-sharding is disabled": { - setup: func(cfg *Config, limits *validation.Limits) { + setup: func(cfg *Config, _ *validation.Limits) { cfg.QueryStoreAfter = time.Hour }, }, diff --git a/pkg/querier/remote_read_test.go b/pkg/querier/remote_read_test.go index 8379ffd5c9d..1f071f61618 100644 --- a/pkg/querier/remote_read_test.go +++ b/pkg/querier/remote_read_test.go @@ -102,7 +102,7 @@ func (p *partiallyFailingSeriesSet) Warnings() annotations.Annotations { func TestSampledRemoteRead(t *testing.T) { q := &mockSampleAndChunkQueryable{ - queryableFn: func(mint, maxt int64) (storage.Querier, error) { + queryableFn: func(int64, int64) (storage.Querier, error) { return mockQuerier{ seriesSet: series.NewConcreteSeriesSetFromUnsortedSeries([]storage.Series{ series.NewConcreteSeries( @@ -326,7 +326,7 @@ func TestStreamedRemoteRead(t *testing.T) { for tn, tc := range tcs { t.Run(tn, func(t *testing.T) { q := &mockSampleAndChunkQueryable{ - chunkQueryableFn: func(mint, maxt int64) (storage.ChunkQuerier, error) { + chunkQueryableFn: func(int64, int64) (storage.ChunkQuerier, error) { return mockChunkQuerier{ seriesSet: series.NewConcreteSeriesSetFromUnsortedSeries([]storage.Series{ series.NewConcreteSeries( @@ -519,7 +519,7 @@ func TestRemoteReadErrorParsing(t *testing.T) { for tn, tc := range testCases { t.Run(tn, func(t *testing.T) { q := &mockSampleAndChunkQueryable{ - queryableFn: func(mint, maxt int64) (storage.Querier, error) { + queryableFn: func(int64, int64) (storage.Querier, error) { return mockQuerier{ seriesSet: tc.seriesSet, }, tc.getQuerierErr @@ -555,7 +555,7 @@ func TestRemoteReadErrorParsing(t *testing.T) { for tn, tc := range testCases { t.Run(tn, func(t *testing.T) { q := &mockSampleAndChunkQueryable{ - chunkQueryableFn: func(mint, maxt int64) (storage.ChunkQuerier, error) { + chunkQueryableFn: func(int64, int64) (storage.ChunkQuerier, error) { 
return mockChunkQuerier{ seriesSet: tc.seriesSet, }, tc.getQuerierErr diff --git a/pkg/querier/tenantfederation/merge_exemplar_queryable.go b/pkg/querier/tenantfederation/merge_exemplar_queryable.go index 7436d62e816..51c441c0d35 100644 --- a/pkg/querier/tenantfederation/merge_exemplar_queryable.go +++ b/pkg/querier/tenantfederation/merge_exemplar_queryable.go @@ -176,7 +176,7 @@ func (m *mergeExemplarQuerier) Select(start, end int64, matchers ...[]*labels.Ma // Each task grabs a job object from the slice and stores its results in the corresponding // index in the results slice. The job handles performing a tenant-specific exemplar query // and adding a tenant ID label to each of the results. - run := func(ctx context.Context, idx int) error { + run := func(_ context.Context, idx int) error { job := jobs[idx] res, err := job.querier.Select(start, end, job.matchers...) @@ -217,7 +217,7 @@ func filterTenantsAndRewriteMatchers(idLabelName string, ids []string, allMatche return sliceToSet(ids), allMatchers } - outIds := make(map[string]struct{}) + outIDs := make(map[string]struct{}) outMatchers := make([][]*labels.Matcher, len(allMatchers)) // The ExemplarQuerier.Select method accepts a slice of slices of matchers. The matchers within @@ -225,13 +225,13 @@ func filterTenantsAndRewriteMatchers(idLabelName string, ids []string, allMatche // In order to support that, we start with a set of 0 tenant IDs and add any tenant IDs that remain // after filtering (based on the inner slice of matchers), for each outer slice. for i, matchers := range allMatchers { - filteredIds, unrelatedMatchers := filterValuesByMatchers(idLabelName, ids, matchers...) - for k := range filteredIds { - outIds[k] = struct{}{} + filteredIDs, unrelatedMatchers := filterValuesByMatchers(idLabelName, ids, matchers...) + for k := range filteredIDs { + outIDs[k] = struct{}{} } outMatchers[i] = unrelatedMatchers } - return outIds, outMatchers + return outIDs, outMatchers } diff --git a/pkg/querier/worker/frontend_processor_test.go b/pkg/querier/worker/frontend_processor_test.go index daec7e8e102..aff6fddc212 100644 --- a/pkg/querier/worker/frontend_processor_test.go +++ b/pkg/querier/worker/frontend_processor_test.go @@ -72,12 +72,12 @@ func TestFrontendProcessor_processQueriesOnSingleStream(t *testing.T) { workerCtx, workerCancel := context.WithCancel(context.Background()) - requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(mock.Arguments) { // Cancel the worker context while the query execution is in progress. workerCancel() // Ensure the execution context hasn't been canceled yet. - require.Nil(t, processClient.Context().Err()) + require.NoError(t, processClient.Context().Err()) // Intentionally slow down the query execution, to double check the worker waits until done. 
time.Sleep(time.Second) diff --git a/pkg/querier/worker/scheduler_processor_test.go b/pkg/querier/worker/scheduler_processor_test.go index 898fe59bdff..18291bea3d1 100644 --- a/pkg/querier/worker/scheduler_processor_test.go +++ b/pkg/querier/worker/scheduler_processor_test.go @@ -83,7 +83,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) { workerCtx, workerCancel := context.WithCancel(context.Background()) - requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + requestHandler.On("Handle", mock.Anything, mock.Anything).Run(func(mock.Arguments) { // Cancel the worker context while the query execution is in progress. workerCancel() @@ -405,7 +405,7 @@ func TestSchedulerProcessor_ResponseStream(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) requestHandler.On("Handle", mock.Anything, mock.Anything).Run( - func(arguments mock.Arguments) { cancel() }, + func(mock.Arguments) { cancel() }, ).Return(returnResponses(responses)()) reqProcessor.processQueriesOnSingleStream(ctx, nil, "127.0.0.1") diff --git a/pkg/querier/worker/worker_test.go b/pkg/querier/worker/worker_test.go index 993637ebd92..5465851c9b0 100644 --- a/pkg/querier/worker/worker_test.go +++ b/pkg/querier/worker/worker_test.go @@ -29,7 +29,7 @@ func TestConfig_Validate(t *testing.T) { expectedErr string }{ "should pass with default config": { - setup: func(cfg *Config) {}, + setup: func(*Config) {}, }, "should pass if frontend address is configured, but not scheduler address": { setup: func(cfg *Config) { diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go index 8f5b1df3a5b..481f3cfaaa8 100644 --- a/pkg/ruler/api_test.go +++ b/pkg/ruler/api_test.go @@ -298,7 +298,7 @@ func TestRuler_PrometheusRules(t *testing.T) { }, }, expectedConfigured: 1, - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { tenantLimits[userID] = validation.MockDefaultLimits() tenantLimits[userID].RulerRecordingRulesEvaluationEnabled = true tenantLimits[userID].RulerAlertingRulesEvaluationEnabled = false @@ -330,7 +330,7 @@ func TestRuler_PrometheusRules(t *testing.T) { }, }, expectedConfigured: 1, - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { tenantLimits[userID] = validation.MockDefaultLimits() tenantLimits[userID].RulerRecordingRulesEvaluationEnabled = false tenantLimits[userID].RulerAlertingRulesEvaluationEnabled = true @@ -364,7 +364,7 @@ func TestRuler_PrometheusRules(t *testing.T) { }, }, expectedConfigured: 0, - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { tenantLimits[userID] = validation.MockDefaultLimits() tenantLimits[userID].RulerRecordingRulesEvaluationEnabled = false tenantLimits[userID].RulerAlertingRulesEvaluationEnabled = false diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index c51bd33a41c..539be45fcb7 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -379,7 +379,7 @@ func TestMetricsQueryFuncErrors(t *testing.T) { queries := promauto.With(nil).NewCounter(prometheus.CounterOpts{}) failures := 
promauto.With(nil).NewCounter(prometheus.CounterOpts{}) - mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { + mockFunc := func(context.Context, string, time.Time) (promql.Vector, error) { return promql.Vector{}, tc.returnedError } qf := MetricsQueryFunc(mockFunc, queries, failures, tc.remoteQuerier) @@ -397,7 +397,7 @@ func TestRecordAndReportRuleQueryMetrics(t *testing.T) { queryTime := promauto.With(nil).NewCounterVec(prometheus.CounterOpts{}, []string{"user"}) zeroFetchedSeriesCount := promauto.With(nil).NewCounterVec(prometheus.CounterOpts{}, []string{"user"}) - mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { + mockFunc := func(context.Context, string, time.Time) (promql.Vector, error) { time.Sleep(1 * time.Second) return promql.Vector{}, nil } diff --git a/pkg/ruler/remotequerier_test.go b/pkg/ruler/remotequerier_test.go index 37decb97d9a..d01802013a0 100644 --- a/pkg/ruler/remotequerier_test.go +++ b/pkg/ruler/remotequerier_test.go @@ -42,7 +42,7 @@ func TestRemoteQuerier_Read(t *testing.T) { setup := func() (mockHTTPGRPCClient, *httpgrpc.HTTPRequest) { var inReq httpgrpc.HTTPRequest - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(_ context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { inReq = *req b, err := proto.Marshal(&prompb.ReadResponse{ @@ -97,7 +97,7 @@ func TestRemoteQuerier_Read(t *testing.T) { } func TestRemoteQuerier_ReadReqTimeout(t *testing.T) { - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(ctx context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { <-ctx.Done() return nil, ctx.Err() } @@ -117,7 +117,7 @@ func TestRemoteQuerier_Query(t *testing.T) { setup := func() (mockHTTPGRPCClient, *httpgrpc.HTTPRequest) { var inReq httpgrpc.HTTPRequest - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(_ context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { inReq = *req return &httpgrpc.HTTPResponse{ @@ -266,7 +266,7 @@ func TestRemoteQuerier_QueryRetryOnFailure(t *testing.T) { var count atomic.Int64 ctx, cancel := context.WithCancel(context.Background()) - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { count.Add(1) if testCase.err != nil { if grpcutil.IsCanceled(testCase.err) { @@ -396,7 +396,7 @@ func TestRemoteQuerier_QueryJSONDecoding(t *testing.T) { for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { return &httpgrpc.HTTPResponse{ Code: http.StatusOK, Headers: []*httpgrpc.Header{ @@ -664,7 +664,7 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) 
(*httpgrpc.HTTPResponse, error) { + mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { b, err := scenario.body.Marshal() if err != nil { return nil, err @@ -692,7 +692,7 @@ func TestRemoteQuerier_QueryProtobufDecoding(t *testing.T) { } func TestRemoteQuerier_QueryUnknownResponseContentType(t *testing.T) { - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { return &httpgrpc.HTTPResponse{ Code: http.StatusOK, Headers: []*httpgrpc.Header{ @@ -709,7 +709,7 @@ func TestRemoteQuerier_QueryUnknownResponseContentType(t *testing.T) { } func TestRemoteQuerier_QueryReqTimeout(t *testing.T) { - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(ctx context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { <-ctx.Done() return nil, ctx.Err() } @@ -767,7 +767,7 @@ func TestRemoteQuerier_StatusErrorResponses(t *testing.T) { } for testName, testCase := range testCases { t.Run(testName, func(t *testing.T) { - mockClientFn := func(ctx context.Context, req *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { + mockClientFn := func(context.Context, *httpgrpc.HTTPRequest, ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) { return testCase.resp, testCase.err } logger := newLoggerWithCounter() diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 1fb9d210949..cd3384c353b 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -1290,7 +1290,7 @@ func (r *Ruler) notifySyncRules(ctx context.Context, userIDs []string) { // the client-side gRPC instrumentation fails. ctx = user.InjectOrgID(ctx, "") - errs.Add(r.forEachRulerInTheRing(ctx, r.ring, RuleSyncRingOp, func(ctx context.Context, inst *ring.InstanceDesc, rulerClient RulerClient, rulerClientErr error) error { + errs.Add(r.forEachRulerInTheRing(ctx, r.ring, RuleSyncRingOp, func(ctx context.Context, _ *ring.InstanceDesc, rulerClient RulerClient, rulerClientErr error) error { var err error if rulerClientErr != nil { diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index a4291006bda..f848a4ce05f 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -224,10 +224,10 @@ func prepareRuler(t *testing.T, cfg Config, storage rulestore.RuleStore, opts .. func prepareRulerManager(t *testing.T, cfg Config, opts ...prepareOption) *DefaultMultiTenantManager { options := applyPrepareOptions(t, cfg.Ring.Common.InstanceID, opts...) 
- noopQueryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { + noopQueryable := storage.QueryableFunc(func(int64, int64) (storage.Querier, error) { return storage.NoopQuerier(), nil }) - noopQueryFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { + noopQueryFunc := func(context.Context, string, time.Time) (promql.Vector, error) { return nil, nil } @@ -249,7 +249,7 @@ func TestNotifierSendsUserIDHeader(t *testing.T) { // We do expect 1 API call for the user create with the getOrCreateNotifier() wg.Add(1) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ts := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) require.NoError(t, err) assert.Equal(t, "1", userID) @@ -1019,7 +1019,7 @@ func TestRuler_NotifySyncRulesAsync_ShouldTriggerRulesSyncingOnAllRulersWhenEnab rulerCfg.Ring.Common.InstanceAddr = rulerAddr rulerCfg.Ring.Common.KVStore = kv.Config{Mock: kvStore} - limits := validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits := validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) { defaults.RulerTenantShardSize = rulerShardSize }) @@ -1674,7 +1674,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) { createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")), }, }, - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { tenantLimits["user-1"] = validation.MockDefaultLimits() tenantLimits["user-1"].RulerRecordingRulesEvaluationEnabled = true tenantLimits["user-1"].RulerAlertingRulesEvaluationEnabled = false @@ -1704,7 +1704,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) { createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")), }, }, - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { tenantLimits["user-1"] = validation.MockDefaultLimits() tenantLimits["user-1"].RulerRecordingRulesEvaluationEnabled = false tenantLimits["user-1"].RulerAlertingRulesEvaluationEnabled = true @@ -1734,7 +1734,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) { createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")), }, }, - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { tenantLimits["user-1"] = validation.MockDefaultLimits() tenantLimits["user-1"].RulerRecordingRulesEvaluationEnabled = false tenantLimits["user-1"].RulerAlertingRulesEvaluationEnabled = false @@ -1760,7 +1760,7 @@ func TestFilterRuleGroupsByEnabled(t *testing.T) { createRuleGroup("group-3", "user-2", createAlertingRule("alert-6", "6"), createAlertingRule("alert-7", "7")), }, }, - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) 
{ defaults.RulerRecordingRulesEvaluationEnabled = false defaults.RulerAlertingRulesEvaluationEnabled = false }), @@ -1909,17 +1909,17 @@ func BenchmarkFilterRuleGroupsByEnabled(b *testing.B) { limits: validation.MockDefaultOverrides(), }, "recording rules disabled": { - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) { defaults.RulerRecordingRulesEvaluationEnabled = false }), }, "alerting rules disabled": { - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) { defaults.RulerAlertingRulesEvaluationEnabled = false }), }, "all rules disabled": { - limits: validation.MockOverrides(func(defaults *validation.Limits, tenantLimits map[string]*validation.Limits) { + limits: validation.MockOverrides(func(defaults *validation.Limits, _ map[string]*validation.Limits) { defaults.RulerRecordingRulesEvaluationEnabled = false defaults.RulerAlertingRulesEvaluationEnabled = false }), diff --git a/pkg/ruler/rulestore/config_test.go b/pkg/ruler/rulestore/config_test.go index cf46d5f44b6..db6bee19317 100644 --- a/pkg/ruler/rulestore/config_test.go +++ b/pkg/ruler/rulestore/config_test.go @@ -24,7 +24,7 @@ func TestIsDefaults(t *testing.T) { expected: true, }, "should return false if the config contains zero values": { - setup: func(cfg *Config) {}, + setup: func(*Config) {}, expected: false, }, "should return false if the config contains default values and some overrides": { diff --git a/pkg/scheduler/schedulerdiscovery/config_test.go b/pkg/scheduler/schedulerdiscovery/config_test.go index 5681bff9458..a7c3935244c 100644 --- a/pkg/scheduler/schedulerdiscovery/config_test.go +++ b/pkg/scheduler/schedulerdiscovery/config_test.go @@ -16,7 +16,7 @@ func TestConfig_Validate(t *testing.T) { expectedErr string }{ "should pass with default config": { - setup: func(cfg *Config) {}, + setup: func(*Config) {}, }, "should fail if service discovery mode is invalid": { setup: func(cfg *Config) { diff --git a/pkg/storage/ingest/partition_offset_reader_test.go b/pkg/storage/ingest/partition_offset_reader_test.go index d840cdd0f33..5518532c834 100644 --- a/pkg/storage/ingest/partition_offset_reader_test.go +++ b/pkg/storage/ingest/partition_offset_reader_test.go @@ -141,7 +141,7 @@ func TestPartitionOffsetReader_FetchLastProducedOffset(t *testing.T) { expectedOffset := int64(1) // Slow down the 1st ListOffsets request. - cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) { if firstRequest.CompareAndSwap(true, false) { close(firstRequestReceived) time.Sleep(2 * firstRequestTimeout) @@ -187,7 +187,7 @@ func TestPartitionOffsetReader_FetchLastProducedOffset(t *testing.T) { // Make the ListOffsets request failing. 
actualTries := atomic.NewInt64(0) - cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() actualTries.Inc() return nil, errors.New("mocked error"), true @@ -305,7 +305,7 @@ func TestPartitionOffsetReader_FetchPartitionStartOffset(t *testing.T) { expectedStartOffset := int64(1) // Slow down the 1st ListOffsets request. - cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) { if firstRequest.CompareAndSwap(true, false) { close(firstRequestReceived) time.Sleep(2 * firstRequestTimeout) @@ -351,7 +351,7 @@ func TestPartitionOffsetReader_FetchPartitionStartOffset(t *testing.T) { // Make the ListOffsets request failing. actualTries := atomic.NewInt64(0) - cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() actualTries.Inc() return nil, errors.New("mocked error"), true diff --git a/pkg/storage/ingest/pusher_test.go b/pkg/storage/ingest/pusher_test.go index 159a115e5ad..878b795ba78 100644 --- a/pkg/storage/ingest/pusher_test.go +++ b/pkg/storage/ingest/pusher_test.go @@ -202,7 +202,7 @@ func TestPusherConsumer_consume_ShouldLogErrorsHonoringOptionalLogging(t *testin // Utility function used to setup the test. setupTest := func(pusherErr error) (*pusherConsumer, *concurrency.SyncBuffer, *prometheus.Registry) { - pusher := pusherFunc(func(ctx context.Context, request *mimirpb.WriteRequest) error { + pusher := pusherFunc(func(context.Context, *mimirpb.WriteRequest) error { return pusherErr }) diff --git a/pkg/storage/ingest/reader_test.go b/pkg/storage/ingest/reader_test.go index 6948e8a3838..59206beb83e 100644 --- a/pkg/storage/ingest/reader_test.go +++ b/pkg/storage/ingest/reader_test.go @@ -174,7 +174,7 @@ func TestPartitionReader_WaitReadConsistency(t *testing.T) { // We define a custom consume function which introduces a delay once the 2nd record // has been consumed but before the function returns. From the PartitionReader perspective, // the 2nd record consumption will be delayed. - consumer := consumerFunc(func(ctx context.Context, records []record) error { + consumer := consumerFunc(func(_ context.Context, records []record) error { for _, record := range records { // Introduce a delay before returning from the consume function once // the 2nd record has been consumed. @@ -306,7 +306,7 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { var ( _, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName) - consumer = consumerFunc(func(ctx context.Context, records []record) error { return nil }) + consumer = consumerFunc(func(context.Context, []record) error { return nil }) reg = prometheus.NewPedanticRegistry() ) @@ -328,12 +328,12 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { var ( cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName) - consumer = consumerFunc(func(ctx context.Context, records []record) error { return nil }) + consumer = consumerFunc(func(context.Context, []record) error { return nil }) reg = prometheus.NewPedanticRegistry() ) // Mock Kafka to fail the Fetch request. 
- cluster.ControlKey(int16(kmsg.Fetch), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() return nil, errors.New("mocked error"), true @@ -368,12 +368,12 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { consumedRecordsCount = atomic.NewInt64(0) ) - consumer := consumerFunc(func(ctx context.Context, records []record) error { + consumer := consumerFunc(func(_ context.Context, records []record) error { consumedRecordsCount.Add(int64(len(records))) return nil }) - cluster.ControlKey(int16(kmsg.Fetch), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() fetchRequestsCount.Inc() @@ -438,12 +438,12 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { consumedRecordsCount = atomic.NewInt64(0) ) - consumer := consumerFunc(func(ctx context.Context, records []record) error { + consumer := consumerFunc(func(_ context.Context, records []record) error { consumedRecordsCount.Add(int64(len(records))) return nil }) - cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() listOffsetsRequestsCount.Inc() @@ -510,7 +510,7 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { consumedRecords []string ) - consumer := consumerFunc(func(ctx context.Context, records []record) error { + consumer := consumerFunc(func(_ context.Context, records []record) error { consumedRecordsMx.Lock() defer consumedRecordsMx.Unlock() @@ -520,7 +520,7 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { return nil }) - cluster.ControlKey(int16(kmsg.Fetch), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() fetchRequestsCount.Inc() @@ -591,7 +591,7 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { consumedRecords []string ) - consumer := consumerFunc(func(ctx context.Context, records []record) error { + consumer := consumerFunc(func(_ context.Context, records []record) error { consumedRecordsMx.Lock() defer consumedRecordsMx.Unlock() @@ -601,7 +601,7 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { return nil }) - cluster.ControlKey(int16(kmsg.Fetch), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() fetchRequestsCount.Inc() @@ -683,7 +683,7 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { consumedRecords []string ) - consumer := consumerFunc(func(ctx context.Context, records []record) error { + consumer := consumerFunc(func(_ context.Context, records []record) error { consumedRecordsMx.Lock() defer consumedRecordsMx.Unlock() @@ -693,7 +693,7 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { return nil }) - cluster.ControlKey(int16(kmsg.Fetch), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() fetchRequestsCount.Inc() @@ -769,12 +769,12 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { var ( cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName) - consumer = 
consumerFunc(func(ctx context.Context, records []record) error { return nil }) + consumer = consumerFunc(func(context.Context, []record) error { return nil }) listOffsetsRequestsCount = atomic.NewInt64(0) ) // Mock Kafka to always fail the ListOffsets request. - cluster.ControlKey(int16(kmsg.ListOffsets), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.ListOffsets), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() listOffsetsRequestsCount.Inc() @@ -806,12 +806,12 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { var ( cluster, clusterAddr = testkafka.CreateCluster(t, partitionID+1, topicName) - consumer = consumerFunc(func(ctx context.Context, records []record) error { return nil }) + consumer = consumerFunc(func(context.Context, []record) error { return nil }) fetchRequestsCount = atomic.NewInt64(0) ) // Mock Kafka to always fail the Fetch request. - cluster.ControlKey(int16(kmsg.Fetch), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() fetchRequestsCount.Inc() @@ -856,12 +856,12 @@ func TestPartitionReader_ConsumeAtStartup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - consumer := consumerFunc(func(ctx context.Context, records []record) error { + consumer := consumerFunc(func(context.Context, []record) error { return nil }) cluster, clusterAddr := testkafka.CreateCluster(t, partitionID+1, topicName) - cluster.ControlKey(int16(kmsg.Fetch), func(req kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Fetch), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() // Throttle the Fetch request. @@ -938,7 +938,7 @@ func TestPartitionReader_fetchLastCommittedOffset(t *testing.T) { var ( cluster, clusterAddr = testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName) - consumer = consumerFunc(func(ctx context.Context, records []record) error { return nil }) + consumer = consumerFunc(func(context.Context, []record) error { return nil }) reader = createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second)) ) @@ -968,7 +968,7 @@ func TestPartitionReader_fetchLastCommittedOffset(t *testing.T) { var ( cluster, clusterAddr = testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName) - consumer = consumerFunc(func(ctx context.Context, records []record) error { return nil }) + consumer = consumerFunc(func(context.Context, []record) error { return nil }) reader = createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second)) ) @@ -1008,7 +1008,7 @@ func TestPartitionReader_fetchLastCommittedOffset(t *testing.T) { var ( cluster, clusterAddr = testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName) - consumer = consumerFunc(func(ctx context.Context, records []record) error { return nil }) + consumer = consumerFunc(func(context.Context, []record) error { return nil }) reader = createReader(t, clusterAddr, topicName, partitionID, consumer, withMaxConsumerLagAtStartup(time.Second)) ) @@ -1189,7 +1189,7 @@ func TestPartitionCommitter_commit(t *testing.T) { cluster, clusterAddr := testkafka.CreateClusterWithoutCustomConsumerGroupsSupport(t, partitionID+1, topicName) // Mock the cluster to fail any offset commit request. 
- cluster.ControlKey(kmsg.OffsetCommit.Int16(), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(kmsg.OffsetCommit.Int16(), func(kmsg.Request) (kmsg.Response, error, bool) { cluster.KeepControl() return nil, errors.New("mocked error"), true }) diff --git a/pkg/storage/ingest/writer_test.go b/pkg/storage/ingest/writer_test.go index 5332b20a19b..bee7e968782 100644 --- a/pkg/storage/ingest/writer_test.go +++ b/pkg/storage/ingest/writer_test.go @@ -55,7 +55,7 @@ func TestWriter_WriteSync(t *testing.T) { produceRequestProcessed := atomic.NewBool(false) - cluster.ControlKey(int16(kmsg.Produce), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Produce), func(kmsg.Request) (kmsg.Response, error, bool) { // Add a delay, so that if WriteSync() will not wait then the test will fail. time.Sleep(time.Second) produceRequestProcessed.Store(true) @@ -238,7 +238,7 @@ func TestWriter_WriteSync(t *testing.T) { kafkaCfg := createTestKafkaConfig(clusterAddr, topicName) writer, _ := createTestWriter(t, kafkaCfg) - cluster.ControlKey(int16(kmsg.Produce), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Produce), func(kmsg.Request) (kmsg.Response, error, bool) { // Keep failing every request. cluster.KeepControl() return nil, errors.New("mock error"), true @@ -267,7 +267,7 @@ func TestWriter_WriteSync(t *testing.T) { ) wg.Add(1) - cluster.ControlKey(int16(kmsg.Produce), func(request kmsg.Request) (kmsg.Response, error, bool) { + cluster.ControlKey(int16(kmsg.Produce), func(kmsg.Request) (kmsg.Response, error, bool) { // Ensure the test waits for this too, since the client request will fail earlier // (if we don't wait, the test will end before this function and then goleak will // report a goroutine leak). 
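For reference, every hunk in this area drops parameter names that the callback body never reads, which is the pattern an unused-parameter check (assumed here to be revive run through golangci-lint) reports. A minimal sketch of the rule, with illustrative names only (not Mimir code):

package main

import "fmt"

// controlFn mirrors the shape of the test callbacks above: it receives a
// request value that many tests never use.
type controlFn func(req string) (resp string, handled bool)

func invoke(fn controlFn) {
    resp, handled := fn("list-offsets")
    fmt.Println(resp, handled)
}

func main() {
    // Named but unread parameter: unused-parameter linters report "req".
    // invoke(func(req string) (string, bool) { return "mocked error", true })

    // Type-only parameter list: the value is intentionally ignored, and the
    // linter stays quiet. Function types match regardless of parameter names.
    invoke(func(string) (string, bool) { return "mocked error", true })
}

Either the blank identifier or a type-only parameter list satisfies the check; this patch prefers type-only lists whenever no argument is used at all.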
diff --git a/pkg/storage/tsdb/block/block_test.go b/pkg/storage/tsdb/block/block_test.go index 5527438a678..3078f2a7895 100644 --- a/pkg/storage/tsdb/block/block_test.go +++ b/pkg/storage/tsdb/block/block_test.go @@ -330,7 +330,7 @@ func TestMarkForDeletion(t *testing.T) { }{ { name: "block marked for deletion", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, + preUpload: func(testing.TB, ulid.ULID, objstore.Bucket) {}, blocksMarked: 1, }, { @@ -379,7 +379,7 @@ func TestMarkForNoCompact(t *testing.T) { }{ { name: "block marked", - preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, + preUpload: func(testing.TB, ulid.ULID, objstore.Bucket) {}, blocksMarked: 1, }, { @@ -440,7 +440,7 @@ func TestUnMarkForNoCompact(t *testing.T) { }, }, "unmark non-existing block should fail": { - setupTest: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {}, + setupTest: func(testing.TB, ulid.ULID, objstore.Bucket) {}, expectedError: func(id ulid.ULID) error { return errors.Errorf("deletion of no-compaction marker for block %s has failed: inmem: object not found", id.String()) }, diff --git a/pkg/storage/tsdb/block/index_test.go b/pkg/storage/tsdb/block/index_test.go index cf5cdddfd64..ca1d8efe9bd 100644 --- a/pkg/storage/tsdb/block/index_test.go +++ b/pkg/storage/tsdb/block/index_test.go @@ -63,7 +63,7 @@ func TestRewrite(t *testing.T) { totalChunks := 0 ignoredChunks := 0 - require.NoError(t, rewrite(ctx, log.NewNopLogger(), ir, cr, iw, cw, m, []ignoreFnType{func(mint, maxt int64, prev *chunks.Meta, curr *chunks.Meta) (bool, error) { + require.NoError(t, rewrite(ctx, log.NewNopLogger(), ir, cr, iw, cw, m, []ignoreFnType{func(_, _ int64, _ *chunks.Meta, curr *chunks.Meta) (bool, error) { totalChunks++ if curr.OverlapsClosedInterval(excludeTime, excludeTime) { // Ignores all chunks that overlap with the excludeTime. excludeTime was randomly selected inside the block. 
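The rewrite() hunk just above shows the mixed form: Go requires parameter names in a signature to be either all present or all absent, so a callback that still reads some arguments keeps those names and blanks the rest with `_`. A small sketch with hypothetical names (not Mimir code):

package main

import "fmt"

// reduce mimics an ignoreFnType-style callback that receives several
// arguments but only needs the last one.
func reduce(vals []int, keep func(idx, total int, v int) bool) []int {
    var out []int
    for i, v := range vals {
        if keep(i, len(vals), v) {
            out = append(out, v)
        }
    }
    return out
}

func main() {
    // Only "v" is read, so the unused positions become "_": names cannot be
    // dropped for just some parameters in a Go signature.
    fmt.Println(reduce([]int{1, 2, 3, 4}, func(_, _ int, v int) bool { return v%2 == 0 }))
}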
diff --git a/pkg/storage/tsdb/config_test.go b/pkg/storage/tsdb/config_test.go index 19d8ac3075c..591e22f6c73 100644 --- a/pkg/storage/tsdb/config_test.go +++ b/pkg/storage/tsdb/config_test.go @@ -25,98 +25,98 @@ func TestConfig_Validate(t *testing.T) { expectedErr error }{ "should pass on S3 backend": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.Bucket.Backend = "s3" }, expectedErr: nil, }, "should pass on GCS backend": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.Bucket.Backend = "gcs" }, expectedErr: nil, }, "should fail on unknown storage backend": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.Bucket.Backend = "unknown" }, expectedErr: bucket.ErrUnsupportedStorageBackend, }, "should fail on invalid ship concurrency": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.ShipConcurrency = 0 }, expectedErr: errInvalidShipConcurrency, }, "should pass on invalid ship concurrency but shipping is disabled": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.ShipConcurrency = 0 cfg.TSDB.ShipInterval = 0 }, expectedErr: nil, }, "should fail on invalid compaction interval": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.HeadCompactionInterval = 0 }, expectedErr: errInvalidCompactionInterval, }, "should fail on too high compaction interval": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.HeadCompactionInterval = 20 * time.Minute }, expectedErr: errInvalidCompactionInterval, }, "should fail on invalid compaction concurrency": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.HeadCompactionConcurrency = 0 }, expectedErr: errInvalidCompactionConcurrency, }, "should pass on valid compaction concurrency": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.HeadCompactionConcurrency = 10 }, expectedErr: nil, }, "should fail on negative stripe size": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.StripeSize = -2 }, expectedErr: errInvalidStripeSize, }, "should fail on stripe size 0": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.StripeSize = 0 }, expectedErr: errInvalidStripeSize, }, "should fail on stripe size 1": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.StripeSize = 1 }, expectedErr: errInvalidStripeSize, }, "should pass on valid stripe size": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + 
setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.StripeSize = 1 << 14 }, expectedErr: nil, }, "should fail on empty block ranges": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.BlockRanges = nil }, expectedErr: errEmptyBlockranges, }, "should fail on invalid TSDB WAL segment size": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.WALSegmentSizeBytes = 0 }, expectedErr: errInvalidWALSegmentSizeBytes, }, "should fail on invalid store-gateway streaming batch size": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.BucketStore.StreamingBatchSize = 0 }, expectedErr: errInvalidStreamingBatchSize, @@ -129,7 +129,7 @@ func TestConfig_Validate(t *testing.T) { expectedErr: errEarlyCompactionRequiresActiveSeries, }, "should fail on invalid forced compaction min series reduction percentage": { - setup: func(cfg *BlocksStorageConfig, activeSeriesCfg *activeseries.Config) { + setup: func(cfg *BlocksStorageConfig, _ *activeseries.Config) { cfg.TSDB.EarlyHeadCompactionMinEstimatedSeriesReductionPercentage = 101 }, expectedErr: errInvalidEarlyHeadCompactionMinSeriesReduction, diff --git a/pkg/storage/tsdb/users_scanner_test.go b/pkg/storage/tsdb/users_scanner_test.go index cc00a5ec87d..0af741d7778 100644 --- a/pkg/storage/tsdb/users_scanner_test.go +++ b/pkg/storage/tsdb/users_scanner_test.go @@ -43,7 +43,7 @@ func TestUsersScanner_ScanUsers_ShouldReturnUsersForWhichOwnerCheckOrTenantDelet bucketClient.MockExists(path.Join("user-1", TenantDeletionMarkPath), false, nil) bucketClient.MockExists(path.Join("user-2", TenantDeletionMarkPath), false, errors.New("fail")) - isOwned := func(userID string) (bool, error) { + isOwned := func(string) (bool, error) { return false, errors.New("failed to check if user is owned") } diff --git a/pkg/storegateway/bucket_chunk_reader_test.go b/pkg/storegateway/bucket_chunk_reader_test.go index 8951228b22f..03bce0fc253 100644 --- a/pkg/storegateway/bucket_chunk_reader_test.go +++ b/pkg/storegateway/bucket_chunk_reader_test.go @@ -54,12 +54,12 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) { // Each func takes the estimated length and returns a new length. 
chunkLengthSkewingFuncs := map[string]func(uint32) uint32{ - "tsdb.EstimatedMaxChunkSize": func(chunkLength uint32) uint32 { return tsdb.EstimatedMaxChunkSize }, - "10xtsdb.EstimatedMaxChunkSize": func(chunkLength uint32) uint32 { return 10 * tsdb.EstimatedMaxChunkSize }, + "tsdb.EstimatedMaxChunkSize": func(uint32) uint32 { return tsdb.EstimatedMaxChunkSize }, + "10xtsdb.EstimatedMaxChunkSize": func(uint32) uint32 { return 10 * tsdb.EstimatedMaxChunkSize }, "size-1": func(chunkLength uint32) uint32 { return chunkLength - 1 }, "size/2": func(chunkLength uint32) uint32 { return chunkLength / 2 }, - "1": func(chunkLength uint32) uint32 { return 1 }, - "0": func(chunkLength uint32) uint32 { return 0 }, + "1": func(uint32) uint32 { return 1 }, + "0": func(uint32) uint32 { return 0 }, } for name, skewChunkLen := range chunkLengthSkewingFuncs { diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index e66b23d0566..e98a41ad843 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -323,7 +323,7 @@ func TestBucketStores_syncUsersBlocks(t *testing.T) { // Sync user stores and count the number of times the callback is called. var storesCount atomic.Int32 - err = stores.syncUsersBlocks(context.Background(), func(ctx context.Context, bs *BucketStore) error { + err = stores.syncUsersBlocks(context.Background(), func(context.Context, *BucketStore) error { storesCount.Inc() return nil }) diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index c68bb012d6a..1d89be74739 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -340,7 +340,7 @@ func TestBlockLabelNames(t *testing.T) { onLabelNamesCalled: func() error { return fmt.Errorf("not expected the LabelNames() calls with matchers") }, - onLabelValuesOffsetsCalled: func(name string) error { + onLabelValuesOffsetsCalled: func(string) error { expectedCalls-- if expectedCalls < 0 { return fmt.Errorf("didn't expect another index.Reader.LabelValues() call") @@ -392,7 +392,7 @@ func TestBlockLabelValues(t *testing.T) { b := newTestBucketBlock() b.indexHeaderReader = &interceptedIndexReader{ Reader: b.indexHeaderReader, - onLabelValuesOffsetsCalled: func(name string) error { return context.DeadlineExceeded }, + onLabelValuesOffsetsCalled: func(string) error { return context.DeadlineExceeded }, } b.indexCache = cacheNotExpectingToStoreLabelValues{t: t} @@ -405,7 +405,7 @@ func TestBlockLabelValues(t *testing.T) { b := newTestBucketBlock() b.indexHeaderReader = &interceptedIndexReader{ Reader: b.indexHeaderReader, - onLabelValuesOffsetsCalled: func(name string) error { + onLabelValuesOffsetsCalled: func(string) error { expectedCalls-- if expectedCalls < 0 { return fmt.Errorf("didn't expect another index.Reader.LabelValues() call") @@ -1904,7 +1904,7 @@ func TestBucketStore_Series_RequestAndResponseHints(t *testing.T) { tb, store, seriesSet1, seriesSet2, block1, block2, cleanup := setupStoreForHintsTest(t, 5000) tb.Cleanup(cleanup) for _, streamingBatchSize := range []int{0, 1, 5} { - t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(*testing.T) { runTestServerSeries(tb, store, streamingBatchSize, newTestCases(seriesSet1, seriesSet2, block1, block2)...) 
}) } diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index c7d39710630..27a0e5fe055 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -63,17 +63,17 @@ func TestConfig_Validate(t *testing.T) { expected error }{ "should pass by default": { - setup: func(cfg *Config, limits *validation.Limits) {}, + setup: func(*Config, *validation.Limits) {}, expected: nil, }, "should fail if shard size is negative": { - setup: func(cfg *Config, limits *validation.Limits) { + setup: func(_ *Config, limits *validation.Limits) { limits.StoreGatewayTenantShardSize = -3 }, expected: errInvalidTenantShardSize, }, "should pass if shard size has been set": { - setup: func(cfg *Config, limits *validation.Limits) { + setup: func(_ *Config, limits *validation.Limits) { limits.StoreGatewayTenantShardSize = 3 }, expected: nil, @@ -193,7 +193,7 @@ func TestStoreGateway_InitialSyncFailure(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - bucketClient := &bucket.ErrorInjectedBucketClient{Injector: func(operation bucket.Operation, s string) error { return assert.AnError }} + bucketClient := &bucket.ErrorInjectedBucketClient{Injector: func(bucket.Operation, string) error { return assert.AnError }} g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), log.NewLogfmtLogger(os.Stdout), nil, nil) require.NoError(t, err) diff --git a/pkg/storegateway/indexcache/remote_test.go b/pkg/storegateway/indexcache/remote_test.go index c4a89e3b201..a9d695c14d1 100644 --- a/pkg/storegateway/indexcache/remote_test.go +++ b/pkg/storegateway/indexcache/remote_test.go @@ -215,7 +215,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) { mockedErr error fetchUserID string fetchBlockID ulid.ULID - fetchIds []storage.SeriesRef + fetchIDs []storage.SeriesRef expectedHits map[storage.SeriesRef][]byte expectedMisses []storage.SeriesRef }{ @@ -223,7 +223,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) { setup: []mockedSeriesForRef{}, fetchUserID: user1, fetchBlockID: block1, - fetchIds: []storage.SeriesRef{1, 2}, + fetchIDs: []storage.SeriesRef{1, 2}, expectedHits: nil, expectedMisses: []storage.SeriesRef{1, 2}, }, @@ -237,7 +237,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) { }, fetchUserID: user1, fetchBlockID: block1, - fetchIds: []storage.SeriesRef{1, 2}, + fetchIDs: []storage.SeriesRef{1, 2}, expectedHits: map[storage.SeriesRef][]byte{ 1: value1, 2: value2, @@ -251,7 +251,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) { }, fetchUserID: user1, fetchBlockID: block1, - fetchIds: []storage.SeriesRef{1, 2}, + fetchIDs: []storage.SeriesRef{1, 2}, expectedHits: map[storage.SeriesRef][]byte{1: value1}, expectedMisses: []storage.SeriesRef{2}, }, @@ -264,7 +264,7 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) { mockedErr: errors.New("mocked error"), fetchUserID: user1, fetchBlockID: block1, - fetchIds: []storage.SeriesRef{1, 2}, + fetchIDs: []storage.SeriesRef{1, 2}, expectedHits: nil, expectedMisses: []storage.SeriesRef{1, 2}, }, @@ -283,12 +283,12 @@ func TestRemoteIndexCache_FetchMultiSeriesForRef(t *testing.T) { } // Fetch series from cached and assert on it. 
- hits, misses := c.FetchMultiSeriesForRefs(ctx, testData.fetchUserID, testData.fetchBlockID, testData.fetchIds) + hits, misses := c.FetchMultiSeriesForRefs(ctx, testData.fetchUserID, testData.fetchBlockID, testData.fetchIDs) assert.Equal(t, testData.expectedHits, hits) assert.Equal(t, testData.expectedMisses, misses) // Assert on metrics. - assert.Equal(t, float64(len(testData.fetchIds)), prom_testutil.ToFloat64(c.requests.WithLabelValues(cacheTypeSeriesForRef))) + assert.Equal(t, float64(len(testData.fetchIDs)), prom_testutil.ToFloat64(c.requests.WithLabelValues(cacheTypeSeriesForRef))) assert.Equal(t, float64(len(testData.expectedHits)), prom_testutil.ToFloat64(c.hits.WithLabelValues(cacheTypeSeriesForRef))) for _, typ := range remove(allCacheTypes, cacheTypeSeriesForRef) { assert.Equal(t, 0.0, prom_testutil.ToFloat64(c.requests.WithLabelValues(typ))) diff --git a/pkg/storegateway/indexheader/lazy_binary_reader_test.go b/pkg/storegateway/indexheader/lazy_binary_reader_test.go index 93cd0b3b7ae..785f633ef69 100644 --- a/pkg/storegateway/indexheader/lazy_binary_reader_test.go +++ b/pkg/storegateway/indexheader/lazy_binary_reader_test.go @@ -34,7 +34,7 @@ func TestNewLazyBinaryReader_ShouldFailIfUnableToBuildIndexHeader(t *testing.T) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, bkt.Close()) }) - testLazyBinaryReader(t, bkt, tmpDir, ulid.ULID{}, func(t *testing.T, r *LazyBinaryReader, err error) { + testLazyBinaryReader(t, bkt, tmpDir, ulid.ULID{}, func(t *testing.T, _ *LazyBinaryReader, err error) { require.Error(t, err) }) } diff --git a/pkg/storegateway/indexheader/reader_benchmarks_test.go b/pkg/storegateway/indexheader/reader_benchmarks_test.go index 0f03cc877a8..2fbfe210f03 100644 --- a/pkg/storegateway/indexheader/reader_benchmarks_test.go +++ b/pkg/storegateway/indexheader/reader_benchmarks_test.go @@ -177,7 +177,7 @@ func BenchmarkLabelValuesOffsetsIndexV1(b *testing.B) { for i := 0; i < b.N; i++ { name := names[i%len(names)] - values, err := br.LabelValuesOffsets(name, "", func(s string) bool { + values, err := br.LabelValuesOffsets(name, "", func(string) bool { return true }) @@ -221,7 +221,7 @@ func BenchmarkLabelValuesOffsetsIndexV2(b *testing.B) { for i := 0; i < b.N; i++ { name := names[i%len(names)] - values, err := br.LabelValuesOffsets(name, "", func(s string) bool { + values, err := br.LabelValuesOffsets(name, "", func(string) bool { return true }) diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 5ce1f159736..ee5070e6cfd 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1489,7 +1489,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) - newTestBlock := prepareTestBlock(test.NewTB(t), func(tb testing.TB, appenderFactory func() storage.Appender) { + newTestBlock := prepareTestBlock(test.NewTB(t), func(_ testing.TB, appenderFactory func() storage.Appender) { const ( samplesFor1Chunk = 100 // not a complete chunk samplesFor2Chunks = samplesFor1Chunk * 2 // not a complete chunk @@ -2008,10 +2008,10 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { for ts := int64(0); ts < 10; ts++ { for _, s := range existingSeries { _, err := appender.Append(0, s, ts, 0) - assert.NoError(t, err) + assert.NoError(tb, err) } } - assert.NoError(t, appender.Commit()) + assert.NoError(tb, appender.Commit()) }) mockedSeriesHashes := map[string]uint64{ diff --git 
a/pkg/storegateway/sharding_strategy_test.go b/pkg/storegateway/sharding_strategy_test.go index 063855f7cdd..28207b8d118 100644 --- a/pkg/storegateway/sharding_strategy_test.go +++ b/pkg/storegateway/sharding_strategy_test.go @@ -363,7 +363,7 @@ func TestShuffleShardingStrategy(t *testing.T) { t.Cleanup(func() { assert.NoError(t, closer.Close()) }) // Initialize the ring state. - require.NoError(t, store.CAS(ctx, "test", func(in interface{}) (interface{}, bool, error) { + require.NoError(t, store.CAS(ctx, "test", func(interface{}) (interface{}, bool, error) { d := ring.NewDesc() testData.setupRing(d) return d, true, nil diff --git a/pkg/usagestats/seed_test.go b/pkg/usagestats/seed_test.go index ad6a2902cce..80c5647092a 100644 --- a/pkg/usagestats/seed_test.go +++ b/pkg/usagestats/seed_test.go @@ -127,14 +127,14 @@ func TestWaitSeedFileStability(t *testing.T) { } tests := map[string]func(t *testing.T, bucketClient *bucket.ClientMock) testExpectations{ - "should immediately return if seed file does not exist": func(t *testing.T, bucketClient *bucket.ClientMock) testExpectations { + "should immediately return if seed file does not exist": func(_ *testing.T, bucketClient *bucket.ClientMock) testExpectations { bucketClient.MockGet(ClusterSeedFileName, "", bucket.ErrObjectDoesNotExist) return testExpectations{ expectedErr: bucket.ErrObjectDoesNotExist, } }, - "should immediately return if seed file is corrupted": func(t *testing.T, bucketClient *bucket.ClientMock) testExpectations { + "should immediately return if seed file is corrupted": func(_ *testing.T, bucketClient *bucket.ClientMock) testExpectations { bucketClient.MockGet(ClusterSeedFileName, "xxx", nil) return testExpectations{ @@ -224,7 +224,7 @@ func TestInitSeedFile(t *testing.T) { expectedMinDuration: minStability, } }, - "should create the seed file if doesn't exist and then wait for 'min stability'": func(t *testing.T, bucketClient objstore.Bucket) testExpectations { + "should create the seed file if doesn't exist and then wait for 'min stability'": func(*testing.T, objstore.Bucket) testExpectations { return testExpectations{ expectedMinDuration: minStability, } diff --git a/pkg/util/flags_test.go b/pkg/util/flags_test.go index d214b316195..bbcf2c1b592 100644 --- a/pkg/util/flags_test.go +++ b/pkg/util/flags_test.go @@ -19,7 +19,7 @@ func TestTrackRegisteredFlags(t *testing.T) { var previous, registered, nonPrefixed string fs.StringVar(&previous, "previous.flag", "previous", "") - rf := TrackRegisteredFlags(prefix, fs, func(prefix string, f *flag.FlagSet) { + rf := TrackRegisteredFlags(prefix, fs, func(prefix string, _ *flag.FlagSet) { fs.StringVar(®istered, prefix+flagName, "registered", "") fs.StringVar(&nonPrefixed, flagName, "non-prefixed", "") }) diff --git a/pkg/util/gziphandler/gzip_test.go b/pkg/util/gziphandler/gzip_test.go index cdc374e1355..fbacaf42d61 100644 --- a/pkg/util/gziphandler/gzip_test.go +++ b/pkg/util/gziphandler/gzip_test.go @@ -157,7 +157,7 @@ func TestGzipHandlerAlreadyCompressed(t *testing.T) { } func TestNewGzipLevelHandler(t *testing.T) { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) _, _ = io.WriteString(w, testBody) }) @@ -216,7 +216,7 @@ func TestGzipHandlerNoBody(t *testing.T) { } for num, test := range tests { - handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := GzipHandler(http.HandlerFunc(func(w 
http.ResponseWriter, _ *http.Request) { w.WriteHeader(test.statusCode) if test.body != nil { _, _ = w.Write(test.body) @@ -284,7 +284,7 @@ func TestGzipHandlerContentLength(t *testing.T) { go func() { _ = srv.Serve(ln) }() for num, test := range tests { - srv.Handler = GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + srv.Handler = GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { if test.bodyLen > 0 { w.Header().Set("Content-Length", strconv.Itoa(test.bodyLen)) } @@ -336,13 +336,13 @@ func TestGzipHandlerMinSize(t *testing.T) { wrapper, _ := NewGzipLevelAndMinSize(gzip.DefaultCompression, 128) handler := wrapper(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { + func(w http.ResponseWriter, _ *http.Request) { // Write responses one byte at a time to ensure that the flush // mechanism, if used, is working properly. for i := 0; i < responseLength; i++ { n, err := w.Write(b) assert.Equal(t, 1, n) - assert.Nil(t, err) + assert.NoError(t, err) } }, )) @@ -372,7 +372,7 @@ func TestGzipDoubleClose(t *testing.T) { // aren't added back by double close addLevelPool(gzip.DefaultCompression) - handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { // call close here and it'll get called again interally by // NewGzipLevelHandler's handler defer _, _ = w.Write([]byte("test")) @@ -406,7 +406,7 @@ func (w *panicOnSecondWriteHeaderWriter) WriteHeader(s int) { } func TestGzipHandlerDoubleWriteHeader(t *testing.T) { - handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Length", "15000") // Specifically write the header here w.WriteHeader(304) @@ -459,7 +459,7 @@ func TestStatusCodes(t *testing.T) { func TestFlushBeforeWrite(t *testing.T) { b := []byte(testBody) - handler := GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + handler := GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { rw.WriteHeader(http.StatusNotFound) rw.(http.Flusher).Flush() _, _ = rw.Write(b) @@ -478,14 +478,14 @@ func TestFlushBeforeWrite(t *testing.T) { func TestImplementFlusher(t *testing.T) { request := httptest.NewRequest(http.MethodGet, "/", nil) request.Header.Set(acceptEncoding, "gzip") - GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + GzipHandler(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { _, okFlusher := rw.(http.Flusher) assert.True(t, okFlusher, "response writer must implement http.Flusher") })).ServeHTTP(httptest.NewRecorder(), request) } func TestIgnoreSubsequentWriteHeader(t *testing.T) { - handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(500) w.WriteHeader(404) })) @@ -505,7 +505,7 @@ func TestDontWriteWhenNotWrittenTo(t *testing.T) { // ensure the gzip middleware doesn't touch the actual ResponseWriter // either. 
- handler0 := GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler0 := GzipHandler(http.HandlerFunc(func(http.ResponseWriter, *http.Request) { })) handler1 := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -588,7 +588,7 @@ var contentTypeTests = []struct { func TestContentTypes(t *testing.T) { for _, tt := range contentTypeTests { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", tt.contentType) _, _ = io.WriteString(w, testBody) diff --git a/pkg/util/instrumentation/tracer_transport_test.go b/pkg/util/instrumentation/tracer_transport_test.go index 917ca5371eb..4a25a66b5fb 100644 --- a/pkg/util/instrumentation/tracer_transport_test.go +++ b/pkg/util/instrumentation/tracer_transport_test.go @@ -26,7 +26,7 @@ func TestTracerTransportPropagatesTrace(t *testing.T) { }{ { name: "no next transport", - handlerAssert: func(t *testing.T, req *http.Request) {}, + handlerAssert: func(*testing.T, *http.Request) {}, }, { name: "with next transport", @@ -45,7 +45,7 @@ func TestTracerTransportPropagatesTrace(t *testing.T) { defer closer.Close() observedTraceID := make(chan string, 2) - handler := middleware.Tracer{}.Wrap(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler := middleware.Tracer{}.Wrap(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) { sp := opentracing.SpanFromContext(r.Context()) defer sp.Finish() diff --git a/pkg/util/noauth/no_auth.go b/pkg/util/noauth/no_auth.go index 8abcaadd136..dd4145debb0 100644 --- a/pkg/util/noauth/no_auth.go +++ b/pkg/util/noauth/no_auth.go @@ -45,7 +45,7 @@ func SetupAuthMiddleware(config *server.Config, multitenancyEnabled bool, noMult } config.GRPCMiddleware = append(config.GRPCMiddleware, - func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { ctx = user.InjectOrgID(ctx, noMultitenancyTenant) return handler(ctx, req) }, diff --git a/pkg/util/pool/fast_releasing_pool_test.go b/pkg/util/pool/fast_releasing_pool_test.go index 3b488517dfd..2f442599f89 100644 --- a/pkg/util/pool/fast_releasing_pool_test.go +++ b/pkg/util/pool/fast_releasing_pool_test.go @@ -151,7 +151,7 @@ func TestFastReleasingSlabPool(t *testing.T) { require.Greater(t, int(delegatePool.Gets.Load()), 0) }) - t.Run("releasing slabID 0", func(t *testing.T) { + t.Run("releasing slabID 0", func(*testing.T) { delegatePool := &TrackedPool{Parent: &sync.Pool{}} slabPool := NewFastReleasingSlabPool[byte](delegatePool, 10) diff --git a/pkg/util/validation/exporter/exporter_test.go b/pkg/util/validation/exporter/exporter_test.go index c819df6f2d6..cbb79835638 100644 --- a/pkg/util/validation/exporter/exporter_test.go +++ b/pkg/util/validation/exporter/exporter_test.go @@ -284,7 +284,7 @@ func TestOverridesExporter_withRing(t *testing.T) { // Create an empty ring. 
ctx := context.Background() - require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, ringStore.CAS(ctx, ringKey, func(interface{}) (out interface{}, retry bool, err error) { return ring.NewDesc(), true, nil })) diff --git a/pkg/util/validation/exporter/ring_test.go b/pkg/util/validation/exporter/ring_test.go index a1b96b6a752..efd7419dfa7 100644 --- a/pkg/util/validation/exporter/ring_test.go +++ b/pkg/util/validation/exporter/ring_test.go @@ -22,7 +22,7 @@ func TestOverridesExporter_emptyRing(t *testing.T) { // Create an empty ring. ctx := context.Background() - require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, ringStore.CAS(ctx, ringKey, func(interface{}) (out interface{}, retry bool, err error) { return ring.NewDesc(), true, nil })) @@ -66,7 +66,7 @@ func TestOverridesExporterRing_scaleDown(t *testing.T) { // Register instances in the ring (manually, to be able to assign tokens). ctx := context.Background() - require.NoError(t, ringStore.CAS(ctx, ringKey, func(in interface{}) (out interface{}, retry bool, err error) { + require.NoError(t, ringStore.CAS(ctx, ringKey, func(interface{}) (out interface{}, retry bool, err error) { desc := ring.NewDesc() desc.AddIngester(l1.GetInstanceID(), l1.GetInstanceAddr(), "", []uint32{leaderToken + 1}, ring.ACTIVE, time.Now()) desc.AddIngester(l2.GetInstanceID(), l2.GetInstanceAddr(), "", []uint32{leaderToken + 2}, ring.ACTIVE, time.Now()) diff --git a/pkg/util/version/info_handler.go b/pkg/util/version/info_handler.go index 3d6038f57f3..ee9008b906e 100644 --- a/pkg/util/version/info_handler.go +++ b/pkg/util/version/info_handler.go @@ -30,8 +30,7 @@ type BuildInfoFeatures struct { } func BuildInfoHandler(application string, features interface{}) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { response := BuildInfoResponse{ Status: "success", BuildInfo: BuildInfo{ diff --git a/tools/querytee/proxy_endpoint_test.go b/tools/querytee/proxy_endpoint_test.go index 0b549f41b9e..95b5ec0a579 100644 --- a/tools/querytee/proxy_endpoint_test.go +++ b/tools/querytee/proxy_endpoint_test.go @@ -212,7 +212,7 @@ func Test_ProxyEndpoint_Requests(t *testing.T) { wg.Add(2) if tc.handler == nil { - testHandler = func(w http.ResponseWriter, r *http.Request) { + testHandler = func(w http.ResponseWriter, _ *http.Request) { _, _ = w.Write([]byte("ok")) } @@ -288,7 +288,7 @@ func Test_ProxyEndpoint_Comparison(t *testing.T) { for name, scenario := range scenarios { t.Run(name, func(t *testing.T) { - preferredBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + preferredBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", scenario.preferredResponseContentType) w.WriteHeader(scenario.preferredResponseStatusCode) _, err := w.Write([]byte("preferred response")) @@ -299,7 +299,7 @@ func Test_ProxyEndpoint_Comparison(t *testing.T) { preferredBackendURL, err := url.Parse(preferredBackend.URL) require.NoError(t, err) - secondaryBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + secondaryBackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", scenario.secondaryResponseContentType) 
w.WriteHeader(scenario.secondaryResponseStatusCode) _, err := w.Write([]byte("secondary response"))
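Besides the parameter changes, the patch adjusts a few test idioms: assert.NoError replaces assert.Nil for error values (clearer intent and failure output, presumably a testifylint-style finding), assertions inside the prepareTestBlock callback go through the tb it receives rather than the captured outer t, and fetchIds becomes fetchIDs to follow Go's upper-case initialism convention. A hedged sketch of the testify side, with illustrative helpers that are not part of the patch:

package sketch

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/assert"
)

func doWork() error { return nil }

// withTB mimics helpers like prepareTestBlock above: it hands the callback
// its own testing.TB, which the callback should use for assertions.
func withTB(t *testing.T, fn func(tb testing.TB)) { fn(t) }

func TestSketch(t *testing.T) {
    withTB(t, func(tb testing.TB) {
        err := errors.New("boom")

        // assert.Nil(tb, err)       // checks nil-ness only, weaker intent
        assert.Error(tb, err)        // states that an error is expected
        assert.NoError(tb, doWork()) // prints the error value on failure
    })
}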