diff --git a/CHANGELOG.md b/CHANGELOG.md
index 17745fd403..96f697c043 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -32,6 +32,7 @@
* [6415](https://github.com/grafana/loki/pull/6415) **salvacorts**: Evenly spread queriers across kubernetes nodes.
* [6349](https://github.com/grafana/loki/pull/6349) **simonswine**: Update the default HTTP listen port from 80 to 3100. Make sure to configure the port explicitly if you are using port 80.
* [6835](https://github.com/grafana/loki/pull/6835) **DylanGuedes**: Add new per-tenant query timeout configuration and remove engine query timeout.
+* [7212](https://github.com/grafana/loki/pull/7212) **Juneezee**: Replaces deprecated `io/ioutil` with `io` and `os`.
#### Promtail
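
For context on the CHANGELOG entry above, here is a minimal, illustrative sketch (not part of the patch itself) of the Go 1.16+ replacements for the deprecated `io/ioutil` helpers that the hunks below apply across the tree:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.TempFile -> os.CreateTemp
	f, err := os.CreateTemp("", "ioutil-migration")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// ioutil.WriteFile -> os.WriteFile
	if err := os.WriteFile(f.Name(), []byte("hello"), 0o644); err != nil {
		panic(err)
	}

	// ioutil.ReadFile -> os.ReadFile
	b, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// ioutil.ReadAll -> io.ReadAll, ioutil.NopCloser -> io.NopCloser
	data, _ := io.ReadAll(io.NopCloser(strings.NewReader("copied")))

	// ioutil.Discard -> io.Discard
	_, _ = io.Copy(io.Discard, strings.NewReader(string(data)))

	// ioutil.TempDir -> os.MkdirTemp
	// ioutil.ReadDir -> os.ReadDir (returns []fs.DirEntry, not []fs.FileInfo)
	dir, _ := os.MkdirTemp("", "ioutil-migration-dir")
	defer os.RemoveAll(dir)
	entries, _ := os.ReadDir(dir)
	fmt.Println(len(entries))
}
```
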
diff --git a/clients/cmd/docker-driver/config.go b/clients/cmd/docker-driver/config.go
index 5552ecfdfb..c38cdcc33e 100644
--- a/clients/cmd/docker-driver/config.go
+++ b/clients/cmd/docker-driver/config.go
@@ -3,7 +3,6 @@ package main
import (
"bytes"
"fmt"
- "io/ioutil"
"net/url"
"os"
"strconv"
@@ -385,7 +384,7 @@ func parseBoolean(key string, logCtx logger.Info, defaultValue bool) (bool, erro
// loadConfig read YAML-formatted config from filename into cfg.
func loadConfig(filename string, cfg interface{}) error {
- buf, err := ioutil.ReadFile(filename)
+ buf, err := os.ReadFile(filename)
if err != nil {
return errors.Wrap(err, "Error reading config file")
}
diff --git a/clients/cmd/docker-driver/config_test.go b/clients/cmd/docker-driver/config_test.go
index 289274ef3e..b20d40ee23 100644
--- a/clients/cmd/docker-driver/config_test.go
+++ b/clients/cmd/docker-driver/config_test.go
@@ -2,7 +2,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"reflect"
"testing"
@@ -82,7 +81,7 @@ var pipeline = PipelineConfig{
}
func Test_parsePipeline(t *testing.T) {
- f, err := ioutil.TempFile("/tmp", "Test_parsePipeline")
+ f, err := os.CreateTemp("/tmp", "Test_parsePipeline")
if err != nil {
t.Fatal(err)
}
diff --git a/clients/cmd/fluent-bit/config.go b/clients/cmd/fluent-bit/config.go
index 143afb686e..d8448d5acf 100644
--- a/clients/cmd/fluent-bit/config.go
+++ b/clients/cmd/fluent-bit/config.go
@@ -4,7 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
+ "os"
"strconv"
"strings"
"time"
@@ -200,7 +200,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) {
labelMapPath := cfg.Get("LabelMapPath")
if labelMapPath != "" {
- content, err := ioutil.ReadFile(labelMapPath)
+ content, err := os.ReadFile(labelMapPath)
if err != nil {
return nil, fmt.Errorf("failed to open LabelMap file: %s", err)
}
diff --git a/clients/cmd/fluent-bit/config_test.go b/clients/cmd/fluent-bit/config_test.go
index e4bedf1f4d..2dd9c0f37e 100644
--- a/clients/cmd/fluent-bit/config_test.go
+++ b/clients/cmd/fluent-bit/config_test.go
@@ -1,7 +1,6 @@
package main
import (
- "io/ioutil"
"net/url"
"os"
"reflect"
@@ -229,7 +228,7 @@ func mustParseDuration(u string) time.Duration {
}
func createTempLabelMap(t *testing.T) string {
- file, err := ioutil.TempFile("", "labelmap")
+ file, err := os.CreateTemp("", "labelmap")
if err != nil {
t.Fatal(err)
}
diff --git a/clients/pkg/promtail/positions/positions.go b/clients/pkg/promtail/positions/positions.go
index 72818ad91a..8b3ba3e42d 100644
--- a/clients/pkg/promtail/positions/positions.go
+++ b/clients/pkg/promtail/positions/positions.go
@@ -3,7 +3,6 @@ package positions
import (
"flag"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -215,7 +214,7 @@ func (p *positions) cleanup() {
func readPositionsFile(cfg Config, logger log.Logger) (map[string]string, error) {
cleanfn := filepath.Clean(cfg.PositionsFile)
- buf, err := ioutil.ReadFile(cleanfn)
+ buf, err := os.ReadFile(cleanfn)
if err != nil {
if os.IsNotExist(err) {
return map[string]string{}, nil
diff --git a/clients/pkg/promtail/positions/positions_test.go b/clients/pkg/promtail/positions/positions_test.go
index 64b30669b8..9bbff9fa17 100644
--- a/clients/pkg/promtail/positions/positions_test.go
+++ b/clients/pkg/promtail/positions/positions_test.go
@@ -1,7 +1,6 @@
package positions
import (
- "io/ioutil"
"os"
"strings"
"testing"
@@ -16,7 +15,7 @@ import (
func tempFilename(t *testing.T) string {
t.Helper()
- temp, err := ioutil.TempFile("", "positions")
+ temp, err := os.CreateTemp("", "positions")
if err != nil {
t.Fatal("tempFilename:", err)
}
@@ -43,7 +42,7 @@ func TestReadPositionsOK(t *testing.T) {
yaml := []byte(`positions:
/tmp/random.log: "17623"
`)
- err := ioutil.WriteFile(temp, yaml, 0644)
+ err := os.WriteFile(temp, yaml, 0644)
if err != nil {
t.Fatal(err)
}
@@ -63,7 +62,7 @@ func TestReadPositionsEmptyFile(t *testing.T) {
}()
yaml := []byte(``)
- err := ioutil.WriteFile(temp, yaml, 0644)
+ err := os.WriteFile(temp, yaml, 0644)
if err != nil {
t.Fatal(err)
}
@@ -104,7 +103,7 @@ func TestReadPositionsFromBadYaml(t *testing.T) {
badYaml := []byte(`positions:
/tmp/random.log: "176
`)
- err := ioutil.WriteFile(temp, badYaml, 0644)
+ err := os.WriteFile(temp, badYaml, 0644)
if err != nil {
t.Fatal(err)
}
@@ -126,7 +125,7 @@ func TestReadPositionsFromBadYamlIgnoreCorruption(t *testing.T) {
badYaml := []byte(`positions:
/tmp/random.log: "176
`)
- err := ioutil.WriteFile(temp, badYaml, 0644)
+ err := os.WriteFile(temp, badYaml, 0644)
if err != nil {
t.Fatal(err)
}
@@ -148,7 +147,7 @@ func Test_ReadOnly(t *testing.T) {
yaml := []byte(`positions:
/tmp/random.log: "17623"
`)
- err := ioutil.WriteFile(temp, yaml, 0644)
+ err := os.WriteFile(temp, yaml, 0644)
if err != nil {
t.Fatal(err)
}
diff --git a/clients/pkg/promtail/positions/write_positions_windows.go b/clients/pkg/promtail/positions/write_positions_windows.go
index 6d6002f9f2..c139376d49 100644
--- a/clients/pkg/promtail/positions/write_positions_windows.go
+++ b/clients/pkg/promtail/positions/write_positions_windows.go
@@ -4,7 +4,6 @@
package positions
import (
- "io/ioutil"
"os"
"path/filepath"
@@ -24,7 +23,7 @@ func writePositionFile(filename string, positions map[string]string) error {
target := filepath.Clean(filename)
temp := target + "-new"
- err = ioutil.WriteFile(temp, buf, os.FileMode(positionFileMode))
+ err = os.WriteFile(temp, buf, os.FileMode(positionFileMode))
if err != nil {
return err
}
diff --git a/clients/pkg/promtail/promtail_test.go b/clients/pkg/promtail/promtail_test.go
index ab0a6ded0a..c17f2af3da 100644
--- a/clients/pkg/promtail/promtail_test.go
+++ b/clients/pkg/promtail/promtail_test.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"math"
"math/rand"
"net"
@@ -510,7 +509,7 @@ func getPromMetrics(t *testing.T, httpListenAddr net.Addr) ([]byte, string) {
t.Fatal("Received a non 200 status code from /metrics endpoint", resp.StatusCode)
}
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal("Error reading response body from /metrics endpoint", err)
}
@@ -656,7 +655,7 @@ func randName() string {
}
func Test_DryRun(t *testing.T) {
- f, err := ioutil.TempFile("/tmp", "Test_DryRun")
+ f, err := os.CreateTemp("/tmp", "Test_DryRun")
require.NoError(t, err)
defer os.Remove(f.Name())
diff --git a/clients/pkg/promtail/server/template.go b/clients/pkg/promtail/server/template.go
index 416f3993b6..1ed7fde545 100644
--- a/clients/pkg/promtail/server/template.go
+++ b/clients/pkg/promtail/server/template.go
@@ -3,7 +3,6 @@ package server
import (
"context"
"io"
- "io/ioutil"
"net/http"
"net/url"
"path"
@@ -77,7 +76,7 @@ func getTemplate(name string) (string, error) {
defer func() {
_ = f.Close()
}()
- b, err := ioutil.ReadAll(f)
+ b, err := io.ReadAll(f)
if err != nil {
return err
}
diff --git a/clients/pkg/promtail/targets/journal/journaltarget.go b/clients/pkg/promtail/targets/journal/journaltarget.go
index 33ccee896f..000268daf4 100644
--- a/clients/pkg/promtail/targets/journal/journaltarget.go
+++ b/clients/pkg/promtail/targets/journal/journaltarget.go
@@ -6,7 +6,6 @@ package journal
import (
"fmt"
"io"
- "io/ioutil"
"strings"
"syscall"
"time"
@@ -201,7 +200,7 @@ func journalTargetWithReader(
go func() {
for {
- err := t.r.Follow(until, ioutil.Discard)
+ err := t.r.Follow(until, io.Discard)
if err != nil {
level.Error(t.logger).Log("msg", "received error during sdjournal follow", "err", err.Error())
diff --git a/clients/pkg/promtail/targets/syslog/syslogtarget_test.go b/clients/pkg/promtail/targets/syslog/syslogtarget_test.go
index 4beb286b70..49ba88b5f8 100644
--- a/clients/pkg/promtail/targets/syslog/syslogtarget_test.go
+++ b/clients/pkg/promtail/targets/syslog/syslogtarget_test.go
@@ -5,7 +5,6 @@ import (
"crypto/x509"
"fmt"
"io"
- "io/ioutil"
"net"
"os"
"testing"
@@ -656,7 +655,7 @@ func testSyslogTargetWithTLS(t *testing.T, fmtFunc formatFunc) {
}
func createTempFile(data []byte) (*os.File, error) {
- tmpFile, err := ioutil.TempFile("", "")
+ tmpFile, err := os.CreateTemp("", "")
if err != nil {
return nil, fmt.Errorf("failed to create temporary file: %s", err)
}
diff --git a/clients/pkg/promtail/targets/syslog/transport.go b/clients/pkg/promtail/targets/syslog/transport.go
index 0d88d5a784..8fb8af9eb3 100644
--- a/clients/pkg/promtail/targets/syslog/transport.go
+++ b/clients/pkg/promtail/targets/syslog/transport.go
@@ -6,8 +6,8 @@ import (
"crypto/x509"
"fmt"
"io"
- "io/ioutil"
"net"
+ "os"
"strings"
"sync"
"time"
@@ -207,7 +207,7 @@ func newTLSConfig(certFile string, keyFile string, caFile string) (*tls.Config,
}
if caFile != "" {
- caCert, err := ioutil.ReadFile(caFile)
+ caCert, err := os.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("unable to load client CA certificate: %w", err)
}
diff --git a/clients/pkg/promtail/targets/windows/bookmark.go b/clients/pkg/promtail/targets/windows/bookmark.go
index 70a5a087a9..b7a4a7698c 100644
--- a/clients/pkg/promtail/targets/windows/bookmark.go
+++ b/clients/pkg/promtail/targets/windows/bookmark.go
@@ -4,7 +4,7 @@
package windows
import (
- "io/ioutil"
+ "io"
"os"
"github.com/spf13/afero"
@@ -52,7 +52,7 @@ func newBookMark(path string) (*bookMark, error) {
if err != nil {
return nil, err
}
- fileContent, err := ioutil.ReadAll(file)
+ fileContent, err := io.ReadAll(file)
if err != nil {
return nil, err
}
diff --git a/cmd/chunks-inspect/loki.go b/cmd/chunks-inspect/loki.go
index 42bf7c3cbb..2ae2f9df8d 100644
--- a/cmd/chunks-inspect/loki.go
+++ b/cmd/chunks-inspect/loki.go
@@ -7,7 +7,6 @@ import (
"fmt"
"hash/crc32"
"io"
- "io/ioutil"
"github.com/golang/snappy"
"github.com/klauspost/compress/flate"
@@ -187,7 +186,7 @@ func parseLokiBlock(compression Encoding, data []byte) ([]byte, []LokiEntry, err
return nil, nil, err
}
- decompressed, err := ioutil.ReadAll(r)
+ decompressed, err := io.ReadAll(r)
origDecompressed := decompressed
if err != nil {
return nil, nil, err
diff --git a/cmd/chunks-inspect/main.go b/cmd/chunks-inspect/main.go
index 3a004b4998..0aa99a57f1 100644
--- a/cmd/chunks-inspect/main.go
+++ b/cmd/chunks-inspect/main.go
@@ -4,7 +4,6 @@ import (
"crypto/sha256"
"flag"
"fmt"
- "io/ioutil"
"log"
"os"
"strings"
@@ -122,7 +121,7 @@ func printFile(filename string, blockDetails, printLines, storeBlocks bool) {
}
func writeBlockToFile(data []byte, blockIndex int, filename string) {
- err := ioutil.WriteFile(filename, data, 0644)
+ err := os.WriteFile(filename, data, 0644)
if err != nil {
log.Println("Failed to store block", blockIndex, "to file", filename, "due to error:", err)
} else {
diff --git a/operator/cmd/loki-broker/main.go b/operator/cmd/loki-broker/main.go
index c3c401b5c1..c4c7722be9 100644
--- a/operator/cmd/loki-broker/main.go
+++ b/operator/cmd/loki-broker/main.go
@@ -3,7 +3,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"os"
"path"
"strings"
@@ -117,7 +116,7 @@ func main() {
cfg.validateFlags(logger)
- b, err := ioutil.ReadFile(cfg.crFilepath)
+ b, err := os.ReadFile(cfg.crFilepath)
if err != nil {
logger.Info("failed to read custom resource file", "path", cfg.crFilepath)
os.Exit(1)
@@ -169,7 +168,7 @@ func main() {
if cfg.writeToDir != "" {
basename := fmt.Sprintf("%s-%s.yaml", o.GetObjectKind().GroupVersionKind().Kind, o.GetName())
fname := strings.ToLower(path.Join(cfg.writeToDir, basename))
- if err := ioutil.WriteFile(fname, b, 0o644); err != nil {
+ if err := os.WriteFile(fname, b, 0o644); err != nil {
logger.Error(err, "failed to write file to directory", "path", fname)
os.Exit(1)
}
diff --git a/operator/controllers/loki/lokistack_controller_test.go b/operator/controllers/loki/lokistack_controller_test.go
index 7ab6481393..b098f26295 100644
--- a/operator/controllers/loki/lokistack_controller_test.go
+++ b/operator/controllers/loki/lokistack_controller_test.go
@@ -2,7 +2,7 @@ package controllers
import (
"flag"
- "io/ioutil"
+ "io"
"os"
"testing"
@@ -38,7 +38,7 @@ func TestMain(m *testing.M) {
if testing.Verbose() {
logger = log.NewLogger("testing", log.WithVerbosity(5))
} else {
- logger = log.NewLogger("testing", log.WithOutput(ioutil.Discard))
+ logger = log.NewLogger("testing", log.WithOutput(io.Discard))
}
// Register the clientgo and CRD schemes
diff --git a/operator/internal/handlers/lokistack_create_or_update_test.go b/operator/internal/handlers/lokistack_create_or_update_test.go
index c04c926586..c0c6e41743 100644
--- a/operator/internal/handlers/lokistack_create_or_update_test.go
+++ b/operator/internal/handlers/lokistack_create_or_update_test.go
@@ -4,7 +4,7 @@ import (
"context"
"errors"
"flag"
- "io/ioutil"
+ "io"
"os"
"testing"
@@ -94,7 +94,7 @@ func TestMain(m *testing.M) {
if testing.Verbose() {
logger = log.NewLogger("testing", log.WithVerbosity(5))
} else {
- logger = log.NewLogger("testing", log.WithOutput(ioutil.Discard))
+ logger = log.NewLogger("testing", log.WithOutput(io.Discard))
}
// Register the clientgo and CRD schemes
diff --git a/operator/internal/manifests/internal/config/build.go b/operator/internal/manifests/internal/config/build.go
index b5fad138ab..399e9efd49 100644
--- a/operator/internal/manifests/internal/config/build.go
+++ b/operator/internal/manifests/internal/config/build.go
@@ -3,7 +3,7 @@ package config
import (
"bytes"
"embed"
- "io/ioutil"
+ "io"
"text/template"
"github.com/ViaQ/logerr/v2/kverrors"
@@ -38,7 +38,7 @@ func Build(opts Options) ([]byte, []byte, error) {
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to create loki configuration")
}
- cfg, err := ioutil.ReadAll(w)
+ cfg, err := io.ReadAll(w)
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
@@ -48,7 +48,7 @@ func Build(opts Options) ([]byte, []byte, error) {
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to create loki runtime configuration")
}
- rcfg, err := ioutil.ReadAll(w)
+ rcfg, err := io.ReadAll(w)
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
diff --git a/operator/internal/manifests/internal/gateway/build.go b/operator/internal/manifests/internal/gateway/build.go
index 0b5e308411..1e1a4cb0d9 100644
--- a/operator/internal/manifests/internal/gateway/build.go
+++ b/operator/internal/manifests/internal/gateway/build.go
@@ -3,7 +3,7 @@ package gateway
import (
"bytes"
"embed"
- "io/ioutil"
+ "io"
"text/template"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@@ -47,7 +47,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway rbac configuration")
}
- rbacCfg, err = ioutil.ReadAll(w)
+ rbacCfg, err = io.ReadAll(w)
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
@@ -57,7 +57,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway tenants configuration")
}
- tenantsCfg, err = ioutil.ReadAll(w)
+ tenantsCfg, err = io.ReadAll(w)
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
@@ -68,7 +68,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to create lokistack gateway rego configuration")
}
- regoCfg, err = ioutil.ReadAll(w)
+ regoCfg, err = io.ReadAll(w)
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
diff --git a/pkg/canary/reader/reader.go b/pkg/canary/reader/reader.go
index aadfff8fa1..7dec6347e1 100644
--- a/pkg/canary/reader/reader.go
+++ b/pkg/canary/reader/reader.go
@@ -6,7 +6,6 @@ import (
"encoding/base64"
"fmt"
"io"
- "io/ioutil"
"log"
"net"
"net/http"
@@ -226,7 +225,7 @@ func (r *Reader) QueryCountOverTime(queryRange string) (float64, error) {
r.backoffMtx.Lock()
r.nextQuery = nextBackoff(r.w, resp.StatusCode, r.backoff)
r.backoffMtx.Unlock()
- buf, _ := ioutil.ReadAll(resp.Body)
+ buf, _ := io.ReadAll(resp.Body)
return 0, fmt.Errorf("error response from server: %s (%v)", string(buf), err)
}
// No Errors, reset backoff
@@ -317,7 +316,7 @@ func (r *Reader) Query(start time.Time, end time.Time) ([]time.Time, error) {
r.backoffMtx.Lock()
r.nextQuery = nextBackoff(r.w, resp.StatusCode, r.backoff)
r.backoffMtx.Unlock()
- buf, _ := ioutil.ReadAll(resp.Body)
+ buf, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("error response from server: %s (%v)", string(buf), err)
}
// No Errors, reset backoff
diff --git a/pkg/distributor/http_test.go b/pkg/distributor/http_test.go
index ebe810acbc..9b72ea85c7 100644
--- a/pkg/distributor/http_test.go
+++ b/pkg/distributor/http_test.go
@@ -1,7 +1,7 @@
package distributor
import (
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"testing"
@@ -33,7 +33,7 @@ func TestDistributorRingHandler(t *testing.T) {
require.NoError(t, err)
defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Contains(t, string(body), "
Instance ID | ")
require.NotContains(t, string(body), "Not running with Global Rating Limit - ring not being used by the Distributor")
@@ -48,7 +48,7 @@ func TestDistributorRingHandler(t *testing.T) {
require.NoError(t, err)
defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Contains(t, string(body), "Not running with Global Rating Limit - ring not being used by the Distributor")
require.NotContains(t, string(body), "Instance ID | ")
diff --git a/pkg/ingester/checkpoint.go b/pkg/ingester/checkpoint.go
index e90e72e678..fd8b982a36 100644
--- a/pkg/ingester/checkpoint.go
+++ b/pkg/ingester/checkpoint.go
@@ -3,8 +3,7 @@ package ingester
import (
"bytes"
"context"
- fmt "fmt"
- "io/ioutil"
+ "fmt"
"os"
"path/filepath"
"regexp"
@@ -425,7 +424,7 @@ func checkpointIndex(filename string, includeTmp bool) (int, error) {
// lastCheckpoint returns the directory name and index of the most recent checkpoint.
// If dir does not contain any checkpoints, -1 is returned as index.
func lastCheckpoint(dir string) (string, int, error) {
- dirs, err := ioutil.ReadDir(dir)
+ dirs, err := os.ReadDir(dir)
if err != nil {
return "", -1, err
}
@@ -466,7 +465,7 @@ func (w *WALCheckpointWriter) deleteCheckpoints(maxIndex int) (err error) {
errs := tsdb_errors.NewMulti()
- files, err := ioutil.ReadDir(w.segmentWAL.Dir())
+ files, err := os.ReadDir(w.segmentWAL.Dir())
if err != nil {
return err
}
diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go
index 62ae6ca9f4..85734c8b9a 100644
--- a/pkg/ingester/checkpoint_test.go
+++ b/pkg/ingester/checkpoint_test.go
@@ -2,8 +2,8 @@ package ingester
import (
"context"
- fmt "fmt"
- "io/ioutil"
+ "fmt"
+ "os"
"sort"
"testing"
"time"
@@ -333,7 +333,7 @@ func expectCheckpoint(t *testing.T, walDir string, shouldExist bool, max time.Du
<-time.After(max / 10) // check 10x over the duration
}
- fs, err := ioutil.ReadDir(walDir)
+ fs, err := os.ReadDir(walDir)
require.Nil(t, err)
var found bool
for _, f := range fs {
diff --git a/pkg/ingester/transfer_test.go b/pkg/ingester/transfer_test.go
index cedc62af4e..19c88850e0 100644
--- a/pkg/ingester/transfer_test.go
+++ b/pkg/ingester/transfer_test.go
@@ -3,7 +3,6 @@ package ingester
import (
"fmt"
"io"
- "io/ioutil"
"sort"
"testing"
"time"
@@ -143,7 +142,7 @@ func (f *testIngesterFactory) getIngester(joinAfter time.Duration, t *testing.T)
PusherClient: nil,
QuerierClient: nil,
IngesterClient: &testIngesterClient{t: f.t, i: ingester},
- Closer: ioutil.NopCloser(nil),
+ Closer: io.NopCloser(nil),
}, nil
}
diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go
index 69a9601a7a..5b00c1f4ba 100644
--- a/pkg/logcli/client/client.go
+++ b/pkg/logcli/client/client.go
@@ -3,10 +3,11 @@ package client
import (
"encoding/base64"
"fmt"
- "io/ioutil"
+ "io"
"log"
"net/http"
"net/url"
+ "os"
"path"
"strings"
"time"
@@ -221,7 +222,7 @@ func (c *DefaultClient) doRequest(path, query string, quiet bool, out interface{
continue
}
if resp.StatusCode/100 != 2 {
- buf, _ := ioutil.ReadAll(resp.Body) // nolint
+ buf, _ := io.ReadAll(resp.Body) // nolint
log.Printf("Error response from server: %s (%v) attempts remaining: %d", string(buf), err, attempts)
if err := resp.Body.Close(); err != nil {
log.Println("error closing body", err)
@@ -283,7 +284,7 @@ func (c *DefaultClient) getHTTPRequestHeader() (http.Header, error) {
}
if c.BearerTokenFile != "" {
- b, err := ioutil.ReadFile(c.BearerTokenFile)
+ b, err := os.ReadFile(c.BearerTokenFile)
if err != nil {
return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", c.BearerTokenFile, err)
}
@@ -335,7 +336,7 @@ func (c *DefaultClient) wsConnect(path, query string, quiet bool) (*websocket.Co
if resp == nil {
return nil, err
}
- buf, _ := ioutil.ReadAll(resp.Body) // nolint
+ buf, _ := io.ReadAll(resp.Body) // nolint
return nil, fmt.Errorf("Error response from server: %s (%v)", string(buf), err)
}
diff --git a/pkg/logcli/client/file.go b/pkg/logcli/client/file.go
index 46f0f10be3..8c2dded380 100644
--- a/pkg/logcli/client/file.go
+++ b/pkg/logcli/client/file.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"sort"
"strings"
"time"
@@ -222,7 +221,7 @@ func newFileIterator(
) (iter.EntryIterator, error) {
lr := io.LimitReader(r, defaultMaxFileSize)
- b, err := ioutil.ReadAll(lr)
+ b, err := io.ReadAll(lr)
if err != nil {
return nil, err
}
diff --git a/pkg/logql/syntax/fuzz_test.go b/pkg/logql/syntax/fuzz_test.go
index b02551b1be..bc6b5f1e55 100644
--- a/pkg/logql/syntax/fuzz_test.go
+++ b/pkg/logql/syntax/fuzz_test.go
@@ -4,7 +4,6 @@
package syntax
import (
- "io/ioutil"
"os"
"testing"
@@ -15,7 +14,7 @@ const fuzzTestCaseEnvName = "FUZZ_TESTCASE_PATH"
func Test_Fuzz(t *testing.T) {
fuzzTestPath := os.Getenv(fuzzTestCaseEnvName)
- data, err := ioutil.ReadFile(fuzzTestPath)
+ data, err := os.ReadFile(fuzzTestPath)
require.NoError(t, err)
_, _ = ParseExpr(string(data))
}
diff --git a/pkg/logqlanalyzer/http.go b/pkg/logqlanalyzer/http.go
index ccb5025e12..6f13243488 100644
--- a/pkg/logqlanalyzer/http.go
+++ b/pkg/logqlanalyzer/http.go
@@ -3,7 +3,7 @@ package logqlanalyzer
import (
"context"
"encoding/json"
- "io/ioutil"
+ "io"
"net/http"
"github.com/go-kit/log/level"
@@ -31,7 +31,7 @@ type LogQLAnalyzeHandler struct {
}
func (s *LogQLAnalyzeHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- payload, err := ioutil.ReadAll(req.Body)
+ payload, err := io.ReadAll(req.Body)
if err != nil {
writeError(req.Context(), w, err, http.StatusBadRequest, "unable to read request body")
return
diff --git a/pkg/loki/config_handler_test.go b/pkg/loki/config_handler_test.go
index 9a49f65bd2..45765c2532 100644
--- a/pkg/loki/config_handler_test.go
+++ b/pkg/loki/config_handler_test.go
@@ -1,7 +1,7 @@
package loki
import (
- "io/ioutil"
+ "io"
"net/http/httptest"
"testing"
@@ -107,7 +107,7 @@ func TestConfigDiffHandler(t *testing.T) {
resp := w.Result()
assert.Equal(t, tc.expectedStatusCode, resp.StatusCode)
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, tc.expectedBody, string(body))
})
diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go
index d88624edd5..1761f2f8bf 100644
--- a/pkg/loki/config_wrapper_test.go
+++ b/pkg/loki/config_wrapper_test.go
@@ -3,7 +3,6 @@ package loki
import (
"flag"
"fmt"
- "io/ioutil"
"net/url"
"os"
"reflect"
@@ -39,7 +38,7 @@ func configWrapperFromYAML(t *testing.T, configFileString string, args []string)
config := ConfigWrapper{}
fs := flag.NewFlagSet(t.Name(), flag.PanicOnError)
- file, err := ioutil.TempFile("", "config.yaml")
+ file, err := os.CreateTemp("", "config.yaml")
defer func() {
os.Remove(file.Name())
}()
@@ -987,7 +986,7 @@ query_range:
func TestDefaultUnmarshal(t *testing.T) {
t.Run("with a minimal config file and no command line args, defaults are use", func(t *testing.T) {
- file, err := ioutil.TempFile("", "config.yaml")
+ file, err := os.CreateTemp("", "config.yaml")
defer func() {
os.Remove(file.Name())
}()
diff --git a/pkg/loki/loki_test.go b/pkg/loki/loki_test.go
index 5fc790b38c..bfec1595a7 100644
--- a/pkg/loki/loki_test.go
+++ b/pkg/loki/loki_test.go
@@ -5,7 +5,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"strings"
@@ -223,7 +222,7 @@ schema_config:
defer resp.Body.Close()
- bBytes, err := ioutil.ReadAll(resp.Body)
+ bBytes, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, string(bBytes), "abc")
assert.True(t, customHandlerInvoked)
diff --git a/pkg/loki/runtime_config_test.go b/pkg/loki/runtime_config_test.go
index 91ba5e7e8a..edaf59f524 100644
--- a/pkg/loki/runtime_config_test.go
+++ b/pkg/loki/runtime_config_test.go
@@ -4,7 +4,7 @@ import (
"context"
"flag"
"io"
- "io/ioutil"
+ "os"
"strings"
"testing"
"time"
@@ -85,7 +85,7 @@ overrides:
func newTestOverrides(t *testing.T, yaml string) *validation.Overrides {
t.Helper()
- f, err := ioutil.TempFile(t.TempDir(), "bar")
+ f, err := os.CreateTemp(t.TempDir(), "bar")
require.NoError(t, err)
path := f.Name()
// fake loader to load from string instead of file.
diff --git a/pkg/loki/version_handler_test.go b/pkg/loki/version_handler_test.go
index 083f611861..c7b9094b4a 100644
--- a/pkg/loki/version_handler_test.go
+++ b/pkg/loki/version_handler_test.go
@@ -1,7 +1,7 @@
package loki
import (
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"testing"
@@ -35,7 +35,7 @@ func TestVersionHandler(t *testing.T) {
"revision":"foobar",
"goVersion": "42"
}`
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
assert.JSONEq(t, expected, string(body))
}
diff --git a/pkg/lokifrontend/frontend/transport/handler.go b/pkg/lokifrontend/frontend/transport/handler.go
index 7f42084b43..4c0f5ca7ca 100644
--- a/pkg/lokifrontend/frontend/transport/handler.go
+++ b/pkg/lokifrontend/frontend/transport/handler.go
@@ -6,7 +6,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"strconv"
@@ -123,7 +122,7 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Buffer the body for later use to track slow queries.
var buf bytes.Buffer
r.Body = http.MaxBytesReader(w, r.Body, f.cfg.MaxBodySize)
- r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &buf))
+ r.Body = io.NopCloser(io.TeeReader(r.Body, &buf))
startTime := time.Now()
resp, err := f.roundTripper.RoundTrip(r)
@@ -207,7 +206,7 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer
func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) url.Values {
// Use previously buffered body.
- r.Body = ioutil.NopCloser(&bodyBuf)
+ r.Body = io.NopCloser(&bodyBuf)
// Ensure the form has been parsed so all the parameters are present
err := r.ParseForm()
diff --git a/pkg/lokifrontend/frontend/transport/roundtripper.go b/pkg/lokifrontend/frontend/transport/roundtripper.go
index d9ba57ccba..583fc22d04 100644
--- a/pkg/lokifrontend/frontend/transport/roundtripper.go
+++ b/pkg/lokifrontend/frontend/transport/roundtripper.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"io"
- "io/ioutil"
"net/http"
"github.com/weaveworks/common/httpgrpc"
@@ -47,7 +46,7 @@ func (a *grpcRoundTripperAdapter) RoundTrip(r *http.Request) (*http.Response, er
httpResp := &http.Response{
StatusCode: int(resp.Code),
- Body: &buffer{buff: resp.Body, ReadCloser: ioutil.NopCloser(bytes.NewReader(resp.Body))},
+ Body: &buffer{buff: resp.Body, ReadCloser: io.NopCloser(bytes.NewReader(resp.Body))},
Header: http.Header{},
ContentLength: int64(len(resp.Body)),
}
diff --git a/pkg/lokifrontend/frontend/v1/frontend_test.go b/pkg/lokifrontend/frontend/v1/frontend_test.go
index 911d0d6897..54a2d65e6e 100644
--- a/pkg/lokifrontend/frontend/v1/frontend_test.go
+++ b/pkg/lokifrontend/frontend/v1/frontend_test.go
@@ -3,7 +3,7 @@ package v1
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"strings"
@@ -55,7 +55,7 @@ func TestFrontend(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, "Hello World", string(body))
@@ -105,7 +105,7 @@ func TestFrontendPropagateTrace(t *testing.T) {
require.Equal(t, 200, resp.StatusCode)
defer resp.Body.Close()
- _, err = ioutil.ReadAll(resp.Body)
+ _, err = io.ReadAll(resp.Body)
require.NoError(t, err)
// Query should do one call.
@@ -201,7 +201,7 @@ func TestFrontendMetricsCleanup(t *testing.T) {
require.Equal(t, 200, resp.StatusCode)
defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, "Hello World", string(body))
diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go
index 5ad89ac908..9e7fb39fca 100644
--- a/pkg/querier/queryrange/codec.go
+++ b/pkg/querier/queryrange/codec.go
@@ -6,7 +6,7 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
+ io "io"
"net/http"
"net/url"
"sort"
@@ -407,7 +407,7 @@ type Buffer interface {
func (Codec) DecodeResponse(ctx context.Context, r *http.Response, req queryrangebase.Request) (queryrangebase.Response, error) {
if r.StatusCode/100 != 2 {
- body, _ := ioutil.ReadAll(r.Body)
+ body, _ := io.ReadAll(r.Body)
return nil, httpgrpc.Errorf(r.StatusCode, string(body))
}
@@ -416,7 +416,7 @@ func (Codec) DecodeResponse(ctx context.Context, r *http.Response, req queryrang
if buffer, ok := r.Body.(Buffer); ok {
buf = buffer.Bytes()
} else {
- buf, err = ioutil.ReadAll(r.Body)
+ buf, err = io.ReadAll(r.Body)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err)
}
@@ -590,7 +590,7 @@ func (Codec) EncodeResponse(ctx context.Context, res queryrangebase.Response) (*
Header: http.Header{
"Content-Type": []string{"application/json"},
},
- Body: ioutil.NopCloser(&buf),
+ Body: io.NopCloser(&buf),
StatusCode: http.StatusOK,
}
return &resp, nil
diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go
index a372502cce..d4a3fe5134 100644
--- a/pkg/querier/queryrange/codec_test.go
+++ b/pkg/querier/queryrange/codec_test.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net/http"
strings "strings"
"testing"
@@ -117,13 +116,13 @@ func Test_codec_DecodeResponse(t *testing.T) {
want queryrangebase.Response
wantErr bool
}{
- {"500", &http.Response{StatusCode: 500, Body: ioutil.NopCloser(strings.NewReader("some error"))}, nil, nil, true},
- {"no body", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(badReader{})}, nil, nil, true},
- {"bad json", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(""))}, nil, nil, true},
- {"not success", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"status":"fail"}`))}, nil, nil, true},
- {"unknown", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"status":"success"}`))}, nil, nil, true},
+ {"500", &http.Response{StatusCode: 500, Body: io.NopCloser(strings.NewReader("some error"))}, nil, nil, true},
+ {"no body", &http.Response{StatusCode: 200, Body: io.NopCloser(badReader{})}, nil, nil, true},
+ {"bad json", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(""))}, nil, nil, true},
+ {"not success", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"fail"}`))}, nil, nil, true},
+ {"unknown", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success"}`))}, nil, nil, true},
{
- "matrix", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(matrixString))}, nil,
+ "matrix", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(matrixString))}, nil,
&LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@@ -137,7 +136,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
{
"matrix-empty-streams",
- &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(matrixStringEmptyResult))},
+ &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(matrixStringEmptyResult))},
nil,
&LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
@@ -152,7 +151,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
{
"vector-empty-streams",
- &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(vectorStringEmptyResult))},
+ &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(vectorStringEmptyResult))},
nil,
&LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
@@ -166,7 +165,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
- "streams v1", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(streamsString))},
+ "streams v1", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsString))},
&LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/loki/api/v1/query_range"},
&LokiResponse{
Status: loghttp.QueryStatusSuccess,
@@ -181,7 +180,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
- "streams legacy", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(streamsString))},
+ "streams legacy", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsString))},
&LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/api/prom/query_range"},
&LokiResponse{
Status: loghttp.QueryStatusSuccess,
@@ -196,7 +195,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
- "series", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(seriesString))},
+ "series", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(seriesString))},
&LokiSeriesRequest{Path: "/loki/api/v1/series"},
&LokiSeriesResponse{
Status: "success",
@@ -205,7 +204,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
- "labels legacy", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(labelsString))},
+ "labels legacy", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(labelsString))},
&LokiLabelNamesRequest{Path: "/api/prom/label"},
&LokiLabelNamesResponse{
Status: "success",
@@ -214,7 +213,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
- "index stats", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(indexStatsString))},
+ "index stats", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(indexStatsString))},
&logproto.IndexStatsRequest{},
&IndexStatsResponse{
Response: &logproto.IndexStatsResponse{
@@ -456,7 +455,7 @@ func Test_codec_EncodeResponse(t *testing.T) {
}
if err == nil {
require.Equal(t, 200, got.StatusCode)
- body, err := ioutil.ReadAll(got.Body)
+ body, err := io.ReadAll(got.Body)
require.Nil(t, err)
bodyString := string(body)
require.JSONEq(t, tt.body, bodyString)
@@ -1308,7 +1307,7 @@ func Benchmark_CodecDecodeLogs(b *testing.B) {
require.Nil(b, err)
reader := bytes.NewReader(buf)
resp.Body = &buffer{
- ReadCloser: ioutil.NopCloser(reader),
+ ReadCloser: io.NopCloser(reader),
buff: buf,
}
b.ResetTimer()
@@ -1344,7 +1343,7 @@ func Benchmark_CodecDecodeSamples(b *testing.B) {
buf, err := io.ReadAll(resp.Body)
require.Nil(b, err)
reader := bytes.NewReader(buf)
- resp.Body = ioutil.NopCloser(reader)
+ resp.Body = io.NopCloser(reader)
b.ResetTimer()
b.ReportAllocs()
diff --git a/pkg/querier/queryrange/prometheus.go b/pkg/querier/queryrange/prometheus.go
index 7733c5d52f..bc1b77b7be 100644
--- a/pkg/querier/queryrange/prometheus.go
+++ b/pkg/querier/queryrange/prometheus.go
@@ -3,7 +3,7 @@ package queryrange
import (
"bytes"
"context"
- "io/ioutil"
+ "io"
"net/http"
jsoniter "github.com/json-iterator/go"
@@ -64,7 +64,7 @@ func (p *LokiPromResponse) encode(ctx context.Context) (*http.Response, error) {
Header: http.Header{
"Content-Type": []string{"application/json"},
},
- Body: ioutil.NopCloser(bytes.NewBuffer(b)),
+ Body: io.NopCloser(bytes.NewBuffer(b)),
StatusCode: http.StatusOK,
}
return &resp, nil
diff --git a/pkg/querier/queryrange/queryrangebase/marshaling_test.go b/pkg/querier/queryrange/queryrangebase/marshaling_test.go
index 60814d910a..3f09ed9ab3 100644
--- a/pkg/querier/queryrange/queryrangebase/marshaling_test.go
+++ b/pkg/querier/queryrange/queryrangebase/marshaling_test.go
@@ -3,7 +3,7 @@ package queryrangebase
import (
"bytes"
"context"
- "io/ioutil"
+ "io"
"math/rand"
"net/http"
"testing"
@@ -31,7 +31,7 @@ func BenchmarkPrometheusCodec_DecodeResponse(b *testing.B) {
for n := 0; n < b.N; n++ {
_, err := PrometheusCodec.DecodeResponse(context.Background(), &http.Response{
StatusCode: 200,
- Body: ioutil.NopCloser(bytes.NewReader(encodedRes)),
+ Body: io.NopCloser(bytes.NewReader(encodedRes)),
ContentLength: int64(len(encodedRes)),
}, nil)
require.NoError(b, err)
diff --git a/pkg/querier/queryrange/queryrangebase/query_range.go b/pkg/querier/queryrange/queryrangebase/query_range.go
index 1fd8fc5580..7995b81bc3 100644
--- a/pkg/querier/queryrange/queryrangebase/query_range.go
+++ b/pkg/querier/queryrange/queryrangebase/query_range.go
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
- "io/ioutil"
+ "io"
"math"
"net/http"
"net/url"
@@ -228,7 +228,7 @@ func (prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Requ
func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ Request) (Response, error) {
if r.StatusCode/100 != 2 {
- body, _ := ioutil.ReadAll(r.Body)
+ body, _ := io.ReadAll(r.Body)
return nil, httpgrpc.Errorf(r.StatusCode, string(body))
}
log, ctx := spanlogger.New(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck
@@ -297,7 +297,7 @@ func (prometheusCodec) EncodeResponse(ctx context.Context, res Response) (*http.
Header: http.Header{
"Content-Type": []string{"application/json"},
},
- Body: ioutil.NopCloser(bytes.NewBuffer(b)),
+ Body: io.NopCloser(bytes.NewBuffer(b)),
StatusCode: http.StatusOK,
ContentLength: int64(len(b)),
}
diff --git a/pkg/querier/queryrange/queryrangebase/query_range_test.go b/pkg/querier/queryrange/queryrangebase/query_range_test.go
index 5604e59dcd..cfcda3e686 100644
--- a/pkg/querier/queryrange/queryrangebase/query_range_test.go
+++ b/pkg/querier/queryrange/queryrangebase/query_range_test.go
@@ -3,7 +3,7 @@ package queryrangebase
import (
"bytes"
"context"
- "io/ioutil"
+ "io"
"net/http"
"strconv"
"testing"
@@ -96,7 +96,7 @@ func TestResponse(t *testing.T) {
response := &http.Response{
StatusCode: 200,
Header: http.Header{"Content-Type": []string{"application/json"}},
- Body: ioutil.NopCloser(bytes.NewBuffer([]byte(tc.body))),
+ Body: io.NopCloser(bytes.NewBuffer([]byte(tc.body))),
}
resp, err := PrometheusCodec.DecodeResponse(context.Background(), response, nil)
require.NoError(t, err)
@@ -106,7 +106,7 @@ func TestResponse(t *testing.T) {
response = &http.Response{
StatusCode: 200,
Header: http.Header{"Content-Type": []string{"application/json"}},
- Body: ioutil.NopCloser(bytes.NewBuffer([]byte(tc.body))),
+ Body: io.NopCloser(bytes.NewBuffer([]byte(tc.body))),
ContentLength: int64(len(tc.body)),
}
resp2, err := PrometheusCodec.EncodeResponse(context.Background(), resp)
diff --git a/pkg/querier/queryrange/queryrangebase/roundtrip.go b/pkg/querier/queryrange/queryrangebase/roundtrip.go
index 210de8ee06..611582f2b9 100644
--- a/pkg/querier/queryrange/queryrangebase/roundtrip.go
+++ b/pkg/querier/queryrange/queryrangebase/roundtrip.go
@@ -19,7 +19,6 @@ import (
"context"
"flag"
"io"
- "io/ioutil"
"net/http"
"time"
@@ -176,7 +175,7 @@ func (q roundTripper) Do(ctx context.Context, r Request) (Response, error) {
return nil, err
}
defer func() {
- _, _ = io.Copy(ioutil.Discard, io.LimitReader(response.Body, 1024)) //nolint:errcheck
+ _, _ = io.Copy(io.Discard, io.LimitReader(response.Body, 1024)) //nolint:errcheck
response.Body.Close()
}()
diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go
index 23e4f195a2..1e1aec8693 100644
--- a/pkg/querier/queryrange/roundtrip_test.go
+++ b/pkg/querier/queryrange/roundtrip_test.go
@@ -3,7 +3,7 @@ package queryrange
import (
"bytes"
"context"
- "io/ioutil"
+ "io"
"math"
"net/http"
"net/http/httptest"
@@ -428,7 +428,7 @@ func TestPostQueries(t *testing.T) {
"query": {`{app="foo"} |~ "foo"`},
}
body := bytes.NewBufferString(data.Encode())
- req.Body = ioutil.NopCloser(body)
+ req.Body = io.NopCloser(body)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
req = req.WithContext(user.InjectOrgID(context.Background(), "1"))
diff --git a/pkg/ruler/base/api.go b/pkg/ruler/base/api.go
index 189b063acd..5240854bf0 100644
--- a/pkg/ruler/base/api.go
+++ b/pkg/ruler/base/api.go
@@ -2,7 +2,7 @@ package base
import (
"encoding/json"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"sort"
@@ -448,7 +448,7 @@ func (a *API) CreateRuleGroup(w http.ResponseWriter, req *http.Request) {
return
}
- payload, err := ioutil.ReadAll(req.Body)
+ payload, err := io.ReadAll(req.Body)
if err != nil {
level.Error(logger).Log("msg", "unable to read rule group payload", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
diff --git a/pkg/ruler/base/api_test.go b/pkg/ruler/base/api_test.go
index 9a7c48bd08..19d6c3c0d7 100644
--- a/pkg/ruler/base/api_test.go
+++ b/pkg/ruler/base/api_test.go
@@ -5,7 +5,6 @@ import (
"encoding/json"
"errors"
"io"
- "io/ioutil"
"net/http"
"net/http/httptest"
"strings"
@@ -33,7 +32,7 @@ func TestRuler_rules(t *testing.T) {
a.PrometheusRules(w, req)
resp := w.Result()
- body, _ := ioutil.ReadAll(resp.Body)
+ body, _ := io.ReadAll(resp.Body)
// Check status code and status response
responseJSON := response{}
@@ -88,7 +87,7 @@ func TestRuler_rules_special_characters(t *testing.T) {
a.PrometheusRules(w, req)
resp := w.Result()
- body, _ := ioutil.ReadAll(resp.Body)
+ body, _ := io.ReadAll(resp.Body)
// Check status code and status response
responseJSON := response{}
@@ -143,7 +142,7 @@ func TestRuler_alerts(t *testing.T) {
a.PrometheusAlerts(w, req)
resp := w.Result()
- body, _ := ioutil.ReadAll(resp.Body)
+ body, _ := io.ReadAll(resp.Body)
// Check status code and status response
responseJSON := response{}
diff --git a/pkg/ruler/base/ruler_test.go b/pkg/ruler/base/ruler_test.go
index f5a03d9950..bb9019f043 100644
--- a/pkg/ruler/base/ruler_test.go
+++ b/pkg/ruler/base/ruler_test.go
@@ -3,7 +3,7 @@ package base
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"math/rand"
"net/http"
"net/http/httptest"
@@ -1089,7 +1089,7 @@ func TestRuler_ListAllRules(t *testing.T) {
router.ServeHTTP(w, req)
resp := w.Result()
- body, _ := ioutil.ReadAll(resp.Body)
+ body, _ := io.ReadAll(resp.Body)
// Check status code and header
require.Equal(t, http.StatusOK, resp.StatusCode)
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index 5dcd508022..36f1344247 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
- "io/ioutil"
+ "os"
"strings"
"time"
@@ -184,7 +184,7 @@ func (GroupLoader) Parse(query string) (parser.Expr, error) {
}
func (g GroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {
- b, err := ioutil.ReadFile(identifier)
+ b, err := os.ReadFile(identifier)
if err != nil {
return nil, []error{errors.Wrap(err, identifier)}
}
diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go
index 99be6a64d9..b3e36cf395 100644
--- a/pkg/ruler/compat_test.go
+++ b/pkg/ruler/compat_test.go
@@ -3,7 +3,6 @@ package ruler
import (
"context"
"fmt"
- "io/ioutil"
"os"
"strings"
"testing"
@@ -250,10 +249,10 @@ groups:
} {
t.Run(tc.desc, func(t *testing.T) {
var loader GroupLoader
- f, err := ioutil.TempFile(os.TempDir(), "rules")
+ f, err := os.CreateTemp(os.TempDir(), "rules")
require.Nil(t, err)
defer os.Remove(f.Name())
- err = ioutil.WriteFile(f.Name(), []byte(tc.data), 0777)
+ err = os.WriteFile(f.Name(), []byte(tc.data), 0777)
require.Nil(t, err)
_, errs := loader.Load(f.Name())
diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client.go b/pkg/ruler/rulestore/bucketclient/bucket_client.go
index ee73481a8e..da3b574865 100644
--- a/pkg/ruler/rulestore/bucketclient/bucket_client.go
+++ b/pkg/ruler/rulestore/bucketclient/bucket_client.go
@@ -5,7 +5,7 @@ import (
"context"
"encoding/base64"
"fmt"
- "io/ioutil"
+ "io"
"strings"
"github.com/go-kit/log"
@@ -66,7 +66,7 @@ func (b *BucketRuleStore) getRuleGroup(ctx context.Context, userID, namespace, g
}
defer func() { _ = reader.Close() }()
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
return nil, errors.Wrapf(err, "failed to read rule group %s", objectKey)
}
diff --git a/pkg/ruler/rulestore/local/local.go b/pkg/ruler/rulestore/local/local.go
index dc6add25dd..e633e11ae0 100644
--- a/pkg/ruler/rulestore/local/local.go
+++ b/pkg/ruler/rulestore/local/local.go
@@ -3,7 +3,6 @@ package local
import (
"context"
"flag"
- "io/ioutil"
"os"
"path/filepath"
@@ -46,25 +45,31 @@ func NewLocalRulesClient(cfg Config, loader promRules.GroupLoader) (*Client, err
func (l *Client) ListAllUsers(ctx context.Context) ([]string, error) {
root := l.cfg.Directory
- infos, err := ioutil.ReadDir(root)
+ dirEntries, err := os.ReadDir(root)
if err != nil {
return nil, errors.Wrapf(err, "unable to read dir %s", root)
}
var result []string
- for _, info := range infos {
- // After resolving link, info.Name() may be different than user, so keep original name.
- user := info.Name()
+ for _, entry := range dirEntries {
+ // After resolving link, entry.Name() may be different than user, so keep original name.
+ user := entry.Name()
- if info.Mode()&os.ModeSymlink != 0 {
- // ioutil.ReadDir only returns result of LStat. Calling Stat resolves symlink.
- info, err = os.Stat(filepath.Join(root, info.Name()))
+ var isDir bool
+
+ if entry.Type()&os.ModeSymlink != 0 {
+ // os.ReadDir only returns result of LStat. Calling Stat resolves symlink.
+ fi, err := os.Stat(filepath.Join(root, entry.Name()))
if err != nil {
return nil, err
}
+
+ isDir = fi.IsDir()
+ } else {
+ isDir = entry.IsDir()
}
- if info.IsDir() {
+ if isDir {
result = append(result, user)
}
}
@@ -130,25 +135,30 @@ func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) (
var allLists rulespb.RuleGroupList
root := filepath.Join(l.cfg.Directory, userID)
- infos, err := ioutil.ReadDir(root)
+ dirEntries, err := os.ReadDir(root)
if err != nil {
return nil, errors.Wrapf(err, "unable to read rule dir %s", root)
}
- for _, info := range infos {
- // After resolving link, info.Name() may be different than namespace, so keep original name.
- namespace := info.Name()
+ for _, entry := range dirEntries {
+ // After resolving link, entry.Name() may be different than namespace, so keep original name.
+ namespace := entry.Name()
+
+ var isDir bool
- if info.Mode()&os.ModeSymlink != 0 {
- // ioutil.ReadDir only returns result of LStat. Calling Stat resolves symlink.
- path := filepath.Join(root, info.Name())
- info, err = os.Stat(path)
+ if entry.Type()&os.ModeSymlink != 0 {
+ // os.ReadDir only returns result of LStat. Calling Stat resolves symlink.
+ path := filepath.Join(root, entry.Name())
+ fi, err := os.Stat(path)
if err != nil {
return nil, errors.Wrapf(err, "unable to stat rule file %s", path)
}
+ isDir = fi.IsDir()
+ } else {
+ isDir = entry.IsDir()
}
- if info.IsDir() {
+ if isDir {
continue
}
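
The `rulestore/local` hunk above is one place where the migration changes more than an identifier: `os.ReadDir` returns `[]fs.DirEntry` built from `Lstat`, so a symlinked user or namespace directory must be resolved with `os.Stat` before the `IsDir` check. A self-contained sketch of that pattern (illustrative only, assuming a flat directory of entries):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// isDir reports whether entry (found directly under root) is a directory,
// resolving one level of symlink the way the rule store code above does.
func isDir(root string, entry os.DirEntry) (bool, error) {
	if entry.Type()&os.ModeSymlink != 0 {
		// os.ReadDir only returns Lstat results; Stat follows the symlink.
		fi, err := os.Stat(filepath.Join(root, entry.Name()))
		if err != nil {
			return false, err
		}
		return fi.IsDir(), nil
	}
	return entry.IsDir(), nil
}

func main() {
	root := "."
	entries, err := os.ReadDir(root)
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		d, err := isDir(root, entry)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s dir=%v\n", entry.Name(), d)
	}
}
```
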
diff --git a/pkg/ruler/rulestore/local/local_test.go b/pkg/ruler/rulestore/local/local_test.go
index e006a88cef..8922781a20 100644
--- a/pkg/ruler/rulestore/local/local_test.go
+++ b/pkg/ruler/rulestore/local/local_test.go
@@ -2,7 +2,6 @@ package local
import (
"context"
- "io/ioutil"
"os"
"path"
"testing"
@@ -51,7 +50,7 @@ func TestClient_LoadAllRuleGroups(t *testing.T) {
err = os.Symlink(user1, path.Join(dir, user2))
require.NoError(t, err)
- err = ioutil.WriteFile(path.Join(dir, user1, namespace1), b, 0777)
+ err = os.WriteFile(path.Join(dir, user1, namespace1), b, 0777)
require.NoError(t, err)
const ignoredDir = "ignored-dir"
diff --git a/pkg/ruler/rulestore/objectclient/rule_store.go b/pkg/ruler/rulestore/objectclient/rule_store.go
index a1fcab657e..8e7effbcaa 100644
--- a/pkg/ruler/rulestore/objectclient/rule_store.go
+++ b/pkg/ruler/rulestore/objectclient/rule_store.go
@@ -5,7 +5,7 @@ import (
"context"
"encoding/base64"
"fmt"
- "io/ioutil"
+ "io"
"strings"
"github.com/go-kit/log"
@@ -63,7 +63,7 @@ func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string, rg *rule
}
defer func() { _ = reader.Close() }()
- buf, err := ioutil.ReadAll(reader)
+ buf, err := io.ReadAll(reader)
if err != nil {
return nil, errors.Wrapf(err, "failed to read rule group %s", objectKey)
}
diff --git a/pkg/storage/chunk/client/aws/mock.go b/pkg/storage/chunk/client/aws/mock.go
index 3d2dfa576d..341ebafd29 100644
--- a/pkg/storage/chunk/client/aws/mock.go
+++ b/pkg/storage/chunk/client/aws/mock.go
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
- "io/ioutil"
+ "io"
"sort"
"strings"
"sync"
@@ -404,7 +404,7 @@ func (m *mockS3) PutObjectWithContext(_ aws.Context, req *s3.PutObjectInput, _ .
m.Lock()
defer m.Unlock()
- buf, err := ioutil.ReadAll(req.Body)
+ buf, err := io.ReadAll(req.Body)
if err != nil {
return nil, err
}
@@ -423,6 +423,6 @@ func (m *mockS3) GetObjectWithContext(_ aws.Context, req *s3.GetObjectInput, _ .
}
return &s3.GetObjectOutput{
- Body: ioutil.NopCloser(bytes.NewReader(buf)),
+ Body: io.NopCloser(bytes.NewReader(buf)),
}, nil
}
diff --git a/pkg/storage/chunk/client/cassandra/storage_client.go b/pkg/storage/chunk/client/cassandra/storage_client.go
index 646ef1c866..5cb4b45c08 100644
--- a/pkg/storage/chunk/client/cassandra/storage_client.go
+++ b/pkg/storage/chunk/client/cassandra/storage_client.go
@@ -6,7 +6,7 @@ import (
"crypto/tls"
"flag"
"fmt"
- "io/ioutil"
+ "os"
"strings"
"time"
@@ -202,7 +202,7 @@ func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) error {
if cfg.Auth {
password := cfg.Password.String()
if cfg.PasswordFile != "" {
- passwordBytes, err := ioutil.ReadFile(cfg.PasswordFile)
+ passwordBytes, err := os.ReadFile(cfg.PasswordFile)
if err != nil {
return errors.Errorf("Could not read Cassandra password file: %v", err)
}
diff --git a/pkg/storage/chunk/client/local/fixtures.go b/pkg/storage/chunk/client/local/fixtures.go
index 73ffdad1cb..757d858a34 100644
--- a/pkg/storage/chunk/client/local/fixtures.go
+++ b/pkg/storage/chunk/client/local/fixtures.go
@@ -2,7 +2,6 @@ package local
import (
"io"
- "io/ioutil"
"os"
"time"
@@ -27,7 +26,7 @@ func (f *fixture) Clients() (
indexClient index.Client, chunkClient client.Client, tableClient index.TableClient,
schemaConfig config.SchemaConfig, closer io.Closer, err error,
) {
- f.dirname, err = ioutil.TempDir(os.TempDir(), "boltdb")
+ f.dirname, err = os.MkdirTemp(os.TempDir(), "boltdb")
if err != nil {
return
}
diff --git a/pkg/storage/chunk/client/local/fs_object_client_test.go b/pkg/storage/chunk/client/local/fs_object_client_test.go
index 56f601eec2..1f8c976f82 100644
--- a/pkg/storage/chunk/client/local/fs_object_client_test.go
+++ b/pkg/storage/chunk/client/local/fs_object_client_test.go
@@ -3,7 +3,6 @@ package local
import (
"bytes"
"context"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -41,12 +40,12 @@ func TestFSObjectClient_DeleteChunksBefore(t *testing.T) {
require.NoError(t, f.Close())
// Verify whether all files are created
- files, _ := ioutil.ReadDir(".")
+ files, _ := os.ReadDir(".")
require.Equal(t, 2, len(files), "Number of files should be 2")
// No files should be deleted, since all of them are not much older
require.NoError(t, bucketClient.DeleteChunksBefore(context.Background(), time.Now().Add(-deleteFilesOlderThan)))
- files, _ = ioutil.ReadDir(".")
+ files, _ = os.ReadDir(".")
require.Equal(t, 2, len(files), "Number of files should be 2")
// Changing mtime of file1 to make it look older
@@ -54,7 +53,7 @@ func TestFSObjectClient_DeleteChunksBefore(t *testing.T) {
require.NoError(t, bucketClient.DeleteChunksBefore(context.Background(), time.Now().Add(-deleteFilesOlderThan)))
// Verifying whether older file got deleted
- files, _ = ioutil.ReadDir(".")
+ files, _ = os.ReadDir(".")
require.Equal(t, 1, len(files), "Number of files should be 1 after enforcing retention")
}
diff --git a/pkg/storage/chunk/client/openstack/swift_object_client.go b/pkg/storage/chunk/client/openstack/swift_object_client.go
index f19521d33f..e020e29ea4 100644
--- a/pkg/storage/chunk/client/openstack/swift_object_client.go
+++ b/pkg/storage/chunk/client/openstack/swift_object_client.go
@@ -6,7 +6,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"net/http"
"time"
@@ -132,7 +131,7 @@ func (s *SwiftObjectClient) GetObject(ctx context.Context, objectKey string) (io
return nil, 0, err
}
- return ioutil.NopCloser(&buf), int64(buf.Len()), nil
+ return io.NopCloser(&buf), int64(buf.Len()), nil
}
// PutObject puts the specified bytes into the configured Swift container at the provided key
diff --git a/pkg/storage/chunk/client/testutils/inmemory_storage_client.go b/pkg/storage/chunk/client/testutils/inmemory_storage_client.go
index ac7da04d7f..4581cdb6c9 100644
--- a/pkg/storage/chunk/client/testutils/inmemory_storage_client.go
+++ b/pkg/storage/chunk/client/testutils/inmemory_storage_client.go
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"sort"
"strings"
"sync"
@@ -435,11 +434,11 @@ func (m *MockStorage) GetObject(ctx context.Context, objectKey string) (io.ReadC
return nil, 0, errStorageObjectNotFound
}
- return ioutil.NopCloser(bytes.NewReader(buf)), int64(len(buf)), nil
+ return io.NopCloser(bytes.NewReader(buf)), int64(len(buf)), nil
}
func (m *MockStorage) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
- buf, err := ioutil.ReadAll(object)
+ buf, err := io.ReadAll(object)
if err != nil {
return err
}
diff --git a/pkg/storage/stores/indexshipper/compactor/compactor_test.go b/pkg/storage/stores/indexshipper/compactor/compactor_test.go
index 5b5a3031d1..b18574cfa1 100644
--- a/pkg/storage/stores/indexshipper/compactor/compactor_test.go
+++ b/pkg/storage/stores/indexshipper/compactor/compactor_test.go
@@ -3,7 +3,7 @@ package compactor
import (
"context"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"strings"
"testing"
@@ -139,7 +139,7 @@ func TestCompactor_RunCompaction(t *testing.T) {
for i := tableNumStart; i <= tableNumEnd; i++ {
name := fmt.Sprintf("%s%d", indexTablePrefix, i)
// verify that we have only 1 file left in storage after compaction.
- files, err := ioutil.ReadDir(filepath.Join(tablesPath, name))
+ files, err := os.ReadDir(filepath.Join(tablesPath, name))
require.NoError(t, err)
require.Len(t, files, 1)
require.True(t, strings.HasSuffix(files[0].Name(), ".gz"))
diff --git a/pkg/storage/stores/indexshipper/compactor/deletion/delete_requests_client.go b/pkg/storage/stores/indexshipper/compactor/deletion/delete_requests_client.go
index 34c2c1f19e..3e8639e50c 100644
--- a/pkg/storage/stores/indexshipper/compactor/deletion/delete_requests_client.go
+++ b/pkg/storage/stores/indexshipper/compactor/deletion/delete_requests_client.go
@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"sync"
@@ -167,7 +166,7 @@ func (c *deleteRequestsClient) getRequestsFromServer(ctx context.Context, userID
return nil, err
}
defer func() {
- _, _ = io.Copy(ioutil.Discard, resp.Body)
+ _, _ = io.Copy(io.Discard, resp.Body)
_ = resp.Body.Close()
}()
diff --git a/pkg/storage/stores/indexshipper/compactor/retention/marker.go b/pkg/storage/stores/indexshipper/compactor/retention/marker.go
index b6a2a795dd..662e6a2eec 100644
--- a/pkg/storage/stores/indexshipper/compactor/retention/marker.go
+++ b/pkg/storage/stores/indexshipper/compactor/retention/marker.go
@@ -5,7 +5,6 @@ import (
"encoding/binary"
"fmt"
"io/fs"
- "io/ioutil"
"os"
"path/filepath"
"sort"
@@ -253,7 +252,7 @@ func (r *markerProcessor) processPath(path string, deleteFunc func(ctx context.C
queue = make(chan *keyPair)
)
// we use a copy to view the file so that we can read and update at the same time.
- viewFile, err := ioutil.TempFile("/tmp/", "marker-view-")
+ viewFile, err := os.CreateTemp("/tmp/", "marker-view-")
if err != nil {
return err
}
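`os.CreateTemp` is the Go 1.16 home of `ioutil.TempFile`, with the same `(dir, pattern)` signature; an empty `dir` falls back to `os.TempDir()`. A minimal sketch, including the cleanup the caller remains responsible for:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Create a temp file in the default temp dir; a "*" in the pattern, if
	// present, is replaced by the random component, otherwise the random
	// component is appended.
	f, err := os.CreateTemp("", "marker-view-")
	if err != nil {
		panic(err)
	}
	// The caller owns the file: remove and close it when done.
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString("scratch data"); err != nil {
		panic(err)
	}
	fmt.Println("wrote", f.Name())
}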
diff --git a/pkg/storage/stores/indexshipper/compactor/table_test.go b/pkg/storage/stores/indexshipper/compactor/table_test.go
index 8ce9a54335..3c93ad0929 100644
--- a/pkg/storage/stores/indexshipper/compactor/table_test.go
+++ b/pkg/storage/stores/indexshipper/compactor/table_test.go
@@ -3,7 +3,6 @@ package compactor
import (
"context"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -303,7 +302,7 @@ func TestTable_CompactionRetention(t *testing.T) {
"emptied table": {
dbsSetup: setup,
assert: func(t *testing.T, storagePath, tableName string) {
- _, err := ioutil.ReadDir(filepath.Join(storagePath, tableName))
+ _, err := os.ReadDir(filepath.Join(storagePath, tableName))
require.True(t, os.IsNotExist(err))
},
tableMarker: TableMarkerFunc(func(ctx context.Context, tableName, userID string, indexFile retention.IndexProcessor, logger log.Logger) (bool, bool, error) {
@@ -411,14 +410,14 @@ func validateTable(t *testing.T, path string, expectedNumCommonDBs, numUsers int
}
func listDir(t *testing.T, path string) (files, folders []string) {
- filesInfo, err := ioutil.ReadDir(path)
+ dirEntries, err := os.ReadDir(path)
require.NoError(t, err)
- for _, fileInfo := range filesInfo {
- if fileInfo.IsDir() {
- folders = append(folders, fileInfo.Name())
+ for _, entry := range dirEntries {
+ if entry.IsDir() {
+ folders = append(folders, entry.Name())
} else {
- files = append(files, fileInfo.Name())
+ files = append(files, entry.Name())
}
}
@@ -446,7 +445,7 @@ func TestTable_CompactionFailure(t *testing.T) {
SetupTable(t, filepath.Join(objectStoragePath, tableName), IndexesConfig{NumCompactedFiles: numDBs}, PerUserIndexesConfig{})
// put a corrupt gz file in the table, which should cause the compaction to fail midway because the boltdb client would fail to open that file.
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, "fail.gz"), []byte("fail the compaction"), 0o666))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, "fail.gz"), []byte("fail the compaction"), 0o666))
// do the compaction
objectClient, err := local.NewFSObjectClient(local.FSConfig{Directory: objectStoragePath})
@@ -460,7 +459,7 @@ func TestTable_CompactionFailure(t *testing.T) {
require.Error(t, table.compact(false))
// ensure that files in storage are intact.
- files, err := ioutil.ReadDir(tablePathInStorage)
+ files, err := os.ReadDir(tablePathInStorage)
require.NoError(t, err)
require.Len(t, files, numDBs+1)
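`os.ReadDir` returns a lexically sorted `[]os.DirEntry` instead of `ioutil.ReadDir`'s `[]os.FileInfo`; for the `Name()`/`IsDir()` checks in these tests it is a drop-in change. A sketch of the `listDir`-style split into files and folders (the standalone helper here is illustrative):

package main

import (
	"fmt"
	"os"
)

// listDir splits a directory's entries into plain files and sub-folders, the
// same shape as the test helper above. Entries come back sorted by name.
func listDir(path string) (files, folders []string, err error) {
	entries, err := os.ReadDir(path)
	if err != nil {
		return nil, nil, err
	}
	for _, entry := range entries {
		if entry.IsDir() {
			folders = append(folders, entry.Name())
		} else {
			files = append(files, entry.Name())
		}
	}
	return files, folders, nil
}

func main() {
	files, folders, err := listDir(".")
	if err != nil {
		panic(err)
	}
	fmt.Println("files:", files)
	fmt.Println("folders:", folders)
}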
diff --git a/pkg/storage/stores/indexshipper/compactor/testutil.go b/pkg/storage/stores/indexshipper/compactor/testutil.go
index 4265fd94c8..cf2b6d15e5 100644
--- a/pkg/storage/stores/indexshipper/compactor/testutil.go
+++ b/pkg/storage/stores/indexshipper/compactor/testutil.go
@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"sort"
@@ -82,7 +81,7 @@ func SetupTable(t *testing.T, path string, commonDBsConfig IndexesConfig, perUse
idx := 0
for filename, content := range commonIndexes {
filePath := filepath.Join(path, strings.TrimSuffix(filename, ".gz"))
- require.NoError(t, ioutil.WriteFile(filePath, []byte(content), 0777))
+ require.NoError(t, os.WriteFile(filePath, []byte(content), 0777))
if strings.HasSuffix(filename, ".gz") {
compressFile(t, filePath)
}
@@ -93,7 +92,7 @@ func SetupTable(t *testing.T, path string, commonDBsConfig IndexesConfig, perUse
require.NoError(t, util.EnsureDirectory(filepath.Join(path, userID)))
for filename, content := range files {
filePath := filepath.Join(path, userID, strings.TrimSuffix(filename, ".gz"))
- require.NoError(t, ioutil.WriteFile(filePath, []byte(content), 0777))
+ require.NoError(t, os.WriteFile(filePath, []byte(content), 0777))
if strings.HasSuffix(filename, ".gz") {
compressFile(t, filePath)
}
@@ -349,15 +348,15 @@ func (i testIndexCompactor) OpenCompactedIndexFile(_ context.Context, path, _, _
func verifyCompactedIndexTable(t *testing.T, commonDBsConfig IndexesConfig, perUserDBsConfig PerUserIndexesConfig, tablePathInStorage string) {
commonIndexes, perUserIndexes := buildFilesContent(commonDBsConfig, perUserDBsConfig)
- filesInfo, err := ioutil.ReadDir(tablePathInStorage)
+ dirEntries, err := os.ReadDir(tablePathInStorage)
require.NoError(t, err)
files, folders := []string{}, []string{}
- for _, fileInfo := range filesInfo {
- if fileInfo.IsDir() {
- folders = append(folders, fileInfo.Name())
+ for _, entry := range dirEntries {
+ if entry.IsDir() {
+ folders = append(folders, entry.Name())
} else {
- files = append(files, fileInfo.Name())
+ files = append(files, entry.Name())
}
}
@@ -401,12 +400,12 @@ func verifyCompactedIndexTable(t *testing.T, commonDBsConfig IndexesConfig, perU
require.Len(t, folders, len(expectedUserIndexContent), fmt.Sprintf("%v", commonIndexes))
for _, userID := range folders {
- filesInfo, err := ioutil.ReadDir(filepath.Join(tablePathInStorage, userID))
+ entries, err := os.ReadDir(filepath.Join(tablePathInStorage, userID))
require.NoError(t, err)
- require.Len(t, filesInfo, 1)
- require.False(t, filesInfo[0].IsDir())
+ require.Len(t, entries, 1)
+ require.False(t, entries[0].IsDir())
sort.Strings(expectedUserIndexContent[userID])
- require.Equal(t, strings.Join(expectedUserIndexContent[userID], ""), string(readFile(t, filepath.Join(tablePathInStorage, userID, filesInfo[0].Name()))))
+ require.Equal(t, strings.Join(expectedUserIndexContent[userID], ""), string(readFile(t, filepath.Join(tablePathInStorage, userID, entries[0].Name()))))
}
}
@@ -418,7 +417,7 @@ func readFile(t *testing.T, path string) []byte {
path = decompressedFilePath
}
- fileContent, err := ioutil.ReadFile(path)
+ fileContent, err := os.ReadFile(path)
require.NoError(t, err)
return fileContent
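`os.WriteFile` behaves like `ioutil.WriteFile`: it creates the file with the given permission bits (subject to umask) if it does not exist and truncates it otherwise. A small sketch of the write-then-read round trip these test helpers rely on (paths are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "writefile-demo-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "index")
	// 0o666 before umask, matching the permission style used in the tests above.
	if err := os.WriteFile(path, []byte("index content"), 0o666); err != nil {
		panic(err)
	}

	b, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}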
diff --git a/pkg/storage/stores/indexshipper/downloads/index_set.go b/pkg/storage/stores/indexshipper/downloads/index_set.go
index 7b80d5d6dd..7e3635121e 100644
--- a/pkg/storage/stores/indexshipper/downloads/index_set.go
+++ b/pkg/storage/stores/indexshipper/downloads/index_set.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -113,18 +112,18 @@ func (t *indexSet) Init(forQuerying bool) (err error) {
t.indexMtx.markReady()
}()
- filesInfo, err := ioutil.ReadDir(t.cacheLocation)
+ dirEntries, err := os.ReadDir(t.cacheLocation)
if err != nil {
return err
}
// open all the locally present files first to avoid downloading them again during sync operation below.
- for _, fileInfo := range filesInfo {
- if fileInfo.IsDir() {
+ for _, entry := range dirEntries {
+ if entry.IsDir() {
continue
}
- fullPath := filepath.Join(t.cacheLocation, fileInfo.Name())
+ fullPath := filepath.Join(t.cacheLocation, entry.Name())
// if we fail to open an index file, let's skip it and let the sync operation re-download the file from storage.
idx, err := t.openIndexFileFunc(fullPath)
if err != nil {
@@ -138,7 +137,7 @@ func (t *indexSet) Init(forQuerying bool) (err error) {
continue
}
- t.index[fileInfo.Name()] = idx
+ t.index[entry.Name()] = idx
}
level.Debug(logger).Log("msg", fmt.Sprintf("opened %d local files, now starting sync operation", len(t.index)))
diff --git a/pkg/storage/stores/indexshipper/downloads/index_set_test.go b/pkg/storage/stores/indexshipper/downloads/index_set_test.go
index 173cba4b38..722b9ab1cb 100644
--- a/pkg/storage/stores/indexshipper/downloads/index_set_test.go
+++ b/pkg/storage/stores/indexshipper/downloads/index_set_test.go
@@ -3,7 +3,6 @@ package downloads
import (
"context"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -135,14 +134,14 @@ func TestIndexSet_Sync(t *testing.T) {
// first, let us add a new file and refresh the index list cache
oneMoreDB := "one-more-db"
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
indexSet.baseIndexSet.RefreshIndexListCache(context.Background())
// now, without syncing the indexset, let us compact the index in storage
compactedDBName := "compacted-db"
require.NoError(t, os.RemoveAll(tablePathInStorage))
require.NoError(t, util.EnsureDirectory(tablePathInStorage))
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
indexesSetup = []string{compactedDBName}
// verify that we are getting errIndexListCacheTooStale without refreshing the list cache
diff --git a/pkg/storage/stores/indexshipper/downloads/table.go b/pkg/storage/stores/indexshipper/downloads/table.go
index 911c82ace5..6b148f8917 100644
--- a/pkg/storage/stores/indexshipper/downloads/table.go
+++ b/pkg/storage/stores/indexshipper/downloads/table.go
@@ -3,7 +3,7 @@ package downloads
import (
"context"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"sync"
"time"
@@ -76,7 +76,7 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, openInd
return nil, err
}
- filesInfo, err := ioutil.ReadDir(cacheLocation)
+ dirEntries, err := os.ReadDir(cacheLocation)
if err != nil {
return nil, err
}
@@ -93,15 +93,15 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, openInd
metrics: metrics,
}
- level.Debug(table.logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(filesInfo))
+ level.Debug(table.logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(dirEntries))
// common index files are outside the directories and user index files are in the directories
- for _, fileInfo := range filesInfo {
- if !fileInfo.IsDir() {
+ for _, entry := range dirEntries {
+ if !entry.IsDir() {
continue
}
- userID := fileInfo.Name()
+ userID := entry.Name()
userIndexSet, err := NewIndexSet(name, userID, filepath.Join(cacheLocation, userID),
table.baseUserIndexSet, openIndexFileFunc, loggerWithUserID(table.logger, userID))
if err != nil {
diff --git a/pkg/storage/stores/indexshipper/downloads/table_manager.go b/pkg/storage/stores/indexshipper/downloads/table_manager.go
index 32ca8a2019..016fa71071 100644
--- a/pkg/storage/stores/indexshipper/downloads/table_manager.go
+++ b/pkg/storage/stores/indexshipper/downloads/table_manager.go
@@ -3,7 +3,7 @@ package downloads
import (
"context"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"regexp"
"strconv"
@@ -373,33 +373,33 @@ func (tm *tableManager) findUsersInTableForQueryReadiness(tableNumber int64, use
// loadLocalTables loads tables present locally.
func (tm *tableManager) loadLocalTables() error {
- filesInfo, err := ioutil.ReadDir(tm.cfg.CacheDir)
+ dirEntries, err := os.ReadDir(tm.cfg.CacheDir)
if err != nil {
return err
}
- for _, fileInfo := range filesInfo {
- if !fileInfo.IsDir() {
+ for _, entry := range dirEntries {
+ if !entry.IsDir() {
continue
}
- tableNumber, err := extractTableNumberFromName(fileInfo.Name())
+ tableNumber, err := extractTableNumberFromName(entry.Name())
if err != nil {
return err
}
- if tableNumber == -1 || !tm.tableRangesToHandle.TableInRange(tableNumber, fileInfo.Name()) {
+ if tableNumber == -1 || !tm.tableRangesToHandle.TableInRange(tableNumber, entry.Name()) {
continue
}
- level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading local table %s", fileInfo.Name()))
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading local table %s", entry.Name()))
- table, err := LoadTable(fileInfo.Name(), filepath.Join(tm.cfg.CacheDir, fileInfo.Name()),
+ table, err := LoadTable(entry.Name(), filepath.Join(tm.cfg.CacheDir, entry.Name()),
tm.indexStorageClient, tm.openIndexFileFunc, tm.metrics)
if err != nil {
return err
}
- tm.tables[fileInfo.Name()] = table
+ tm.tables[entry.Name()] = table
}
return nil
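One behavioural difference to keep in mind: `os.DirEntry` does not expose `Size` or `ModTime` directly. Callers such as `loadLocalTables` above only need `Name()` and `IsDir()`, so nothing changes; where file metadata is needed, `entry.Info()` fetches an `os.FileInfo` lazily, as in this illustrative sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir(".")
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		// DirEntry exposes Name/IsDir/Type cheaply; Info() stats the file only
		// when size or modification time is actually required.
		info, err := entry.Info()
		if err != nil {
			// The file may have been removed between ReadDir and Info.
			continue
		}
		fmt.Printf("%s\t%d bytes\t%s\n", info.Name(), info.Size(), info.ModTime().Format("2006-01-02"))
	}
}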
diff --git a/pkg/storage/stores/indexshipper/downloads/table_test.go b/pkg/storage/stores/indexshipper/downloads/table_test.go
index 34c726f7fc..043e878f7a 100644
--- a/pkg/storage/stores/indexshipper/downloads/table_test.go
+++ b/pkg/storage/stores/indexshipper/downloads/table_test.go
@@ -2,7 +2,7 @@ package downloads
import (
"context"
- "io/ioutil"
+ "io"
"os"
"path/filepath"
"sort"
@@ -301,8 +301,8 @@ func TestTable_Sync(t *testing.T) {
newDB := "new"
require.NoError(t, os.MkdirAll(tablePathInStorage, 0755))
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, deleteDB), []byte(deleteDB), 0755))
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, noUpdatesDB), []byte(noUpdatesDB), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, deleteDB), []byte(deleteDB), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, noUpdatesDB), []byte(noUpdatesDB), 0755))
// create table instance
table, stopFunc := buildTestTable(t, tempDir)
@@ -326,7 +326,7 @@ func TestTable_Sync(t *testing.T) {
// remove deleteDB and add the newDB
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, deleteDB)))
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, newDB), []byte(newDB), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, newDB), []byte(newDB), 0755))
// sync the table
table.storageClient.RefreshIndexListCache(context.Background())
@@ -347,13 +347,13 @@ func TestTable_Sync(t *testing.T) {
noUpdatesDB: {},
newDB: {},
}
- filesInfo, err := ioutil.ReadDir(tablePathInStorage)
+ dirEntries, err := os.ReadDir(tablePathInStorage)
require.NoError(t, err)
require.Len(t, table.indexSets[""].(*indexSet).index, len(expectedFilesInDir))
- for _, fileInfo := range filesInfo {
- require.False(t, fileInfo.IsDir())
- _, ok := expectedFilesInDir[fileInfo.Name()]
+ for _, entry := range dirEntries {
+ require.False(t, entry.IsDir())
+ _, ok := expectedFilesInDir[entry.Name()]
require.True(t, ok)
}
@@ -361,12 +361,12 @@ func TestTable_Sync(t *testing.T) {
// first, let us add a new file and refresh the index list cache
oneMoreDB := "one-more-db"
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
table.storageClient.RefreshIndexListCache(context.Background())
// now, without syncing the table, let us compact the index in storage
compactedDBName := "compacted-db"
- require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
+ require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, noUpdatesDB)))
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, newDB)))
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, oneMoreDB)))
@@ -461,7 +461,7 @@ func verifyIndexForEach(t *testing.T, expectedIndexes []string, forEachFunc func
require.NoError(t, err)
// read the contents of the index.
- buf, err := ioutil.ReadAll(readSeeker)
+ buf, err := io.ReadAll(readSeeker)
require.NoError(t, err)
// see if it matches the name of the file
diff --git a/pkg/storage/stores/indexshipper/downloads/testutil.go b/pkg/storage/stores/indexshipper/downloads/testutil.go
index 2e8b0d4690..4f30762bdd 100644
--- a/pkg/storage/stores/indexshipper/downloads/testutil.go
+++ b/pkg/storage/stores/indexshipper/downloads/testutil.go
@@ -2,7 +2,6 @@ package downloads
import (
"io"
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -40,7 +39,7 @@ func setupIndexesAtPath(t *testing.T, userID, path string, start, end int) []str
fileName := buildIndexFilename(userID, start)
indexPath := filepath.Join(path, fileName)
- require.NoError(t, ioutil.WriteFile(indexPath, []byte(fileName), 0755))
+ require.NoError(t, os.WriteFile(indexPath, []byte(fileName), 0755))
testIndexes = append(testIndexes, indexPath)
}
diff --git a/pkg/storage/stores/indexshipper/storage/client_test.go b/pkg/storage/stores/indexshipper/storage/client_test.go
index 991aebce5c..dd080e8e55 100644
--- a/pkg/storage/stores/indexshipper/storage/client_test.go
+++ b/pkg/storage/stores/indexshipper/storage/client_test.go
@@ -3,7 +3,8 @@ package storage
import (
"bytes"
"context"
- "io/ioutil"
+ "io"
+ "os"
"path/filepath"
"testing"
@@ -28,7 +29,7 @@ func TestIndexStorageClient(t *testing.T) {
for tableName, files := range tablesToSetup {
require.NoError(t, util.EnsureDirectory(filepath.Join(tempDir, storageKeyPrefix, tableName)))
for _, file := range files {
- err := ioutil.WriteFile(filepath.Join(tempDir, storageKeyPrefix, tableName, file), []byte(tableName+file), 0o666)
+ err := os.WriteFile(filepath.Join(tempDir, storageKeyPrefix, tableName, file), []byte(tableName+file), 0o666)
require.NoError(t, err)
}
}
@@ -53,7 +54,7 @@ func TestIndexStorageClient(t *testing.T) {
readCloser, err := indexStorageClient.GetFile(context.Background(), table, fileInStorage.Name)
require.NoError(t, err)
- b, err := ioutil.ReadAll(readCloser)
+ b, err := io.ReadAll(readCloser)
require.NoError(t, readCloser.Close())
require.NoError(t, err)
require.EqualValues(t, []byte(table+fileInStorage.Name), b)
diff --git a/pkg/storage/stores/indexshipper/storage/util_test.go b/pkg/storage/stores/indexshipper/storage/util_test.go
index 6dfa71a2df..3136d553c8 100644
--- a/pkg/storage/stores/indexshipper/storage/util_test.go
+++ b/pkg/storage/stores/indexshipper/storage/util_test.go
@@ -3,7 +3,6 @@ package storage
import (
"context"
"io"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -23,7 +22,7 @@ func Test_GetFileFromStorage(t *testing.T) {
testData := []byte("test-data")
tableName := "test-table"
require.NoError(t, util.EnsureDirectory(filepath.Join(tempDir, tableName)))
- require.NoError(t, ioutil.WriteFile(filepath.Join(tempDir, tableName, "src"), testData, 0o666))
+ require.NoError(t, os.WriteFile(filepath.Join(tempDir, tableName, "src"), testData, 0o666))
// try downloading the file from the storage.
objectClient, err := local.NewFSObjectClient(local.FSConfig{Directory: tempDir})
@@ -37,7 +36,7 @@ func Test_GetFileFromStorage(t *testing.T) {
}))
// verify the contents of the downloaded file.
- b, err := ioutil.ReadFile(filepath.Join(tempDir, "dest"))
+ b, err := os.ReadFile(filepath.Join(tempDir, "dest"))
require.NoError(t, err)
require.Equal(t, testData, b)
@@ -52,7 +51,7 @@ func Test_GetFileFromStorage(t *testing.T) {
}))
// verify the contents of the downloaded gz file.
- b, err = ioutil.ReadFile(filepath.Join(tempDir, "dest.gz"))
+ b, err = os.ReadFile(filepath.Join(tempDir, "dest.gz"))
require.NoError(t, err)
require.Equal(t, testData, b)
diff --git a/pkg/storage/stores/indexshipper/uploads/index_set_test.go b/pkg/storage/stores/indexshipper/uploads/index_set_test.go
index fb8e231913..15da352f42 100644
--- a/pkg/storage/stores/indexshipper/uploads/index_set_test.go
+++ b/pkg/storage/stores/indexshipper/uploads/index_set_test.go
@@ -2,7 +2,8 @@ package uploads
import (
"context"
- "io/ioutil"
+ "io"
+ "os"
"path/filepath"
"testing"
"time"
@@ -72,7 +73,7 @@ func TestIndexSet_Upload(t *testing.T) {
// compare the contents of created test index and uploaded index in storage
_, err = testIndex.Seek(0, 0)
require.NoError(t, err)
- expectedIndexContent, err := ioutil.ReadAll(testIndex.File)
+ expectedIndexContent, err := io.ReadAll(testIndex.File)
require.NoError(t, err)
require.Equal(t, expectedIndexContent, readCompressedFile(t, indexPathInStorage))
}
@@ -156,7 +157,7 @@ func readCompressedFile(t *testing.T, path string) []byte {
decompressedFilePath := filepath.Join(tempDir, "decompressed")
testutil.DecompressFile(t, path, decompressedFilePath)
- fileContent, err := ioutil.ReadFile(decompressedFilePath)
+ fileContent, err := os.ReadFile(decompressedFilePath)
require.NoError(t, err)
return fileContent
diff --git a/pkg/storage/stores/shipper/index/compactor/table_compactor_test.go b/pkg/storage/stores/shipper/index/compactor/table_compactor_test.go
index dc157b6cdc..0d13515681 100644
--- a/pkg/storage/stores/shipper/index/compactor/table_compactor_test.go
+++ b/pkg/storage/stores/shipper/index/compactor/table_compactor_test.go
@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"io/fs"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -523,7 +522,7 @@ func compareCompactedTable(t *testing.T, srcTable string, tableCompactor *tableC
func readIndexFromFiles(t *testing.T, tablePath string) map[string]map[string]string {
tempDir := t.TempDir()
- filesInfo, err := ioutil.ReadDir(tablePath)
+ dirEntries, err := os.ReadDir(tablePath)
if err != nil && os.IsNotExist(err) {
return map[string]map[string]string{}
}
@@ -531,15 +530,15 @@ func readIndexFromFiles(t *testing.T, tablePath string) map[string]map[string]st
dbRecords := make(map[string]map[string]string)
- for _, fileInfo := range filesInfo {
- if fileInfo.IsDir() {
+ for _, entry := range dirEntries {
+ if entry.IsDir() {
continue
}
- filePath := filepath.Join(tablePath, fileInfo.Name())
+ filePath := filepath.Join(tablePath, entry.Name())
if strings.HasSuffix(filePath, ".gz") {
- filePath = filepath.Join(tempDir, fileInfo.Name())
- testutil.DecompressFile(t, filepath.Join(tablePath, fileInfo.Name()), filePath)
+ filePath = filepath.Join(tempDir, entry.Name())
+ testutil.DecompressFile(t, filepath.Join(tablePath, entry.Name()), filePath)
}
db, err := openBoltdbFileWithNoSync(filePath)
diff --git a/pkg/storage/stores/shipper/index/compactor/util_test.go b/pkg/storage/stores/shipper/index/compactor/util_test.go
index e9aefe320a..880d134ba4 100644
--- a/pkg/storage/stores/shipper/index/compactor/util_test.go
+++ b/pkg/storage/stores/shipper/index/compactor/util_test.go
@@ -3,7 +3,7 @@ package compactor
import (
"context"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"sort"
"testing"
@@ -142,12 +142,12 @@ type table struct {
func (t *testStore) indexTables() []table {
t.t.Helper()
res := []table{}
- indexFilesInfo, err := ioutil.ReadDir(t.indexDir)
+ dirEntries, err := os.ReadDir(t.indexDir)
require.NoError(t.t, err)
- for _, indexFileInfo := range indexFilesInfo {
- db, err := shipper_util.SafeOpenBoltdbFile(filepath.Join(t.indexDir, indexFileInfo.Name()))
+ for _, entry := range dirEntries {
+ db, err := shipper_util.SafeOpenBoltdbFile(filepath.Join(t.indexDir, entry.Name()))
require.NoError(t.t, err)
- res = append(res, table{name: indexFileInfo.Name(), DB: db})
+ res = append(res, table{name: entry.Name(), DB: db})
}
return res
}
diff --git a/pkg/storage/stores/shipper/index/table.go b/pkg/storage/stores/shipper/index/table.go
index 3ee5d70876..10cde71017 100644
--- a/pkg/storage/stores/shipper/index/table.go
+++ b/pkg/storage/stores/shipper/index/table.go
@@ -3,7 +3,6 @@ package index
import (
"context"
"fmt"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -351,18 +350,18 @@ func (lt *Table) buildFileName(dbName string) string {
func loadBoltDBsFromDir(dir string, metrics *metrics) (map[string]*bbolt.DB, error) {
dbs := map[string]*bbolt.DB{}
- filesInfo, err := ioutil.ReadDir(dir)
+ dirEntries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
- for _, fileInfo := range filesInfo {
- if fileInfo.IsDir() {
+ for _, entry := range dirEntries {
+ if entry.IsDir() {
continue
}
- fullPath := filepath.Join(dir, fileInfo.Name())
+ fullPath := filepath.Join(dir, entry.Name())
- if strings.HasSuffix(fileInfo.Name(), indexfile.TempFileSuffix) || strings.HasSuffix(fileInfo.Name(), snapshotFileSuffix) {
+ if strings.HasSuffix(entry.Name(), indexfile.TempFileSuffix) || strings.HasSuffix(entry.Name(), snapshotFileSuffix) {
// If an ingester is killed abruptly in the middle of an upload operation, it could leave behind a temp file holding the snapshot of the db being uploaded.
// Clean up those temp files to avoid problems.
if err := os.Remove(fullPath); err != nil {
@@ -395,7 +394,7 @@ func loadBoltDBsFromDir(dir string, metrics *metrics) (map[string]*bbolt.DB, err
continue
}
- dbs[fileInfo.Name()] = db
+ dbs[entry.Name()] = db
}
return dbs, nil
diff --git a/pkg/storage/stores/shipper/index/table_manager.go b/pkg/storage/stores/shipper/index/table_manager.go
index 4f82dbba87..0f13a3ee79 100644
--- a/pkg/storage/stores/shipper/index/table_manager.go
+++ b/pkg/storage/stores/shipper/index/table_manager.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"regexp"
@@ -184,7 +183,7 @@ func (tm *TableManager) handoverIndexesToShipper(force bool) {
func (tm *TableManager) loadTables() (map[string]*Table, error) {
localTables := make(map[string]*Table)
- filesInfo, err := ioutil.ReadDir(tm.cfg.IndexDir)
+ dirEntries, err := os.ReadDir(tm.cfg.IndexDir)
if err != nil {
return nil, err
}
@@ -195,16 +194,16 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
return nil, err
}
- for _, fileInfo := range filesInfo {
- if !re.MatchString(fileInfo.Name()) {
+ for _, entry := range dirEntries {
+ if !re.MatchString(entry.Name()) {
continue
}
// since we are moving to keeping files for the same table in a folder, if the current element is a file we need to move it inside a directory with the same name
// i.e. the file index_123 would be moved to the path index_123/index_123.
- if !fileInfo.IsDir() {
- level.Info(util_log.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", fileInfo.Name()))
- filePath := filepath.Join(tm.cfg.IndexDir, fileInfo.Name())
+ if !entry.IsDir() {
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", entry.Name()))
+ filePath := filepath.Join(tm.cfg.IndexDir, entry.Name())
// create a folder with a .temp suffix since we can't create a directory with the same name as the file.
tempDirPath := filePath + ".temp"
@@ -213,7 +212,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
}
// move the file to temp dir.
- if err := os.Rename(filePath, filepath.Join(tempDirPath, fileInfo.Name())); err != nil {
+ if err := os.Rename(filePath, filepath.Join(tempDirPath, entry.Name())); err != nil {
return nil, err
}
@@ -223,17 +222,17 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
}
}
- level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading table %s", fileInfo.Name()))
- table, err := LoadTable(filepath.Join(tm.cfg.IndexDir, fileInfo.Name()), tm.cfg.Uploader, tm.indexShipper, tm.cfg.MakePerTenantBuckets, tm.metrics)
+ level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading table %s", entry.Name()))
+ table, err := LoadTable(filepath.Join(tm.cfg.IndexDir, entry.Name()), tm.cfg.Uploader, tm.indexShipper, tm.cfg.MakePerTenantBuckets, tm.metrics)
if err != nil {
return nil, err
}
if table == nil {
// if table is nil it means it has no files in it so remove the folder for that table.
- err := os.Remove(filepath.Join(tm.cfg.IndexDir, fileInfo.Name()))
+ err := os.Remove(filepath.Join(tm.cfg.IndexDir, entry.Name()))
if err != nil {
- level.Error(util_log.Logger).Log("msg", "failed to remove empty table folder", "table", fileInfo.Name(), "err", err)
+ level.Error(util_log.Logger).Log("msg", "failed to remove empty table folder", "table", entry.Name(), "err", err)
}
continue
}
@@ -250,7 +249,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
return nil, err
}
- localTables[fileInfo.Name()] = table
+ localTables[entry.Name()] = table
}
return localTables, nil
diff --git a/pkg/storage/stores/shipper/index/table_test.go b/pkg/storage/stores/shipper/index/table_test.go
index 0e5f1a4e7d..155086eadd 100644
--- a/pkg/storage/stores/shipper/index/table_test.go
+++ b/pkg/storage/stores/shipper/index/table_test.go
@@ -3,7 +3,7 @@ package index
import (
"context"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"strconv"
"testing"
@@ -104,7 +104,7 @@ func TestLoadTable(t *testing.T) {
// change a boltdb file to a text file, which would fail to open.
invalidFilePath := filepath.Join(tablePath, "invalid")
- require.NoError(t, ioutil.WriteFile(invalidFilePath, []byte("invalid boltdb file"), 0o666))
+ require.NoError(t, os.WriteFile(invalidFilePath, []byte("invalid boltdb file"), 0o666))
// verify that changed boltdb file can't be opened.
_, err = local.OpenBoltdbFile(invalidFilePath)
@@ -120,9 +120,9 @@ func TestLoadTable(t *testing.T) {
}()
// verify that we still have 3 files (2 valid, 1 invalid)
- filesInfo, err := ioutil.ReadDir(tablePath)
+ dirEntries, err := os.ReadDir(tablePath)
require.NoError(t, err)
- require.Len(t, filesInfo, 3)
+ require.Len(t, dirEntries, 3)
// query the loaded table to see if it has right data.
require.NoError(t, table.Snapshot())
@@ -301,9 +301,9 @@ func Test_LoadBoltDBsFromDir(t *testing.T) {
require.NoError(t, boltdb.Close())
}
- filesInfo, err := ioutil.ReadDir(tablePath)
+ dirEntries, err := os.ReadDir(tablePath)
require.NoError(t, err)
- require.Len(t, filesInfo, 2)
+ require.Len(t, dirEntries, 2)
}
func TestTable_ImmutableUploads(t *testing.T) {
diff --git a/pkg/storage/stores/shipper/shipper_index_client.go b/pkg/storage/stores/shipper/shipper_index_client.go
index c2b62b278f..5d47c78cec 100644
--- a/pkg/storage/stores/shipper/shipper_index_client.go
+++ b/pkg/storage/stores/shipper/shipper_index_client.go
@@ -4,7 +4,6 @@ import (
"context"
"flag"
"fmt"
- "io/ioutil"
"os"
"path"
"sync"
@@ -126,11 +125,11 @@ func (i *indexClient) getUploaderName() (string, error) {
if !os.IsNotExist(err) {
return "", err
}
- if err := ioutil.WriteFile(uploaderFilePath, []byte(uploader), 0o666); err != nil {
+ if err := os.WriteFile(uploaderFilePath, []byte(uploader), 0o666); err != nil {
return "", err
}
} else {
- ub, err := ioutil.ReadFile(uploaderFilePath)
+ ub, err := os.ReadFile(uploaderFilePath)
if err != nil {
return "", err
}
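`getUploaderName` above is a read-or-create pattern: persist the name on first run, read it back afterwards. A self-contained sketch of that pattern with `os.ReadFile`/`os.WriteFile` (the `loadOrStoreName` helper, path, and default value are assumptions for illustration):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// loadOrStoreName returns the name persisted at path, writing defaultName
// first if the file does not exist yet.
func loadOrStoreName(path, defaultName string) (string, error) {
	b, err := os.ReadFile(path)
	if err == nil {
		return string(b), nil
	}
	if !os.IsNotExist(err) {
		return "", err
	}
	if err := os.WriteFile(path, []byte(defaultName), 0o666); err != nil {
		return "", err
	}
	return defaultName, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "uploader-demo-")
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "uploader")
	name, err := loadOrStoreName(path, "ingester-0-1234567890")
	if err != nil {
		panic(err)
	}
	fmt.Println("uploader:", name)
}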
diff --git a/pkg/storage/stores/tsdb/head_manager.go b/pkg/storage/stores/tsdb/head_manager.go
index a5b90887b6..13574e4ed2 100644
--- a/pkg/storage/stores/tsdb/head_manager.go
+++ b/pkg/storage/stores/tsdb/head_manager.go
@@ -3,7 +3,6 @@ package tsdb
import (
"context"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sort"
@@ -423,7 +422,7 @@ func walsByPeriod(dir string, period period) ([]WalGroup, error) {
}
func walGroups(dir string, period period) (map[int]*WalGroup, error) {
- files, err := ioutil.ReadDir(managerWalDir(dir))
+ files, err := os.ReadDir(managerWalDir(dir))
if err != nil {
return nil, err
}
diff --git a/pkg/storage/stores/tsdb/index/index.go b/pkg/storage/stores/tsdb/index/index.go
index 5c77aae823..3d2d0856de 100644
--- a/pkg/storage/stores/tsdb/index/index.go
+++ b/pkg/storage/stores/tsdb/index/index.go
@@ -22,7 +22,6 @@ import (
"hash"
"hash/crc32"
"io"
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -1200,7 +1199,7 @@ func (b RealByteSlice) Sub(start, end int) ByteSlice {
// NewReader returns a new index reader on the given byte slice. It automatically
// handles different format versions.
func NewReader(b ByteSlice) (*Reader, error) {
- return newReader(b, ioutil.NopCloser(nil))
+ return newReader(b, io.NopCloser(nil))
}
type nopCloser struct{}
@@ -1209,7 +1208,7 @@ func (nopCloser) Close() error { return nil }
// NewFileReader returns a new index reader against the given index file.
func NewFileReader(path string) (*Reader, error) {
- b, err := ioutil.ReadFile(path)
+ b, err := os.ReadFile(path)
if err != nil {
return nil, err
}
diff --git a/pkg/storage/stores/tsdb/index/index_test.go b/pkg/storage/stores/tsdb/index/index_test.go
index 962e5dfb5c..547f2350d1 100644
--- a/pkg/storage/stores/tsdb/index/index_test.go
+++ b/pkg/storage/stores/tsdb/index/index_test.go
@@ -17,7 +17,6 @@ import (
"context"
"fmt"
"hash/crc32"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -484,7 +483,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
dir := testutil.NewTemporaryDirectory("block", t)
idxName := filepath.Join(dir.Path(), "index")
- err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0o666)
+ err := os.WriteFile(idxName, []byte("corrupted contents"), 0o666)
require.NoError(t, err)
_, err = NewFileReader(idxName)
diff --git a/pkg/storage/stores/tsdb/manager.go b/pkg/storage/stores/tsdb/manager.go
index 33e96a72cb..4218345054 100644
--- a/pkg/storage/stores/tsdb/manager.go
+++ b/pkg/storage/stores/tsdb/manager.go
@@ -3,7 +3,7 @@ package tsdb
import (
"context"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"regexp"
"strconv"
@@ -95,7 +95,7 @@ func (m *tsdbManager) Start() (err error) {
// load list of multitenant tsdbs
mulitenantDir := managerMultitenantDir(m.dir)
- files, err := ioutil.ReadDir(mulitenantDir)
+ files, err := os.ReadDir(mulitenantDir)
if err != nil {
return err
}
@@ -116,7 +116,7 @@ func (m *tsdbManager) Start() (err error) {
}
buckets++
- tsdbs, err := ioutil.ReadDir(filepath.Join(mulitenantDir, bucket))
+ tsdbs, err := os.ReadDir(filepath.Join(mulitenantDir, bucket))
if err != nil {
level.Warn(m.log).Log(
"msg", "failed to open period bucket dir",
diff --git a/pkg/storage/stores/tsdb/single_file_index.go b/pkg/storage/stores/tsdb/single_file_index.go
index 43df6a1ec7..42c2c2eedd 100644
--- a/pkg/storage/stores/tsdb/single_file_index.go
+++ b/pkg/storage/stores/tsdb/single_file_index.go
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"io"
- "io/ioutil"
+ "os"
"strings"
"time"
@@ -86,7 +86,7 @@ type TSDBIndex struct {
// Return the index as well as the underlying []byte which isn't exposed as an index
// method but is helpful for building an io.reader for the index shipper
func NewTSDBIndexFromFile(location string, gzip bool) (*TSDBIndex, []byte, error) {
- raw, err := ioutil.ReadFile(location)
+ raw, err := os.ReadFile(location)
if err != nil {
return nil, nil, err
}
diff --git a/pkg/util/cfg/cfg_test.go b/pkg/util/cfg/cfg_test.go
index cbdab43dbb..a4ca0ed9d8 100644
--- a/pkg/util/cfg/cfg_test.go
+++ b/pkg/util/cfg/cfg_test.go
@@ -2,7 +2,6 @@ package cfg
import (
"flag"
- "io/ioutil"
"os"
"testing"
"time"
@@ -69,7 +68,7 @@ tls:
func TestDefaultUnmarshal(t *testing.T) {
testContext := func(yamlString string, args []string) TestConfigWrapper {
- file, err := ioutil.TempFile("", "config.yaml")
+ file, err := os.CreateTemp("", "config.yaml")
defer func() {
os.Remove(file.Name())
}()
diff --git a/pkg/util/cfg/dynamic_test.go b/pkg/util/cfg/dynamic_test.go
index d4b67bb46c..b76cc2e79c 100644
--- a/pkg/util/cfg/dynamic_test.go
+++ b/pkg/util/cfg/dynamic_test.go
@@ -2,7 +2,7 @@ package cfg
import (
"flag"
- "io/ioutil"
+ "os"
"testing"
"time"
@@ -21,7 +21,7 @@ server:
data := NewDynamicConfig(mockApplyDynamicConfig)
fs := flag.NewFlagSet(t.Name(), flag.PanicOnError)
- file, err := ioutil.TempFile("", "config.yaml")
+ file, err := os.CreateTemp("", "config.yaml")
require.NoError(t, err)
_, err = file.WriteString(config)
require.NoError(t, err)
diff --git a/pkg/util/cfg/files.go b/pkg/util/cfg/files.go
index 6eba631785..9f25238245 100644
--- a/pkg/util/cfg/files.go
+++ b/pkg/util/cfg/files.go
@@ -4,7 +4,6 @@ import (
"encoding/json"
"flag"
"fmt"
- "io/ioutil"
"os"
"strconv"
"strings"
@@ -21,7 +20,7 @@ func JSON(f *string) Source {
return nil
}
- j, err := ioutil.ReadFile(*f)
+ j, err := os.ReadFile(*f)
if err != nil {
return err
}
@@ -43,7 +42,7 @@ func dJSON(y []byte) Source {
// using https://pkg.go.dev/github.com/drone/envsubst?tab=overview
func YAML(f string, expandEnvVars bool, strict bool) Source {
return func(dst Cloneable) error {
- y, err := ioutil.ReadFile(f)
+ y, err := os.ReadFile(f)
if err != nil {
return err
}
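The `YAML` source above reads the whole config file with `os.ReadFile` and, per its comment, expands environment variables via `drone/envsubst` before unmarshalling. A simplified sketch of that flow using only the standard library, with `os.ExpandEnv` standing in for envsubst (file contents and variable name are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "cfg-demo-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Write a small config file containing an environment reference.
	path := filepath.Join(dir, "config.yaml")
	raw := "server:\n  http_listen_port: ${PORT}\n"
	if err := os.WriteFile(path, []byte(raw), 0o666); err != nil {
		panic(err)
	}

	os.Setenv("PORT", "3100")

	// Read the file and expand ${VAR} references. drone/envsubst (referenced
	// in the hunk above) handles richer substitution; os.ExpandEnv is the
	// plain stand-in used here.
	y, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	fmt.Print(os.ExpandEnv(string(y)))
}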
diff --git a/pkg/util/http_test.go b/pkg/util/http_test.go
index 029912619f..ba365f777d 100644
--- a/pkg/util/http_test.go
+++ b/pkg/util/http_test.go
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"html/template"
- "io/ioutil"
+ "io"
"math/rand"
"net/http"
"net/http/httptest"
@@ -215,6 +215,6 @@ func (b bytesBuffered) BytesBuffer() *bytes.Buffer {
}
func TestIsRequestBodyTooLargeRegression(t *testing.T) {
- _, err := ioutil.ReadAll(http.MaxBytesReader(httptest.NewRecorder(), ioutil.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4})), 1))
+ _, err := io.ReadAll(http.MaxBytesReader(httptest.NewRecorder(), io.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4})), 1))
assert.True(t, util.IsRequestBodyTooLarge(err))
}
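The regression test above exercises `http.MaxBytesReader`, which caps how many bytes of a body may be read and errors once the limit is exceeded; `io.NopCloser` merely adapts the plain `bytes.Reader` to the `io.ReadCloser` the function expects. A standalone sketch of the same check:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	rec := httptest.NewRecorder()
	body := io.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4}))

	// Allow at most 1 byte: reading the 4-byte body must return an error.
	limited := http.MaxBytesReader(rec, body, 1)
	_, err := io.ReadAll(limited)
	fmt.Println(err != nil, err)
}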
diff --git a/pkg/util/server/error_test.go b/pkg/util/server/error_test.go
index 706dda6a1a..fbf898723d 100644
--- a/pkg/util/server/error_test.go
+++ b/pkg/util/server/error_test.go
@@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"testing"
@@ -55,7 +55,7 @@ func Test_writeError(t *testing.T) {
rec := httptest.NewRecorder()
WriteError(tt.err, rec)
require.Equal(t, tt.expectedStatus, rec.Result().StatusCode)
- b, err := ioutil.ReadAll(rec.Result().Body)
+ b, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatal(err)
}
diff --git a/pkg/util/unmarshal/legacy/unmarshal_test.go b/pkg/util/unmarshal/legacy/unmarshal_test.go
index aa2aedd925..e82682107e 100644
--- a/pkg/util/unmarshal/legacy/unmarshal_test.go
+++ b/pkg/util/unmarshal/legacy/unmarshal_test.go
@@ -1,7 +1,7 @@
package unmarshal
import (
- "io/ioutil"
+ "io"
"log"
"strings"
"testing"
@@ -49,7 +49,7 @@ func Test_DecodePushRequest(t *testing.T) {
for i, pushTest := range pushTests {
var actual logproto.PushRequest
- closer := ioutil.NopCloser(strings.NewReader(pushTest.actual))
+ closer := io.NopCloser(strings.NewReader(pushTest.actual))
err := DecodePushRequest(closer, &actual)
require.NoError(t, err)
diff --git a/pkg/util/unmarshal/unmarshal_test.go b/pkg/util/unmarshal/unmarshal_test.go
index f4c82acd00..dcb0df8c18 100644
--- a/pkg/util/unmarshal/unmarshal_test.go
+++ b/pkg/util/unmarshal/unmarshal_test.go
@@ -2,7 +2,7 @@ package unmarshal
import (
"fmt"
- "io/ioutil"
+ "io"
"strings"
"testing"
"time"
@@ -50,7 +50,7 @@ var pushTests = []struct {
func Test_DecodePushRequest(t *testing.T) {
for i, pushTest := range pushTests {
var actual logproto.PushRequest
- closer := ioutil.NopCloser(strings.NewReader(pushTest.actual))
+ closer := io.NopCloser(strings.NewReader(pushTest.actual))
err := DecodePushRequest(closer, &actual)
require.NoError(t, err)
diff --git a/tools/lambda-promtail/lambda-promtail/kinesis_test.go b/tools/lambda-promtail/lambda-promtail/kinesis_test.go
index 5bd246ab26..4b342110ee 100644
--- a/tools/lambda-promtail/lambda-promtail/kinesis_test.go
+++ b/tools/lambda-promtail/lambda-promtail/kinesis_test.go
@@ -3,7 +3,7 @@ package main
import (
"context"
"encoding/json"
- "io/ioutil"
+ "os"
"testing"
"github.com/aws/aws-lambda-go/events"
@@ -34,7 +34,7 @@ func (b *MockBatch) createPushRequest() (*logproto.PushRequest, int) {
}
func ReadJSONFromFile(t *testing.T, inputFile string) []byte {
- inputJSON, err := ioutil.ReadFile(inputFile)
+ inputJSON, err := os.ReadFile(inputFile)
if err != nil {
t.Errorf("could not open test file. details: %v", err)
}
@@ -43,7 +43,7 @@ func ReadJSONFromFile(t *testing.T, inputFile string) []byte {
}
func TestLambdaPromtail_KinesisParseEvents(t *testing.T) {
- inputJson, err := ioutil.ReadFile("../testdata/kinesis-event.json")
+ inputJson, err := os.ReadFile("../testdata/kinesis-event.json")
if err != nil {
t.Errorf("could not open test file. details: %v", err)
diff --git a/tools/querytee/proxy_backend.go b/tools/querytee/proxy_backend.go
index 57c4ca464c..acebfe6a01 100644
--- a/tools/querytee/proxy_backend.go
+++ b/tools/querytee/proxy_backend.go
@@ -3,7 +3,6 @@ package querytee
import (
"context"
"io"
- "io/ioutil"
"net"
"net/http"
"net/url"
@@ -107,7 +106,7 @@ func (b *ProxyBackend) doBackendRequest(req *http.Request) (int, []byte, error)
// Read the entire response body.
defer res.Body.Close()
- body, err := ioutil.ReadAll(res.Body)
+ body, err := io.ReadAll(res.Body)
if err != nil {
return 0, nil, errors.Wrap(err, "reading backend response")
}
diff --git a/tools/querytee/proxy_endpoint.go b/tools/querytee/proxy_endpoint.go
index 344c703715..3697282bce 100644
--- a/tools/querytee/proxy_endpoint.go
+++ b/tools/querytee/proxy_endpoint.go
@@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"io"
- "io/ioutil"
"net/http"
"strconv"
"strings"
@@ -85,7 +84,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
)
if r.Body != nil {
- body, err = ioutil.ReadAll(r.Body)
+ body, err = io.ReadAll(r.Body)
if err != nil {
level.Warn(p.logger).Log("msg", "Unable to read request body", "err", err)
return
@@ -94,7 +93,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
level.Warn(p.logger).Log("msg", "Unable to close request body", "err", err)
}
- r.Body = ioutil.NopCloser(bytes.NewReader(body))
+ r.Body = io.NopCloser(bytes.NewReader(body))
if err := r.ParseForm(); err != nil {
level.Warn(p.logger).Log("msg", "Unable to parse form", "err", err)
}
@@ -115,7 +114,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
start = time.Now()
)
if len(body) > 0 {
- bodyReader = ioutil.NopCloser(bytes.NewReader(body))
+ bodyReader = io.NopCloser(bytes.NewReader(body))
}
status, body, err := b.ForwardRequest(r, bodyReader)
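The proxy endpoint above reads the request body once, then rebuilds `r.Body` and a fresh `bodyReader` per backend with `io.NopCloser(bytes.NewReader(body))` so the same payload can be parsed and forwarded repeatedly. A minimal sketch of that replay trick (the handler and the two-consumer loop are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Consume the body once...
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	_ = r.Body.Close()

	// ...then hand each downstream consumer its own replayable copy.
	for i := 0; i < 2; i++ {
		reader := io.NopCloser(bytes.NewReader(body))
		b, _ := io.ReadAll(reader)
		fmt.Fprintf(w, "backend %d saw %q\n", i, b)
	}
}

func main() {
	req := httptest.NewRequest(http.MethodPost, "/loki/api/v1/push", strings.NewReader("payload"))
	rec := httptest.NewRecorder()
	handler(rec, req)
	fmt.Print(rec.Body.String())
}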
diff --git a/tools/querytee/proxy_test.go b/tools/querytee/proxy_test.go
index e98f24274a..9878ce42ff 100644
--- a/tools/querytee/proxy_test.go
+++ b/tools/querytee/proxy_test.go
@@ -2,7 +2,7 @@ package querytee
import (
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"net/url"
@@ -179,7 +179,7 @@ func Test_Proxy_RequestsForwarding(t *testing.T) {
require.NoError(t, err)
defer res.Body.Close()
- body, err := ioutil.ReadAll(res.Body)
+ body, err := io.ReadAll(res.Body)
require.NoError(t, err)
assert.Equal(t, testData.expectedStatus, res.StatusCode)
@@ -330,7 +330,7 @@ func TestProxy_Passthrough(t *testing.T) {
require.NoError(t, err)
defer res.Body.Close()
- body, err := ioutil.ReadAll(res.Body)
+ body, err := io.ReadAll(res.Body)
require.NoError(t, err)
assert.Equal(t, query.expectedStatusCode, res.StatusCode)