Replace deprecated `io/ioutil` with `io` and `os` packages (#7212)

The `io/ioutil` package has been deprecated since Go 1.16 (see
https://pkg.go.dev/io/ioutil). This PR replaces the existing `io/ioutil`
functions with their new definitions in the `io` and `os` packages.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
pull/6671/head^2
Eng Zer Jun 3 years ago committed by GitHub
parent 557fdf2206
commit e2842c69c5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 1
      CHANGELOG.md
  2. 3
      clients/cmd/docker-driver/config.go
  3. 3
      clients/cmd/docker-driver/config_test.go
  4. 4
      clients/cmd/fluent-bit/config.go
  5. 3
      clients/cmd/fluent-bit/config_test.go
  6. 3
      clients/pkg/promtail/positions/positions.go
  7. 13
      clients/pkg/promtail/positions/positions_test.go
  8. 3
      clients/pkg/promtail/positions/write_positions_windows.go
  9. 5
      clients/pkg/promtail/promtail_test.go
  10. 3
      clients/pkg/promtail/server/template.go
  11. 3
      clients/pkg/promtail/targets/journal/journaltarget.go
  12. 3
      clients/pkg/promtail/targets/syslog/syslogtarget_test.go
  13. 4
      clients/pkg/promtail/targets/syslog/transport.go
  14. 4
      clients/pkg/promtail/targets/windows/bookmark.go
  15. 3
      cmd/chunks-inspect/loki.go
  16. 3
      cmd/chunks-inspect/main.go
  17. 5
      operator/cmd/loki-broker/main.go
  18. 4
      operator/controllers/loki/lokistack_controller_test.go
  19. 4
      operator/internal/handlers/lokistack_create_or_update_test.go
  20. 6
      operator/internal/manifests/internal/config/build.go
  21. 8
      operator/internal/manifests/internal/gateway/build.go
  22. 5
      pkg/canary/reader/reader.go
  23. 6
      pkg/distributor/http_test.go
  24. 7
      pkg/ingester/checkpoint.go
  25. 6
      pkg/ingester/checkpoint_test.go
  26. 3
      pkg/ingester/transfer_test.go
  27. 9
      pkg/logcli/client/client.go
  28. 3
      pkg/logcli/client/file.go
  29. 3
      pkg/logql/syntax/fuzz_test.go
  30. 4
      pkg/logqlanalyzer/http.go
  31. 4
      pkg/loki/config_handler_test.go
  32. 5
      pkg/loki/config_wrapper_test.go
  33. 3
      pkg/loki/loki_test.go
  34. 4
      pkg/loki/runtime_config_test.go
  35. 4
      pkg/loki/version_handler_test.go
  36. 5
      pkg/lokifrontend/frontend/transport/handler.go
  37. 3
      pkg/lokifrontend/frontend/transport/roundtripper.go
  38. 8
      pkg/lokifrontend/frontend/v1/frontend_test.go
  39. 8
      pkg/querier/queryrange/codec.go
  40. 33
      pkg/querier/queryrange/codec_test.go
  41. 4
      pkg/querier/queryrange/prometheus.go
  42. 4
      pkg/querier/queryrange/queryrangebase/marshaling_test.go
  43. 6
      pkg/querier/queryrange/queryrangebase/query_range.go
  44. 6
      pkg/querier/queryrange/queryrangebase/query_range_test.go
  45. 3
      pkg/querier/queryrange/queryrangebase/roundtrip.go
  46. 4
      pkg/querier/queryrange/roundtrip_test.go
  47. 4
      pkg/ruler/base/api.go
  48. 7
      pkg/ruler/base/api_test.go
  49. 4
      pkg/ruler/base/ruler_test.go
  50. 4
      pkg/ruler/compat.go
  51. 5
      pkg/ruler/compat_test.go
  52. 4
      pkg/ruler/rulestore/bucketclient/bucket_client.go
  53. 46
      pkg/ruler/rulestore/local/local.go
  54. 3
      pkg/ruler/rulestore/local/local_test.go
  55. 4
      pkg/ruler/rulestore/objectclient/rule_store.go
  56. 6
      pkg/storage/chunk/client/aws/mock.go
  57. 4
      pkg/storage/chunk/client/cassandra/storage_client.go
  58. 3
      pkg/storage/chunk/client/local/fixtures.go
  59. 7
      pkg/storage/chunk/client/local/fs_object_client_test.go
  60. 3
      pkg/storage/chunk/client/openstack/swift_object_client.go
  61. 5
      pkg/storage/chunk/client/testutils/inmemory_storage_client.go
  62. 4
      pkg/storage/stores/indexshipper/compactor/compactor_test.go
  63. 3
      pkg/storage/stores/indexshipper/compactor/deletion/delete_requests_client.go
  64. 3
      pkg/storage/stores/indexshipper/compactor/retention/marker.go
  65. 17
      pkg/storage/stores/indexshipper/compactor/table_test.go
  66. 25
      pkg/storage/stores/indexshipper/compactor/testutil.go
  67. 11
      pkg/storage/stores/indexshipper/downloads/index_set.go
  68. 5
      pkg/storage/stores/indexshipper/downloads/index_set_test.go
  69. 12
      pkg/storage/stores/indexshipper/downloads/table.go
  70. 18
      pkg/storage/stores/indexshipper/downloads/table_manager.go
  71. 22
      pkg/storage/stores/indexshipper/downloads/table_test.go
  72. 3
      pkg/storage/stores/indexshipper/downloads/testutil.go
  73. 7
      pkg/storage/stores/indexshipper/storage/client_test.go
  74. 7
      pkg/storage/stores/indexshipper/storage/util_test.go
  75. 7
      pkg/storage/stores/indexshipper/uploads/index_set_test.go
  76. 13
      pkg/storage/stores/shipper/index/compactor/table_compactor_test.go
  77. 10
      pkg/storage/stores/shipper/index/compactor/util_test.go
  78. 13
      pkg/storage/stores/shipper/index/table.go
  79. 25
      pkg/storage/stores/shipper/index/table_manager.go
  80. 12
      pkg/storage/stores/shipper/index/table_test.go
  81. 5
      pkg/storage/stores/shipper/shipper_index_client.go
  82. 3
      pkg/storage/stores/tsdb/head_manager.go
  83. 5
      pkg/storage/stores/tsdb/index/index.go
  84. 3
      pkg/storage/stores/tsdb/index/index_test.go
  85. 6
      pkg/storage/stores/tsdb/manager.go
  86. 4
      pkg/storage/stores/tsdb/single_file_index.go
  87. 3
      pkg/util/cfg/cfg_test.go
  88. 4
      pkg/util/cfg/dynamic_test.go
  89. 5
      pkg/util/cfg/files.go
  90. 4
      pkg/util/http_test.go
  91. 4
      pkg/util/server/error_test.go
  92. 4
      pkg/util/unmarshal/legacy/unmarshal_test.go
  93. 4
      pkg/util/unmarshal/unmarshal_test.go
  94. 6
      tools/lambda-promtail/lambda-promtail/kinesis_test.go
  95. 3
      tools/querytee/proxy_backend.go
  96. 7
      tools/querytee/proxy_endpoint.go
  97. 6
      tools/querytee/proxy_test.go

@ -32,6 +32,7 @@
* [6415](https://github.com/grafana/loki/pull/6415) **salvacorts**: Evenly spread queriers across kubernetes nodes.
* [6349](https://github.com/grafana/loki/pull/6349) **simonswine**: Update the default HTTP listen port from 80 to 3100. Make sure to configure the port explicitly if you are using port 80.
* [6835](https://github.com/grafana/loki/pull/6835) **DylanGuedes**: Add new per-tenant query timeout configuration and remove engine query timeout.
* [7212](https://github.com/grafana/loki/pull/7212) **Juneezee**: Replaces deprecated `io/ioutil` with `io` and `os`.
#### Promtail

@ -3,7 +3,6 @@ package main
import (
"bytes"
"fmt"
"io/ioutil"
"net/url"
"os"
"strconv"
@ -385,7 +384,7 @@ func parseBoolean(key string, logCtx logger.Info, defaultValue bool) (bool, erro
// loadConfig read YAML-formatted config from filename into cfg.
func loadConfig(filename string, cfg interface{}) error {
buf, err := ioutil.ReadFile(filename)
buf, err := os.ReadFile(filename)
if err != nil {
return errors.Wrap(err, "Error reading config file")
}

@ -2,7 +2,6 @@ package main
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"testing"
@ -82,7 +81,7 @@ var pipeline = PipelineConfig{
}
func Test_parsePipeline(t *testing.T) {
f, err := ioutil.TempFile("/tmp", "Test_parsePipeline")
f, err := os.CreateTemp("/tmp", "Test_parsePipeline")
if err != nil {
t.Fatal(err)
}

@ -4,7 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"time"
@ -200,7 +200,7 @@ func parseConfig(cfg ConfigGetter) (*config, error) {
labelMapPath := cfg.Get("LabelMapPath")
if labelMapPath != "" {
content, err := ioutil.ReadFile(labelMapPath)
content, err := os.ReadFile(labelMapPath)
if err != nil {
return nil, fmt.Errorf("failed to open LabelMap file: %s", err)
}

@ -1,7 +1,6 @@
package main
import (
"io/ioutil"
"net/url"
"os"
"reflect"
@ -229,7 +228,7 @@ func mustParseDuration(u string) time.Duration {
}
func createTempLabelMap(t *testing.T) string {
file, err := ioutil.TempFile("", "labelmap")
file, err := os.CreateTemp("", "labelmap")
if err != nil {
t.Fatal(err)
}

@ -3,7 +3,6 @@ package positions
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@ -215,7 +214,7 @@ func (p *positions) cleanup() {
func readPositionsFile(cfg Config, logger log.Logger) (map[string]string, error) {
cleanfn := filepath.Clean(cfg.PositionsFile)
buf, err := ioutil.ReadFile(cleanfn)
buf, err := os.ReadFile(cleanfn)
if err != nil {
if os.IsNotExist(err) {
return map[string]string{}, nil

@ -1,7 +1,6 @@
package positions
import (
"io/ioutil"
"os"
"strings"
"testing"
@ -16,7 +15,7 @@ import (
func tempFilename(t *testing.T) string {
t.Helper()
temp, err := ioutil.TempFile("", "positions")
temp, err := os.CreateTemp("", "positions")
if err != nil {
t.Fatal("tempFilename:", err)
}
@ -43,7 +42,7 @@ func TestReadPositionsOK(t *testing.T) {
yaml := []byte(`positions:
/tmp/random.log: "17623"
`)
err := ioutil.WriteFile(temp, yaml, 0644)
err := os.WriteFile(temp, yaml, 0644)
if err != nil {
t.Fatal(err)
}
@ -63,7 +62,7 @@ func TestReadPositionsEmptyFile(t *testing.T) {
}()
yaml := []byte(``)
err := ioutil.WriteFile(temp, yaml, 0644)
err := os.WriteFile(temp, yaml, 0644)
if err != nil {
t.Fatal(err)
}
@ -104,7 +103,7 @@ func TestReadPositionsFromBadYaml(t *testing.T) {
badYaml := []byte(`positions:
/tmp/random.log: "176
`)
err := ioutil.WriteFile(temp, badYaml, 0644)
err := os.WriteFile(temp, badYaml, 0644)
if err != nil {
t.Fatal(err)
}
@ -126,7 +125,7 @@ func TestReadPositionsFromBadYamlIgnoreCorruption(t *testing.T) {
badYaml := []byte(`positions:
/tmp/random.log: "176
`)
err := ioutil.WriteFile(temp, badYaml, 0644)
err := os.WriteFile(temp, badYaml, 0644)
if err != nil {
t.Fatal(err)
}
@ -148,7 +147,7 @@ func Test_ReadOnly(t *testing.T) {
yaml := []byte(`positions:
/tmp/random.log: "17623"
`)
err := ioutil.WriteFile(temp, yaml, 0644)
err := os.WriteFile(temp, yaml, 0644)
if err != nil {
t.Fatal(err)
}

@ -4,7 +4,6 @@
package positions
import (
"io/ioutil"
"os"
"path/filepath"
@ -24,7 +23,7 @@ func writePositionFile(filename string, positions map[string]string) error {
target := filepath.Clean(filename)
temp := target + "-new"
err = ioutil.WriteFile(temp, buf, os.FileMode(positionFileMode))
err = os.WriteFile(temp, buf, os.FileMode(positionFileMode))
if err != nil {
return err
}

@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
@ -510,7 +509,7 @@ func getPromMetrics(t *testing.T, httpListenAddr net.Addr) ([]byte, string) {
t.Fatal("Received a non 200 status code from /metrics endpoint", resp.StatusCode)
}
b, err := ioutil.ReadAll(resp.Body)
b, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal("Error reading response body from /metrics endpoint", err)
}
@ -656,7 +655,7 @@ func randName() string {
}
func Test_DryRun(t *testing.T) {
f, err := ioutil.TempFile("/tmp", "Test_DryRun")
f, err := os.CreateTemp("/tmp", "Test_DryRun")
require.NoError(t, err)
defer os.Remove(f.Name())

@ -3,7 +3,6 @@ package server
import (
"context"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@ -77,7 +76,7 @@ func getTemplate(name string) (string, error) {
defer func() {
_ = f.Close()
}()
b, err := ioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
return err
}

@ -6,7 +6,6 @@ package journal
import (
"fmt"
"io"
"io/ioutil"
"strings"
"syscall"
"time"
@ -201,7 +200,7 @@ func journalTargetWithReader(
go func() {
for {
err := t.r.Follow(until, ioutil.Discard)
err := t.r.Follow(until, io.Discard)
if err != nil {
level.Error(t.logger).Log("msg", "received error during sdjournal follow", "err", err.Error())

@ -5,7 +5,6 @@ import (
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"testing"
@ -656,7 +655,7 @@ func testSyslogTargetWithTLS(t *testing.T, fmtFunc formatFunc) {
}
func createTempFile(data []byte) (*os.File, error) {
tmpFile, err := ioutil.TempFile("", "")
tmpFile, err := os.CreateTemp("", "")
if err != nil {
return nil, fmt.Errorf("failed to create temporary file: %s", err)
}

@ -6,8 +6,8 @@ import (
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strings"
"sync"
"time"
@ -207,7 +207,7 @@ func newTLSConfig(certFile string, keyFile string, caFile string) (*tls.Config,
}
if caFile != "" {
caCert, err := ioutil.ReadFile(caFile)
caCert, err := os.ReadFile(caFile)
if err != nil {
return nil, fmt.Errorf("unable to load client CA certificate: %w", err)
}

@ -4,7 +4,7 @@
package windows
import (
"io/ioutil"
"io"
"os"
"github.com/spf13/afero"
@ -52,7 +52,7 @@ func newBookMark(path string) (*bookMark, error) {
if err != nil {
return nil, err
}
fileContent, err := ioutil.ReadAll(file)
fileContent, err := io.ReadAll(file)
if err != nil {
return nil, err
}

@ -7,7 +7,6 @@ import (
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"github.com/golang/snappy"
"github.com/klauspost/compress/flate"
@ -187,7 +186,7 @@ func parseLokiBlock(compression Encoding, data []byte) ([]byte, []LokiEntry, err
return nil, nil, err
}
decompressed, err := ioutil.ReadAll(r)
decompressed, err := io.ReadAll(r)
origDecompressed := decompressed
if err != nil {
return nil, nil, err

@ -4,7 +4,6 @@ import (
"crypto/sha256"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
@ -122,7 +121,7 @@ func printFile(filename string, blockDetails, printLines, storeBlocks bool) {
}
func writeBlockToFile(data []byte, blockIndex int, filename string) {
err := ioutil.WriteFile(filename, data, 0644)
err := os.WriteFile(filename, data, 0644)
if err != nil {
log.Println("Failed to store block", blockIndex, "to file", filename, "due to error:", err)
} else {

@ -3,7 +3,6 @@ package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
@ -117,7 +116,7 @@ func main() {
cfg.validateFlags(logger)
b, err := ioutil.ReadFile(cfg.crFilepath)
b, err := os.ReadFile(cfg.crFilepath)
if err != nil {
logger.Info("failed to read custom resource file", "path", cfg.crFilepath)
os.Exit(1)
@ -169,7 +168,7 @@ func main() {
if cfg.writeToDir != "" {
basename := fmt.Sprintf("%s-%s.yaml", o.GetObjectKind().GroupVersionKind().Kind, o.GetName())
fname := strings.ToLower(path.Join(cfg.writeToDir, basename))
if err := ioutil.WriteFile(fname, b, 0o644); err != nil {
if err := os.WriteFile(fname, b, 0o644); err != nil {
logger.Error(err, "failed to write file to directory", "path", fname)
os.Exit(1)
}

@ -2,7 +2,7 @@ package controllers
import (
"flag"
"io/ioutil"
"io"
"os"
"testing"
@ -38,7 +38,7 @@ func TestMain(m *testing.M) {
if testing.Verbose() {
logger = log.NewLogger("testing", log.WithVerbosity(5))
} else {
logger = log.NewLogger("testing", log.WithOutput(ioutil.Discard))
logger = log.NewLogger("testing", log.WithOutput(io.Discard))
}
// Register the clientgo and CRD schemes

@ -4,7 +4,7 @@ import (
"context"
"errors"
"flag"
"io/ioutil"
"io"
"os"
"testing"
@ -94,7 +94,7 @@ func TestMain(m *testing.M) {
if testing.Verbose() {
logger = log.NewLogger("testing", log.WithVerbosity(5))
} else {
logger = log.NewLogger("testing", log.WithOutput(ioutil.Discard))
logger = log.NewLogger("testing", log.WithOutput(io.Discard))
}
// Register the clientgo and CRD schemes

@ -3,7 +3,7 @@ package config
import (
"bytes"
"embed"
"io/ioutil"
"io"
"text/template"
"github.com/ViaQ/logerr/v2/kverrors"
@ -38,7 +38,7 @@ func Build(opts Options) ([]byte, []byte, error) {
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to create loki configuration")
}
cfg, err := ioutil.ReadAll(w)
cfg, err := io.ReadAll(w)
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
@ -48,7 +48,7 @@ func Build(opts Options) ([]byte, []byte, error) {
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to create loki runtime configuration")
}
rcfg, err := ioutil.ReadAll(w)
rcfg, err := io.ReadAll(w)
if err != nil {
return nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}

@ -3,7 +3,7 @@ package gateway
import (
"bytes"
"embed"
"io/ioutil"
"io"
"text/template"
lokiv1 "github.com/grafana/loki/operator/apis/loki/v1"
@ -47,7 +47,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway rbac configuration")
}
rbacCfg, err = ioutil.ReadAll(w)
rbacCfg, err = io.ReadAll(w)
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
@ -57,7 +57,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to create loki gateway tenants configuration")
}
tenantsCfg, err = ioutil.ReadAll(w)
tenantsCfg, err = io.ReadAll(w)
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}
@ -68,7 +68,7 @@ func Build(opts Options) (rbacCfg []byte, tenantsCfg []byte, regoCfg []byte, err
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to create lokistack gateway rego configuration")
}
regoCfg, err = ioutil.ReadAll(w)
regoCfg, err = io.ReadAll(w)
if err != nil {
return nil, nil, nil, kverrors.Wrap(err, "failed to read configuration from buffer")
}

@ -6,7 +6,6 @@ import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
@ -226,7 +225,7 @@ func (r *Reader) QueryCountOverTime(queryRange string) (float64, error) {
r.backoffMtx.Lock()
r.nextQuery = nextBackoff(r.w, resp.StatusCode, r.backoff)
r.backoffMtx.Unlock()
buf, _ := ioutil.ReadAll(resp.Body)
buf, _ := io.ReadAll(resp.Body)
return 0, fmt.Errorf("error response from server: %s (%v)", string(buf), err)
}
// No Errors, reset backoff
@ -317,7 +316,7 @@ func (r *Reader) Query(start time.Time, end time.Time) ([]time.Time, error) {
r.backoffMtx.Lock()
r.nextQuery = nextBackoff(r.w, resp.StatusCode, r.backoff)
r.backoffMtx.Unlock()
buf, _ := ioutil.ReadAll(resp.Body)
buf, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("error response from server: %s (%v)", string(buf), err)
}
// No Errors, reset backoff

@ -1,7 +1,7 @@
package distributor
import (
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"testing"
@ -33,7 +33,7 @@ func TestDistributorRingHandler(t *testing.T) {
require.NoError(t, err)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Contains(t, string(body), "<th>Instance ID</th>")
require.NotContains(t, string(body), "Not running with Global Rating Limit - ring not being used by the Distributor")
@ -48,7 +48,7 @@ func TestDistributorRingHandler(t *testing.T) {
require.NoError(t, err)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Contains(t, string(body), "Not running with Global Rating Limit - ring not being used by the Distributor")
require.NotContains(t, string(body), "<th>Instance ID</th>")

@ -3,8 +3,7 @@ package ingester
import (
"bytes"
"context"
fmt "fmt"
"io/ioutil"
"fmt"
"os"
"path/filepath"
"regexp"
@ -425,7 +424,7 @@ func checkpointIndex(filename string, includeTmp bool) (int, error) {
// lastCheckpoint returns the directory name and index of the most recent checkpoint.
// If dir does not contain any checkpoints, -1 is returned as index.
func lastCheckpoint(dir string) (string, int, error) {
dirs, err := ioutil.ReadDir(dir)
dirs, err := os.ReadDir(dir)
if err != nil {
return "", -1, err
}
@ -466,7 +465,7 @@ func (w *WALCheckpointWriter) deleteCheckpoints(maxIndex int) (err error) {
errs := tsdb_errors.NewMulti()
files, err := ioutil.ReadDir(w.segmentWAL.Dir())
files, err := os.ReadDir(w.segmentWAL.Dir())
if err != nil {
return err
}

@ -2,8 +2,8 @@ package ingester
import (
"context"
fmt "fmt"
"io/ioutil"
"fmt"
"os"
"sort"
"testing"
"time"
@ -333,7 +333,7 @@ func expectCheckpoint(t *testing.T, walDir string, shouldExist bool, max time.Du
<-time.After(max / 10) // check 10x over the duration
}
fs, err := ioutil.ReadDir(walDir)
fs, err := os.ReadDir(walDir)
require.Nil(t, err)
var found bool
for _, f := range fs {

@ -3,7 +3,6 @@ package ingester
import (
"fmt"
"io"
"io/ioutil"
"sort"
"testing"
"time"
@ -143,7 +142,7 @@ func (f *testIngesterFactory) getIngester(joinAfter time.Duration, t *testing.T)
PusherClient: nil,
QuerierClient: nil,
IngesterClient: &testIngesterClient{t: f.t, i: ingester},
Closer: ioutil.NopCloser(nil),
Closer: io.NopCloser(nil),
}, nil
}

@ -3,10 +3,11 @@ package client
import (
"encoding/base64"
"fmt"
"io/ioutil"
"io"
"log"
"net/http"
"net/url"
"os"
"path"
"strings"
"time"
@ -221,7 +222,7 @@ func (c *DefaultClient) doRequest(path, query string, quiet bool, out interface{
continue
}
if resp.StatusCode/100 != 2 {
buf, _ := ioutil.ReadAll(resp.Body) // nolint
buf, _ := io.ReadAll(resp.Body) // nolint
log.Printf("Error response from server: %s (%v) attempts remaining: %d", string(buf), err, attempts)
if err := resp.Body.Close(); err != nil {
log.Println("error closing body", err)
@ -283,7 +284,7 @@ func (c *DefaultClient) getHTTPRequestHeader() (http.Header, error) {
}
if c.BearerTokenFile != "" {
b, err := ioutil.ReadFile(c.BearerTokenFile)
b, err := os.ReadFile(c.BearerTokenFile)
if err != nil {
return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", c.BearerTokenFile, err)
}
@ -335,7 +336,7 @@ func (c *DefaultClient) wsConnect(path, query string, quiet bool) (*websocket.Co
if resp == nil {
return nil, err
}
buf, _ := ioutil.ReadAll(resp.Body) // nolint
buf, _ := io.ReadAll(resp.Body) // nolint
return nil, fmt.Errorf("Error response from server: %s (%v)", string(buf), err)
}

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"sort"
"strings"
"time"
@ -222,7 +221,7 @@ func newFileIterator(
) (iter.EntryIterator, error) {
lr := io.LimitReader(r, defaultMaxFileSize)
b, err := ioutil.ReadAll(lr)
b, err := io.ReadAll(lr)
if err != nil {
return nil, err
}

@ -4,7 +4,6 @@
package syntax
import (
"io/ioutil"
"os"
"testing"
@ -15,7 +14,7 @@ const fuzzTestCaseEnvName = "FUZZ_TESTCASE_PATH"
func Test_Fuzz(t *testing.T) {
fuzzTestPath := os.Getenv(fuzzTestCaseEnvName)
data, err := ioutil.ReadFile(fuzzTestPath)
data, err := os.ReadFile(fuzzTestPath)
require.NoError(t, err)
_, _ = ParseExpr(string(data))
}

@ -3,7 +3,7 @@ package logqlanalyzer
import (
"context"
"encoding/json"
"io/ioutil"
"io"
"net/http"
"github.com/go-kit/log/level"
@ -31,7 +31,7 @@ type LogQLAnalyzeHandler struct {
}
func (s *LogQLAnalyzeHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
payload, err := ioutil.ReadAll(req.Body)
payload, err := io.ReadAll(req.Body)
if err != nil {
writeError(req.Context(), w, err, http.StatusBadRequest, "unable to read request body")
return

@ -1,7 +1,7 @@
package loki
import (
"io/ioutil"
"io"
"net/http/httptest"
"testing"
@ -107,7 +107,7 @@ func TestConfigDiffHandler(t *testing.T) {
resp := w.Result()
assert.Equal(t, tc.expectedStatusCode, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, tc.expectedBody, string(body))
})

@ -3,7 +3,6 @@ package loki
import (
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"reflect"
@ -39,7 +38,7 @@ func configWrapperFromYAML(t *testing.T, configFileString string, args []string)
config := ConfigWrapper{}
fs := flag.NewFlagSet(t.Name(), flag.PanicOnError)
file, err := ioutil.TempFile("", "config.yaml")
file, err := os.CreateTemp("", "config.yaml")
defer func() {
os.Remove(file.Name())
}()
@ -987,7 +986,7 @@ query_range:
func TestDefaultUnmarshal(t *testing.T) {
t.Run("with a minimal config file and no command line args, defaults are use", func(t *testing.T) {
file, err := ioutil.TempFile("", "config.yaml")
file, err := os.CreateTemp("", "config.yaml")
defer func() {
os.Remove(file.Name())
}()

@ -5,7 +5,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strings"
@ -223,7 +222,7 @@ schema_config:
defer resp.Body.Close()
bBytes, err := ioutil.ReadAll(resp.Body)
bBytes, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, string(bBytes), "abc")
assert.True(t, customHandlerInvoked)

@ -4,7 +4,7 @@ import (
"context"
"flag"
"io"
"io/ioutil"
"os"
"strings"
"testing"
"time"
@ -85,7 +85,7 @@ overrides:
func newTestOverrides(t *testing.T, yaml string) *validation.Overrides {
t.Helper()
f, err := ioutil.TempFile(t.TempDir(), "bar")
f, err := os.CreateTemp(t.TempDir(), "bar")
require.NoError(t, err)
path := f.Name()
// fake loader to load from string instead of file.

@ -1,7 +1,7 @@
package loki
import (
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"testing"
@ -35,7 +35,7 @@ func TestVersionHandler(t *testing.T) {
"revision":"foobar",
"goVersion": "42"
}`
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
assert.JSONEq(t, expected, string(body))
}

@ -6,7 +6,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@ -123,7 +122,7 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Buffer the body for later use to track slow queries.
var buf bytes.Buffer
r.Body = http.MaxBytesReader(w, r.Body, f.cfg.MaxBodySize)
r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &buf))
r.Body = io.NopCloser(io.TeeReader(r.Body, &buf))
startTime := time.Now()
resp, err := f.roundTripper.RoundTrip(r)
@ -207,7 +206,7 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer
func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) url.Values {
// Use previously buffered body.
r.Body = ioutil.NopCloser(&bodyBuf)
r.Body = io.NopCloser(&bodyBuf)
// Ensure the form has been parsed so all the parameters are present
err := r.ParseForm()

@ -4,7 +4,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
"github.com/weaveworks/common/httpgrpc"
@ -47,7 +46,7 @@ func (a *grpcRoundTripperAdapter) RoundTrip(r *http.Request) (*http.Response, er
httpResp := &http.Response{
StatusCode: int(resp.Code),
Body: &buffer{buff: resp.Body, ReadCloser: ioutil.NopCloser(bytes.NewReader(resp.Body))},
Body: &buffer{buff: resp.Body, ReadCloser: io.NopCloser(bytes.NewReader(resp.Body))},
Header: http.Header{},
ContentLength: int64(len(resp.Body)),
}

@ -3,7 +3,7 @@ package v1
import (
"context"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"strings"
@ -55,7 +55,7 @@ func TestFrontend(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, "Hello World", string(body))
@ -105,7 +105,7 @@ func TestFrontendPropagateTrace(t *testing.T) {
require.Equal(t, 200, resp.StatusCode)
defer resp.Body.Close()
_, err = ioutil.ReadAll(resp.Body)
_, err = io.ReadAll(resp.Body)
require.NoError(t, err)
// Query should do one call.
@ -201,7 +201,7 @@ func TestFrontendMetricsCleanup(t *testing.T) {
require.Equal(t, 200, resp.StatusCode)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, "Hello World", string(body))

@ -6,7 +6,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
io "io"
"net/http"
"net/url"
"sort"
@ -407,7 +407,7 @@ type Buffer interface {
func (Codec) DecodeResponse(ctx context.Context, r *http.Response, req queryrangebase.Request) (queryrangebase.Response, error) {
if r.StatusCode/100 != 2 {
body, _ := ioutil.ReadAll(r.Body)
body, _ := io.ReadAll(r.Body)
return nil, httpgrpc.Errorf(r.StatusCode, string(body))
}
@ -416,7 +416,7 @@ func (Codec) DecodeResponse(ctx context.Context, r *http.Response, req queryrang
if buffer, ok := r.Body.(Buffer); ok {
buf = buffer.Bytes()
} else {
buf, err = ioutil.ReadAll(r.Body)
buf, err = io.ReadAll(r.Body)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error decoding response: %v", err)
}
@ -590,7 +590,7 @@ func (Codec) EncodeResponse(ctx context.Context, res queryrangebase.Response) (*
Header: http.Header{
"Content-Type": []string{"application/json"},
},
Body: ioutil.NopCloser(&buf),
Body: io.NopCloser(&buf),
StatusCode: http.StatusOK,
}
return &resp, nil

@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
strings "strings"
"testing"
@ -117,13 +116,13 @@ func Test_codec_DecodeResponse(t *testing.T) {
want queryrangebase.Response
wantErr bool
}{
{"500", &http.Response{StatusCode: 500, Body: ioutil.NopCloser(strings.NewReader("some error"))}, nil, nil, true},
{"no body", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(badReader{})}, nil, nil, true},
{"bad json", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(""))}, nil, nil, true},
{"not success", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"status":"fail"}`))}, nil, nil, true},
{"unknown", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(`{"status":"success"}`))}, nil, nil, true},
{"500", &http.Response{StatusCode: 500, Body: io.NopCloser(strings.NewReader("some error"))}, nil, nil, true},
{"no body", &http.Response{StatusCode: 200, Body: io.NopCloser(badReader{})}, nil, nil, true},
{"bad json", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(""))}, nil, nil, true},
{"not success", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"fail"}`))}, nil, nil, true},
{"unknown", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(`{"status":"success"}`))}, nil, nil, true},
{
"matrix", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(matrixString))}, nil,
"matrix", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(matrixString))}, nil,
&LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
Status: loghttp.QueryStatusSuccess,
@ -137,7 +136,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
{
"matrix-empty-streams",
&http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(matrixStringEmptyResult))},
&http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(matrixStringEmptyResult))},
nil,
&LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
@ -152,7 +151,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
},
{
"vector-empty-streams",
&http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(vectorStringEmptyResult))},
&http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(vectorStringEmptyResult))},
nil,
&LokiPromResponse{
Response: &queryrangebase.PrometheusResponse{
@ -166,7 +165,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
"streams v1", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(streamsString))},
"streams v1", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsString))},
&LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/loki/api/v1/query_range"},
&LokiResponse{
Status: loghttp.QueryStatusSuccess,
@ -181,7 +180,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
"streams legacy", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(streamsString))},
"streams legacy", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(streamsString))},
&LokiRequest{Direction: logproto.FORWARD, Limit: 100, Path: "/api/prom/query_range"},
&LokiResponse{
Status: loghttp.QueryStatusSuccess,
@ -196,7 +195,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
"series", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(seriesString))},
"series", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(seriesString))},
&LokiSeriesRequest{Path: "/loki/api/v1/series"},
&LokiSeriesResponse{
Status: "success",
@ -205,7 +204,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
"labels legacy", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(labelsString))},
"labels legacy", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(labelsString))},
&LokiLabelNamesRequest{Path: "/api/prom/label"},
&LokiLabelNamesResponse{
Status: "success",
@ -214,7 +213,7 @@ func Test_codec_DecodeResponse(t *testing.T) {
}, false,
},
{
"index stats", &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(indexStatsString))},
"index stats", &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(indexStatsString))},
&logproto.IndexStatsRequest{},
&IndexStatsResponse{
Response: &logproto.IndexStatsResponse{
@ -456,7 +455,7 @@ func Test_codec_EncodeResponse(t *testing.T) {
}
if err == nil {
require.Equal(t, 200, got.StatusCode)
body, err := ioutil.ReadAll(got.Body)
body, err := io.ReadAll(got.Body)
require.Nil(t, err)
bodyString := string(body)
require.JSONEq(t, tt.body, bodyString)
@ -1308,7 +1307,7 @@ func Benchmark_CodecDecodeLogs(b *testing.B) {
require.Nil(b, err)
reader := bytes.NewReader(buf)
resp.Body = &buffer{
ReadCloser: ioutil.NopCloser(reader),
ReadCloser: io.NopCloser(reader),
buff: buf,
}
b.ResetTimer()
@ -1344,7 +1343,7 @@ func Benchmark_CodecDecodeSamples(b *testing.B) {
buf, err := io.ReadAll(resp.Body)
require.Nil(b, err)
reader := bytes.NewReader(buf)
resp.Body = ioutil.NopCloser(reader)
resp.Body = io.NopCloser(reader)
b.ResetTimer()
b.ReportAllocs()

@ -3,7 +3,7 @@ package queryrange
import (
"bytes"
"context"
"io/ioutil"
"io"
"net/http"
jsoniter "github.com/json-iterator/go"
@ -64,7 +64,7 @@ func (p *LokiPromResponse) encode(ctx context.Context) (*http.Response, error) {
Header: http.Header{
"Content-Type": []string{"application/json"},
},
Body: ioutil.NopCloser(bytes.NewBuffer(b)),
Body: io.NopCloser(bytes.NewBuffer(b)),
StatusCode: http.StatusOK,
}
return &resp, nil

@ -3,7 +3,7 @@ package queryrangebase
import (
"bytes"
"context"
"io/ioutil"
"io"
"math/rand"
"net/http"
"testing"
@ -31,7 +31,7 @@ func BenchmarkPrometheusCodec_DecodeResponse(b *testing.B) {
for n := 0; n < b.N; n++ {
_, err := PrometheusCodec.DecodeResponse(context.Background(), &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader(encodedRes)),
Body: io.NopCloser(bytes.NewReader(encodedRes)),
ContentLength: int64(len(encodedRes)),
}, nil)
require.NoError(b, err)

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"math"
"net/http"
"net/url"
@ -228,7 +228,7 @@ func (prometheusCodec) EncodeRequest(ctx context.Context, r Request) (*http.Requ
func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ Request) (Response, error) {
if r.StatusCode/100 != 2 {
body, _ := ioutil.ReadAll(r.Body)
body, _ := io.ReadAll(r.Body)
return nil, httpgrpc.Errorf(r.StatusCode, string(body))
}
log, ctx := spanlogger.New(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck
@ -297,7 +297,7 @@ func (prometheusCodec) EncodeResponse(ctx context.Context, res Response) (*http.
Header: http.Header{
"Content-Type": []string{"application/json"},
},
Body: ioutil.NopCloser(bytes.NewBuffer(b)),
Body: io.NopCloser(bytes.NewBuffer(b)),
StatusCode: http.StatusOK,
ContentLength: int64(len(b)),
}

@ -3,7 +3,7 @@ package queryrangebase
import (
"bytes"
"context"
"io/ioutil"
"io"
"net/http"
"strconv"
"testing"
@ -96,7 +96,7 @@ func TestResponse(t *testing.T) {
response := &http.Response{
StatusCode: 200,
Header: http.Header{"Content-Type": []string{"application/json"}},
Body: ioutil.NopCloser(bytes.NewBuffer([]byte(tc.body))),
Body: io.NopCloser(bytes.NewBuffer([]byte(tc.body))),
}
resp, err := PrometheusCodec.DecodeResponse(context.Background(), response, nil)
require.NoError(t, err)
@ -106,7 +106,7 @@ func TestResponse(t *testing.T) {
response = &http.Response{
StatusCode: 200,
Header: http.Header{"Content-Type": []string{"application/json"}},
Body: ioutil.NopCloser(bytes.NewBuffer([]byte(tc.body))),
Body: io.NopCloser(bytes.NewBuffer([]byte(tc.body))),
ContentLength: int64(len(tc.body)),
}
resp2, err := PrometheusCodec.EncodeResponse(context.Background(), resp)

@ -19,7 +19,6 @@ import (
"context"
"flag"
"io"
"io/ioutil"
"net/http"
"time"
@ -176,7 +175,7 @@ func (q roundTripper) Do(ctx context.Context, r Request) (Response, error) {
return nil, err
}
defer func() {
_, _ = io.Copy(ioutil.Discard, io.LimitReader(response.Body, 1024)) //nolint:errcheck
_, _ = io.Copy(io.Discard, io.LimitReader(response.Body, 1024)) //nolint:errcheck
response.Body.Close()
}()

@ -3,7 +3,7 @@ package queryrange
import (
"bytes"
"context"
"io/ioutil"
"io"
"math"
"net/http"
"net/http/httptest"
@ -428,7 +428,7 @@ func TestPostQueries(t *testing.T) {
"query": {`{app="foo"} |~ "foo"`},
}
body := bytes.NewBufferString(data.Encode())
req.Body = ioutil.NopCloser(body)
req.Body = io.NopCloser(body)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
req = req.WithContext(user.InjectOrgID(context.Background(), "1"))

@ -2,7 +2,7 @@ package base
import (
"encoding/json"
"io/ioutil"
"io"
"net/http"
"net/url"
"sort"
@ -448,7 +448,7 @@ func (a *API) CreateRuleGroup(w http.ResponseWriter, req *http.Request) {
return
}
payload, err := ioutil.ReadAll(req.Body)
payload, err := io.ReadAll(req.Body)
if err != nil {
level.Error(logger).Log("msg", "unable to read rule group payload", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)

@ -5,7 +5,6 @@ import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
@ -33,7 +32,7 @@ func TestRuler_rules(t *testing.T) {
a.PrometheusRules(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
body, _ := io.ReadAll(resp.Body)
// Check status code and status response
responseJSON := response{}
@ -88,7 +87,7 @@ func TestRuler_rules_special_characters(t *testing.T) {
a.PrometheusRules(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
body, _ := io.ReadAll(resp.Body)
// Check status code and status response
responseJSON := response{}
@ -143,7 +142,7 @@ func TestRuler_alerts(t *testing.T) {
a.PrometheusAlerts(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
body, _ := io.ReadAll(resp.Body)
// Check status code and status response
responseJSON := response{}

@ -3,7 +3,7 @@ package base
import (
"context"
"fmt"
"io/ioutil"
"io"
"math/rand"
"net/http"
"net/http/httptest"
@ -1089,7 +1089,7 @@ func TestRuler_ListAllRules(t *testing.T) {
router.ServeHTTP(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
body, _ := io.ReadAll(resp.Body)
// Check status code and header
require.Equal(t, http.StatusOK, resp.StatusCode)

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
@ -184,7 +184,7 @@ func (GroupLoader) Parse(query string) (parser.Expr, error) {
}
func (g GroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {
b, err := ioutil.ReadFile(identifier)
b, err := os.ReadFile(identifier)
if err != nil {
return nil, []error{errors.Wrap(err, identifier)}
}

@ -3,7 +3,6 @@ package ruler
import (
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
@ -250,10 +249,10 @@ groups:
} {
t.Run(tc.desc, func(t *testing.T) {
var loader GroupLoader
f, err := ioutil.TempFile(os.TempDir(), "rules")
f, err := os.CreateTemp(os.TempDir(), "rules")
require.Nil(t, err)
defer os.Remove(f.Name())
err = ioutil.WriteFile(f.Name(), []byte(tc.data), 0777)
err = os.WriteFile(f.Name(), []byte(tc.data), 0777)
require.Nil(t, err)
_, errs := loader.Load(f.Name())

@ -5,7 +5,7 @@ import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"io"
"strings"
"github.com/go-kit/log"
@ -66,7 +66,7 @@ func (b *BucketRuleStore) getRuleGroup(ctx context.Context, userID, namespace, g
}
defer func() { _ = reader.Close() }()
buf, err := ioutil.ReadAll(reader)
buf, err := io.ReadAll(reader)
if err != nil {
return nil, errors.Wrapf(err, "failed to read rule group %s", objectKey)
}

@ -3,7 +3,6 @@ package local
import (
"context"
"flag"
"io/ioutil"
"os"
"path/filepath"
@ -46,25 +45,31 @@ func NewLocalRulesClient(cfg Config, loader promRules.GroupLoader) (*Client, err
func (l *Client) ListAllUsers(ctx context.Context) ([]string, error) {
root := l.cfg.Directory
infos, err := ioutil.ReadDir(root)
dirEntries, err := os.ReadDir(root)
if err != nil {
return nil, errors.Wrapf(err, "unable to read dir %s", root)
}
var result []string
for _, info := range infos {
// After resolving link, info.Name() may be different than user, so keep original name.
user := info.Name()
for _, entry := range dirEntries {
// After resolving link, entry.Name() may be different than user, so keep original name.
user := entry.Name()
if info.Mode()&os.ModeSymlink != 0 {
// ioutil.ReadDir only returns result of LStat. Calling Stat resolves symlink.
info, err = os.Stat(filepath.Join(root, info.Name()))
var isDir bool
if entry.Type()&os.ModeSymlink != 0 {
// os.ReadDir only returns result of LStat. Calling Stat resolves symlink.
fi, err := os.Stat(filepath.Join(root, entry.Name()))
if err != nil {
return nil, err
}
isDir = fi.IsDir()
} else {
isDir = entry.IsDir()
}
if info.IsDir() {
if isDir {
result = append(result, user)
}
}
@ -130,25 +135,30 @@ func (l *Client) loadAllRulesGroupsForUser(ctx context.Context, userID string) (
var allLists rulespb.RuleGroupList
root := filepath.Join(l.cfg.Directory, userID)
infos, err := ioutil.ReadDir(root)
dirEntries, err := os.ReadDir(root)
if err != nil {
return nil, errors.Wrapf(err, "unable to read rule dir %s", root)
}
for _, info := range infos {
// After resolving link, info.Name() may be different than namespace, so keep original name.
namespace := info.Name()
for _, entry := range dirEntries {
// After resolving link, entry.Name() may be different than namespace, so keep original name.
namespace := entry.Name()
var isDir bool
if info.Mode()&os.ModeSymlink != 0 {
// ioutil.ReadDir only returns result of LStat. Calling Stat resolves symlink.
path := filepath.Join(root, info.Name())
info, err = os.Stat(path)
if entry.Type()&os.ModeSymlink != 0 {
// os.ReadDir only returns result of LStat. Calling Stat resolves symlink.
path := filepath.Join(root, entry.Name())
fi, err := os.Stat(path)
if err != nil {
return nil, errors.Wrapf(err, "unable to stat rule file %s", path)
}
isDir = fi.IsDir()
} else {
isDir = entry.IsDir()
}
if info.IsDir() {
if isDir {
continue
}

@ -2,7 +2,6 @@ package local
import (
"context"
"io/ioutil"
"os"
"path"
"testing"
@ -51,7 +50,7 @@ func TestClient_LoadAllRuleGroups(t *testing.T) {
err = os.Symlink(user1, path.Join(dir, user2))
require.NoError(t, err)
err = ioutil.WriteFile(path.Join(dir, user1, namespace1), b, 0777)
err = os.WriteFile(path.Join(dir, user1, namespace1), b, 0777)
require.NoError(t, err)
const ignoredDir = "ignored-dir"

@ -5,7 +5,7 @@ import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"io"
"strings"
"github.com/go-kit/log"
@ -63,7 +63,7 @@ func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string, rg *rule
}
defer func() { _ = reader.Close() }()
buf, err := ioutil.ReadAll(reader)
buf, err := io.ReadAll(reader)
if err != nil {
return nil, errors.Wrapf(err, "failed to read rule group %s", objectKey)
}

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"sort"
"strings"
"sync"
@ -404,7 +404,7 @@ func (m *mockS3) PutObjectWithContext(_ aws.Context, req *s3.PutObjectInput, _ .
m.Lock()
defer m.Unlock()
buf, err := ioutil.ReadAll(req.Body)
buf, err := io.ReadAll(req.Body)
if err != nil {
return nil, err
}
@ -423,6 +423,6 @@ func (m *mockS3) GetObjectWithContext(_ aws.Context, req *s3.GetObjectInput, _ .
}
return &s3.GetObjectOutput{
Body: ioutil.NopCloser(bytes.NewReader(buf)),
Body: io.NopCloser(bytes.NewReader(buf)),
}, nil
}

@ -6,7 +6,7 @@ import (
"crypto/tls"
"flag"
"fmt"
"io/ioutil"
"os"
"strings"
"time"
@ -202,7 +202,7 @@ func (cfg *Config) setClusterConfig(cluster *gocql.ClusterConfig) error {
if cfg.Auth {
password := cfg.Password.String()
if cfg.PasswordFile != "" {
passwordBytes, err := ioutil.ReadFile(cfg.PasswordFile)
passwordBytes, err := os.ReadFile(cfg.PasswordFile)
if err != nil {
return errors.Errorf("Could not read Cassandra password file: %v", err)
}

@ -2,7 +2,6 @@ package local
import (
"io"
"io/ioutil"
"os"
"time"
@ -27,7 +26,7 @@ func (f *fixture) Clients() (
indexClient index.Client, chunkClient client.Client, tableClient index.TableClient,
schemaConfig config.SchemaConfig, closer io.Closer, err error,
) {
f.dirname, err = ioutil.TempDir(os.TempDir(), "boltdb")
f.dirname, err = os.MkdirTemp(os.TempDir(), "boltdb")
if err != nil {
return
}

@ -3,7 +3,6 @@ package local
import (
"bytes"
"context"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -41,12 +40,12 @@ func TestFSObjectClient_DeleteChunksBefore(t *testing.T) {
require.NoError(t, f.Close())
// Verify whether all files are created
files, _ := ioutil.ReadDir(".")
files, _ := os.ReadDir(".")
require.Equal(t, 2, len(files), "Number of files should be 2")
// No files should be deleted, since all of them are not much older
require.NoError(t, bucketClient.DeleteChunksBefore(context.Background(), time.Now().Add(-deleteFilesOlderThan)))
files, _ = ioutil.ReadDir(".")
files, _ = os.ReadDir(".")
require.Equal(t, 2, len(files), "Number of files should be 2")
// Changing mtime of file1 to make it look older
@ -54,7 +53,7 @@ func TestFSObjectClient_DeleteChunksBefore(t *testing.T) {
require.NoError(t, bucketClient.DeleteChunksBefore(context.Background(), time.Now().Add(-deleteFilesOlderThan)))
// Verifying whether older file got deleted
files, _ = ioutil.ReadDir(".")
files, _ = os.ReadDir(".")
require.Equal(t, 1, len(files), "Number of files should be 1 after enforcing retention")
}

@ -6,7 +6,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"time"
@ -132,7 +131,7 @@ func (s *SwiftObjectClient) GetObject(ctx context.Context, objectKey string) (io
return nil, 0, err
}
return ioutil.NopCloser(&buf), int64(buf.Len()), nil
return io.NopCloser(&buf), int64(buf.Len()), nil
}
// PutObject puts the specified bytes into the configured Swift container at the provided key

@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"sort"
"strings"
"sync"
@ -435,11 +434,11 @@ func (m *MockStorage) GetObject(ctx context.Context, objectKey string) (io.ReadC
return nil, 0, errStorageObjectNotFound
}
return ioutil.NopCloser(bytes.NewReader(buf)), int64(len(buf)), nil
return io.NopCloser(bytes.NewReader(buf)), int64(len(buf)), nil
}
func (m *MockStorage) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error {
buf, err := ioutil.ReadAll(object)
buf, err := io.ReadAll(object)
if err != nil {
return err
}

@ -3,7 +3,7 @@ package compactor
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
@ -139,7 +139,7 @@ func TestCompactor_RunCompaction(t *testing.T) {
for i := tableNumStart; i <= tableNumEnd; i++ {
name := fmt.Sprintf("%s%d", indexTablePrefix, i)
// verify that we have only 1 file left in storage after compaction.
files, err := ioutil.ReadDir(filepath.Join(tablesPath, name))
files, err := os.ReadDir(filepath.Join(tablesPath, name))
require.NoError(t, err)
require.Len(t, files, 1)
require.True(t, strings.HasSuffix(files[0].Name(), ".gz"))

@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"sync"
@ -167,7 +166,7 @@ func (c *deleteRequestsClient) getRequestsFromServer(ctx context.Context, userID
return nil, err
}
defer func() {
_, _ = io.Copy(ioutil.Discard, resp.Body)
_, _ = io.Copy(io.Discard, resp.Body)
_ = resp.Body.Close()
}()

@ -5,7 +5,6 @@ import (
"encoding/binary"
"fmt"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -253,7 +252,7 @@ func (r *markerProcessor) processPath(path string, deleteFunc func(ctx context.C
queue = make(chan *keyPair)
)
// we use a copy to view the file so that we can read and update at the same time.
viewFile, err := ioutil.TempFile("/tmp/", "marker-view-")
viewFile, err := os.CreateTemp("/tmp/", "marker-view-")
if err != nil {
return err
}

@ -3,7 +3,6 @@ package compactor
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -303,7 +302,7 @@ func TestTable_CompactionRetention(t *testing.T) {
"emptied table": {
dbsSetup: setup,
assert: func(t *testing.T, storagePath, tableName string) {
_, err := ioutil.ReadDir(filepath.Join(storagePath, tableName))
_, err := os.ReadDir(filepath.Join(storagePath, tableName))
require.True(t, os.IsNotExist(err))
},
tableMarker: TableMarkerFunc(func(ctx context.Context, tableName, userID string, indexFile retention.IndexProcessor, logger log.Logger) (bool, bool, error) {
@ -411,14 +410,14 @@ func validateTable(t *testing.T, path string, expectedNumCommonDBs, numUsers int
}
func listDir(t *testing.T, path string) (files, folders []string) {
filesInfo, err := ioutil.ReadDir(path)
dirEntries, err := os.ReadDir(path)
require.NoError(t, err)
for _, fileInfo := range filesInfo {
if fileInfo.IsDir() {
folders = append(folders, fileInfo.Name())
for _, entry := range dirEntries {
if entry.IsDir() {
folders = append(folders, entry.Name())
} else {
files = append(files, fileInfo.Name())
files = append(files, entry.Name())
}
}
@ -446,7 +445,7 @@ func TestTable_CompactionFailure(t *testing.T) {
SetupTable(t, filepath.Join(objectStoragePath, tableName), IndexesConfig{NumCompactedFiles: numDBs}, PerUserIndexesConfig{})
// put a corrupt zip file in the table which should cause the compaction to fail in the middle because it would fail to open that file with boltdb client.
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, "fail.gz"), []byte("fail the compaction"), 0o666))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, "fail.gz"), []byte("fail the compaction"), 0o666))
// do the compaction
objectClient, err := local.NewFSObjectClient(local.FSConfig{Directory: objectStoragePath})
@ -460,7 +459,7 @@ func TestTable_CompactionFailure(t *testing.T) {
require.Error(t, table.compact(false))
// ensure that files in storage are intact.
files, err := ioutil.ReadDir(tablePathInStorage)
files, err := os.ReadDir(tablePathInStorage)
require.NoError(t, err)
require.Len(t, files, numDBs+1)

@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -82,7 +81,7 @@ func SetupTable(t *testing.T, path string, commonDBsConfig IndexesConfig, perUse
idx := 0
for filename, content := range commonIndexes {
filePath := filepath.Join(path, strings.TrimSuffix(filename, ".gz"))
require.NoError(t, ioutil.WriteFile(filePath, []byte(content), 0777))
require.NoError(t, os.WriteFile(filePath, []byte(content), 0777))
if strings.HasSuffix(filename, ".gz") {
compressFile(t, filePath)
}
@ -93,7 +92,7 @@ func SetupTable(t *testing.T, path string, commonDBsConfig IndexesConfig, perUse
require.NoError(t, util.EnsureDirectory(filepath.Join(path, userID)))
for filename, content := range files {
filePath := filepath.Join(path, userID, strings.TrimSuffix(filename, ".gz"))
require.NoError(t, ioutil.WriteFile(filePath, []byte(content), 0777))
require.NoError(t, os.WriteFile(filePath, []byte(content), 0777))
if strings.HasSuffix(filename, ".gz") {
compressFile(t, filePath)
}
@ -349,15 +348,15 @@ func (i testIndexCompactor) OpenCompactedIndexFile(_ context.Context, path, _, _
func verifyCompactedIndexTable(t *testing.T, commonDBsConfig IndexesConfig, perUserDBsConfig PerUserIndexesConfig, tablePathInStorage string) {
commonIndexes, perUserIndexes := buildFilesContent(commonDBsConfig, perUserDBsConfig)
filesInfo, err := ioutil.ReadDir(tablePathInStorage)
dirEntries, err := os.ReadDir(tablePathInStorage)
require.NoError(t, err)
files, folders := []string{}, []string{}
for _, fileInfo := range filesInfo {
if fileInfo.IsDir() {
folders = append(folders, fileInfo.Name())
for _, entry := range dirEntries {
if entry.IsDir() {
folders = append(folders, entry.Name())
} else {
files = append(files, fileInfo.Name())
files = append(files, entry.Name())
}
}
@ -401,12 +400,12 @@ func verifyCompactedIndexTable(t *testing.T, commonDBsConfig IndexesConfig, perU
require.Len(t, folders, len(expectedUserIndexContent), fmt.Sprintf("%v", commonIndexes))
for _, userID := range folders {
filesInfo, err := ioutil.ReadDir(filepath.Join(tablePathInStorage, userID))
entries, err := os.ReadDir(filepath.Join(tablePathInStorage, userID))
require.NoError(t, err)
require.Len(t, filesInfo, 1)
require.False(t, filesInfo[0].IsDir())
require.Len(t, entries, 1)
require.False(t, entries[0].IsDir())
sort.Strings(expectedUserIndexContent[userID])
require.Equal(t, strings.Join(expectedUserIndexContent[userID], ""), string(readFile(t, filepath.Join(tablePathInStorage, userID, filesInfo[0].Name()))))
require.Equal(t, strings.Join(expectedUserIndexContent[userID], ""), string(readFile(t, filepath.Join(tablePathInStorage, userID, entries[0].Name()))))
}
}
@ -418,7 +417,7 @@ func readFile(t *testing.T, path string) []byte {
path = decompressedFilePath
}
fileContent, err := ioutil.ReadFile(path)
fileContent, err := os.ReadFile(path)
require.NoError(t, err)
return fileContent

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -113,18 +112,18 @@ func (t *indexSet) Init(forQuerying bool) (err error) {
t.indexMtx.markReady()
}()
filesInfo, err := ioutil.ReadDir(t.cacheLocation)
dirEntries, err := os.ReadDir(t.cacheLocation)
if err != nil {
return err
}
// open all the locally present files first to avoid downloading them again during sync operation below.
for _, fileInfo := range filesInfo {
if fileInfo.IsDir() {
for _, entry := range dirEntries {
if entry.IsDir() {
continue
}
fullPath := filepath.Join(t.cacheLocation, fileInfo.Name())
fullPath := filepath.Join(t.cacheLocation, entry.Name())
// if we fail to open an index file, lets skip it and let sync operation re-download the file from storage.
idx, err := t.openIndexFileFunc(fullPath)
if err != nil {
@ -138,7 +137,7 @@ func (t *indexSet) Init(forQuerying bool) (err error) {
continue
}
t.index[fileInfo.Name()] = idx
t.index[entry.Name()] = idx
}
level.Debug(logger).Log("msg", fmt.Sprintf("opened %d local files, now starting sync operation", len(t.index)))

@ -3,7 +3,6 @@ package downloads
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -135,14 +134,14 @@ func TestIndexSet_Sync(t *testing.T) {
// first, let us add a new file and refresh the index list cache
oneMoreDB := "one-more-db"
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
indexSet.baseIndexSet.RefreshIndexListCache(context.Background())
// now, without syncing the indexset, let us compact the index in storage
compactedDBName := "compacted-db"
require.NoError(t, os.RemoveAll(tablePathInStorage))
require.NoError(t, util.EnsureDirectory(tablePathInStorage))
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
indexesSetup = []string{compactedDBName}
// verify that we are getting errIndexListCacheTooStale without refreshing the list cache

@ -3,7 +3,7 @@ package downloads
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
@ -76,7 +76,7 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, openInd
return nil, err
}
filesInfo, err := ioutil.ReadDir(cacheLocation)
dirEntries, err := os.ReadDir(cacheLocation)
if err != nil {
return nil, err
}
@ -93,15 +93,15 @@ func LoadTable(name, cacheLocation string, storageClient storage.Client, openInd
metrics: metrics,
}
level.Debug(table.logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(filesInfo))
level.Debug(table.logger).Log("msg", fmt.Sprintf("opening locally present files for table %s", name), "files", fmt.Sprint(dirEntries))
// common index files are outside the directories and user index files are in the directories
for _, fileInfo := range filesInfo {
if !fileInfo.IsDir() {
for _, entry := range dirEntries {
if !entry.IsDir() {
continue
}
userID := fileInfo.Name()
userID := entry.Name()
userIndexSet, err := NewIndexSet(name, userID, filepath.Join(cacheLocation, userID),
table.baseUserIndexSet, openIndexFileFunc, loggerWithUserID(table.logger, userID))
if err != nil {

@ -3,7 +3,7 @@ package downloads
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
@ -373,33 +373,33 @@ func (tm *tableManager) findUsersInTableForQueryReadiness(tableNumber int64, use
// loadLocalTables loads tables present locally.
func (tm *tableManager) loadLocalTables() error {
filesInfo, err := ioutil.ReadDir(tm.cfg.CacheDir)
dirEntries, err := os.ReadDir(tm.cfg.CacheDir)
if err != nil {
return err
}
for _, fileInfo := range filesInfo {
if !fileInfo.IsDir() {
for _, entry := range dirEntries {
if !entry.IsDir() {
continue
}
tableNumber, err := extractTableNumberFromName(fileInfo.Name())
tableNumber, err := extractTableNumberFromName(entry.Name())
if err != nil {
return err
}
if tableNumber == -1 || !tm.tableRangesToHandle.TableInRange(tableNumber, fileInfo.Name()) {
if tableNumber == -1 || !tm.tableRangesToHandle.TableInRange(tableNumber, entry.Name()) {
continue
}
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading local table %s", fileInfo.Name()))
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading local table %s", entry.Name()))
table, err := LoadTable(fileInfo.Name(), filepath.Join(tm.cfg.CacheDir, fileInfo.Name()),
table, err := LoadTable(entry.Name(), filepath.Join(tm.cfg.CacheDir, entry.Name()),
tm.indexStorageClient, tm.openIndexFileFunc, tm.metrics)
if err != nil {
return err
}
tm.tables[fileInfo.Name()] = table
tm.tables[entry.Name()] = table
}
return nil

@ -2,7 +2,7 @@ package downloads
import (
"context"
"io/ioutil"
"io"
"os"
"path/filepath"
"sort"
@ -301,8 +301,8 @@ func TestTable_Sync(t *testing.T) {
newDB := "new"
require.NoError(t, os.MkdirAll(tablePathInStorage, 0755))
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, deleteDB), []byte(deleteDB), 0755))
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, noUpdatesDB), []byte(noUpdatesDB), 0755))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, deleteDB), []byte(deleteDB), 0755))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, noUpdatesDB), []byte(noUpdatesDB), 0755))
// create table instance
table, stopFunc := buildTestTable(t, tempDir)
@ -326,7 +326,7 @@ func TestTable_Sync(t *testing.T) {
// remove deleteDB and add the newDB
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, deleteDB)))
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, newDB), []byte(newDB), 0755))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, newDB), []byte(newDB), 0755))
// sync the table
table.storageClient.RefreshIndexListCache(context.Background())
@ -347,13 +347,13 @@ func TestTable_Sync(t *testing.T) {
noUpdatesDB: {},
newDB: {},
}
filesInfo, err := ioutil.ReadDir(tablePathInStorage)
dirEntries, err := os.ReadDir(tablePathInStorage)
require.NoError(t, err)
require.Len(t, table.indexSets[""].(*indexSet).index, len(expectedFilesInDir))
for _, fileInfo := range filesInfo {
require.False(t, fileInfo.IsDir())
_, ok := expectedFilesInDir[fileInfo.Name()]
for _, entry := range dirEntries {
require.False(t, entry.IsDir())
_, ok := expectedFilesInDir[entry.Name()]
require.True(t, ok)
}
@ -361,12 +361,12 @@ func TestTable_Sync(t *testing.T) {
// first, let us add a new file and refresh the index list cache
oneMoreDB := "one-more-db"
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, oneMoreDB), []byte(oneMoreDB), 0755))
table.storageClient.RefreshIndexListCache(context.Background())
// now, without syncing the table, let us compact the index in storage
compactedDBName := "compacted-db"
require.NoError(t, ioutil.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
require.NoError(t, os.WriteFile(filepath.Join(tablePathInStorage, compactedDBName), []byte(compactedDBName), 0755))
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, noUpdatesDB)))
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, newDB)))
require.NoError(t, os.Remove(filepath.Join(tablePathInStorage, oneMoreDB)))
@ -461,7 +461,7 @@ func verifyIndexForEach(t *testing.T, expectedIndexes []string, forEachFunc func
require.NoError(t, err)
// read the contents of the index.
buf, err := ioutil.ReadAll(readSeeker)
buf, err := io.ReadAll(readSeeker)
require.NoError(t, err)
// see if it matches the name of the file

@ -2,7 +2,6 @@ package downloads
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@ -40,7 +39,7 @@ func setupIndexesAtPath(t *testing.T, userID, path string, start, end int) []str
fileName := buildIndexFilename(userID, start)
indexPath := filepath.Join(path, fileName)
require.NoError(t, ioutil.WriteFile(indexPath, []byte(fileName), 0755))
require.NoError(t, os.WriteFile(indexPath, []byte(fileName), 0755))
testIndexes = append(testIndexes, indexPath)
}

@ -3,7 +3,8 @@ package storage
import (
"bytes"
"context"
"io/ioutil"
"io"
"os"
"path/filepath"
"testing"
@ -28,7 +29,7 @@ func TestIndexStorageClient(t *testing.T) {
for tableName, files := range tablesToSetup {
require.NoError(t, util.EnsureDirectory(filepath.Join(tempDir, storageKeyPrefix, tableName)))
for _, file := range files {
err := ioutil.WriteFile(filepath.Join(tempDir, storageKeyPrefix, tableName, file), []byte(tableName+file), 0o666)
err := os.WriteFile(filepath.Join(tempDir, storageKeyPrefix, tableName, file), []byte(tableName+file), 0o666)
require.NoError(t, err)
}
}
@ -53,7 +54,7 @@ func TestIndexStorageClient(t *testing.T) {
readCloser, err := indexStorageClient.GetFile(context.Background(), table, fileInStorage.Name)
require.NoError(t, err)
b, err := ioutil.ReadAll(readCloser)
b, err := io.ReadAll(readCloser)
require.NoError(t, readCloser.Close())
require.NoError(t, err)
require.EqualValues(t, []byte(table+fileInStorage.Name), b)

@ -3,7 +3,6 @@ package storage
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -23,7 +22,7 @@ func Test_GetFileFromStorage(t *testing.T) {
testData := []byte("test-data")
tableName := "test-table"
require.NoError(t, util.EnsureDirectory(filepath.Join(tempDir, tableName)))
require.NoError(t, ioutil.WriteFile(filepath.Join(tempDir, tableName, "src"), testData, 0o666))
require.NoError(t, os.WriteFile(filepath.Join(tempDir, tableName, "src"), testData, 0o666))
// try downloading the file from the storage.
objectClient, err := local.NewFSObjectClient(local.FSConfig{Directory: tempDir})
@ -37,7 +36,7 @@ func Test_GetFileFromStorage(t *testing.T) {
}))
// verify the contents of the downloaded file.
b, err := ioutil.ReadFile(filepath.Join(tempDir, "dest"))
b, err := os.ReadFile(filepath.Join(tempDir, "dest"))
require.NoError(t, err)
require.Equal(t, testData, b)
@ -52,7 +51,7 @@ func Test_GetFileFromStorage(t *testing.T) {
}))
// verify the contents of the downloaded gz file.
b, err = ioutil.ReadFile(filepath.Join(tempDir, "dest.gz"))
b, err = os.ReadFile(filepath.Join(tempDir, "dest.gz"))
require.NoError(t, err)
require.Equal(t, testData, b)

@ -2,7 +2,8 @@ package uploads
import (
"context"
"io/ioutil"
"io"
"os"
"path/filepath"
"testing"
"time"
@ -72,7 +73,7 @@ func TestIndexSet_Upload(t *testing.T) {
// compare the contents of created test index and uploaded index in storage
_, err = testIndex.Seek(0, 0)
require.NoError(t, err)
expectedIndexContent, err := ioutil.ReadAll(testIndex.File)
expectedIndexContent, err := io.ReadAll(testIndex.File)
require.NoError(t, err)
require.Equal(t, expectedIndexContent, readCompressedFile(t, indexPathInStorage))
}
@ -156,7 +157,7 @@ func readCompressedFile(t *testing.T, path string) []byte {
decompressedFilePath := filepath.Join(tempDir, "decompressed")
testutil.DecompressFile(t, path, decompressedFilePath)
fileContent, err := ioutil.ReadFile(decompressedFilePath)
fileContent, err := os.ReadFile(decompressedFilePath)
require.NoError(t, err)
return fileContent

@ -5,7 +5,6 @@ import (
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -523,7 +522,7 @@ func compareCompactedTable(t *testing.T, srcTable string, tableCompactor *tableC
func readIndexFromFiles(t *testing.T, tablePath string) map[string]map[string]string {
tempDir := t.TempDir()
filesInfo, err := ioutil.ReadDir(tablePath)
dirEntries, err := os.ReadDir(tablePath)
if err != nil && os.IsNotExist(err) {
return map[string]map[string]string{}
}
@ -531,15 +530,15 @@ func readIndexFromFiles(t *testing.T, tablePath string) map[string]map[string]st
dbRecords := make(map[string]map[string]string)
for _, fileInfo := range filesInfo {
if fileInfo.IsDir() {
for _, entry := range dirEntries {
if entry.IsDir() {
continue
}
filePath := filepath.Join(tablePath, fileInfo.Name())
filePath := filepath.Join(tablePath, entry.Name())
if strings.HasSuffix(filePath, ".gz") {
filePath = filepath.Join(tempDir, fileInfo.Name())
testutil.DecompressFile(t, filepath.Join(tablePath, fileInfo.Name()), filePath)
filePath = filepath.Join(tempDir, entry.Name())
testutil.DecompressFile(t, filepath.Join(tablePath, entry.Name()), filePath)
}
db, err := openBoltdbFileWithNoSync(filePath)

@ -3,7 +3,7 @@ package compactor
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"testing"
@ -142,12 +142,12 @@ type table struct {
func (t *testStore) indexTables() []table {
t.t.Helper()
res := []table{}
indexFilesInfo, err := ioutil.ReadDir(t.indexDir)
dirEntries, err := os.ReadDir(t.indexDir)
require.NoError(t.t, err)
for _, indexFileInfo := range indexFilesInfo {
db, err := shipper_util.SafeOpenBoltdbFile(filepath.Join(t.indexDir, indexFileInfo.Name()))
for _, entry := range dirEntries {
db, err := shipper_util.SafeOpenBoltdbFile(filepath.Join(t.indexDir, entry.Name()))
require.NoError(t.t, err)
res = append(res, table{name: indexFileInfo.Name(), DB: db})
res = append(res, table{name: entry.Name(), DB: db})
}
return res
}

@ -3,7 +3,6 @@ package index
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -351,18 +350,18 @@ func (lt *Table) buildFileName(dbName string) string {
func loadBoltDBsFromDir(dir string, metrics *metrics) (map[string]*bbolt.DB, error) {
dbs := map[string]*bbolt.DB{}
filesInfo, err := ioutil.ReadDir(dir)
dirEntries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
for _, fileInfo := range filesInfo {
if fileInfo.IsDir() {
for _, entry := range dirEntries {
if entry.IsDir() {
continue
}
fullPath := filepath.Join(dir, fileInfo.Name())
fullPath := filepath.Join(dir, entry.Name())
if strings.HasSuffix(fileInfo.Name(), indexfile.TempFileSuffix) || strings.HasSuffix(fileInfo.Name(), snapshotFileSuffix) {
if strings.HasSuffix(entry.Name(), indexfile.TempFileSuffix) || strings.HasSuffix(entry.Name(), snapshotFileSuffix) {
// If an ingester is killed abruptly in the middle of an upload operation it could leave out a temp file which holds the snapshot of db for uploading.
// Cleaning up those temp files to avoid problems.
if err := os.Remove(fullPath); err != nil {
@ -395,7 +394,7 @@ func loadBoltDBsFromDir(dir string, metrics *metrics) (map[string]*bbolt.DB, err
continue
}
dbs[fileInfo.Name()] = db
dbs[entry.Name()] = db
}
return dbs, nil

@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
@ -184,7 +183,7 @@ func (tm *TableManager) handoverIndexesToShipper(force bool) {
func (tm *TableManager) loadTables() (map[string]*Table, error) {
localTables := make(map[string]*Table)
filesInfo, err := ioutil.ReadDir(tm.cfg.IndexDir)
dirEntries, err := os.ReadDir(tm.cfg.IndexDir)
if err != nil {
return nil, err
}
@ -195,16 +194,16 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
return nil, err
}
for _, fileInfo := range filesInfo {
if !re.MatchString(fileInfo.Name()) {
for _, entry := range dirEntries {
if !re.MatchString(entry.Name()) {
continue
}
// since we are moving to keeping files for same table in a folder, if current element is a file we need to move it inside a directory with the same name
// i.e file index_123 would be moved to path index_123/index_123.
if !fileInfo.IsDir() {
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", fileInfo.Name()))
filePath := filepath.Join(tm.cfg.IndexDir, fileInfo.Name())
if !entry.IsDir() {
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("found a legacy file %s, moving it to folder with same name", entry.Name()))
filePath := filepath.Join(tm.cfg.IndexDir, entry.Name())
// create a folder with .temp suffix since we can't create a directory with same name as file.
tempDirPath := filePath + ".temp"
@ -213,7 +212,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
}
// move the file to temp dir.
if err := os.Rename(filePath, filepath.Join(tempDirPath, fileInfo.Name())); err != nil {
if err := os.Rename(filePath, filepath.Join(tempDirPath, entry.Name())); err != nil {
return nil, err
}
@ -223,17 +222,17 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
}
}
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading table %s", fileInfo.Name()))
table, err := LoadTable(filepath.Join(tm.cfg.IndexDir, fileInfo.Name()), tm.cfg.Uploader, tm.indexShipper, tm.cfg.MakePerTenantBuckets, tm.metrics)
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("loading table %s", entry.Name()))
table, err := LoadTable(filepath.Join(tm.cfg.IndexDir, entry.Name()), tm.cfg.Uploader, tm.indexShipper, tm.cfg.MakePerTenantBuckets, tm.metrics)
if err != nil {
return nil, err
}
if table == nil {
// if table is nil it means it has no files in it so remove the folder for that table.
err := os.Remove(filepath.Join(tm.cfg.IndexDir, fileInfo.Name()))
err := os.Remove(filepath.Join(tm.cfg.IndexDir, entry.Name()))
if err != nil {
level.Error(util_log.Logger).Log("msg", "failed to remove empty table folder", "table", fileInfo.Name(), "err", err)
level.Error(util_log.Logger).Log("msg", "failed to remove empty table folder", "table", entry.Name(), "err", err)
}
continue
}
@ -250,7 +249,7 @@ func (tm *TableManager) loadTables() (map[string]*Table, error) {
return nil, err
}
localTables[fileInfo.Name()] = table
localTables[entry.Name()] = table
}
return localTables, nil

@ -3,7 +3,7 @@ package index
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
@ -104,7 +104,7 @@ func TestLoadTable(t *testing.T) {
// change a boltdb file to text file which would fail to open.
invalidFilePath := filepath.Join(tablePath, "invalid")
require.NoError(t, ioutil.WriteFile(invalidFilePath, []byte("invalid boltdb file"), 0o666))
require.NoError(t, os.WriteFile(invalidFilePath, []byte("invalid boltdb file"), 0o666))
// verify that changed boltdb file can't be opened.
_, err = local.OpenBoltdbFile(invalidFilePath)
@ -120,9 +120,9 @@ func TestLoadTable(t *testing.T) {
}()
// verify that we still have 3 files(2 valid, 1 invalid)
filesInfo, err := ioutil.ReadDir(tablePath)
dirEntries, err := os.ReadDir(tablePath)
require.NoError(t, err)
require.Len(t, filesInfo, 3)
require.Len(t, dirEntries, 3)
// query the loaded table to see if it has right data.
require.NoError(t, table.Snapshot())
@ -301,9 +301,9 @@ func Test_LoadBoltDBsFromDir(t *testing.T) {
require.NoError(t, boltdb.Close())
}
filesInfo, err := ioutil.ReadDir(tablePath)
dirEntries, err := os.ReadDir(tablePath)
require.NoError(t, err)
require.Len(t, filesInfo, 2)
require.Len(t, dirEntries, 2)
}
func TestTable_ImmutableUploads(t *testing.T) {

@ -4,7 +4,6 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"sync"
@ -126,11 +125,11 @@ func (i *indexClient) getUploaderName() (string, error) {
if !os.IsNotExist(err) {
return "", err
}
if err := ioutil.WriteFile(uploaderFilePath, []byte(uploader), 0o666); err != nil {
if err := os.WriteFile(uploaderFilePath, []byte(uploader), 0o666); err != nil {
return "", err
}
} else {
ub, err := ioutil.ReadFile(uploaderFilePath)
ub, err := os.ReadFile(uploaderFilePath)
if err != nil {
return "", err
}

@ -3,7 +3,6 @@ package tsdb
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
@ -423,7 +422,7 @@ func walsByPeriod(dir string, period period) ([]WalGroup, error) {
}
func walGroups(dir string, period period) (map[int]*WalGroup, error) {
files, err := ioutil.ReadDir(managerWalDir(dir))
files, err := os.ReadDir(managerWalDir(dir))
if err != nil {
return nil, err
}

@ -22,7 +22,6 @@ import (
"hash"
"hash/crc32"
"io"
"io/ioutil"
"math"
"os"
"path/filepath"
@ -1200,7 +1199,7 @@ func (b RealByteSlice) Sub(start, end int) ByteSlice {
// NewReader returns a new index reader on the given byte slice. It automatically
// handles different format versions.
func NewReader(b ByteSlice) (*Reader, error) {
return newReader(b, ioutil.NopCloser(nil))
return newReader(b, io.NopCloser(nil))
}
type nopCloser struct{}
@ -1209,7 +1208,7 @@ func (nopCloser) Close() error { return nil }
// NewFileReader returns a new index reader against the given index file.
func NewFileReader(path string) (*Reader, error) {
b, err := ioutil.ReadFile(path)
b, err := os.ReadFile(path)
if err != nil {
return nil, err
}

@ -17,7 +17,6 @@ import (
"context"
"fmt"
"hash/crc32"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
@ -484,7 +483,7 @@ func TestNewFileReaderErrorNoOpenFiles(t *testing.T) {
dir := testutil.NewTemporaryDirectory("block", t)
idxName := filepath.Join(dir.Path(), "index")
err := ioutil.WriteFile(idxName, []byte("corrupted contents"), 0o666)
err := os.WriteFile(idxName, []byte("corrupted contents"), 0o666)
require.NoError(t, err)
_, err = NewFileReader(idxName)

@ -3,7 +3,7 @@ package tsdb
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
@ -95,7 +95,7 @@ func (m *tsdbManager) Start() (err error) {
// load list of multitenant tsdbs
mulitenantDir := managerMultitenantDir(m.dir)
files, err := ioutil.ReadDir(mulitenantDir)
files, err := os.ReadDir(mulitenantDir)
if err != nil {
return err
}
@ -116,7 +116,7 @@ func (m *tsdbManager) Start() (err error) {
}
buckets++
tsdbs, err := ioutil.ReadDir(filepath.Join(mulitenantDir, bucket))
tsdbs, err := os.ReadDir(filepath.Join(mulitenantDir, bucket))
if err != nil {
level.Warn(m.log).Log(
"msg", "failed to open period bucket dir",

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"os"
"strings"
"time"
@ -86,7 +86,7 @@ type TSDBIndex struct {
// Return the index as well as the underlying []byte which isn't exposed as an index
// method but is helpful for building an io.reader for the index shipper
func NewTSDBIndexFromFile(location string, gzip bool) (*TSDBIndex, []byte, error) {
raw, err := ioutil.ReadFile(location)
raw, err := os.ReadFile(location)
if err != nil {
return nil, nil, err
}

@ -2,7 +2,6 @@ package cfg
import (
"flag"
"io/ioutil"
"os"
"testing"
"time"
@ -69,7 +68,7 @@ tls:
func TestDefaultUnmarshal(t *testing.T) {
testContext := func(yamlString string, args []string) TestConfigWrapper {
file, err := ioutil.TempFile("", "config.yaml")
file, err := os.CreateTemp("", "config.yaml")
defer func() {
os.Remove(file.Name())
}()

@ -2,7 +2,7 @@ package cfg
import (
"flag"
"io/ioutil"
"os"
"testing"
"time"
@ -21,7 +21,7 @@ server:
data := NewDynamicConfig(mockApplyDynamicConfig)
fs := flag.NewFlagSet(t.Name(), flag.PanicOnError)
file, err := ioutil.TempFile("", "config.yaml")
file, err := os.CreateTemp("", "config.yaml")
require.NoError(t, err)
_, err = file.WriteString(config)
require.NoError(t, err)

@ -4,7 +4,6 @@ import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
@ -21,7 +20,7 @@ func JSON(f *string) Source {
return nil
}
j, err := ioutil.ReadFile(*f)
j, err := os.ReadFile(*f)
if err != nil {
return err
}
@ -43,7 +42,7 @@ func dJSON(y []byte) Source {
// using https://pkg.go.dev/github.com/drone/envsubst?tab=overview
func YAML(f string, expandEnvVars bool, strict bool) Source {
return func(dst Cloneable) error {
y, err := ioutil.ReadFile(f)
y, err := os.ReadFile(f)
if err != nil {
return err
}

@ -4,7 +4,7 @@ import (
"bytes"
"context"
"html/template"
"io/ioutil"
"io"
"math/rand"
"net/http"
"net/http/httptest"
@ -215,6 +215,6 @@ func (b bytesBuffered) BytesBuffer() *bytes.Buffer {
}
func TestIsRequestBodyTooLargeRegression(t *testing.T) {
_, err := ioutil.ReadAll(http.MaxBytesReader(httptest.NewRecorder(), ioutil.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4})), 1))
_, err := io.ReadAll(http.MaxBytesReader(httptest.NewRecorder(), io.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4})), 1))
assert.True(t, util.IsRequestBodyTooLarge(err))
}

@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"testing"
@ -55,7 +55,7 @@ func Test_writeError(t *testing.T) {
rec := httptest.NewRecorder()
WriteError(tt.err, rec)
require.Equal(t, tt.expectedStatus, rec.Result().StatusCode)
b, err := ioutil.ReadAll(rec.Result().Body)
b, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatal(err)
}

@ -1,7 +1,7 @@
package unmarshal
import (
"io/ioutil"
"io"
"log"
"strings"
"testing"
@ -49,7 +49,7 @@ func Test_DecodePushRequest(t *testing.T) {
for i, pushTest := range pushTests {
var actual logproto.PushRequest
closer := ioutil.NopCloser(strings.NewReader(pushTest.actual))
closer := io.NopCloser(strings.NewReader(pushTest.actual))
err := DecodePushRequest(closer, &actual)
require.NoError(t, err)

@ -2,7 +2,7 @@ package unmarshal
import (
"fmt"
"io/ioutil"
"io"
"strings"
"testing"
"time"
@ -50,7 +50,7 @@ var pushTests = []struct {
func Test_DecodePushRequest(t *testing.T) {
for i, pushTest := range pushTests {
var actual logproto.PushRequest
closer := ioutil.NopCloser(strings.NewReader(pushTest.actual))
closer := io.NopCloser(strings.NewReader(pushTest.actual))
err := DecodePushRequest(closer, &actual)
require.NoError(t, err)

@ -3,7 +3,7 @@ package main
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"testing"
"github.com/aws/aws-lambda-go/events"
@ -34,7 +34,7 @@ func (b *MockBatch) createPushRequest() (*logproto.PushRequest, int) {
}
func ReadJSONFromFile(t *testing.T, inputFile string) []byte {
inputJSON, err := ioutil.ReadFile(inputFile)
inputJSON, err := os.ReadFile(inputFile)
if err != nil {
t.Errorf("could not open test file. details: %v", err)
}
@ -43,7 +43,7 @@ func ReadJSONFromFile(t *testing.T, inputFile string) []byte {
}
func TestLambdaPromtail_KinesisParseEvents(t *testing.T) {
inputJson, err := ioutil.ReadFile("../testdata/kinesis-event.json")
inputJson, err := os.ReadFile("../testdata/kinesis-event.json")
if err != nil {
t.Errorf("could not open test file. details: %v", err)

@ -3,7 +3,6 @@ package querytee
import (
"context"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -107,7 +106,7 @@ func (b *ProxyBackend) doBackendRequest(req *http.Request) (int, []byte, error)
// Read the entire response body.
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
body, err := io.ReadAll(res.Body)
if err != nil {
return 0, nil, errors.Wrap(err, "reading backend response")
}

@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
@ -85,7 +84,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
)
if r.Body != nil {
body, err = ioutil.ReadAll(r.Body)
body, err = io.ReadAll(r.Body)
if err != nil {
level.Warn(p.logger).Log("msg", "Unable to read request body", "err", err)
return
@ -94,7 +93,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
level.Warn(p.logger).Log("msg", "Unable to close request body", "err", err)
}
r.Body = ioutil.NopCloser(bytes.NewReader(body))
r.Body = io.NopCloser(bytes.NewReader(body))
if err := r.ParseForm(); err != nil {
level.Warn(p.logger).Log("msg", "Unable to parse form", "err", err)
}
@ -115,7 +114,7 @@ func (p *ProxyEndpoint) executeBackendRequests(r *http.Request, resCh chan *back
start = time.Now()
)
if len(body) > 0 {
bodyReader = ioutil.NopCloser(bytes.NewReader(body))
bodyReader = io.NopCloser(bytes.NewReader(body))
}
status, body, err := b.ForwardRequest(r, bodyReader)

@ -2,7 +2,7 @@ package querytee
import (
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"net/url"
@ -179,7 +179,7 @@ func Test_Proxy_RequestsForwarding(t *testing.T) {
require.NoError(t, err)
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
body, err := io.ReadAll(res.Body)
require.NoError(t, err)
assert.Equal(t, testData.expectedStatus, res.StatusCode)
@ -330,7 +330,7 @@ func TestProxy_Passthrough(t *testing.T) {
require.NoError(t, err)
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
body, err := io.ReadAll(res.Body)
require.NoError(t, err)
assert.Equal(t, query.expectedStatusCode, res.StatusCode)

Loading…
Cancel
Save