Update dskit to 7b512eb. (#10927)

**What this PR does / why we need it**:
This is a small update to dskit that includes
https://github.com/grafana/dskit/pull/406, which allows us to
intercept gRPC requests in client calls.
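
For context, a rough sketch of what that enables, based on the `Middleware` field and the `DialOption` signature added to the vendored `grpcclient.Config` in this diff (the logging interceptor and the dial target are hypothetical, for illustration only):

```go
package example

import (
	"context"
	"log"

	"github.com/grafana/dskit/grpcclient"
	"google.golang.org/grpc"
)

// logCalls is a hypothetical unary client interceptor: it observes every
// outgoing call and then hands off to the real invoker.
func logCalls(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("gRPC call %s", method)
	return invoker(ctx, method, req, reply, cc, opts...)
}

// dial wires the interceptor into an otherwise configured grpcclient.Config.
func dial(cfg grpcclient.Config) (*grpc.ClientConn, error) {
	cfg.Middleware = append(cfg.Middleware, logCalls)

	// Interceptors passed directly to DialOption wrap around cfg.Middleware.
	opts, err := cfg.DialOption(nil, nil)
	if err != nil {
		return nil, err
	}
	return grpc.Dial("ingester.example:9095", opts...)
}
```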

**Checklist**
- [ ] Reviewed the
[`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md)
guide (**required**)
- [ ] Documentation added
- [ ] Tests updated
- [ ] `CHANGELOG.md` updated
- [ ] If the change is worth mentioning in the release notes, add
`add-to-release-notes` label
- [ ] Changes that require user attention or interaction to upgrade are
documented in `docs/sources/setup/upgrade/_index.md`
- [ ] For Helm chart changes bump the Helm chart version in
`production/helm/loki/Chart.yaml` and update
`production/helm/loki/CHANGELOG.md` and
`production/helm/loki/README.md`. [Example
PR](d10549e3ec)

---------

Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com>
Commit b01e830cde (parent d27c4d297d), authored by Karsten Jeschkies, committed by GitHub
16 changed files (number of changed lines in parentheses):
  1. docs/sources/configure/_index.md (5)
  2. go.mod (2)
  3. go.sum (4)
  4. pkg/analytics/seed_test.go (2)
  5. pkg/loki/modules.go (2)
  6. pkg/loki/runtime_config_test.go (4)
  7. vendor/github.com/grafana/dskit/grpcclient/grpcclient.go (9)
  8. vendor/github.com/grafana/dskit/httpgrpc/server/server.go (4)
  9. vendor/github.com/grafana/dskit/kv/memberlist/tcp_transport.go (5)
  10. vendor/github.com/grafana/dskit/log/ratelimit.go (4)
  11. vendor/github.com/grafana/dskit/ring/replication_set.go (56)
  12. vendor/github.com/grafana/dskit/ring/replication_set_tracker.go (14)
  13. vendor/github.com/grafana/dskit/ring/ring.go (1)
  14. vendor/github.com/grafana/dskit/runtimeconfig/manager.go (4)
  15. vendor/github.com/grafana/dskit/server/server.go (8)
  16. vendor/modules.txt (2)

@ -397,6 +397,11 @@ grpc_tls_config:
# CLI flag: -server.grpc.keepalive.ping-without-stream-allowed
[grpc_server_ping_without_stream_allowed: <boolean> | default = true]
# If non-zero, configures the amount of GRPC server workers used to serve the
# requests.
# CLI flag: -server.grpc.num-workers
[grpc_server_num_workers: <int> | default = 0]
# Output log messages in the given format. Valid formats: [logfmt, json]
# CLI flag: -log.format
[log_format: <string> | default = "logfmt"]

@ -49,7 +49,7 @@ require (
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.5.0
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2
github.com/grafana/dskit v0.0.0-20231006094724-ad2fd7e7931e
github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47
github.com/grafana/go-gelf/v2 v2.0.1
github.com/grafana/gomemcache v0.0.0-20230914135007-70d78eaabfe1
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd

@ -716,8 +716,8 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw=
github.com/grafana/dskit v0.0.0-20231006094724-ad2fd7e7931e h1:fl1bLiyD4GTijZk/AubmOI0ZFmxfQP7mRD1/ttdpsxI=
github.com/grafana/dskit v0.0.0-20231006094724-ad2fd7e7931e/go.mod h1:byPCvaG/pqi33Kq+Wvkp7WhLfmrlyy0RAoYG4yRh01I=
github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 h1:wRtcM7fvzg/MJ4KCIYLryadp2fI3pO61BEiY7SizCoI=
github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47/go.mod h1:byPCvaG/pqi33Kq+Wvkp7WhLfmrlyy0RAoYG4yRh01I=
github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak=
github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90=
github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY=

@ -36,7 +36,7 @@ func createMemberlist(t *testing.T, port, memberID int) *memberlist.KV {
var cfg memberlist.KVConfig
flagext.DefaultValues(&cfg)
cfg.TCPTransport = memberlist.TCPTransportConfig{
BindAddrs: []string{"localhost"},
BindAddrs: []string{"0.0.0.0"},
BindPort: 0,
}
cfg.GossipInterval = 100 * time.Millisecond

@ -246,7 +246,7 @@ func (t *Loki) initRuntimeConfig() (services.Service, error) {
validation.SetDefaultLimitsForYAMLUnmarshalling(t.Cfg.LimitsConfig)
var err error
t.runtimeConfig, err = runtimeconfig.New(t.Cfg.RuntimeConfig, prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer), util_log.Logger)
t.runtimeConfig, err = runtimeconfig.New(t.Cfg.RuntimeConfig, "loki", prometheus.WrapRegistererWithPrefix("loki_", prometheus.DefaultRegisterer), util_log.Logger)
t.TenantLimits = newtenantLimitsFromRuntimeConfig(t.runtimeConfig)
// Update config fields using runtime config. Only if multiKV is used for given ring these returned functions will be

@ -130,7 +130,7 @@ func newTestRuntimeconfig(t *testing.T, yaml string) runtime.TenantConfig {
require.NoError(t, flagset.Parse(nil))
reg := prometheus.NewPedanticRegistry()
runtimeConfig, err := runtimeconfig.New(cfg, prometheus.WrapRegistererWithPrefix("loki_", reg), log.NewNopLogger())
runtimeConfig, err := runtimeconfig.New(cfg, "test", prometheus.WrapRegistererWithPrefix("loki_", reg), log.NewNopLogger())
require.NoError(t, err)
require.NoError(t, runtimeConfig.StartAsync(context.Background()))
@ -164,7 +164,7 @@ func newTestOverrides(t *testing.T, yaml string) *validation.Overrides {
validation.SetDefaultLimitsForYAMLUnmarshalling(defaults)
reg := prometheus.NewPedanticRegistry()
runtimeConfig, err := runtimeconfig.New(cfg, prometheus.WrapRegistererWithPrefix("loki_", reg), log.NewNopLogger())
runtimeConfig, err := runtimeconfig.New(cfg, "test", prometheus.WrapRegistererWithPrefix("loki_", reg), log.NewNopLogger())
require.NoError(t, err)
require.NoError(t, runtimeConfig.StartAsync(context.Background()))

@ -37,6 +37,9 @@ type Config struct {
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md
ConnectBackoffBaseDelay time.Duration `yaml:"connect_backoff_base_delay" category:"advanced"`
ConnectBackoffMaxDelay time.Duration `yaml:"connect_backoff_max_delay" category:"advanced"`
Middleware []grpc.UnaryClientInterceptor `yaml:"-"`
StreamMiddleware []grpc.StreamClientInterceptor `yaml:"-"`
}
// RegisterFlags registers flags.
@ -91,7 +94,8 @@ func (cfg *Config) CallOptions() []grpc.CallOption {
return opts
}
// DialOption returns the config as a grpc.DialOptions.
// DialOption returns the config as a grpc.DialOptions. The passed interceptors
// wrap around the configured middleware.
func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientInterceptor, streamClientInterceptors []grpc.StreamClientInterceptor) ([]grpc.DialOption, error) {
var opts []grpc.DialOption
tlsOpts, err := cfg.TLS.GetGRPCDialOptions(cfg.TLSEnabled)
@ -100,6 +104,9 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep
}
opts = append(opts, tlsOpts...)
unaryClientInterceptors = append(unaryClientInterceptors, cfg.Middleware...)
streamClientInterceptors = append(streamClientInterceptors, cfg.StreamMiddleware...)
if cfg.BackoffOnRatelimits {
unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewBackoffRetry(cfg.BackoffConfig)}, unaryClientInterceptors...)
}

@ -80,7 +80,7 @@ type Client struct {
}
// ParseURL deals with direct:// style URLs, as well as kubernetes:// urls.
// For backwards compatibility it treats URLs without schems as kubernetes://.
// For backwards compatibility it treats URLs without schemes as kubernetes://.
func ParseURL(unparsed string) (string, error) {
// if it has :///, this is the kuberesolver v2 URL. Return it as it is.
if strings.Contains(unparsed, ":///") {
@ -115,7 +115,7 @@ func ParseURL(unparsed string) (string, error) {
if len(parts) > 2 {
domain = domain + "." + parts[2]
}
address := fmt.Sprintf("kubernetes:///%s%s:%s", service, domain, port)
address := fmt.Sprintf("kubernetes:///%s", net.JoinHostPort(service+domain, port))
return address, nil
default:

@ -39,7 +39,7 @@ const colonColon = "::"
// TCPTransportConfig is a configuration structure for creating new TCPTransport.
type TCPTransportConfig struct {
// BindAddrs is a list of addresses to bind to.
// BindAddrs is a list of IP addresses to bind to.
BindAddrs flagext.StringSlice `yaml:"bind_addr"`
// BindPort is the port to listen on, for each address above.
@ -147,6 +147,9 @@ func NewTCPTransport(config TCPTransportConfig, logger log.Logger, registerer pr
port := config.BindPort
for _, addr := range config.BindAddrs {
ip := net.ParseIP(addr)
if ip == nil {
return nil, fmt.Errorf("could not parse bind addr %q as IP address", addr)
}
tcpAddr := &net.TCPAddr{IP: ip, Port: port}

@ -20,7 +20,7 @@ type RateLimitedLogger struct {
// NewRateLimitedLogger returns a log.Logger that is limited to the given number of logs per second,
// with the given burst size.
func NewRateLimitedLogger(logger log.Logger, logsPerSecond float64, logsPerSecondBurst int, registry prometheus.Registerer) log.Logger {
func NewRateLimitedLogger(logger log.Logger, logsPerSecond float64, logsBurstSize int, registry prometheus.Registerer) log.Logger {
discardedLogLinesCounter := promauto.With(registry).NewCounterVec(prometheus.CounterOpts{
Name: "logger_rate_limit_discarded_log_lines_total",
Help: "Total number of discarded log lines per level.",
@ -28,7 +28,7 @@ func NewRateLimitedLogger(logger log.Logger, logsPerSecond float64, logsPerSecon
return &RateLimitedLogger{
next: logger,
limiter: rate.NewLimiter(rate.Limit(logsPerSecond), logsPerSecondBurst),
limiter: rate.NewLimiter(rate.Limit(logsPerSecond), logsBurstSize),
discardedInfoLogLinesCounter: discardedLogLinesCounter.WithLabelValues(level.InfoValue().String()),
discardedDebugLogLinesCounter: discardedLogLinesCounter.WithLabelValues(level.DebugValue().String()),
discardedWarnLogLinesCounter: discardedLogLinesCounter.WithLabelValues(level.WarnValue().String()),

@ -3,6 +3,7 @@ package ring
import (
"context"
"errors"
"fmt"
"sort"
"time"
@ -24,6 +25,8 @@ type ReplicationSet struct {
// Maximum number of different zones in which instances can fail. Max unavailable zones and
// max errors are mutually exclusive.
MaxUnavailableZones int
ZoneAwarenessEnabled bool
}
// Do function f in parallel for all replicas in the set, erroring if we exceed
@ -96,16 +99,29 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont
}
type DoUntilQuorumConfig struct {
// If true, enable request minimization.
// MinimizeRequests enables request minimization.
// See docs for DoUntilQuorum for more information.
MinimizeRequests bool
// If non-zero and MinimizeRequests is true, enables hedging.
// HedgingDelay configures the delay used before initiating hedged requests.
// Hedging is only enabled if HedgingDelay is non-zero and MinimizeRequests is true.
// See docs for DoUntilQuorum for more information.
HedgingDelay time.Duration
// If non-nil, DoUntilQuorum will emit log lines and span events during the call.
// Logger to emit log lines and span events to during the call.
// Can be nil, in which case no log lines or span events are emitted.
Logger *spanlogger.SpanLogger
// IsTerminalError allows DoUntilQuorum to detect terminal errors generated by requests.
//
// If IsTerminalError is non-nil and a request returns an error for which IsTerminalError returns true,
// DoUntilQuorum will immediately cancel any inflight requests and return the error.
//
// This is useful to cancel DoUntilQuorum when an unrecoverable error occurs and it does not
// make sense to attempt requests to other instances. For example, if a client-side limit on the
// total response size across all instances is reached, making further requests to other
// instances would not be worthwhile.
IsTerminalError func(error) bool
}
func (c DoUntilQuorumConfig) Validate() error {
@ -120,7 +136,7 @@ func (c DoUntilQuorumConfig) Validate() error {
//
// # Result selection
//
// If r.MaxUnavailableZones is greater than zero, DoUntilQuorum operates in zone-aware mode:
// If r.MaxUnavailableZones is greater than zero, or r.ZoneAwarenessEnabled is true, DoUntilQuorum operates in zone-aware mode:
// - DoUntilQuorum returns an error if calls to f for instances in more than r.MaxUnavailableZones zones return errors
// - Otherwise, DoUntilQuorum returns all results from all replicas in the first zones for which f succeeds
// for every instance in that zone (eg. if there are 3 zones and r.MaxUnavailableZones is 1, DoUntilQuorum will
@ -204,6 +220,10 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex
return nil, err
}
if r.ZoneAwarenessEnabled && r.MaxErrors > 0 {
return nil, fmt.Errorf("invalid ReplicationSet: MaxErrors is non-zero (is %v) and ZoneAwarenessEnabled is true", r.MaxErrors)
}
var logger kitlog.Logger = cfg.Logger
if cfg.Logger == nil {
logger = kitlog.NewNopLogger()
@ -227,7 +247,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex
var resultTracker replicationSetResultTracker
var contextTracker replicationSetContextTracker
if r.MaxUnavailableZones > 0 {
if r.MaxUnavailableZones > 0 || r.ZoneAwarenessEnabled {
resultTracker = newZoneAwareResultTracker(r.Instances, r.MaxUnavailableZones, logger)
contextTracker = newZoneAwareContextTracker(ctx, r.Instances)
} else {
@ -272,6 +292,16 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex
}
}
terminate := func(err error) ([]T, error) {
if cfg.Logger != nil {
_ = cfg.Logger.Error(err)
}
contextTracker.cancelAllContexts()
cleanupResultsAlreadyReceived()
return nil, err
}
var hedgingTrigger <-chan time.Time
if cfg.HedgingDelay > 0 {
@ -293,6 +323,13 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex
resultTracker.startAdditionalRequests()
case result := <-resultsChan:
resultsRemaining--
if result.err != nil && cfg.IsTerminalError != nil && cfg.IsTerminalError(result.err) {
level.Error(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err)
// We must return before calling resultTracker.done() below, otherwise done() might start further requests if request minimisation is enabled.
return terminate(result.err)
}
resultTracker.done(result.instance, result.err)
if result.err == nil {
@ -302,14 +339,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex
if resultTracker.failed() {
level.Error(logger).Log("msg", "cancelling all requests because quorum cannot be reached")
if cfg.Logger != nil {
_ = cfg.Logger.Error(result.err)
}
contextTracker.cancelAllContexts()
cleanupResultsAlreadyReceived()
return nil, result.err
return terminate(result.err)
}
}
}
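
As a reading aid for the `MinimizeRequests`, `HedgingDelay` and `IsTerminalError` fields above, here is a hedged sketch of how a caller might configure them. It assumes `DoUntilQuorum` keeps its generic `(ctx, replication set, config, per-instance callback, cleanup func)` shape; the fetch callback and the limit error are made up for illustration:

```go
package example

import (
	"context"
	"errors"
	"time"

	"github.com/grafana/dskit/ring"
)

// errResponseTooLarge stands in for a client-side limit whose breach makes
// further per-instance requests pointless.
var errResponseTooLarge = errors.New("combined response size limit exceeded")

// queryAll fans a request out over a replication set and aborts early when a
// terminal error is seen.
func queryAll(ctx context.Context, rs ring.ReplicationSet,
	fetch func(context.Context, *ring.InstanceDesc) ([]byte, error)) ([][]byte, error) {

	cfg := ring.DoUntilQuorumConfig{
		MinimizeRequests: true,            // contact only as many instances as quorum needs
		HedgingDelay:     4 * time.Second, // start extra requests if the first ones are slow
		IsTerminalError: func(err error) bool {
			// Cancel all inflight requests immediately rather than waiting to
			// see whether quorum could still be reached without this instance.
			return errors.Is(err, errResponseTooLarge)
		},
	}

	return ring.DoUntilQuorum(ctx, rs, cfg, fetch,
		func([]byte) {}, // cleanup for successful results that end up unused
	)
}
```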

@ -108,7 +108,8 @@ func (t *defaultResultTracker) done(instance *InstanceDesc, err error) {
} else {
level.Warn(t.logger).Log(
"msg", "instance failed",
"instance", instance.Addr,
"instanceAddr", instance.Addr,
"instanceID", instance.Id,
"err", err,
)
@ -155,7 +156,7 @@ func (t *defaultResultTracker) startMinimumRequests() {
if len(t.pendingInstances) < t.maxErrors {
t.pendingInstances = append(t.pendingInstances, instance)
} else {
level.Debug(t.logger).Log("msg", "starting request to instance", "reason", "initial requests", "instance", instance.Addr)
level.Debug(t.logger).Log("msg", "starting request to instance", "reason", "initial requests", "instanceAddr", instance.Addr, "instanceID", instance.Id)
t.instanceRelease[instance] <- struct{}{}
}
}
@ -175,7 +176,7 @@ func (t *defaultResultTracker) startAdditionalRequestsDueTo(reason string) {
if len(t.pendingInstances) > 0 {
// There are some outstanding requests we could make before we reach maxErrors. Release the next one.
i := t.pendingInstances[0]
level.Debug(t.logger).Log("msg", "starting request to instance", "reason", reason, "instance", i.Addr)
level.Debug(t.logger).Log("msg", "starting request to instance", "reason", reason, "instanceAddr", i.Addr, "instanceID", i.Id)
t.instanceRelease[i] <- struct{}{}
t.pendingInstances = t.pendingInstances[1:]
}
@ -186,7 +187,7 @@ func (t *defaultResultTracker) startAllRequests() {
for i := range t.instances {
instance := &t.instances[i]
level.Debug(t.logger).Log("msg", "starting request to instance", "reason", "initial requests", "instance", instance.Addr)
level.Debug(t.logger).Log("msg", "starting request to instance", "reason", "initial requests", "instanceAddr", instance.Addr, "instanceID", instance.Id)
t.instanceRelease[instance] = make(chan struct{}, 1)
t.instanceRelease[instance] <- struct{}{}
}
@ -283,9 +284,10 @@ func (t *zoneAwareResultTracker) done(instance *InstanceDesc, err error) {
if t.failuresByZone[instance.Zone] == 1 {
level.Warn(t.logger).Log(
"msg", "zone has failed",
"msg", "request to instance has failed, zone cannot contribute to quorum",
"zone", instance.Zone,
"failingInstance", instance.Addr,
"failingInstanceAddr", instance.Addr,
"failingInstanceID", instance.Id,
"err", err,
)

@ -526,6 +526,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro
Instances: healthyInstances,
MaxErrors: maxErrors,
MaxUnavailableZones: maxUnavailableZones,
ZoneAwarenessEnabled: r.cfg.ZoneAwarenessEnabled,
}, nil
}

@ -63,11 +63,13 @@ type Manager struct {
}
// New creates an instance of Manager. Manager is a services.Service, and must be explicitly started to perform any work.
func New(cfg Config, registerer prometheus.Registerer, logger log.Logger) (*Manager, error) {
func New(cfg Config, configName string, registerer prometheus.Registerer, logger log.Logger) (*Manager, error) {
if len(cfg.LoadPath) == 0 {
return nil, errors.New("LoadPath is empty")
}
registerer = prometheus.WrapRegistererWith(prometheus.Labels{"config": configName}, registerer)
mgr := Manager{
cfg: cfg,
configLoadSuccess: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{

@ -13,6 +13,7 @@ import (
"net"
"net/http"
_ "net/http/pprof" // anonymous import to get the pprof handler registered
"strconv"
"strings"
"time"
@ -118,6 +119,7 @@ type Config struct {
GRPCServerTimeout time.Duration `yaml:"grpc_server_keepalive_timeout"`
GRPCServerMinTimeBetweenPings time.Duration `yaml:"grpc_server_min_time_between_pings"`
GRPCServerPingWithoutStreamAllowed bool `yaml:"grpc_server_ping_without_stream_allowed"`
GRPCServerNumWorkers int `yaml:"grpc_server_num_workers"`
LogFormat string `yaml:"log_format"`
LogLevel log.Level `yaml:"log_level"`
@ -179,6 +181,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.GRPCServerTimeout, "server.grpc.keepalive.timeout", time.Second*20, "After having pinged for keepalive check, the duration after which an idle connection should be closed, Default: 20s")
f.DurationVar(&cfg.GRPCServerMinTimeBetweenPings, "server.grpc.keepalive.min-time-between-pings", 5*time.Minute, "Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection.")
f.BoolVar(&cfg.GRPCServerPingWithoutStreamAllowed, "server.grpc.keepalive.ping-without-stream-allowed", false, "If true, server allows keepalive pings even when there are no active streams(RPCs). If false, and client sends ping when there are no active streams, server will send GOAWAY and close the connection.")
f.IntVar(&cfg.GRPCServerNumWorkers, "server.grpc.num-workers", 0, "If non-zero, configures the amount of GRPC server workers used to serve the requests.")
f.StringVar(&cfg.PathPrefix, "server.path-prefix", "", "Base path to serve all API routes from (e.g. /v1/)")
f.StringVar(&cfg.LogFormat, "log.format", log.LogfmtFormat, "Output log messages in the given format. Valid formats: [logfmt, json]")
cfg.LogLevel.RegisterFlags(f)
@ -251,7 +254,7 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
network = DefaultNetwork
}
// Setup listeners first, so we can fail early if the port is in use.
httpListener, err := net.Listen(network, fmt.Sprintf("%s:%d", cfg.HTTPListenAddress, cfg.HTTPListenPort))
httpListener, err := net.Listen(network, net.JoinHostPort(cfg.HTTPListenAddress, strconv.Itoa(cfg.HTTPListenPort)))
if err != nil {
return nil, err
}
@ -275,7 +278,7 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
if network == "" {
network = DefaultNetwork
}
grpcListener, err := net.Listen(network, fmt.Sprintf("%s:%d", cfg.GRPCListenAddress, cfg.GRPCListenPort))
grpcListener, err := net.Listen(network, net.JoinHostPort(cfg.GRPCListenAddress, strconv.Itoa(cfg.GRPCListenPort)))
if err != nil {
return nil, err
}
@ -378,6 +381,7 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) {
grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize),
grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize),
grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)),
grpc.NumStreamWorkers(uint32(cfg.GRPCServerNumWorkers)),
}
if cfg.GrpcMethodLimiter != nil {

@ -852,7 +852,7 @@ github.com/gorilla/websocket
# github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2
## explicit; go 1.17
github.com/grafana/cloudflare-go
# github.com/grafana/dskit v0.0.0-20231006094724-ad2fd7e7931e
# github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47
## explicit; go 1.19
github.com/grafana/dskit/aws
github.com/grafana/dskit/backoff
