refactor: Use OTel tracing library (#17859)

Signed-off-by: Oleg Zaytsev <mail@olegzaytsev.com>
pull/17943/head
Oleg Zaytsev committed 7 months ago (via GitHub)
parent ad0bef31e7
commit c8a15f451c
  1. cmd/loki/main.go (8)
  2. go.mod (15)
  3. go.sum (2)
  4. pkg/blockbuilder/builder/builder.go (3)
  5. pkg/blockbuilder/builder/storage.go (8)
  6. pkg/blockbuilder/types/grpc_transport.go (6)
  7. pkg/bloomgateway/bloomgateway.go (21)
  8. pkg/bloomgateway/processor.go (13)
  9. pkg/bloomgateway/processor_test.go (5)
  10. pkg/bloomgateway/querier.go (2)
  11. pkg/dataobj/querier/metadata.go (32)
  12. pkg/dataobj/querier/store.go (102)
  13. pkg/distributor/distributor.go (28)
  14. pkg/distributor/field_detection.go (4)
  15. pkg/distributor/ratestore.go (39)
  16. pkg/indexgateway/client.go (6)
  17. pkg/indexgateway/gateway.go (47)
  18. pkg/ingester/client/client.go (7)
  19. pkg/ingester/ingester.go (40)
  20. pkg/ingester/instance.go (32)
  21. pkg/ingester/stream.go (20)
  22. pkg/limits/client/client.go (6)
  23. pkg/limits/frontend/client/client.go (6)
  24. pkg/logproto/compat.go (92)
  25. pkg/logproto/compat_test.go (73)
  26. pkg/logql/engine.go (28)
  27. pkg/logql/metrics_test.go (39)
  28. pkg/lokifrontend/frontend/downstream_roundtripper.go (12)
  29. pkg/lokifrontend/frontend/v1/frontend.go (20)
  30. pkg/lokifrontend/frontend/v1/frontend_test.go (54)
  31. pkg/lokifrontend/frontend/v2/frontend.go (10)
  32. pkg/pattern/aggregation/push.go (31)
  33. pkg/pattern/clientpool/client.go (7)
  34. pkg/pattern/instance.go (22)
  35. pkg/querier/http.go (33)
  36. pkg/querier/querier.go (44)
  37. pkg/querier/queryrange/codec.go (105)
  38. pkg/querier/queryrange/codec_test.go (125)
  39. pkg/querier/queryrange/downstreamer.go (16)
  40. pkg/querier/queryrange/instrument.go (6)
  41. pkg/querier/queryrange/limits.go (30)
  42. pkg/querier/queryrange/log_result_cache.go (6)
  43. pkg/querier/queryrange/marshal.go (12)
  44. pkg/querier/queryrange/parquet.go (5)
  45. pkg/querier/queryrange/prometheus.go (12)
  46. pkg/querier/queryrange/queryrangebase/definitions/interface.go (6)
  47. pkg/querier/queryrange/queryrangebase/query_range.go (36)
  48. pkg/querier/queryrange/queryrangebase/query_range_test.go (34)
  49. pkg/querier/queryrange/serialize.go (10)
  50. pkg/querier/queryrange/shard_resolver.go (12)
  51. pkg/querier/queryrange/split_by_interval.go (16)
  52. pkg/querier/store_combiner.go (142)
  53. pkg/querier/worker/frontend_processor.go (16)
  54. pkg/querier/worker/scheduler_processor.go (21)
  55. pkg/querier/worker/worker.go (3)
  56. pkg/ruler/base/manager.go (14)
  57. pkg/ruler/base/ruler.go (3)
  58. pkg/ruler/evaluator_remote.go (7)
  59. pkg/ruler/ruler.go (3)
  60. pkg/scheduler/scheduler.go (25)
  61. pkg/storage/async_store.go (25)
  62. pkg/storage/chunk/cache/background.go (14)
  63. pkg/storage/chunk/cache/instrumented.go (41)
  64. pkg/storage/chunk/cache/resultscache/cache.go (59)
  65. pkg/storage/chunk/client/aws/dynamodb_storage_client.go (26)
  66. pkg/storage/chunk/client/aws/retryer.go (8)
  67. pkg/storage/chunk/client/aws/s3_storage_client.go (3)
  68. pkg/storage/chunk/client/gcp/bigtable_index_client.go (14)
  69. pkg/storage/chunk/client/gcp/bigtable_object_client.go (12)
  70. pkg/storage/chunk/client/gcp/gcs_object_client.go (3)
  71. pkg/storage/chunk/client/gcp/instrumentation.go (4)
  72. pkg/storage/chunk/client/gcp/table_client.go (3)
  73. pkg/storage/chunk/client/util/parallel_chunk_fetch.go (15)
  74. pkg/storage/chunk/client/util/util.go (6)
  75. pkg/storage/chunk/fetcher/fetcher.go (9)
  76. pkg/storage/store.go (4)
  77. pkg/storage/stores/composite_store_entry.go (35)
  78. pkg/storage/stores/series/series_index_store.go (60)
  79. pkg/storage/stores/series_store_write.go (9)
  80. pkg/storage/stores/shipper/bloomshipper/client.go (15)
  81. pkg/storage/stores/shipper/indexshipper/storage/cached_client.go (33)
  82. pkg/storage/stores/shipper/indexshipper/tsdb/index_client.go (56)
  83. pkg/storage/stores/shipper/indexshipper/tsdb/single_file_index.go (6)
  84. pkg/tracing/config.go (5)
  85. pkg/tracing/otel_kv.go (22)
  86. pkg/util/http.go (27)
  87. pkg/util/httpgrpc/carrier.go (56)
  88. pkg/util/spanlogger/noop.go (52)
  89. pkg/util/spanlogger/spanlogger.go (7)
  90. tools/stream-generator/distributor/client/client.go (6)
  91. vendor/github.com/grafana/dskit/spanprofiler/README.md (104)
  92. vendor/github.com/grafana/dskit/spanprofiler/spanprofiler.go (107)
  93. vendor/github.com/grafana/dskit/spanprofiler/tracer.go (112)
  94. vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE (27)
  95. vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS (23)
  96. vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md (57)
  97. vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go (239)
  98. vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go (69)
  99. vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go (76)
  100. vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go (5)
Some files were not shown because too many files have changed in this diff.
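
Every hunk below applies the same migration: each package declares a named tracer with otel.Tracer, opentracing.StartSpanFromContext becomes tracer.Start paired with span.End, and LogKV/SetTag calls become typed attributes or span events. A minimal sketch of the target pattern, assuming a hypothetical package and span name (nothing here is copied verbatim from the diff):

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// One named tracer per package, mirroring the `var tracer = otel.Tracer("pkg/...")`
// declarations added throughout this commit.
var tracer = otel.Tracer("pkg/example")

func doWork(ctx context.Context, items int) error {
	// opentracing.StartSpanFromContext(ctx, name) -> ctx, sp := tracer.Start(ctx, name)
	ctx, sp := tracer.Start(ctx, "example.doWork")
	defer sp.End() // sp.Finish() -> sp.End()

	// sp.SetTag / structured LogKV pairs -> typed attributes.
	sp.SetAttributes(attribute.Int("items", items))

	// Point-in-time LogKV messages -> span events.
	sp.AddEvent("starting work", trace.WithAttributes(attribute.Int("items", items)))

	_ = ctx // ctx now carries the span for downstream calls
	return nil
}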

@ -10,9 +10,7 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/log"
"github.com/grafana/dskit/spanprofiler"
"github.com/grafana/dskit/tracing"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/version"
@ -90,13 +88,11 @@ func main() {
if config.Tracing.Enabled {
// Setting the environment variable JAEGER_AGENT_HOST enables tracing
trace, err := tracing.NewFromEnv(fmt.Sprintf("loki-%s", config.Target))
trace, err := tracing.NewOTelFromJaegerEnv(fmt.Sprintf("loki-%s", config.Target))
if err != nil {
level.Error(util_log.Logger).Log("msg", "error in initializing tracing. tracing will not be enabled", "err", err)
}
if config.Tracing.ProfilingEnabled {
opentracing.SetGlobalTracer(spanprofiler.NewTracer(opentracing.GlobalTracer()))
}
defer func() {
if trace != nil {
if err := trace.Close(); err != nil {

@ -58,7 +58,6 @@ require (
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/consul/api v1.32.1
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/influxdata/telegraf v1.34.1
@ -76,9 +75,9 @@ require (
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/oklog/run v1.1.0
github.com/oklog/ulid v1.3.1 // indirect
github.com/opentracing-contrib/go-grpc v0.1.2
github.com/opentracing-contrib/go-stdlib v1.1.0
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b
github.com/opentracing-contrib/go-grpc v0.1.2 // indirect
github.com/opentracing-contrib/go-stdlib v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
github.com/oschwald/geoip2-golang v1.11.0
// github.com/pierrec/lz4 v2.0.5+incompatible
github.com/pierrec/lz4/v4 v4.1.22
@ -153,6 +152,8 @@ require (
github.com/twmb/franz-go/plugin/kotel v1.6.0
github.com/twmb/franz-go/plugin/kprom v1.2.1
go.opentelemetry.io/collector/pdata v1.30.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0
go.opentelemetry.io/otel/sdk v1.35.0
go4.org/netipx v0.0.0-20230125063823-8449b0a6169f
golang.org/x/oauth2 v0.30.0
golang.org/x/text v0.25.0
@ -244,11 +245,9 @@ require (
go.opentelemetry.io/collector/pipeline v0.118.0 // indirect
go.opentelemetry.io/collector/processor v0.118.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 // indirect
go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 // indirect
go.opentelemetry.io/contrib/samplers/jaegerremote v0.29.0 // indirect
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
@ -412,8 +411,8 @@ require (
go.mongodb.org/mongo-driver v1.17.2 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/collector/semconv v0.118.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
go.opentelemetry.io/otel v1.35.0
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0

@ -699,8 +699,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=

@ -16,6 +16,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/twmb/franz-go/pkg/kgo"
"go.opentelemetry.io/otel"
"golang.org/x/sync/errgroup"
"github.com/grafana/loki/v3/pkg/blockbuilder/types"
@ -32,6 +33,8 @@ import (
util_log "github.com/grafana/loki/v3/pkg/util/log"
)
var tracer = otel.Tracer("pkg/blockbuilder/builder")
type Config struct {
ConcurrentFlushes int `yaml:"concurrent_flushes"`
ConcurrentWriters int `yaml:"concurrent_writers"`

@ -6,7 +6,6 @@ import (
"io"
"sort"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/grafana/loki/v3/pkg/storage"
@ -104,11 +103,8 @@ func (m *MultiStore) GetObject(ctx context.Context, objectKey string) (io.ReadCl
}
func (m *MultiStore) GetObjectRange(ctx context.Context, objectKey string, off, length int64) (io.ReadCloser, error) {
sp, _ := opentracing.StartSpanFromContext(ctx, "GetObjectRange")
if sp != nil {
sp.LogKV("objectKey", objectKey, "off", off, "length", length)
}
defer sp.Finish()
_, sp := tracer.Start(ctx, "GetObjectRange")
defer sp.End()
s, err := m.GetStoreFor(model.Now())
if err != nil {
return nil, err

@ -7,11 +7,10 @@ import (
"github.com/grafana/dskit/grpcclient"
"github.com/grafana/dskit/instrument"
"github.com/grafana/dskit/middleware"
otgrpc "github.com/opentracing-contrib/go-grpc"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
@ -53,14 +52,13 @@ func NewGRPCTransportFromAddress(
metrics := newGRPCTransportMetrics(reg)
dialOpts, err := cfg.DialOption(
[]grpc.UnaryClientInterceptor{
otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
middleware.UnaryClientInstrumentInterceptor(metrics.requestLatency),
}, []grpc.StreamClientInterceptor{
otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()),
middleware.StreamClientInstrumentInterceptor(metrics.requestLatency),
},
middleware.NoOpInvalidClusterValidationReporter,
)
dialOpts = append(dialOpts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
if err != nil {
return nil, err
}
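
For gRPC clients, the per-call otgrpc interceptors are dropped in favour of a single OTel stats handler on the dial options, while the dskit instrumentation interceptors stay. A hedged sketch of a traced client dial under those assumptions (address handling and credentials are placeholders):

package example

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialTraced: the otgrpc client interceptors are replaced by one OTel stats
// handler, which traces both unary and streaming calls.
func dialTraced(addr string) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()), // placeholder credentials
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	}
	// nolint:staticcheck // grpc.Dial is deprecated but matches existing usage in this repo.
	return grpc.Dial(addr, opts...)
}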

@ -16,9 +16,10 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.uber.org/atomic"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
@ -32,6 +33,8 @@ import (
"github.com/grafana/loki/v3/pkg/util/spanlogger"
)
var tracer = otel.Tracer("pkg/bloomgateway")
const (
metricsSubsystem = "bloom_gateway"
querierMetricsSubsystem = "bloom_gateway_querier"
@ -205,7 +208,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
return nil, err
}
sp, ctx := opentracing.StartSpanFromContext(ctx, "bloomgateway.FilterChunkRefs")
ctx, sp := tracer.Start(ctx, "bloomgateway.FilterChunkRefs")
stats, ctx := ContextWithEmptyStats(ctx)
logger := spanlogger.FromContext(
ctx,
@ -214,7 +217,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
defer func() {
level.Info(logger).Log(stats.KVArgs()...)
sp.Finish()
sp.End()
}()
// start time == end time --> empty response
@ -256,11 +259,11 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
seriesByDay := partitionRequest(req)
stats.NumTasks = len(seriesByDay)
sp.LogKV(
"matchers", len(matchers),
"days", len(seriesByDay),
"blocks", len(req.Blocks),
"series_requested", len(req.Refs),
sp.SetAttributes(
attribute.Int("matchers", len(matchers)),
attribute.Int("days", len(seriesByDay)),
attribute.Int("blocks", len(req.Blocks)),
attribute.Int("series_requested", len(req.Refs)),
)
// len(seriesByDay) should never be 0
@ -345,7 +348,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk
stats.ChunksRequested = preFilterChunks
stats.ChunksFiltered = preFilterChunks - postFilterChunks
sp.LogKV("msg", "return filtered chunk refs")
sp.AddEvent("return filtered chunk refs")
return &logproto.FilterChunkRefResponse{ChunkRefs: filtered}, nil
}

@ -5,10 +5,9 @@ import (
"time"
"github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/grafana/dskit/concurrency"
"github.com/grafana/dskit/multierror"
"github.com/pkg/errors"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
@ -161,12 +160,10 @@ func (p *processor) processBlock(_ context.Context, bq *bloomshipper.CloseableBl
// NB(owen-d): can be helpful for debugging, but is noisy
// and don't feel like threading this through a configuration
// if sp := opentracing.SpanFromContext(task.ctx); sp != nil {
// md, _ := blockQuerier.Metadata()
// blk := bloomshipper.BlockRefFrom(task.tenant, task.table.String(), md)
// blockID := blk.String()
// sp.LogKV("process block", blockID, "series", len(task.series))
// }
//sp := trace.SpanFromContext(task.ctx)
//md, _ := blockQuerier.Metadata()
//blk := bloomshipper.BlockRefFrom(task.tenant, task.table.String(), md)
//sp.SetAttributes(attribute.String("process block", blk.String()), attribute.Int("series", len(task.series)))
it := iter.NewPeekIter(task.RequestIter())
iters = append(iters, it)

@ -7,7 +7,6 @@ import (
"time"
"github.com/go-kit/log"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
@ -117,8 +116,8 @@ func (s *dummyStore) FetchBlocks(_ context.Context, refs []bloomshipper.BlockRef
func TestProcessor(t *testing.T) {
ctx := context.Background()
sp, ctx := opentracing.StartSpanFromContext(ctx, "TestProcessor")
t.Cleanup(sp.Finish)
ctx, sp := tracer.Start(ctx, "TestProcessor")
t.Cleanup(func() { sp.End() })
tenant := "fake"
now := mktime("2024-01-27 12:00")

@ -108,7 +108,7 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from
return chunkRefs, false, nil
}
logger, ctx := spanlogger.New(ctx, bq.logger, "bloomquerier.FilterChunkRefs")
logger, ctx := spanlogger.NewOTel(ctx, bq.logger, tracer, "bloomquerier.FilterChunkRefs")
defer logger.Finish()
grouped := groupedChunksRefPool.Get(len(chunkRefs))
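
Call sites that used spanlogger.New switch to spanlogger.NewOTel, passing the package tracer explicitly. Judging only from the call shape visible in this diff, usage looks roughly like the sketch below (the function and its arguments are hypothetical, and the NewOTel signature is inferred):

package example

import (
	"context"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"go.opentelemetry.io/otel"

	"github.com/grafana/loki/v3/pkg/util/spanlogger"
)

var tracer = otel.Tracer("pkg/example")

// filterSomething sketches the new spanlogger call shape.
func filterSomething(ctx context.Context, logger log.Logger) error {
	sl, ctx := spanlogger.NewOTel(ctx, logger, tracer, "example.filterSomething")
	defer sl.Finish() // ends the underlying span

	level.Debug(sl).Log("msg", "filtering")
	_ = ctx
	return nil
}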

@ -10,9 +10,10 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
@ -193,10 +194,8 @@ func (sp *streamProcessor) ProcessParallel(ctx context.Context, onNewStream func
}()
start := time.Now()
span := opentracing.SpanFromContext(ctx)
if span != nil {
span.LogKV("msg", "processing streams", "total_readers", len(readers))
}
span := trace.SpanFromContext(ctx)
span.AddEvent("processing streams", trace.WithAttributes(attribute.Int("total_readers", len(readers))))
level.Debug(sp.logger).Log("msg", "processing streams", "total_readers", len(readers))
// set predicate on all readers
@ -210,8 +209,9 @@ func (sp *streamProcessor) ProcessParallel(ctx context.Context, onNewStream func
var processedStreams atomic.Int64
for _, reader := range readers {
g.Go(func() error {
span, ctx := opentracing.StartSpanFromContext(ctx, "streamProcessor.processSingleReader")
defer span.Finish()
ctx, span := tracer.Start(ctx, "streamProcessor.processSingleReader")
defer span.End()
n, err := sp.processSingleReader(ctx, reader, onNewStream)
if err != nil {
return err
@ -230,9 +230,11 @@ func (sp *streamProcessor) ProcessParallel(ctx context.Context, onNewStream func
"total_streams_processed", processedStreams.Load(),
"duration", time.Since(start),
)
if span != nil {
span.LogKV("msg", "streamProcessor.ProcessParallel done", "total_readers", len(readers), "total_streams_processed", processedStreams.Load(), "duration", time.Since(start))
}
span.AddEvent("streamProcessor.ProcessParallel done", trace.WithAttributes(
attribute.Int("total_readers", len(readers)),
attribute.Int64("total_streams_processed", processedStreams.Load()),
attribute.String("duration", time.Since(start).String()),
))
return nil
}
@ -284,10 +286,10 @@ func labelsToSeriesIdentifier(labels labels.Labels) logproto.SeriesIdentifier {
// shardStreamReaders fetches metadata of objects in parallel and shards them into a list of StreamsReaders
func shardStreamReaders(ctx context.Context, objects []object, shard logql.Shard) ([]*streams.RowReader, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "shardStreamReaders")
defer span.Finish()
ctx, span := tracer.Start(ctx, "shardStreamReaders")
defer span.End()
span.SetTag("objects", len(objects))
span.SetAttributes(attribute.Int("objects", len(objects)))
var (
// sectionIndex tracks the global section number across all objects to ensure consistent sharding
@ -329,6 +331,8 @@ func shardStreamReaders(ctx context.Context, objects []object, shard logql.Shard
sectionIndex++
}
span.LogKV("msg", "shardStreamReaders done", "readers", len(readers))
span.AddEvent("shardStreamReaders done", trace.WithAttributes(
attribute.Int("readers", len(readers)),
))
return readers, nil
}

@ -12,10 +12,12 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/thanos-io/objstore"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
"github.com/grafana/loki/v3/pkg/dataobj"
@ -32,9 +34,12 @@ import (
storageconfig "github.com/grafana/loki/v3/pkg/storage/config"
"github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
"github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index"
"github.com/grafana/loki/v3/pkg/tracing"
util_log "github.com/grafana/loki/v3/pkg/util/log"
)
var tracer = otel.Tracer("pkg/dataobj/querier")
var (
_ querier.Store = &Store{}
@ -182,27 +187,29 @@ type object struct {
// objectsForTimeRange returns data objects for the given time range.
func (s *Store) objectsForTimeRange(ctx context.Context, from, through time.Time, logger log.Logger) ([]object, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "objectsForTimeRange")
defer span.Finish()
ctx, span := tracer.Start(ctx, "objectsForTimeRange")
defer span.End()
span.SetTag("from", from)
span.SetTag("through", through)
span.SetAttributes(
attribute.String("from", from.String()),
attribute.String("through", through.String()),
)
files, err := s.metastore.DataObjects(ctx, from, through)
if err != nil {
return nil, err
}
logParams := []interface{}{
level.Debug(logger).Log(
"msg", "found data objects for time range",
"count", len(files),
"from", from,
"through", through,
}
level.Debug(logger).Log(logParams...)
span.LogKV(logParams...)
span.LogKV("files", files)
)
span.AddEvent("found data objects for time range", trace.WithAttributes(
attribute.Int("count", len(files)),
attribute.StringSlice("files", files)),
)
objects := make([]object, 0, len(files))
for _, path := range files {
@ -250,10 +257,11 @@ func selectLogs(ctx context.Context, objects []object, shard logql.Shard, req lo
for i, obj := range shardedObjects {
g.Go(func() error {
span, ctx := opentracing.StartSpanFromContext(ctx, "object selectLogs")
defer span.Finish()
span.SetTag("object", obj.object.path)
span.SetTag("sections", len(obj.logReaders))
ctx, span := tracer.Start(ctx, "object selectLogs", trace.WithAttributes(
attribute.String("object", obj.object.path),
attribute.Int("sections", len(obj.logReaders)),
))
defer span.End()
iterator, err := obj.selectLogs(ctx, streamsPredicate, logsPredicates, req)
if err != nil {
@ -307,10 +315,12 @@ func selectSamples(ctx context.Context, objects []object, shard logql.Shard, exp
for i, obj := range shardedObjects {
g.Go(func() error {
span, ctx := opentracing.StartSpanFromContext(ctx, "object selectSamples")
defer span.Finish()
span.SetTag("object", obj.object.path)
span.SetTag("sections", len(obj.logReaders))
ctx, span := tracer.Start(ctx, "object selectSamples", trace.WithAttributes(
attribute.String("object", obj.object.path),
attribute.Int("sections", len(obj.logReaders)),
))
defer span.End()
iterator, err := obj.selectSamples(ctx, streamsPredicate, logsPredicates, expr)
if err != nil {
@ -379,8 +389,8 @@ func shardObjects(
shard logql.Shard,
logger log.Logger,
) ([]*shardedObject, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "shardObjects")
defer span.Finish()
ctx, span := tracer.Start(ctx, "shardObjects")
defer span.End()
metadatas, err := fetchSectionsStats(ctx, objects)
if err != nil {
@ -443,9 +453,8 @@ func shardObjects(
}
level.Debug(logger).Log(logParams...)
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(logParams...)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(tracing.KeyValuesToOTelAttributes(logParams)...)
return shardedReaders, nil
}
@ -505,10 +514,16 @@ func (s *shardedObject) selectLogs(ctx context.Context, streamsPredicate streams
for i, reader := range s.logReaders {
g.Go(func() error {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("msg", "starting selectLogs in section", "index", i)
defer sp.LogKV("msg", "selectLogs section done", "index", i)
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("starting selectLogs in section", trace.WithAttributes(
attribute.Int("index", i),
))
defer func() {
sp.AddEvent("selectLogs section done", trace.WithAttributes(
attribute.Int("index", i),
))
}()
iter, err := newEntryIterator(ctx, s.streams, reader, req)
if err != nil {
return err
@ -538,10 +553,16 @@ func (s *shardedObject) selectSamples(ctx context.Context, streamsPredicate stre
for i, reader := range s.logReaders {
g.Go(func() error {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("msg", "starting selectSamples in section", "index", i)
defer sp.LogKV("msg", "selectSamples section done", "index", i)
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("starting selectSamples in section", trace.WithAttributes(
attribute.Int("index", i),
))
defer func() {
sp.AddEvent("selectSamples section done", trace.WithAttributes(
attribute.Int("index", i),
))
}()
// extractors is not thread safe, so we need to create a new one for each object
extractors, err := expr.Extractors()
if err != nil {
@ -576,10 +597,10 @@ func (s *shardedObject) setPredicate(streamsPredicate streams.RowPredicate, logs
}
func (s *shardedObject) matchStreams(ctx context.Context) error {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("msg", "starting matchStreams")
defer sp.LogKV("msg", "matchStreams done")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("starting matchStreams")
defer sp.AddEvent("matchStreams done")
streamsPtr := streamsPool.Get().(*[]streams.Stream)
defer streamsPool.Put(streamsPtr)
streams := *streamsPtr
@ -609,10 +630,11 @@ func (s *shardedObject) matchStreams(ctx context.Context) error {
// fetchSectionsStats retrieves section count of objects.
func fetchSectionsStats(ctx context.Context, objects []object) ([]sectionsStats, error) {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("msg", "fetching metadata", "objects", len(objects))
defer sp.LogKV("msg", "fetched metadata")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("fetching metadata", trace.WithAttributes(
attribute.Int("objects", len(objects)),
))
defer sp.AddEvent("fetched metadata")
res := make([]sectionsStats, 0, len(objects))

@ -16,6 +16,7 @@ import (
"unicode/utf8"
otlptranslate "github.com/prometheus/otlptranslator"
"go.opentelemetry.io/otel/trace"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@ -32,7 +33,6 @@ import (
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@ -580,13 +580,9 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
var ingestionBlockedError error
func() {
sp := opentracing.SpanFromContext(ctx)
if sp != nil {
sp.LogKV("event", "start to validate request")
defer func() {
sp.LogKV("event", "finished to validate request")
}()
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("start to validate request")
defer sp.AddEvent("finished to validate request")
for _, stream := range req.Streams {
// Return early if stream does not contain any entries
@ -788,13 +784,9 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
ingesterDescs := map[string]ring.InstanceDesc{}
if err := func() error {
sp := opentracing.SpanFromContext(ctx)
if sp != nil {
sp.LogKV("event", "started to query ingesters ring")
defer func() {
sp.LogKV("event", "finished to query ingesters ring")
}()
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("started to query ingesters ring")
defer sp.AddEvent("finished to query ingesters ring")
for i, stream := range streams {
replicationSet, err := d.ingestersRing.Get(stream.HashKey, ring.WriteNoExtend, descs[:0], nil, nil)
@ -822,9 +814,9 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
// Clone the context using WithoutCancel, which is not canceled when parent is canceled.
// This is to make sure all ingesters get samples even if we return early
localCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), d.clientCfg.RemoteTimeout)
if sp := opentracing.SpanFromContext(ctx); sp != nil {
localCtx = opentracing.ContextWithSpan(localCtx, sp)
}
sp := trace.SpanFromContext(ctx)
localCtx = trace.ContextWithSpan(localCtx, sp)
select {
case <-ctx.Done():
cancel()
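
A recurring simplification in these hunks: opentracing.SpanFromContext could return nil, so every annotation was wrapped in an `if sp != nil` guard, whereas trace.SpanFromContext always returns a span (a no-op one when the context carries none), so the guards disappear. A small sketch of the resulting shape (the function is illustrative):

package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// validateRequest annotates whatever span is already in ctx; when the context
// carries no span, trace.SpanFromContext returns a no-op span, so no nil check
// is needed.
func validateRequest(ctx context.Context, streams int) {
	sp := trace.SpanFromContext(ctx)
	sp.AddEvent("start to validate request")
	defer sp.AddEvent("finished to validate request")

	sp.SetAttributes(attribute.Int("streams", streams))
	// ... validation work ...
}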

@ -21,7 +21,7 @@ import (
)
var (
trace = []byte("trace")
traceBytes = []byte("trace")
traceAbbrv = []byte("trc")
debug = []byte("debug")
debugAbbrv = []byte("dbg")
@ -196,7 +196,7 @@ func (l *FieldDetector) extractLogLevelFromLogLine(log string) string {
}
switch {
case bytes.EqualFold(v, trace), bytes.EqualFold(v, traceAbbrv):
case bytes.EqualFold(v, traceBytes), bytes.EqualFold(v, traceAbbrv):
return constants.LogLevelTrace
case bytes.EqualFold(v, debug), bytes.EqualFold(v, debugAbbrv):
return constants.LogLevelDebug

@ -12,8 +12,9 @@ import (
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/ring/client"
"github.com/grafana/dskit/services"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/util"
@ -133,10 +134,14 @@ type rateStats struct {
func (s *rateStore) updateRates(ctx context.Context, updated map[string]map[uint64]expiringRate) rateStats {
streamCnt := 0
if s.debug {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "started updating rates")
defer sp.LogKV("event", "finished updating rates", "streams", streamCnt)
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("started updating rates")
defer func() {
sp.AddEvent("finished updating rates", trace.WithAttributes(
attribute.Int("streams", streamCnt),
))
}()
}
s.rateLock.Lock()
defer s.rateLock.Unlock()
@ -230,10 +235,10 @@ func (s *rateStore) anyShardingEnabled() bool {
func (s *rateStore) aggregateByShard(ctx context.Context, streamRates map[string]map[uint64]*logproto.StreamRate) map[string]map[uint64]expiringRate {
if s.debug {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "started to aggregate by shard")
defer sp.LogKV("event", "finished to aggregate by shard")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("started to aggregate by shard")
defer sp.AddEvent("finished to aggregate by shard")
}
rates := map[string]map[uint64]expiringRate{}
now := time.Now()
@ -259,10 +264,10 @@ func (s *rateStore) aggregateByShard(ctx context.Context, streamRates map[string
func (s *rateStore) getRates(ctx context.Context, clients []ingesterClient) map[string]map[uint64]*logproto.StreamRate {
if s.debug {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "started to get rates from ingesters")
defer sp.LogKV("event", "finished to get rates from ingesters")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("started to get rates from ingesters")
defer sp.AddEvent("finished to get rates from ingesters")
}
parallelClients := make(chan ingesterClient, len(clients))
@ -336,10 +341,10 @@ func (s *rateStore) ratesPerStream(responses chan *logproto.StreamRatesResponse,
func (s *rateStore) getClients(ctx context.Context) ([]ingesterClient, error) {
if s.debug {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "ratestore started getting clients")
defer sp.LogKV("event", "ratestore finished getting clients")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("ratestore started getting clients")
defer sp.AddEvent("ratestore finished getting clients")
}
ingesters, err := s.ring.GetAllHealthy(ring.Read)

@ -19,10 +19,9 @@ import (
"github.com/grafana/dskit/ring/client"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -140,6 +139,7 @@ func NewGatewayClient(cfg ClientConfig, r prometheus.Registerer, limits Limits,
if err != nil {
return nil, errors.Wrap(err, "index gateway grpc dial option")
}
dialOpts = append(dialOpts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
factory := func(addr string) (client.PoolClient, error) {
igPool, err := NewClientPool(addr, dialOpts)
if err != nil {
@ -544,13 +544,11 @@ func (b *grpcIter) Value() []byte {
func instrumentation(cfg ClientConfig, clientRequestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) {
var unaryInterceptors []grpc.UnaryClientInterceptor
unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...)
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()))
unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor)
unaryInterceptors = append(unaryInterceptors, middleware.UnaryClientInstrumentInterceptor(clientRequestDuration))
var streamInterceptors []grpc.StreamClientInterceptor
streamInterceptors = append(streamInterceptors, cfg.GRCPStreamClientInterceptors...)
streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()))
streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor)
streamInterceptors = append(streamInterceptors, middleware.StreamClientInstrumentInterceptor(clientRequestDuration))

@ -13,11 +13,13 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/logproto"
@ -36,6 +38,8 @@ import (
"github.com/grafana/loki/v3/pkg/util/spanlogger"
)
var tracer = otel.Tracer("pkg/indexgateway")
const (
maxIndexEntriesPerResponse = 1000
)
@ -106,7 +110,7 @@ func NewIndexGateway(cfg Config, limits Limits, log log.Logger, r prometheus.Reg
}
func (g *Gateway) QueryIndex(request *logproto.QueryIndexRequest, server logproto.IndexGateway_QueryIndexServer) error {
log, _ := spanlogger.New(context.Background(), g.log, "IndexGateway.QueryIndex")
log, _ := spanlogger.NewOTel(context.Background(), g.log, tracer, "IndexGateway.QueryIndex")
defer log.Finish()
var outerErr, innerErr error
@ -209,8 +213,8 @@ func buildResponses(query seriesindex.Query, batch seriesindex.ReadBatchResult,
func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequest) (result *logproto.GetChunkRefResponse, err error) {
logger := util_log.WithContext(ctx, g.log)
sp, ctx := opentracing.StartSpanFromContext(ctx, "indexgateway.GetChunkRef")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "indexgateway.GetChunkRef")
defer sp.End()
instanceID, err := tenant.TenantID(ctx)
if err != nil {
@ -267,14 +271,19 @@ func (g *Gateway) GetChunkRef(ctx context.Context, req *logproto.GetChunkRefRequ
for _, s := range series {
seriesMap[s.Hash()] = s
}
sp.LogKV("msg", "indexQuerier.GetSeries", "duration", time.Since(start), "count", len(series))
sp.AddEvent("indexQuerier.GetSeries", trace.WithAttributes(
attribute.String("duration", time.Since(start).String()),
attribute.Int("count", len(series)),
))
start = time.Now()
chunkRefs, used, err := g.bloomQuerier.FilterChunkRefs(ctx, instanceID, req.From, req.Through, seriesMap, result.Refs, req.Plan)
if err != nil {
return nil, err
}
sp.LogKV("msg", "bloomQuerier.FilterChunkRefs", "duration", time.Since(start))
sp.AddEvent("bloomQuerier.FilterChunkRefs", trace.WithAttributes(
attribute.String("duration", time.Since(start).String()),
))
result.Refs = chunkRefs
level.Info(logger).Log("msg", "return filtered chunk refs", "unfiltered", initialChunkCount, "filtered", len(result.Refs), "used_blooms", used)
@ -396,8 +405,8 @@ func (g *Gateway) GetVolume(ctx context.Context, req *logproto.VolumeRequest) (*
func (g *Gateway) GetShards(request *logproto.ShardsRequest, server logproto.IndexGateway_GetShardsServer) error {
ctx := server.Context()
sp, ctx := opentracing.StartSpanFromContext(ctx, "indexgateway.GetShards")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "indexgateway.GetShards")
defer sp.End()
instanceID, err := tenant.TenantID(ctx)
if err != nil {
@ -411,10 +420,9 @@ func (g *Gateway) GetShards(request *logproto.ShardsRequest, server logproto.Ind
forSeries, ok := g.indexQuerier.HasForSeries(request.From, request.Through)
if !ok {
sp.LogKV(
"msg", "index does not support forSeries",
"action", "falling back to indexQuerier.GetShards impl",
)
sp.AddEvent("index does not support forSeries", trace.WithAttributes(
attribute.String("action", "falling back to indexQuerier.GetShards impl"),
))
shards, err := g.indexQuerier.GetShards(
ctx,
instanceID,
@ -455,8 +463,8 @@ func (g *Gateway) boundedShards(
// sending multiple requests to the entire keyspace).
logger := util_log.WithContext(ctx, g.log)
sp, ctx := opentracing.StartSpanFromContext(ctx, "indexgateway.boundedShards")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "indexgateway.boundedShards")
defer sp.End()
// 1) for all bounds, get chunk refs
grps, _, err := g.indexQuerier.GetChunks(ctx, instanceID, req.From, req.Through, p, nil)
@ -469,10 +477,9 @@ func (g *Gateway) boundedShards(
ct += len(g)
}
sp.LogKV(
"stage", "queried local index",
"index_chunks_resolved", ct,
)
sp.AddEvent("queried local index", trace.WithAttributes(
attribute.Int("index_chunks_resolved", ct),
))
// TODO(owen-d): pool
refs := make([]*logproto.ChunkRef, 0, ct)
@ -529,7 +536,9 @@ func (g *Gateway) boundedShards(
}
}
sp.LogKV("msg", "send shards response", "shards", len(resp.Shards))
sp.AddEvent("send shards response", trace.WithAttributes(
attribute.Int("shards", len(resp.Shards)),
))
var refCt int
for _, grp := range resp.ChunkGroups {

@ -5,12 +5,12 @@ import (
"io"
"time"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"github.com/grafana/loki/v3/pkg/util/server"
"github.com/grafana/dskit/grpcclient"
"github.com/grafana/dskit/middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"google.golang.org/grpc"
@ -74,6 +74,7 @@ func New(cfg Config, addr string) (HealthAndIngesterClient, error) {
return nil, err
}
opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
opts = append(opts, dialOpts...)
// nolint:staticcheck // grpc.Dial() has been deprecated; we'll address it before upgrading to gRPC 2.
@ -95,7 +96,6 @@ func instrumentation(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.StreamC
unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientQueryTagsInterceptor)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientHTTPHeadersInterceptor)
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor)
}
@ -105,7 +105,6 @@ func instrumentation(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.StreamC
streamInterceptors = append(streamInterceptors, cfg.GRCPStreamClientInterceptors...)
streamInterceptors = append(streamInterceptors, server.StreamClientQueryTagsInterceptor)
streamInterceptors = append(streamInterceptors, server.StreamClientHTTPHeadersInterceptor)
streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor)
}

@ -24,10 +24,11 @@ import (
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/time/rate"
"google.golang.org/grpc/health/grpc_health_v1"
@ -1014,10 +1015,9 @@ func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logpro
// GetStreamRates returns a response containing all streams and their current rate
// TODO: It might be nice for this to be human readable, eventually: Sort output and return labels, too?
func (i *Ingester) GetStreamRates(ctx context.Context, _ *logproto.StreamRatesRequest) (*logproto.StreamRatesResponse, error) {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "ingester started to handle GetStreamRates")
defer sp.LogKV("event", "ingester finished handling GetStreamRates")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("ingester started to handle GetStreamRates")
defer sp.AddEvent("ingester finished handling GetStreamRates")
// Set profiling tags
defer pprof.SetGoroutineLabels(ctx)
@ -1123,7 +1123,7 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log
// initialize stats collection for ingester queries.
_, ctx := stats.NewContext(queryServer.Context())
_, ctx = metadata.NewContext(ctx)
sp := opentracing.SpanFromContext(ctx)
sp := trace.SpanFromContext(ctx)
// If the plan is empty we want all series to be returned.
if req.Plan == nil {
@ -1155,9 +1155,11 @@ func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer log
if err != nil {
return err
}
if sp != nil {
sp.LogKV("event", "finished instance query sample", "selector", req.Selector, "start", req.Start, "end", req.End)
}
sp.AddEvent("finished instance query sample", trace.WithAttributes(
attribute.String("selector", req.Selector),
attribute.String("start", req.Start.String()),
attribute.String("end", req.End.String()),
))
if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok {
storeReq := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{
@ -1401,7 +1403,7 @@ func (i *Ingester) series(ctx context.Context, req *logproto.SeriesRequest) (*lo
}
func (i *Ingester) GetStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) {
sp := opentracing.SpanFromContext(ctx)
sp := trace.SpanFromContext(ctx)
user, err := tenant.TenantID(ctx)
if err != nil {
@ -1449,15 +1451,15 @@ func (i *Ingester) GetStats(ctx context.Context, req *logproto.IndexStatsRequest
merged := index_stats.MergeStats(resps...)
if sp != nil {
sp.LogKV(
"user", user,
"from", req.From.Time(),
"through", req.Through.Time(),
"matchers", syntax.MatchersString(matchers),
"streams", merged.Streams,
"chunks", merged.Chunks,
"bytes", merged.Bytes,
"entries", merged.Entries,
sp.SetAttributes(
attribute.String("user", user),
attribute.String("from", req.From.Time().String()),
attribute.String("through", req.Through.Time().String()),
attribute.String("matchers", syntax.MatchersString(matchers)),
attribute.Int64("streams", int64(merged.Streams)),
attribute.Int64("chunks", int64(merged.Chunks)),
attribute.Int64("bytes", int64(merged.Bytes)),
attribute.Int64("entries", int64(merged.Entries)),
)
}
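
Because LogKV accepted arbitrary values while OTel attributes are typed, the conversions above cast unsigned counters to int64 and render times and durations as strings. A standalone sketch of that mapping (all names are placeholders):

package example

import (
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// annotateStats shows the value conversions used in this commit: unsigned
// counters are cast to int64 and durations are rendered as strings.
func annotateStats(sp trace.Span, streams uint64, start time.Time) {
	sp.SetAttributes(
		attribute.Int64("streams", int64(streams)),
		attribute.String("duration", time.Since(start).String()),
	)
}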

@ -15,13 +15,14 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunks"
tsdb_record "github.com/prometheus/prometheus/tsdb/record"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"github.com/grafana/loki/v3/pkg/analytics"
@ -814,18 +815,17 @@ func (i *instance) getStats(ctx context.Context, req *logproto.IndexStatsRequest
return nil, err
}
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(
"function", "instance.GetStats",
"from", from,
"through", through,
"matchers", syntax.MatchersString(matchers),
"streams", res.Streams,
"chunks", res.Chunks,
"bytes", res.Bytes,
"entries", res.Entries,
)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(
attribute.String("function", "instance.GetStats"),
attribute.String("from", from.String()),
attribute.String("through", through.String()),
attribute.String("matchers", syntax.MatchersString(matchers)),
attribute.Int64("streams", int64(res.Streams)),
attribute.Int64("chunks", int64(res.Chunks)),
attribute.Int64("bytes", int64(res.Bytes)),
attribute.Int64("entries", int64(res.Entries)),
)
return res, nil
}
@ -1133,7 +1133,7 @@ func sendBatches(ctx context.Context, i iter.EntryIterator, queryServer QuerierQ
}
func sendSampleBatches(ctx context.Context, it iter.SampleIterator, queryServer logproto.Querier_QuerySampleServer) error {
sp := opentracing.SpanFromContext(ctx)
sp := trace.SpanFromContext(ctx)
stats := stats.FromContext(ctx)
metadata := metadata.FromContext(ctx)
@ -1162,9 +1162,7 @@ func sendSampleBatches(ctx context.Context, it iter.SampleIterator, queryServer
stats.Reset()
metadata.Reset()
if sp != nil {
sp.LogKV("event", "sent batch", "size", size)
}
sp.AddEvent("sent batch", trace.WithAttributes(attribute.Int("size", int(size))))
}
return nil

@ -11,9 +11,10 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/chunkenc"
"github.com/grafana/loki/v3/pkg/distributor/writefailures"
@ -329,10 +330,11 @@ func (s *stream) recordAndSendToTailers(record *wal.Record, entries []logproto.E
}
func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usageTracker push.UsageTracker) (int, []logproto.Entry, []entryWithError) {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "stream started to store entries", "labels", s.labelsString)
defer sp.LogKV("event", "stream finished to store entries")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("stream started to store entries", trace.WithAttributes(
attribute.String("labels", s.labelsString)),
)
defer sp.AddEvent("stream finished to store entries")
var bytesAdded, outOfOrderSamples, outOfOrderBytes int
@ -498,10 +500,10 @@ func (s *stream) reportMetrics(ctx context.Context, outOfOrderSamples, outOfOrde
}
func (s *stream) cutChunk(ctx context.Context) *chunkDesc {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("event", "stream started to cut chunk")
defer sp.LogKV("event", "stream finished to cut chunk")
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("stream started to cut chunk")
defer sp.AddEvent("stream finished to cut chunk")
// If the chunk has no more space call Close to make sure anything in the head block is cut and compressed
chunk := &s.chunks[len(s.chunks)-1]
err := chunk.chunk.Close()

@ -11,10 +11,9 @@ import (
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/ring"
ring_client "github.com/grafana/dskit/ring/client"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
@ -82,6 +81,7 @@ func NewClient(cfg Config, addr string) (*Client, error) {
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
opts = append(opts, dialOpts...)
// nolint:staticcheck // grpc.Dial() has been deprecated; we'll address it before upgrading to gRPC 2.
conn, err := grpc.Dial(addr, opts...)
@ -105,7 +105,6 @@ func getGRPCInterceptors(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.Str
unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientQueryTagsInterceptor)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientHTTPHeadersInterceptor)
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor)
}
@ -114,7 +113,6 @@ func getGRPCInterceptors(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.Str
streamInterceptors = append(streamInterceptors, cfg.GRCPStreamClientInterceptors...)
streamInterceptors = append(streamInterceptors, server.StreamClientQueryTagsInterceptor)
streamInterceptors = append(streamInterceptors, server.StreamClientHTTPHeadersInterceptor)
streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor)
}

@ -14,10 +14,9 @@ import (
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/ring"
ring_client "github.com/grafana/dskit/ring/client"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
@ -100,6 +99,7 @@ func NewClient(cfg Config, addr string) (*Client, error) {
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
opts = append(opts, dialOpts...)
// nolint:staticcheck // grpc.Dial() has been deprecated; we'll address it before upgrading to gRPC 2.
conn, err := grpc.Dial(addr, opts...)
@ -123,7 +123,6 @@ func getGRPCInterceptors(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.Str
unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientQueryTagsInterceptor)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientHTTPHeadersInterceptor)
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor)
}
@ -132,7 +131,6 @@ func getGRPCInterceptors(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.Str
streamInterceptors = append(streamInterceptors, cfg.GRCPStreamClientInterceptors...)
streamInterceptors = append(streamInterceptors, server.StreamClientQueryTagsInterceptor)
streamInterceptors = append(streamInterceptors, server.StreamClientHTTPHeadersInterceptor)
streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor)
}

@ -14,11 +14,11 @@ import (
"github.com/c2h5oh/datasize"
"github.com/cespare/xxhash/v2"
jsoniter "github.com/json-iterator/go"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache"
@ -276,12 +276,12 @@ func (m *IndexStatsRequest) WithQuery(query string) definitions.Request {
return &clone
}
// LogToSpan writes information about this request to an OpenTracing span
func (m *IndexStatsRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("query", m.GetQuery()),
otlog.String("start", timestamp.Time(int64(m.From)).String()),
otlog.String("end", timestamp.Time(int64(m.Through)).String()),
// LogToSpan writes information about this request to an OTel span
func (m *IndexStatsRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", m.GetQuery()),
attribute.String("start", timestamp.Time(int64(m.From)).String()),
attribute.String("end", timestamp.Time(int64(m.Through)).String()),
)
}
@ -326,13 +326,13 @@ func (m *VolumeRequest) WithQuery(query string) definitions.Request {
return &clone
}
// LogToSpan writes information about this request to an OpenTracing span
func (m *VolumeRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("query", m.GetQuery()),
otlog.String("start", timestamp.Time(int64(m.From)).String()),
otlog.String("end", timestamp.Time(int64(m.Through)).String()),
otlog.String("step", time.Duration(m.Step).String()),
// LogToSpan writes information about this request to an OTel span
func (m *VolumeRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", m.GetQuery()),
attribute.String("start", timestamp.Time(int64(m.From)).String()),
attribute.String("end", timestamp.Time(int64(m.Through)).String()),
attribute.String("step", time.Duration(m.Step).String()),
)
}
@ -494,14 +494,13 @@ func (m *ShardsRequest) WithStartEndForCache(start, end time.Time) resultscache.
return m.WithStartEnd(start, end).(resultscache.Request)
}
func (m *ShardsRequest) LogToSpan(sp opentracing.Span) {
fields := []otlog.Field{
otlog.String("from", timestamp.Time(int64(m.From)).String()),
otlog.String("through", timestamp.Time(int64(m.Through)).String()),
otlog.String("query", m.GetQuery()),
otlog.String("target_bytes_per_shard", datasize.ByteSize(m.TargetBytesPerShard).HumanReadable()),
}
sp.LogFields(fields...)
func (m *ShardsRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("from", timestamp.Time(int64(m.From)).String()),
attribute.String("through", timestamp.Time(int64(m.Through)).String()),
attribute.String("query", m.GetQuery()),
attribute.String("target_bytes_per_shard", datasize.ByteSize(m.TargetBytesPerShard).HumanReadable()),
)
}
func (m *DetectedFieldsRequest) GetCachingOptions() (res definitions.CachingOptions) { return }
@ -519,16 +518,15 @@ func (m *DetectedFieldsRequest) WithQuery(query string) definitions.Request {
return &clone
}
func (m *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) {
fields := []otlog.Field{
otlog.String("query", m.GetQuery()),
otlog.String("start", m.Start.String()),
otlog.String("end", m.End.String()),
otlog.String("step", time.Duration(m.Step).String()),
otlog.String("field_limit", fmt.Sprintf("%d", m.Limit)),
otlog.String("line_limit", fmt.Sprintf("%d", m.LineLimit)),
}
sp.LogFields(fields...)
func (m *DetectedFieldsRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", m.GetQuery()),
attribute.String("start", m.Start.String()),
attribute.String("end", m.End.String()),
attribute.String("step", time.Duration(m.Step).String()),
attribute.String("field_limit", fmt.Sprintf("%d", m.Limit)),
attribute.String("line_limit", fmt.Sprintf("%d", m.LineLimit)),
)
}
func (m *QueryPatternsRequest) GetCachingOptions() (res definitions.CachingOptions) { return }
@ -550,14 +548,13 @@ func (m *QueryPatternsRequest) WithStartEndForCache(start, end time.Time) result
return m.WithStartEnd(start, end).(resultscache.Request)
}
func (m *QueryPatternsRequest) LogToSpan(sp opentracing.Span) {
fields := []otlog.Field{
otlog.String("query", m.GetQuery()),
otlog.String("start", m.Start.String()),
otlog.String("end", m.End.String()),
otlog.String("step", time.Duration(m.Step).String()),
}
sp.LogFields(fields...)
func (m *QueryPatternsRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", m.GetQuery()),
attribute.String("start", m.Start.String()),
attribute.String("end", m.End.String()),
attribute.String("step", time.Duration(m.Step).String()),
)
}
func (m *DetectedLabelsRequest) GetStep() int64 { return 0 }
@ -581,11 +578,10 @@ func (m *DetectedLabelsRequest) WithStartEndForCache(start, end time.Time) resul
return m.WithStartEnd(start, end).(resultscache.Request)
}
func (m *DetectedLabelsRequest) LogToSpan(sp opentracing.Span) {
fields := []otlog.Field{
otlog.String("query", m.GetQuery()),
otlog.String("start", m.Start.String()),
otlog.String("end", m.End.String()),
}
sp.LogFields(fields...)
func (m *DetectedLabelsRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", m.GetQuery()),
attribute.String("start", m.Start.String()),
attribute.String("end", m.End.String()),
)
}
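
Note: every LogToSpan rewrite above follows the same shape: opentracing's sp.LogFields(otlog.String(...)) log fields become typed OTel span attributes set with sp.SetAttributes(attribute.String(...)). A minimal sketch of the pattern, using a hypothetical FooRequest type rather than any of the real request types in this change:

package example

import (
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// FooRequest is a stand-in for the real request types above.
type FooRequest struct {
	Query      string
	Start, End time.Time
}

// LogToSpan records the request parameters as span attributes; under
// opentracing the same values were emitted as span log fields.
func (r *FooRequest) LogToSpan(sp trace.Span) {
	sp.SetAttributes(
		attribute.String("query", r.Query),
		attribute.String("start", r.Start.String()),
		attribute.String("end", r.End.String()),
	)
}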

@ -1,6 +1,7 @@
package logproto
import (
"context"
"encoding/json"
"fmt"
"math"
@ -9,12 +10,14 @@ import (
"unsafe"
jsoniter "github.com/json-iterator/go"
"github.com/opentracing/opentracing-go/mocktracer"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/trace/tracetest"
"github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/querier/plan"
@ -350,23 +353,32 @@ func TestIndexStatsRequestSpanLogging(t *testing.T) {
Through: model.Time(end.UnixMilli()),
}
span := mocktracer.MockSpan{}
req.LogToSpan(&span)
for _, l := range span.Logs() {
for _, field := range l.Fields {
if field.Key == "start" {
require.Equal(t, timestamp.Time(now.UnixMilli()).String(), field.ValueString)
}
if field.Key == "end" {
require.Equal(t, timestamp.Time(end.UnixMilli()).String(), field.ValueString)
}
exporter := tracetest.NewInMemoryExporter()
tp := tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
)
_, sp := tp.Tracer("test").Start(context.Background(), "request")
req.LogToSpan(sp)
sp.End()
spans := exporter.GetSpans()
require.Len(t, spans, 1)
span := spans[0]
found := 0
for _, l := range span.Attributes {
if l.Key == "start" {
require.Equal(t, attribute.StringValue(timestamp.Time(now.UnixMilli()).String()), l.Value)
found++
}
if l.Key == "end" {
require.Equal(t, attribute.StringValue(timestamp.Time(end.UnixMilli()).String()), l.Value)
found++
}
}
require.Equal(t, 2, found, "expected to find start and end attributes in span")
}
func TestVolumeRequest(t *testing.T) {
func TestVolumeRequestSpanLogging(t *testing.T) {
now := time.Now()
end := now.Add(1000 * time.Second)
req := VolumeRequest{
@ -374,20 +386,29 @@ func TestVolumeRequest(t *testing.T) {
Through: model.Time(end.UnixMilli()),
}
span := mocktracer.MockSpan{}
req.LogToSpan(&span)
for _, l := range span.Logs() {
for _, field := range l.Fields {
if field.Key == "start" {
require.Equal(t, timestamp.Time(now.UnixMilli()).String(), field.ValueString)
}
if field.Key == "end" {
require.Equal(t, timestamp.Time(end.UnixMilli()).String(), field.ValueString)
}
exporter := tracetest.NewInMemoryExporter()
tp := tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
)
_, sp := tp.Tracer("test").Start(context.Background(), "request")
req.LogToSpan(sp)
sp.End()
spans := exporter.GetSpans()
require.Len(t, spans, 1)
span := spans[0]
found := 0
for _, l := range span.Attributes {
if l.Key == "start" {
require.Equal(t, attribute.StringValue(timestamp.Time(now.UnixMilli()).String()), l.Value)
found++
}
if l.Key == "end" {
require.Equal(t, attribute.StringValue(timestamp.Time(end.UnixMilli()).String()), l.Value)
found++
}
}
require.Equal(t, 2, found, "expected to find start and end attributes in span")
}
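
Note: the rewritten span-logging tests all build the same harness: an in-memory exporter behind a SimpleSpanProcessor, a throwaway TracerProvider, and assertions over the exported span's Attributes. A condensed, self-contained sketch of that harness; newTestSpanRecorder is a hypothetical helper, not part of this change:

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/otel/attribute"
	tracesdk "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
	"go.opentelemetry.io/otel/trace"
)

// newTestSpanRecorder returns a started span plus the exporter that will
// receive it once the span is ended.
func newTestSpanRecorder(ctx context.Context) (trace.Span, *tracetest.InMemoryExporter) {
	exporter := tracetest.NewInMemoryExporter()
	tp := tracesdk.NewTracerProvider(
		tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
	)
	_, sp := tp.Tracer("test").Start(ctx, "request")
	return sp, exporter
}

func TestSpanAttributes(t *testing.T) {
	sp, exporter := newTestSpanRecorder(context.Background())
	sp.SetAttributes(attribute.String("query", `{app="loki"}`))
	sp.End() // the simple processor exports synchronously on End

	spans := exporter.GetSpans()
	require.Len(t, spans, 1)
	require.Contains(t, spans[0].Attributes, attribute.String("query", `{app="loki"}`))
}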
func benchmarkMergeLabelResponses(b *testing.B, responses []*LabelResponse) {

@ -11,9 +11,11 @@ import (
"strings"
"time"
"github.com/opentracing/opentracing-go"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"github.com/grafana/loki/v3/pkg/logqlmodel/metadata"
"github.com/grafana/loki/v3/pkg/tracing"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@ -39,6 +41,8 @@ import (
"github.com/grafana/loki/v3/pkg/util/validation"
)
var tracer = otel.Tracer("pkg/logql")
const (
DefaultBlockedQueryMessage = "blocked by policy"
)
@ -246,16 +250,16 @@ func (q *query) resultLength(res promql_parser.Value) int {
// Exec Implements `Query`. It handles instrumentation & defers to Eval.
func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "query.Exec")
defer sp.Finish()
sp.LogKV(
"type", GetRangeType(q.params),
"query", q.params.QueryString(),
"start", q.params.Start(),
"end", q.params.End(),
"step", q.params.Step(),
"length", q.params.End().Sub(q.params.Start()),
ctx, sp := tracer.Start(ctx, "query.Exec")
defer sp.End()
sp.SetAttributes(
attribute.String("type", string(GetRangeType(q.params))),
attribute.String("query", q.params.QueryString()),
attribute.String("start", q.params.Start().String()),
attribute.String("end", q.params.End().String()),
attribute.String("step", q.params.Step().String()),
attribute.String("length", q.params.End().Sub(q.params.Start()).String()),
)
if q.logExecQuery {
@ -291,7 +295,7 @@ func (q *query) Exec(ctx context.Context) (logqlmodel.Result, error) {
queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration)
statResult := statsCtx.Result(time.Since(start), queueTime, q.resultLength(data))
sp.LogKV(statResult.KVList()...)
sp.SetAttributes(tracing.KeyValuesToOTelAttributes(statResult.KVList())...)
status, _ := server.ClientHTTPStatusAndError(err)
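
Note: the engine still emits its query statistics as a flat key/value list; the new code maps that list onto span attributes via tracing.KeyValuesToOTelAttributes. A plausible sketch of such a converter is below; the actual helper in pkg/tracing may differ in typing and edge-case handling:

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// keyValuesToOTelAttributes converts an alternating key/value list (the shape
// produced by stats.Result.KVList) into OTel attributes. Values are rendered
// as strings for simplicity; the real helper may preserve numeric types.
func keyValuesToOTelAttributes(kvs ...interface{}) []attribute.KeyValue {
	attrs := make([]attribute.KeyValue, 0, len(kvs)/2)
	for i := 0; i+1 < len(kvs); i += 2 {
		key, ok := kvs[i].(string)
		if !ok {
			key = fmt.Sprintf("%v", kvs[i])
		}
		attrs = append(attrs, attribute.String(key, fmt.Sprintf("%v", kvs[i+1])))
	}
	return attrs
}

With a helper of that shape, sp.SetAttributes(keyValuesToOTelAttributes(statResult.KVList())...) attaches the whole stats block to the query span in one call.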

@ -10,10 +10,9 @@ import (
"github.com/go-kit/log"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/uber/jaeger-client-go"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
@ -55,11 +54,11 @@ func TestQueryType(t *testing.T) {
func TestLogSlowQuery(t *testing.T) {
buf := bytes.NewBufferString("")
util_log.Logger = log.NewLogfmtLogger(buf)
tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
defer c.Close()
opentracing.SetGlobalTracer(tr)
sp := opentracing.StartSpan("")
ctx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), "foo"), sp)
ctx := user.InjectOrgID(context.Background(), "foo")
ctx, sp := tracesdk.NewTracerProvider().Tracer("test").Start(ctx, "test")
defer sp.End()
now := time.Now()
ctx = context.WithValue(ctx, httpreq.QueryTagsHTTPHeader, "Source=logvolhist,Feature=Beta")
@ -84,7 +83,7 @@ func TestLogSlowQuery(t *testing.T) {
require.Regexp(t,
regexp.MustCompile(fmt.Sprintf(
`level=info org_id=foo traceID=%s sampled=true latency=slow query=".*" query_hash=.* query_type=filter range_type=range length=1h0m0s .*\n`,
sp.Context().(jaeger.SpanContext).SpanID().String(),
sp.SpanContext().TraceID(),
)),
buf.String())
util_log.Logger = log.NewNopLogger()
@ -92,12 +91,12 @@ func TestLogSlowQuery(t *testing.T) {
func TestLogLabelsQuery(t *testing.T) {
buf := bytes.NewBufferString("")
tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
logger := log.NewLogfmtLogger(buf)
defer c.Close()
opentracing.SetGlobalTracer(tr)
sp := opentracing.StartSpan("")
ctx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), "foo"), sp)
ctx := user.InjectOrgID(context.Background(), "foo")
ctx, sp := tracesdk.NewTracerProvider().Tracer("test").Start(ctx, "test")
defer sp.End()
now := time.Now()
RecordLabelQueryMetrics(ctx, logger, now.Add(-1*time.Hour), now, "foo", "", "200", stats.Result{
Summary: stats.Summary{
@ -119,7 +118,7 @@ func TestLogLabelsQuery(t *testing.T) {
require.Regexp(t,
fmt.Sprintf(
"level=info org_id=foo traceID=%s sampled=true latency=slow query_type=labels splits=0 start=.* end=.* start_delta=1h0m0.* end_delta=.* length=1h0m0s duration=25.25s status=200 label=foo query= query_hash=2166136261 total_entries=12 cache_label_results_req=2 cache_label_results_hit=1 cache_label_results_stored=1 cache_label_results_download_time=80ns cache_label_results_query_length_served=10ns\n",
sp.Context().(jaeger.SpanContext).SpanID().String(),
sp.SpanContext().TraceID(),
),
buf.String())
util_log.Logger = log.NewNopLogger()
@ -128,11 +127,11 @@ func TestLogLabelsQuery(t *testing.T) {
func TestLogSeriesQuery(t *testing.T) {
buf := bytes.NewBufferString("")
logger := log.NewLogfmtLogger(buf)
tr, c := jaeger.NewTracer("foo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
defer c.Close()
opentracing.SetGlobalTracer(tr)
sp := opentracing.StartSpan("")
ctx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), "foo"), sp)
ctx := user.InjectOrgID(context.Background(), "foo")
ctx, sp := tracesdk.NewTracerProvider().Tracer("test").Start(ctx, "test")
defer sp.End()
now := time.Now()
RecordSeriesQueryMetrics(ctx, logger, now.Add(-1*time.Hour), now, []string{`{container_name=~"prometheus.*", component="server"}`, `{app="loki"}`}, "200", []string{}, stats.Result{
Summary: stats.Summary{
@ -154,7 +153,7 @@ func TestLogSeriesQuery(t *testing.T) {
require.Regexp(t,
fmt.Sprintf(
"level=info org_id=foo traceID=%s sampled=true latency=slow query_type=series splits=0 start=.* end=.* start_delta=1h0m0.* end_delta=.* length=1h0m0s duration=25.25s status=200 match=\"{container_name=.*\"}:{app=.*}\" query_hash=23523089 total_entries=10 cache_series_results_req=2 cache_series_results_hit=1 cache_series_results_stored=1 cache_series_results_download_time=80ns cache_series_results_query_length_served=10ns\n",
sp.Context().(jaeger.SpanContext).SpanID().String(),
sp.SpanContext().TraceID(),
),
buf.String())
util_log.Logger = log.NewNopLogger()

@ -8,7 +8,7 @@ import (
"path"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go"
"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace"
"github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase"
)
@ -30,8 +30,6 @@ func NewDownstreamRoundTripper(downstreamURL string, transport http.RoundTripper
}
func (d downstreamRoundTripper) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx)
var r *http.Request
r, err := d.codec.EncodeRequest(ctx, req)
@ -42,13 +40,7 @@ func (d downstreamRoundTripper) Do(ctx context.Context, req queryrangebase.Reque
return nil, err
}
if tracer != nil && span != nil {
carrier := opentracing.HTTPHeadersCarrier(r.Header)
err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)
if err != nil {
return nil, err
}
}
otelhttptrace.Inject(ctx, r)
r.URL.Scheme = d.downstreamURL.Scheme
r.URL.Host = d.downstreamURL.Host

@ -11,10 +11,11 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/services"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/dskit/tenant"
@ -26,6 +27,8 @@ import (
lokigrpc "github.com/grafana/loki/v3/pkg/util/httpgrpc"
)
var tracer = otel.Tracer("pkg/lokifrontend/frontend/v1")
var errTooManyRequest = httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests")
// Config for a Frontend.
@ -74,7 +77,7 @@ type Frontend struct {
type request struct {
enqueueTime time.Time
queueSpan opentracing.Span
queueSpan trace.Span
originalCtx context.Context
request *httpgrpc.HTTPRequest
@ -151,14 +154,7 @@ func (f *Frontend) cleanupInactiveUserMetrics(user string) {
// RoundTripGRPC round trips a proto (instead of a HTTP request).
func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) {
// Propagate trace context in gRPC too - this will be ignored if using HTTP.
tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx)
if tracer != nil && span != nil {
carrier := (*lokigrpc.HeadersCarrier)(req)
err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)
if err != nil {
return nil, err
}
}
otel.GetTextMapPropagator().Inject(ctx, (*lokigrpc.HeadersCarrier)(req))
request := request{
request: req,
@ -209,7 +205,7 @@ func (f *Frontend) Process(server frontendv1pb.Frontend_ProcessServer) error {
req := reqWrapper.(*request)
f.queueDuration.Observe(time.Since(req.enqueueTime).Seconds())
req.queueSpan.Finish()
req.queueSpan.End()
/*
We want to dequeue the next unexpired request from the chosen tenant queue.
@ -313,7 +309,7 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error {
now := time.Now()
req.enqueueTime = now
req.queueSpan, _ = opentracing.StartSpanFromContext(ctx, "queued")
_, req.queueSpan = tracer.Start(ctx, "queued")
joinedTenantID := tenant.JoinTenantIDs(tenantIDs)
f.activeUsers.UpdateUserTimestamp(joinedTenantID, now)
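
Note: the manual tracer.Inject block is replaced by the globally registered propagator, which writes the active span context into whatever carrier wraps the outgoing request (here lokigrpc.HeadersCarrier, elsewhere plain HTTP headers). A minimal round-trip sketch with a map carrier:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

// propagateRoundTrip serializes the span context from ctx into a carrier and
// restores it on the "receiving" side, mirroring what the frontend does with
// the gRPC request headers.
func propagateRoundTrip(ctx context.Context) context.Context {
	carrier := propagation.MapCarrier{}

	// Sender: with the composite TraceContext+Baggage propagator configured at
	// startup, this writes the W3C traceparent (and baggage) entries.
	otel.GetTextMapPropagator().Inject(ctx, carrier)

	// Receiver: rebuilds a context carrying the remote span context.
	return otel.GetTextMapPropagator().Extract(context.Background(), carrier)
}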

@ -16,15 +16,17 @@ import (
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/user"
otgrpc "github.com/opentracing-contrib/go-grpc"
"github.com/opentracing-contrib/go-stdlib/nethttp"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/config"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/propagation"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/trace/tracetest"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"google.golang.org/grpc"
@ -39,6 +41,24 @@ import (
"github.com/grafana/loki/v3/pkg/util/constants"
)
var (
spanExporter = tracetest.NewInMemoryExporter()
)
func init() {
otel.SetTracerProvider(
tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(spanExporter)),
),
)
// This is usually done in dskit's tracing package, but we are initializing a custom tracer provider above so we'll do this manually.
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator([]propagation.TextMapPropagator{
// w3c Propagator is the default propagator for opentelemetry
propagation.TraceContext{}, propagation.Baggage{},
}...))
}
const (
query = "/loki/api/v1/query_range?end=1536716898&query=sum%28container_memory_rss%29+by+%28namespace%29&start=1536673680&step=120"
responseBody = `{"status":"success","data":{"resultType":"Matrix","result":[{"metric":{"foo":"bar"},"values":[[1536673680,"137"],[1536673780,"137"]]}]}}`
@ -71,26 +91,22 @@ func TestFrontend(t *testing.T) {
}
func TestFrontendPropagateTrace(t *testing.T) {
closer, err := config.Configuration{}.InitGlobalTracer("test")
require.NoError(t, err)
defer closer.Close()
observedTraceID := make(chan string, 2)
handler := queryrangebase.HandlerFunc(func(ctx context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
sp := opentracing.SpanFromContext(ctx)
defer sp.Finish()
sp := trace.SpanFromContext(ctx)
traceID := fmt.Sprintf("%v", sp.Context().(jaeger.SpanContext).TraceID())
traceID := sp.SpanContext().TraceID().String()
observedTraceID <- traceID
return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}, Version: uint32(loghttp.VersionV1)}, nil
})
test := func(addr string, _ *Frontend) {
sp, ctx := opentracing.StartSpanFromContext(context.Background(), "client")
defer sp.Finish()
traceID := fmt.Sprintf("%v", sp.Context().(jaeger.SpanContext).TraceID())
ctx, sp := tracesdk.NewTracerProvider().Tracer("test").Start(context.Background(), "client")
defer sp.End()
traceID := sp.SpanContext().TraceID().String()
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/%s", addr, labelQuery), nil)
require.NoError(t, err)
@ -98,12 +114,10 @@ func TestFrontendPropagateTrace(t *testing.T) {
err = user.InjectOrgIDIntoHTTPRequest(user.InjectOrgID(ctx, "1"), req)
require.NoError(t, err)
req, tr := nethttp.TraceRequest(opentracing.GlobalTracer(), req)
defer tr.Finish()
client := http.Client{
Transport: &nethttp.Transport{},
Transport: otelhttp.NewTransport(nil),
}
resp, err := client.Do(req)
require.NoError(t, err)
require.Equal(t, 200, resp.StatusCode)
@ -253,7 +267,7 @@ func testFrontend(t *testing.T, config Config, handler queryrangebase.Handler, t
}()
grpcServer := grpc.NewServer(
grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer())),
grpc.StatsHandler(otelgrpc.NewServerHandler()),
)
defer grpcServer.GracefulStop()
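
Note: the test's tracing plumbing moves to the OTel contrib instrumentation: otelhttp wraps the HTTP client transport and injects trace context into outgoing requests, while the otelgrpc stats handler replaces the otgrpc server interceptor. Roughly that wiring in isolation:

package example

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"google.golang.org/grpc"
)

func newInstrumentedPair() (*http.Client, *grpc.Server) {
	// A nil base transport means http.DefaultTransport; the wrapper starts a
	// client span per request and injects its context into the headers.
	httpClient := &http.Client{Transport: otelhttp.NewTransport(nil)}

	// The stats handler extracts incoming trace context and opens a server
	// span per RPC, so no per-method interceptors are needed.
	grpcServer := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))

	return httpClient, grpcServer
}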

@ -19,10 +19,10 @@ import (
"github.com/grafana/dskit/netutil"
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/otel"
"go.uber.org/atomic"
"github.com/grafana/dskit/tenant"
@ -222,13 +222,7 @@ func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest)
tenantID := tenant.JoinTenantIDs(tenantIDs)
// Propagate trace context in gRPC too - this will be ignored if using HTTP.
tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx)
if tracer != nil && span != nil {
carrier := (*lokigrpc.HeadersCarrier)(req)
if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
return nil, err
}
}
otel.GetTextMapPropagator().Inject(ctx, (*lokigrpc.HeadersCarrier)(req))
ctx, cancel := context.WithCancel(ctx)
defer cancel()

@ -9,17 +9,18 @@ import (
"io"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/golang/snappy"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/loghttp/push"
"github.com/grafana/loki/v3/pkg/logproto"
@ -33,6 +34,8 @@ import (
"github.com/gogo/protobuf/proto"
)
var tracer = otel.Tracer("pkg/pattern/aggregation")
const (
defaultContentType = "application/x-protobuf"
defaultMaxReponseBufferLen = 1024
@ -172,11 +175,8 @@ func (p *Push) Stop() {
// buildPayload creates the snappy compressed protobuf to send to Loki
func (p *Push) buildPayload(ctx context.Context) ([]byte, error) {
sp, _ := opentracing.StartSpanFromContext(
ctx,
"patternIngester.aggregation.Push.buildPayload",
)
defer sp.Finish()
_, sp := tracer.Start(ctx, "patternIngester.aggregation.Push.buildPayload")
defer sp.End()
entries := p.entries.reset()
if len(entries) == 0 {
@ -243,13 +243,12 @@ func (p *Push) buildPayload(ctx context.Context) ([]byte, error) {
p.metrics.entriesPerPush.WithLabelValues(p.tenantID).Observe(float64(len(entries)))
p.metrics.servicesTracked.WithLabelValues(p.tenantID).Set(float64(serviceLimit))
sp.LogKV(
"event", "build aggregated metrics payload",
"num_service", len(entriesByStream),
"first_1k_services", strings.Join(services, ","),
"num_streams", len(streams),
"num_entries", len(entries),
)
sp.AddEvent("build aggregated metrics payload", trace.WithAttributes(
attribute.Int("num_service", len(entriesByStream)),
attribute.StringSlice("first_1k_services", services),
attribute.Int("num_streams", len(streams)),
attribute.Int("num_entries", len(entries)),
))
return payload, nil
}
@ -325,8 +324,8 @@ func (p *Push) send(ctx context.Context, payload []byte) (int, error) {
ctx, cancel := context.WithTimeout(ctx, p.httpClient.Timeout)
defer cancel()
sp, ctx := opentracing.StartSpanFromContext(ctx, "patternIngester.aggregation.Push.send")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "patternIngester.aggregation.Push.send")
defer sp.End()
req, err := http.NewRequestWithContext(ctx, "POST", p.lokiURL, bytes.NewReader(payload))
p.metrics.payloadSize.WithLabelValues(p.tenantID).Observe(float64(len(payload)))
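
Note: where the old code logged a one-off "event" key via sp.LogKV, the new code records a named span event with typed attributes. A standalone sketch of that translation, with names mirroring the payload event above:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// recordPayloadEvent attaches a point-in-time event to an already running
// span; events carry their own timestamp and attribute set.
func recordPayloadEvent(sp trace.Span, services []string, numStreams, numEntries int) {
	sp.AddEvent("build aggregated metrics payload", trace.WithAttributes(
		attribute.Int("num_service", len(services)),
		// Slices are first-class attribute values, so no manual strings.Join.
		attribute.StringSlice("first_1k_services", services),
		attribute.Int("num_streams", numStreams),
		attribute.Int("num_entries", numEntries),
	))
}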

@ -5,13 +5,13 @@ import (
"io"
"time"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/util/server"
"github.com/grafana/dskit/grpcclient"
"github.com/grafana/dskit/middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"google.golang.org/grpc"
@ -69,6 +69,7 @@ func NewClient(cfg Config, addr string) (HealthAndIngesterClient, error) {
return nil, err
}
opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
opts = append(opts, dialOpts...)
// nolint:staticcheck // grpc.Dial() has been deprecated; we'll address it before upgrading to gRPC 2.
@ -87,7 +88,6 @@ func instrumentation(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.StreamC
var unaryInterceptors []grpc.UnaryClientInterceptor
unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientQueryTagsInterceptor)
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor)
}
@ -96,7 +96,6 @@ func instrumentation(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.StreamC
var streamInterceptors []grpc.StreamClientInterceptor
streamInterceptors = append(streamInterceptors, cfg.GRCPStreamClientInterceptors...)
streamInterceptors = append(streamInterceptors, server.StreamClientQueryTagsInterceptor)
streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor)
}

@ -12,9 +12,11 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/multierror"
"github.com/grafana/dskit/ring"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/ingester"
"github.com/grafana/loki/v3/pkg/ingester/index"
@ -29,6 +31,8 @@ import (
lokiring "github.com/grafana/loki/v3/pkg/util/ring"
)
var tracer = otel.Tracer("pkg/pattern")
const indexShards = 32
// instance is a tenant instance of the pattern ingester.
@ -257,17 +261,13 @@ func (i *instance) Observe(ctx context.Context, stream string, entries []logprot
i.aggMetricsLock.Lock()
defer i.aggMetricsLock.Unlock()
sp, _ := opentracing.StartSpanFromContext(
ctx,
"patternIngester.Observe",
)
defer sp.Finish()
_, sp := tracer.Start(ctx, "patternIngester.Observe")
defer sp.End()
sp.LogKV(
"event", "observe stream for metrics",
"stream", stream,
"entries", len(entries),
)
sp.AddEvent("observe stream for metrics", trace.WithAttributes(
attribute.String("stream", stream),
attribute.Int("entries", len(entries)),
))
for _, entry := range entries {
lvl := constants.LogLevelUnknown

@ -13,12 +13,12 @@ import (
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/tenant"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
"github.com/thanos-io/objstore"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/engine"
"github.com/grafana/loki/v3/pkg/loghttp"
@ -30,6 +30,7 @@ import (
querier_limits "github.com/grafana/loki/v3/pkg/querier/limits"
"github.com/grafana/loki/v3/pkg/querier/queryrange"
index_stats "github.com/grafana/loki/v3/pkg/storage/stores/index/stats"
"github.com/grafana/loki/v3/pkg/tracing"
"github.com/grafana/loki/v3/pkg/util/constants"
"github.com/grafana/loki/v3/pkg/util/httpreq"
utillog "github.com/grafana/loki/v3/pkg/util/log"
@ -144,9 +145,8 @@ func (q *QuerierAPI) LabelHandler(ctx context.Context, req *logproto.LabelReques
resLength = len(resp.Values)
}
statResult := statsCtx.Result(time.Since(start), queueTime, resLength)
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(statResult.KVList()...)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(tracing.KeyValuesToOTelAttributes(statResult.KVList())...)
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordLabelQueryMetrics(ctx, utillog.Logger, *req.Start, *req.End, req.Name, req.Query, strconv.Itoa(status), statResult)
@ -195,9 +195,8 @@ func (q *QuerierAPI) SeriesHandler(ctx context.Context, req *logproto.SeriesRequ
}
statResult := statsCtx.Result(time.Since(start), queueTime, resLength)
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(statResult.KVList()...)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(tracing.KeyValuesToOTelAttributes(statResult.KVList())...)
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordSeriesQueryMetrics(ctx, utillog.Logger, req.Start, req.End, req.Groups, strconv.Itoa(status), req.GetShards(), statResult)
@ -222,9 +221,8 @@ func (q *QuerierAPI) IndexStatsHandler(ctx context.Context, req *loghttp.RangeQu
queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration)
statResult := statsCtx.Result(time.Since(start), queueTime, 1)
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(statResult.KVList()...)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(tracing.KeyValuesToOTelAttributes(statResult.KVList())...)
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordStatsQueryMetrics(ctx, utillog.Logger, req.Start, req.End, req.Query, strconv.Itoa(status), statResult)
@ -250,9 +248,8 @@ func (q *QuerierAPI) IndexShardsHandler(ctx context.Context, req *loghttp.RangeQ
statResult := statsCtx.Result(time.Since(start), queueTime, resLength)
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(statResult.KVList()...)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(tracing.KeyValuesToOTelAttributes(statResult.KVList())...)
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordShardsQueryMetrics(
@ -283,9 +280,8 @@ func (q *QuerierAPI) VolumeHandler(ctx context.Context, req *logproto.VolumeRequ
queueTime, _ := ctx.Value(httpreq.QueryQueueTimeHTTPHeader).(time.Duration)
statResult := statsCtx.Result(time.Since(start), queueTime, 1)
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(statResult.KVList()...)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(tracing.KeyValuesToOTelAttributes(statResult.KVList())...)
status, _ := serverutil.ClientHTTPStatusAndError(err)
logql.RecordVolumeQueryMetrics(ctx, utillog.Logger, req.From.Time(), req.Through.Time(), req.GetQuery(), uint32(req.GetLimit()), time.Duration(req.GetStep()), strconv.Itoa(status), statResult)
@ -429,8 +425,9 @@ func (q *QuerierAPI) DetectedLabelsHandler(ctx context.Context, req *logproto.De
func WrapQuerySpanAndTimeout(call string, limits querier_limits.Limits) middleware.Interface {
return middleware.Func(func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
sp, ctx := opentracing.StartSpanFromContext(req.Context(), call)
defer sp.Finish()
ctx, sp := tracer.Start(req.Context(), call)
defer sp.End()
log := spanlogger.FromContext(req.Context(), utillog.Logger)
defer log.Finish()
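
Note: the repeated `if sp := opentracing.SpanFromContext(ctx); sp != nil` guards disappear because trace.SpanFromContext never returns nil: without a span in the context it hands back a no-op implementation, so attribute calls are always safe. In isolation:

package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func recordResultLength(ctx context.Context, n int) {
	// Returns the active span, or a no-op span if ctx carries none.
	sp := trace.SpanFromContext(ctx)
	sp.SetAttributes(attribute.Int("result_length", n))
}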

@ -16,10 +16,12 @@ import (
"github.com/google/uuid"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/health/grpc_health_v1"
@ -45,6 +47,8 @@ import (
"github.com/grafana/loki/pkg/push"
)
var tracer = otel.Tracer("pkg/querier")
type interval struct {
start, end time.Time
}
@ -173,7 +177,7 @@ func (q *SingleTenantQuerier) SelectLogs(ctx context.Context, params logql.Selec
ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(params.Start, params.End)
sp := opentracing.SpanFromContext(ctx)
sp := trace.SpanFromContext(ctx)
iters := []iter.EntryIterator{}
if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil {
// Make a copy of the request before modifying
@ -184,11 +188,9 @@ func (q *SingleTenantQuerier) SelectLogs(ctx context.Context, params logql.Selec
}
newParams.Start = ingesterQueryInterval.start
newParams.End = ingesterQueryInterval.end
if sp != nil {
sp.LogKV(
"msg", "querying ingester",
"params", newParams)
}
sp.AddEvent("querying ingester", trace.WithAttributes(
attribute.Stringer("params", newParams),
))
ingesterIters, err := q.ingesterQuerier.SelectLogs(ctx, newParams)
if err != nil {
return nil, err
@ -200,11 +202,9 @@ func (q *SingleTenantQuerier) SelectLogs(ctx context.Context, params logql.Selec
if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil {
params.Start = storeQueryInterval.start
params.End = storeQueryInterval.end
if sp != nil {
sp.LogKV(
"msg", "querying store",
"params", params)
}
sp.AddEvent("querying store", trace.WithAttributes(
attribute.Stringer("params", params),
))
storeIter, err := q.store.SelectLogs(ctx, params)
if err != nil {
return nil, err
@ -662,8 +662,8 @@ func (q *SingleTenantQuerier) IndexShards(
}
func (q *SingleTenantQuerier) Volume(ctx context.Context, req *logproto.VolumeRequest) (*logproto.VolumeResponse, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "Querier.Volume")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "Querier.Volume")
defer sp.End()
userID, err := tenant.TenantID(ctx)
if err != nil {
@ -680,14 +680,14 @@ func (q *SingleTenantQuerier) Volume(ctx context.Context, req *logproto.VolumeRe
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(queryTimeout))
defer cancel()
sp.LogKV(
"user", userID,
"from", req.From.Time(),
"through", req.Through.Time(),
"matchers", syntax.MatchersString(matchers),
"limit", req.Limit,
"targetLabels", req.TargetLabels,
"aggregateBy", req.AggregateBy,
sp.SetAttributes(
attribute.String("user", userID),
attribute.String("from", req.From.Time().String()),
attribute.String("through", req.Through.Time().String()),
attribute.String("matchers", syntax.MatchersString(matchers)),
attribute.Int("limit", int(req.Limit)),
attribute.StringSlice("targetLabels", req.TargetLabels),
attribute.String("aggregateBy", req.AggregateBy),
)
ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.From.Time(), req.Through.Time())

@ -18,11 +18,13 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/timestamp"
"go.opentelemetry.io/otel"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/loghttp"
"github.com/grafana/loki/v3/pkg/logproto"
@ -88,16 +90,16 @@ func (r *LokiRequest) WithShards(shards logql.Shards) *LokiRequest {
return &clone
}
func (r *LokiRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("query", r.GetQuery()),
otlog.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
otlog.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
otlog.Int64("step (ms)", r.GetStep()),
otlog.Int64("interval (ms)", r.GetInterval()),
otlog.Int64("limit", int64(r.GetLimit())),
otlog.String("direction", r.GetDirection().String()),
otlog.String("shards", strings.Join(r.GetShards(), ",")),
func (r *LokiRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", r.GetQuery()),
attribute.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
attribute.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
attribute.Int64("step (ms)", r.GetStep()),
attribute.Int64("interval (ms)", r.GetInterval()),
attribute.Int64("limit", int64(r.GetLimit())),
attribute.String("direction", r.GetDirection().String()),
attribute.String("shards", strings.Join(r.GetShards(), ",")),
)
}
@ -136,13 +138,13 @@ func (r *LokiInstantRequest) WithShards(shards logql.Shards) *LokiInstantRequest
return &clone
}
func (r *LokiInstantRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("query", r.GetQuery()),
otlog.String("ts", timestamp.Time(r.GetStart().UnixMilli()).String()),
otlog.Int64("limit", int64(r.GetLimit())),
otlog.String("direction", r.GetDirection().String()),
otlog.String("shards", strings.Join(r.GetShards(), ",")),
func (r *LokiInstantRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", r.GetQuery()),
attribute.String("ts", timestamp.Time(r.GetStart().UnixMilli()).String()),
attribute.Int64("limit", int64(r.GetLimit())),
attribute.String("direction", r.GetDirection().String()),
attribute.String("shards", strings.Join(r.GetShards(), ",")),
)
}
@ -179,12 +181,12 @@ func (r *LokiSeriesRequest) GetStep() int64 {
return 0
}
func (r *LokiSeriesRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("matchers", strings.Join(r.GetMatch(), ",")),
otlog.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
otlog.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
otlog.String("shards", strings.Join(r.GetShards(), ",")),
func (r *LokiSeriesRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("matchers", strings.Join(r.GetMatch(), ",")),
attribute.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
attribute.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
attribute.String("shards", strings.Join(r.GetShards(), ",")),
)
}
@ -251,10 +253,10 @@ func (r *LabelRequest) WithQuery(query string) queryrangebase.Request {
return &clone
}
func (r *LabelRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
otlog.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
func (r *LabelRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
attribute.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
)
}
@ -311,10 +313,10 @@ func (r *DetectedLabelsRequest) WithQuery(query string) queryrangebase.Request {
return &clone
}
func (r *DetectedLabelsRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
otlog.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
func (r *DetectedLabelsRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
attribute.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
)
}
@ -732,13 +734,7 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht
header.Set(user.OrgIDHeaderName, orgID)
// Propagate trace context in request.
tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx)
if tracer != nil && span != nil {
carrier := opentracing.HTTPHeadersCarrier(header)
if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
return nil, err
}
}
otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(header))
switch request := r.(type) {
case *LokiRequest:
@ -1293,8 +1289,9 @@ func (Codec) EncodeResponse(ctx context.Context, req *http.Request, res queryran
}
func encodeResponseJSON(ctx context.Context, version loghttp.Version, res queryrangebase.Response, encodeFlags httpreq.EncodingFlags) (*http.Response, error) {
sp, _ := opentracing.StartSpanFromContext(ctx, "codec.EncodeResponse")
defer sp.Finish()
_, sp := tracer.Start(ctx, "codec.EncodeResponse")
defer sp.End()
var buf bytes.Buffer
err := encodeResponseJSONTo(version, res, &buf, encodeFlags)
@ -1302,7 +1299,7 @@ func encodeResponseJSON(ctx context.Context, version loghttp.Version, res queryr
return nil, err
}
sp.LogFields(otlog.Int("bytes", buf.Len()))
sp.SetAttributes(attribute.Int("bytes", buf.Len()))
resp := http.Response{
Header: http.Header{
@ -1390,8 +1387,8 @@ func encodeResponseJSONTo(version loghttp.Version, res queryrangebase.Response,
}
func encodeResponseProtobuf(ctx context.Context, res queryrangebase.Response) (*http.Response, error) {
sp, _ := opentracing.StartSpanFromContext(ctx, "codec.EncodeResponse")
defer sp.Finish()
_, sp := tracer.Start(ctx, "codec.EncodeResponse")
defer sp.End()
p, err := QueryResponseWrap(res)
if err != nil {
@ -2369,15 +2366,15 @@ func (r *DetectedFieldsRequest) WithQuery(query string) queryrangebase.Request {
return &clone
}
func (r *DetectedFieldsRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
otlog.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
otlog.String("query", r.GetQuery()),
otlog.Int64("step (ms)", r.GetStep()),
otlog.Int64("line_limit", int64(r.GetLineLimit())),
otlog.Int64("limit", int64(r.GetLimit())),
otlog.String("step", fmt.Sprintf("%d", r.GetStep())),
func (r *DetectedFieldsRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("start", timestamp.Time(r.GetStart().UnixMilli()).String()),
attribute.String("end", timestamp.Time(r.GetEnd().UnixMilli()).String()),
attribute.String("query", r.GetQuery()),
attribute.Int64("step (ms)", r.GetStep()),
attribute.Int64("line_limit", int64(r.GetLineLimit())),
attribute.Int64("limit", int64(r.GetLimit())),
attribute.String("step", fmt.Sprintf("%d", r.GetStep())),
)
}

@ -17,13 +17,15 @@ import (
"github.com/axiomhq/hyperloglog"
"github.com/gorilla/mux"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go/mocktracer"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/trace/tracetest"
"github.com/grafana/loki/v3/pkg/loghttp"
"github.com/grafana/loki/v3/pkg/logproto"
@ -569,19 +571,29 @@ func TestLokiRequestSpanLogging(t *testing.T) {
EndTs: end,
}
span := mocktracer.MockSpan{}
req.LogToSpan(&span)
for _, l := range span.Logs() {
for _, field := range l.Fields {
if field.Key == "start" {
require.Equal(t, timestamp.Time(now.UnixMilli()).String(), field.ValueString)
}
if field.Key == "end" {
require.Equal(t, timestamp.Time(end.UnixMilli()).String(), field.ValueString)
}
exporter := tracetest.NewInMemoryExporter()
tp := tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
)
_, sp := tp.Tracer("test").Start(context.Background(), "request")
req.LogToSpan(sp)
sp.End()
spans := exporter.GetSpans()
require.Len(t, spans, 1)
span := spans[0]
found := 0
for _, l := range span.Attributes {
if l.Key == "start" {
require.Equal(t, attribute.StringValue(timestamp.Time(now.UnixMilli()).String()), l.Value)
found++
}
if l.Key == "end" {
require.Equal(t, attribute.StringValue(timestamp.Time(end.UnixMilli()).String()), l.Value)
found++
}
}
require.Equal(t, 2, found, "expected to find start and end attributes in span")
}
func TestLokiInstantRequestSpanLogging(t *testing.T) {
@ -590,16 +602,25 @@ func TestLokiInstantRequestSpanLogging(t *testing.T) {
TimeTs: now,
}
span := mocktracer.MockSpan{}
req.LogToSpan(&span)
for _, l := range span.Logs() {
for _, field := range l.Fields {
if field.Key == "ts" {
require.Equal(t, timestamp.Time(now.UnixMilli()).String(), field.ValueString)
}
exporter := tracetest.NewInMemoryExporter()
tp := tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
)
_, sp := tp.Tracer("test").Start(context.Background(), "request")
req.LogToSpan(sp)
sp.End()
spans := exporter.GetSpans()
require.Len(t, spans, 1)
span := spans[0]
found := 0
for _, l := range span.Attributes {
if l.Key == "ts" {
require.Equal(t, attribute.StringValue(timestamp.Time(now.UnixMilli()).String()), l.Value)
found++
}
}
require.Equal(t, 1, found, "expected to find ts attribute in span")
}
func TestLokiSeriesRequestSpanLogging(t *testing.T) {
@ -610,19 +631,29 @@ func TestLokiSeriesRequestSpanLogging(t *testing.T) {
EndTs: end,
}
span := mocktracer.MockSpan{}
req.LogToSpan(&span)
for _, l := range span.Logs() {
for _, field := range l.Fields {
if field.Key == "start" {
require.Equal(t, timestamp.Time(now.UnixMilli()).String(), field.ValueString)
}
if field.Key == "end" {
require.Equal(t, timestamp.Time(end.UnixMilli()).String(), field.ValueString)
}
exporter := tracetest.NewInMemoryExporter()
tp := tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
)
_, sp := tp.Tracer("test").Start(context.Background(), "request")
req.LogToSpan(sp)
sp.End()
spans := exporter.GetSpans()
require.Len(t, spans, 1)
span := spans[0]
found := 0
for _, l := range span.Attributes {
if l.Key == "start" {
require.Equal(t, attribute.StringValue(timestamp.Time(now.UnixMilli()).String()), l.Value)
found++
}
if l.Key == "end" {
require.Equal(t, attribute.StringValue(timestamp.Time(end.UnixMilli()).String()), l.Value)
found++
}
}
require.Equal(t, 2, found, "expected to find start and end attributes in span")
}
func TestLabelRequestSpanLogging(t *testing.T) {
@ -635,19 +666,29 @@ func TestLabelRequestSpanLogging(t *testing.T) {
},
}
span := mocktracer.MockSpan{}
req.LogToSpan(&span)
for _, l := range span.Logs() {
for _, field := range l.Fields {
if field.Key == "start" {
require.Equal(t, timestamp.Time(now.UnixMilli()).String(), field.ValueString)
}
if field.Key == "end" {
require.Equal(t, timestamp.Time(end.UnixMilli()).String(), field.ValueString)
}
exporter := tracetest.NewInMemoryExporter()
tp := tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
)
_, sp := tp.Tracer("test").Start(context.Background(), "request")
req.LogToSpan(sp)
sp.End()
spans := exporter.GetSpans()
require.Len(t, spans, 1)
span := spans[0]
found := 0
for _, l := range span.Attributes {
if l.Key == "start" {
require.Equal(t, attribute.StringValue(timestamp.Time(now.UnixMilli()).String()), l.Value)
found++
}
if l.Key == "end" {
require.Equal(t, attribute.StringValue(timestamp.Time(end.UnixMilli()).String()), l.Value)
found++
}
}
require.Equal(t, 2, found, "expected to find start and end attributes in span")
}
func Test_codec_DecodeProtobufResponseParity(t *testing.T) {

@ -8,10 +8,11 @@ import (
"github.com/grafana/dskit/concurrency"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/logql"
"github.com/grafana/loki/v3/pkg/logql/syntax"
@ -141,9 +142,16 @@ func (in instance) Downstream(ctx context.Context, queries []logql.DownstreamQue
} else {
req = ParamsToLokiRequest(qry.Params).WithQuery(qry.Params.GetExpression().String())
}
sp, ctx := opentracing.StartSpanFromContext(ctx, "DownstreamHandler.instance")
defer sp.Finish()
sp.LogKV("shards", fmt.Sprintf("%+v", qry.Params.Shards()), "query", req.GetQuery(), "start", req.GetStart(), "end", req.GetEnd(), "step", req.GetStep(), "handler", reflect.TypeOf(in.handler), "engine", "downstream")
ctx, sp := tracer.Start(ctx, "DownstreamHandler.instance", trace.WithAttributes(
attribute.String("shards", fmt.Sprintf("%+v", qry.Params.Shards())),
attribute.String("query", req.GetQuery()),
attribute.String("start", req.GetStart().String()),
attribute.String("end", req.GetEnd().String()),
attribute.Int64("step", req.GetStep()),
attribute.String("handler", reflect.TypeOf(in.handler).String()),
attribute.String("engine", "downstream"),
))
defer sp.End()
res, err := in.handler.Do(ctx, req)
if err != nil {

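Note: when all attribute values are known before the work starts, they can be supplied at span creation through trace.WithAttributes, as the downstreamer now does, instead of a separate SetAttributes call. A reduced sketch:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

var tracer = otel.Tracer("example")

func runDownstream(ctx context.Context, query string, shards []string, do func(context.Context) error) error {
	ctx, sp := tracer.Start(ctx, "DownstreamHandler.instance", trace.WithAttributes(
		attribute.String("query", query),
		attribute.StringSlice("shards", shards),
		attribute.String("engine", "downstream"),
	))
	defer sp.End()

	// The returned ctx carries the new span, so anything do() traces nests under it.
	return do(ctx)
}
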
@ -8,7 +8,6 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/instrument"
"github.com/grafana/dskit/middleware"
"github.com/opentracing/opentracing-go"
"github.com/grafana/dskit/server"
@ -63,8 +62,9 @@ func (t Tracer) Wrap(next queryrangebase.Handler) queryrangebase.Handler {
return queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
route := DefaultCodec.Path(r)
route = middleware.MakeLabelValue(route)
span, ctx := opentracing.StartSpanFromContext(ctx, route)
defer span.Finish()
ctx, span := tracer.Start(ctx, route)
defer span.End()
return next.Do(ctx, r)
})
}

@ -15,12 +15,12 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/semaphore"
"github.com/grafana/loki/v3/pkg/logproto"
@ -150,8 +150,9 @@ func NewLimitsMiddleware(l Limits) queryrangebase.Middleware {
}
func (l limitsMiddleware) Do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "limits")
defer span.Finish()
ctx, span := tracer.Start(ctx, "limits")
defer span.End()
log := spanlogger.FromContext(ctx, util_log.Logger)
defer log.Finish()
@ -276,8 +277,8 @@ func NewQuerySizeLimiterMiddleware(
// - {job="foo"}
// - {job="bar"}
func (q *querySizeLimiter) getBytesReadForRequest(ctx context.Context, r queryrangebase.Request) (uint64, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "querySizeLimiter.getBytesReadForRequest")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "querySizeLimiter.getBytesReadForRequest")
defer sp.End()
expr, err := syntax.ParseExpr(r.GetQuery())
if err != nil {
@ -540,11 +541,8 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque
cancel()
}()
span := opentracing.SpanFromContext(ctx)
if span != nil {
request.LogToSpan(span)
}
span := trace.SpanFromContext(ctx)
request.LogToSpan(span)
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
@ -580,12 +578,10 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque
return nil, fmt.Errorf("could not acquire work: %w", err)
}
if span != nil {
span.LogFields(
otlog.String("wait_time", elapsed.String()),
otlog.Int64("max_parallelism", int64(parallelism)),
)
}
span.SetAttributes(
attribute.String("wait_time", elapsed.String()),
attribute.Int64("max_parallelism", int64(parallelism)),
)
defer semWithTiming.sem.Release(int64(1))

@ -11,7 +11,6 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
@ -82,8 +81,9 @@ type logResultCache struct {
}
func (l *logResultCache) Do(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "logResultCache.Do")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "logResultCache.Do")
defer sp.End()
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())

@ -13,8 +13,9 @@ import (
"github.com/gogo/status"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/user"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/prometheus/promql"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/propagation"
"google.golang.org/grpc/codes"
"github.com/grafana/loki/v3/pkg/loghttp"
@ -470,14 +471,7 @@ func (Codec) QueryRequestWrap(ctx context.Context, r queryrangebase.Request) (*Q
result.Metadata[user.OrgIDHeaderName] = orgID
// Tracing
tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx)
if tracer != nil && span != nil {
carrier := opentracing.TextMapCarrier(result.Metadata)
err := tracer.Inject(span.Context(), opentracing.TextMap, carrier)
if err != nil {
return nil, err
}
}
otel.GetTextMapPropagator().Inject(ctx, propagation.MapCarrier(result.Metadata))
return result, nil
}

@ -6,7 +6,6 @@ import (
"io"
"net/http"
"github.com/opentracing/opentracing-go"
"github.com/parquet-go/parquet-go"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser"
@ -17,8 +16,8 @@ import (
)
func encodeResponseParquet(ctx context.Context, res queryrangebase.Response) (*http.Response, error) {
sp, _ := opentracing.StartSpanFromContext(ctx, "codec.EncodeResponse")
defer sp.Finish()
_, sp := tracer.Start(ctx, "codec.EncodeResponse")
defer sp.End()
var buf bytes.Buffer

@ -7,9 +7,10 @@ import (
"net/http"
jsoniter "github.com/json-iterator/go"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/common/model"
"go.opentelemetry.io/otel"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/loghttp"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
@ -17,6 +18,8 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache"
)
var tracer = otel.Tracer("pkg/querier/queryrange")
var (
jsonStd = jsoniter.ConfigCompatibleWithStandardLibrary
extractor = queryrangebase.PrometheusResponseExtractor{}
@ -43,7 +46,6 @@ func (PrometheusExtractor) ResponseWithoutHeaders(resp queryrangebase.Response)
// encode encodes a Prometheus response and injects Loki stats.
func (p *LokiPromResponse) encode(ctx context.Context) (*http.Response, error) {
sp := opentracing.SpanFromContext(ctx)
var buf bytes.Buffer
err := p.encodeTo(&buf)
@ -51,9 +53,7 @@ func (p *LokiPromResponse) encode(ctx context.Context) (*http.Response, error) {
return nil, err
}
if sp != nil {
sp.LogFields(otlog.Int("bytes", buf.Len()))
}
trace.SpanFromContext(ctx).SetAttributes(attribute.Int("bytes", buf.Len()))
resp := http.Response{
Header: http.Header{

@ -6,7 +6,7 @@ import (
"time"
"github.com/gogo/protobuf/proto"
"github.com/opentracing/opentracing-go"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache"
)
@ -49,8 +49,8 @@ type Request interface {
WithStartEnd(start time.Time, end time.Time) Request
// WithQuery clone the current request with a different query.
WithQuery(string) Request
// LogToSpan writes information about this request to an OpenTracing span
LogToSpan(opentracing.Span)
// LogToSpan writes information about this request to an OTel span
LogToSpan(trace.Span)
}
type CachingOptions = resultscache.CachingOptions

@ -16,16 +16,18 @@ import (
"github.com/gogo/status"
"github.com/grafana/dskit/httpgrpc"
jsoniter "github.com/json-iterator/go"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/timestamp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache"
)
var tracer = otel.Tracer("pkg/querier/queryrange/queryrangebase")
// StatusSuccess Prometheus success result.
const StatusSuccess = "success"
@ -83,12 +85,12 @@ func (q *PrometheusRequest) WithQuery(query string) Request {
}
// LogToSpan logs the current `PrometheusRequest` parameters to the specified span.
func (q *PrometheusRequest) LogToSpan(sp opentracing.Span) {
sp.LogFields(
otlog.String("query", q.GetQuery()),
otlog.String("start", timestamp.Time(q.GetStart().UnixMilli()).String()),
otlog.String("end", timestamp.Time(q.GetEnd().UnixMilli()).String()),
otlog.Int64("step (ms)", q.GetStep()),
func (q *PrometheusRequest) LogToSpan(sp trace.Span) {
sp.SetAttributes(
attribute.String("query", q.GetQuery()),
attribute.String("start", timestamp.Time(q.GetStart().UnixMilli()).String()),
attribute.String("end", timestamp.Time(q.GetEnd().UnixMilli()).String()),
attribute.Int64("step (ms)", q.GetStep()),
)
}
@ -205,15 +207,15 @@ func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ R
body, _ := io.ReadAll(r.Body)
return nil, httpgrpc.Errorf(r.StatusCode, "%s", string(body))
}
sp, ctx := opentracing.StartSpanFromContext(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck
defer sp.Finish()
_, sp := tracer.Start(ctx, "ParseQueryRangeResponse")
defer sp.End() //nolint:ineffassign,staticcheck
buf, err := bodyBuffer(r)
if err != nil {
log.Error(err)
sp.RecordError(err)
return nil, err
}
sp.LogKV(otlog.Int("bytes", len(buf)))
sp.SetAttributes(attribute.Int("bytes", len(buf)))
var resp PrometheusResponse
if err := json.Unmarshal(buf, &resp); err != nil {
@ -251,22 +253,22 @@ func bodyBuffer(res *http.Response) ([]byte, error) {
// TODO(karsten): remove prometheusCodec from code base since only MergeResponse is used.
func (prometheusCodec) EncodeResponse(ctx context.Context, _ *http.Request, res Response) (*http.Response, error) {
sp, _ := opentracing.StartSpanFromContext(ctx, "APIResponse.ToHTTPResponse")
defer sp.Finish()
_, sp := tracer.Start(ctx, "APIResponse.ToHTTPResponse")
defer sp.End()
a, ok := res.(*PrometheusResponse)
if !ok {
return nil, httpgrpc.Errorf(http.StatusInternalServerError, "invalid response format")
}
sp.LogFields(otlog.Int("series", len(a.Data.Result)))
sp.SetAttributes(attribute.Int("series", len(a.Data.Result)))
b, err := json.Marshal(a)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusInternalServerError, "error encoding response: %v", err)
}
sp.LogFields(otlog.Int("bytes", len(b)))
sp.SetAttributes(attribute.Int("bytes", len(b)))
resp := http.Response{
Header: http.Header{

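Note: one behavioural detail in the decode path above: the opentracing-style error logging (log.Error(err)) becomes Span.RecordError, which attaches an exception event to the span. A sketch of the pattern; the SetStatus call is not part of this change, but it is the usual companion when the span as a whole should be marked failed:

package example

import (
	"context"
	"encoding/json"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
)

var tracer = otel.Tracer("example")

func decodeResponse(ctx context.Context, body []byte, out interface{}) error {
	_, sp := tracer.Start(ctx, "ParseQueryRangeResponse")
	defer sp.End()

	if err := json.Unmarshal(body, out); err != nil {
		sp.RecordError(err)                          // records an exception event
		sp.SetStatus(codes.Error, "decoding failed") // optional: flag the span itself
		return err
	}
	return nil
}
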
@ -10,10 +10,12 @@ import (
"time"
jsoniter "github.com/json-iterator/go"
"github.com/opentracing/opentracing-go/mocktracer"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/sdk/trace/tracetest"
"github.com/grafana/loki/v3/pkg/logproto"
)
@ -283,19 +285,29 @@ func TestPrometheusRequestSpanLogging(t *testing.T) {
End: end,
}
span := mocktracer.MockSpan{}
req.LogToSpan(&span)
exporter := tracetest.NewInMemoryExporter()
tp := tracesdk.NewTracerProvider(
tracesdk.WithSpanProcessor(tracesdk.NewSimpleSpanProcessor(exporter)),
)
_, sp := tp.Tracer("test").Start(context.Background(), "request")
req.LogToSpan(sp)
sp.End()
for _, l := range span.Logs() {
for _, field := range l.Fields {
if field.Key == "start" {
require.Equal(t, timestamp.Time(now.UnixMilli()).String(), field.ValueString)
}
if field.Key == "end" {
require.Equal(t, timestamp.Time(end.UnixMilli()).String(), field.ValueString)
}
spans := exporter.GetSpans()
require.Len(t, spans, 1)
span := spans[0]
found := 0
for _, l := range span.Attributes {
if l.Key == "start" {
require.Equal(t, attribute.StringValue(timestamp.Time(now.UnixMilli()).String()), l.Value)
found++
}
if l.Key == "end" {
require.Equal(t, attribute.StringValue(timestamp.Time(end.UnixMilli()).String()), l.Value)
found++
}
}
require.Equal(t, 2, found, "expected to find start and end attributes in span")
}
func mustParse(t *testing.T, response string) Response {

@ -3,8 +3,6 @@ package queryrange
import (
"net/http"
"github.com/opentracing/opentracing-go"
"github.com/grafana/loki/v3/pkg/loghttp"
"github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/v3/pkg/util/httpreq"
@ -27,8 +25,8 @@ func NewSerializeRoundTripper(next queryrangebase.Handler, codec queryrangebase.
func (rt *serializeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
ctx := r.Context()
sp, ctx := opentracing.StartSpanFromContext(ctx, "serializeRoundTripper.do")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "serializeRoundTripper.do")
defer sp.End()
request, err := rt.codec.DecodeRequest(ctx, r, nil)
if err != nil {
@ -61,8 +59,8 @@ func NewSerializeHTTPHandler(next queryrangebase.Handler, codec queryrangebase.C
func (rt *serializeHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
sp, ctx := opentracing.StartSpanFromContext(ctx, "serializeHTTPHandler.ServerHTTP")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "serializeHTTPHandler.ServerHTTP")
defer sp.End()
request, err := rt.codec.DecodeRequest(ctx, r, nil)
if err != nil {

@ -3,7 +3,7 @@ package queryrange
import (
"context"
"fmt"
strings "strings"
"strings"
"time"
"github.com/dustin/go-humanize"
@ -12,7 +12,6 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/grafana/loki/v3/pkg/logproto"
@ -142,8 +141,8 @@ func getStatsForMatchers(
}
func (r *dynamicShardResolver) GetStats(e syntax.Expr) (stats.Stats, error) {
sp, ctx := opentracing.StartSpanFromContext(r.ctx, "dynamicShardResolver.GetStats")
defer sp.Finish()
ctx, sp := tracer.Start(r.ctx, "dynamicShardResolver.GetStats")
defer sp.End()
start := time.Now()
@ -183,8 +182,9 @@ func (r *dynamicShardResolver) GetStats(e syntax.Expr) (stats.Stats, error) {
}
func (r *dynamicShardResolver) Shards(e syntax.Expr) (int, uint64, error) {
sp, ctx := opentracing.StartSpanFromContext(r.ctx, "dynamicShardResolver.Shards")
defer sp.Finish()
ctx, sp := tracer.Start(r.ctx, "dynamicShardResolver.Shards")
defer sp.End()
log := spanlogger.FromContext(ctx, r.logger)
defer log.Finish()

@ -7,12 +7,12 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/tenant"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql/syntax"
@ -154,11 +154,13 @@ func (h *splitByInterval) Process(
func (h *splitByInterval) loop(ctx context.Context, ch <-chan *lokiResult, next queryrangebase.Handler) {
for data := range ch {
sp, ctx := opentracing.StartSpanFromContext(ctx, "interval")
data.req.LogToSpan(sp)
ctx, sp := tracer.Start(ctx, "interval")
if sp.SpanContext().IsSampled() {
data.req.LogToSpan(sp)
}
resp, err := next.Do(ctx, data.req)
sp.Finish()
sp.End()
select {
case <-ctx.Done():
@ -201,9 +203,7 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que
return h.next.Do(ctx, r)
}
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogFields(otlog.Int("n_intervals", len(intervals)))
}
trace.SpanFromContext(ctx).SetAttributes(attribute.Int("n_intervals", len(intervals)))
if len(intervals) == 1 {
return h.next.Do(ctx, intervals[0])

@ -6,9 +6,9 @@ import (
"sort"
"strings"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel/attribute"
"github.com/grafana/dskit/tenant"
@ -371,120 +371,136 @@ func newInstrumentedStore(store Store, index int) *instrumentedStore {
}
func (s *instrumentedStore) SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".SelectSamples")
defer span.Finish()
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".SelectSamples")
defer span.End()
tenantID, _ := tenant.TenantID(ctx)
span.SetTag("tenantID", tenantID)
span.SetTag("start", req.Start)
span.SetTag("end", req.End)
span.SetTag("shards", req.Shards)
span.SetAttributes(
attribute.String("tenantID", tenantID),
attribute.String("start", req.Start.String()),
attribute.String("end", req.End.String()),
attribute.StringSlice("shards", req.Shards),
)
if req.Plan != nil && req.Plan.AST != nil {
span.SetTag("expr", req.Plan.AST.String())
span.SetAttributes(attribute.String("expr", req.Plan.AST.String()))
}
return s.Store.SelectSamples(ctx, req)
}
func (s *instrumentedStore) SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".SelectLogs")
defer span.Finish()
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".SelectLogs")
defer span.End()
tenantID, _ := tenant.TenantID(ctx)
span.SetTag("tenantID", tenantID)
span.SetTag("start", req.Start)
span.SetTag("end", req.End)
span.SetTag("shards", req.Shards)
span.SetTag("direction", req.Direction)
span.SetAttributes(
attribute.String("tenantID", tenantID),
attribute.String("start", req.Start.String()),
attribute.String("end", req.End.String()),
attribute.StringSlice("shards", req.Shards),
attribute.String("direction", req.Direction.String()),
)
if req.Plan != nil && req.Plan.AST != nil {
span.SetTag("expr", req.Plan.AST.String())
span.SetAttributes(attribute.String("expr", req.Plan.AST.String()))
}
return s.Store.SelectLogs(ctx, req)
}
func (s *instrumentedStore) SelectSeries(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".SelectSeries")
defer span.Finish()
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".SelectSeries")
defer span.End()
tenantID, _ := tenant.TenantID(ctx)
span.SetTag("tenantID", tenantID)
span.SetTag("start", req.Start)
span.SetTag("end", req.End)
span.SetTag("shards", req.Shards)
span.SetAttributes(
attribute.String("tenantID", tenantID),
attribute.String("start", req.Start.String()),
attribute.String("end", req.End.String()),
attribute.StringSlice("shards", req.Shards),
)
if req.Plan != nil && req.Plan.AST != nil {
span.SetTag("expr", req.Plan.AST.String())
span.SetAttributes(attribute.String("expr", req.Plan.AST.String()))
}
return s.Store.SelectSeries(ctx, req)
}
func (s *instrumentedStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".LabelValuesForMetricName")
defer span.Finish()
span.SetTag("tenantID", userID)
span.SetTag("from", from)
span.SetTag("through", through)
span.SetTag("metricName", metricName)
span.SetTag("labelName", labelName)
span.SetTag("matchers", stringifyMatchers(matchers))
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".LabelValuesForMetricName")
defer span.End()
span.SetAttributes(
attribute.String("tenantID", userID),
attribute.String("from", from.String()),
attribute.String("through", through.String()),
attribute.String("metricName", metricName),
attribute.String("labelName", labelName),
attribute.String("matchers", stringifyMatchers(matchers)),
)
return s.Store.LabelValuesForMetricName(ctx, userID, from, through, metricName, labelName, matchers...)
}
func (s *instrumentedStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, matchers ...*labels.Matcher) ([]string, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".LabelNamesForMetricName")
defer span.Finish()
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".LabelNamesForMetricName")
defer span.End()
span.SetTag("tenantID", userID)
span.SetTag("from", from)
span.SetTag("through", through)
span.SetTag("metricName", metricName)
span.SetTag("matchers", stringifyMatchers(matchers))
span.SetAttributes(
attribute.String("tenantID", userID),
attribute.String("from", from.String()),
attribute.String("through", through.String()),
attribute.String("metricName", metricName),
attribute.String("matchers", stringifyMatchers(matchers)),
)
return s.Store.LabelNamesForMetricName(ctx, userID, from, through, metricName, matchers...)
}
func (s *instrumentedStore) Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".Stats")
defer span.Finish()
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".Stats")
defer span.End()
span.SetTag("tenantID", userID)
span.SetTag("from", from)
span.SetTag("through", through)
span.SetTag("matchers", stringifyMatchers(matchers))
span.SetAttributes(
attribute.String("tenantID", userID),
attribute.String("from", from.String()),
attribute.String("through", through.String()),
attribute.String("matchers", stringifyMatchers(matchers)),
)
return s.Store.Stats(ctx, userID, from, through, matchers...)
}
func (s *instrumentedStore) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".Volume")
defer span.Finish()
span.SetTag("tenantID", userID)
span.SetTag("from", from)
span.SetTag("through", through)
span.SetTag("limit", limit)
span.SetTag("targetLabels", targetLabels)
span.SetTag("aggregateBy", aggregateBy)
span.SetTag("matchers", stringifyMatchers(matchers))
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".Volume")
defer span.End()
span.SetAttributes(
attribute.String("tenantID", userID),
attribute.String("from", from.String()),
attribute.String("through", through.String()),
attribute.Int("limit", int(limit)),
attribute.StringSlice("targetLabels", targetLabels),
attribute.String("aggregateBy", aggregateBy),
attribute.String("matchers", stringifyMatchers(matchers)),
)
return s.Store.Volume(ctx, userID, from, through, limit, targetLabels, aggregateBy, matchers...)
}
func (s *instrumentedStore) GetShards(ctx context.Context, userID string, from, through model.Time, targetBytesPerShard uint64, predicate chunk.Predicate) (*logproto.ShardsResponse, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "querier.Store."+s.name+".GetShards")
defer span.Finish()
span.SetTag("tenantID", userID)
span.SetTag("from", from)
span.SetTag("through", through)
span.SetTag("targetBytesPerShard", targetBytesPerShard)
span.SetTag("matchers", stringifyMatchers(predicate.Matchers))
ctx, span := tracer.Start(ctx, "querier.Store."+s.name+".GetShards")
defer span.End()
span.SetAttributes(
attribute.String("tenantID", userID),
attribute.String("from", from.String()),
attribute.String("through", through.String()),
attribute.Int64("targetBytesPerShard", int64(targetBytesPerShard)),
attribute.String("matchers", stringifyMatchers(predicate.Matchers)),
)
return s.Store.GetShards(ctx, userID, from, through, targetBytesPerShard, predicate)
}

@ -10,7 +10,6 @@ import (
"github.com/go-kit/log/level"
"github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc"
@ -120,16 +119,11 @@ func (fp *frontendProcessor) process(c frontendv1pb.Frontend_ProcessClient) erro
}
func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, statsEnabled bool, sendResponse func(response *httpgrpc.HTTPResponse, stats *querier_stats.Stats) error) {
tracer := opentracing.GlobalTracer()
// Ignore errors here. If we cannot get parent span, we just don't create new one.
parentSpanContext, _ := httpgrpcutil.GetParentSpanForHTTPRequest(tracer, request)
if parentSpanContext != nil {
queueSpan, spanCtx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "frontend_processor_runRequest", opentracing.ChildOf(parentSpanContext))
defer queueSpan.Finish()
ctx = spanCtx
}
ctx, queueSpan := tracer.Start(
httpgrpcutil.ExtractSpanFromHTTPRequest(ctx, request),
"frontend_processor_runRequest",
)
defer queueSpan.End()
var stats *querier_stats.Stats
if statsEnabled {

@ -19,9 +19,8 @@ import (
"github.com/grafana/dskit/ring/client"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/user"
otgrpc "github.com/opentracing-contrib/go-grpc"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.uber.org/atomic"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
@ -148,15 +147,13 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer
ctx := user.InjectOrgID(ctx, request.UserID)
sp.metrics.inflightRequests.Inc()
tracer := opentracing.GlobalTracer()
// Ignore errors here. If we cannot get parent span, we just don't create new one.
parentSpanContext, _ := httpgrpcutil.GetParentSpanForRequest(tracer, request)
if parentSpanContext != nil {
queueSpan, spanCtx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "querier_processor_runRequest", opentracing.ChildOf(parentSpanContext))
defer queueSpan.Finish()
ctx = spanCtx
}
ctx, queueSpan := tracer.Start(
httpgrpcutil.ExtractSpanFromRequest(ctx, request),
"querier_processor_runRequest",
)
defer queueSpan.End()
logger := util_log.WithContext(ctx, sp.log)
switch r := request.Request.(type) {
@ -308,13 +305,13 @@ func runPoolWithBackoff(
func (sp *schedulerProcessor) createFrontendClient(addr string) (client.PoolClient, error) {
opts, err := sp.grpcConfig.DialOption([]grpc.UnaryClientInterceptor{
otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
middleware.ClientUserHeaderInterceptor,
middleware.UnaryClientInstrumentInterceptor(sp.metrics.frontendClientRequestDuration),
}, nil, middleware.NoOpInvalidClusterValidationReporter)
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
// nolint:staticcheck // grpc.Dial() has been deprecated; we'll address it before upgrading to gRPC 2.
conn, err := grpc.Dial(addr, opts...)

@ -16,6 +16,7 @@ import (
"github.com/grafana/dskit/services"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"google.golang.org/grpc"
"github.com/grafana/loki/v3/pkg/querier/queryrange"
@ -23,6 +24,8 @@ import (
"github.com/grafana/loki/v3/pkg/util"
)
var tracer = otel.Tracer("pkg/querier/worker")
type Config struct {
FrontendAddress string `yaml:"frontend_address"`
SchedulerAddress string `yaml:"scheduler_address"`

@ -9,7 +9,6 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/user"
ot "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@ -17,6 +16,9 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier"
promRules "github.com/prometheus/prometheus/rules"
"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/net/context/ctxhttp"
"github.com/grafana/loki/v3/pkg/ruler/rulespb"
@ -227,11 +229,11 @@ func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string) (*notifie
if err := user.InjectOrgIDIntoHTTPRequest(ctx, req); err != nil {
return nil, err
}
// Jaeger complains the passed-in context has an invalid span ID, so start a new root span
sp := ot.GlobalTracer().StartSpan("notify", ot.Tag{Key: "organization", Value: userID})
defer sp.Finish()
ctx = ot.ContextWithSpan(ctx, sp)
_ = ot.GlobalTracer().Inject(sp.Context(), ot.HTTPHeaders, ot.HTTPHeadersCarrier(req.Header))
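// Deliberately start a root span from a fresh context (as the OpenTracing
// code did) and attach it to the request context for header injection.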
_, sp := tracer.Start(context.Background(), "notify", trace.WithAttributes(attribute.String("organization", userID)))
defer sp.End()
ctx = trace.ContextWithSpan(ctx, sp)
otelhttptrace.Inject(ctx, req)
return ctxhttp.Do(ctx, client, req)
},
}, log.With(r.logger, "user", userID))

@ -30,6 +30,7 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier"
promRules "github.com/prometheus/prometheus/rules"
"go.opentelemetry.io/otel"
"golang.org/x/sync/errgroup"
"github.com/grafana/dskit/tenant"
@ -42,6 +43,8 @@ import (
util_log "github.com/grafana/loki/v3/pkg/util/log"
)
var tracer = otel.Tracer("pkg/ruler/base")
var (
supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle}
supportedShardingAlgos = []string{util.ShardingAlgoByGroup, util.ShardingAlgoByRule}

@ -26,12 +26,11 @@ import (
"github.com/grafana/dskit/instrument"
"github.com/grafana/dskit/middleware"
"github.com/grafana/dskit/user"
otgrpc "github.com/opentracing-contrib/go-grpc"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
@ -189,10 +188,10 @@ func DialQueryFrontend(cfg *QueryFrontendConfig) (httpgrpc.HTTPClient, error) {
},
),
grpc.WithChainUnaryInterceptor(
otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
middleware.ClientUserHeaderInterceptor,
),
grpc.WithDefaultServiceConfig(serviceConfig),
grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
},
tlsDialOptions...,
)
@ -210,7 +209,7 @@ type Middleware func(ctx context.Context, req *httpgrpc.HTTPRequest) error
// Query performs a query for the given time.
func (r *RemoteEvaluator) Query(ctx context.Context, ch chan<- queryResponse, orgID, qs string, t time.Time) {
logger, ctx := spanlogger.New(ctx, r.logger, "ruler.remoteEvaluation.Query")
logger, ctx := spanlogger.NewOTel(ctx, r.logger, tracer, "ruler.remoteEvaluation.Query")
defer logger.Finish()
res, err := r.query(ctx, orgID, qs, t, logger)

@ -5,11 +5,14 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config"
"go.opentelemetry.io/otel"
ruler "github.com/grafana/loki/v3/pkg/ruler/base"
"github.com/grafana/loki/v3/pkg/ruler/rulestore"
)
var tracer = otel.Tracer("pkg/ruler")
func NewRuler(cfg Config, evaluator Evaluator, reg prometheus.Registerer, logger log.Logger, ruleStore rulestore.RuleStore, limits RulesLimits, metricsNamespace string) (*ruler.Ruler, error) {
// For backward compatibility, client and clients are defined in the remote_write config.
// When both are present, an error is thrown.

@ -19,11 +19,12 @@ import (
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/user"
otgrpc "github.com/opentracing-contrib/go-grpc"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"google.golang.org/grpc"
@ -38,6 +39,8 @@ import (
lokiring "github.com/grafana/loki/v3/pkg/util/ring"
)
var tracer = otel.Tracer("pkg/scheduler")
const (
// NumTokens is 1 since we only need to insert 1 token to be used for leader election purposes.
NumTokens = 1
@ -228,10 +231,10 @@ type schedulerRequest struct {
ctx context.Context
ctxCancel context.CancelFunc
queueSpan opentracing.Span
queueSpan trace.Span
// This is only used for testing.
parentSpanContext opentracing.SpanContext
parentSpanContext trace.SpanContext
}
// FrontendLoop handles connection from frontend.
@ -353,11 +356,7 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr
// Extract tracing information from headers in HTTP request. FrontendContext doesn't have the correct tracing
// information, since that is a long-running request.
tracer := opentracing.GlobalTracer()
parentSpanContext, err := lokigrpc.GetParentSpanForRequest(tracer, msg)
if err != nil && err != opentracing.ErrSpanContextNotFound {
return err
}
ctx = lokigrpc.ExtractSpanFromRequest(ctx, msg)
req := &schedulerRequest{
frontendAddress: frontendAddr,
@ -370,8 +369,8 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr
now := time.Now()
req.parentSpanContext = parentSpanContext
req.queueSpan, req.ctx = opentracing.StartSpanFromContextWithTracer(ctx, tracer, "queued", opentracing.ChildOf(parentSpanContext))
req.parentSpanContext = trace.SpanFromContext(ctx).SpanContext()
req.ctx, req.queueSpan = tracer.Start(ctx, "queued")
req.queueTime = now
req.ctxCancel = cancel
@ -446,7 +445,7 @@ func (s *Scheduler) QuerierLoop(querier schedulerpb.SchedulerForQuerier_QuerierL
reqQueueTime := time.Since(r.queueTime)
s.queueDuration.Observe(reqQueueTime.Seconds())
r.queueSpan.Finish()
r.queueSpan.End()
// Add HTTP header to the request containing the query queue time
if r.request != nil {
@ -547,7 +546,6 @@ func (s *Scheduler) forwardRequestToQuerier(querier schedulerpb.SchedulerForQuer
func (s *Scheduler) forwardErrorToFrontend(ctx context.Context, req *schedulerRequest, requestErr error) {
opts, err := s.cfg.GRPCClientConfig.DialOption([]grpc.UnaryClientInterceptor{
otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
middleware.ClientUserHeaderInterceptor,
},
nil, middleware.NoOpInvalidClusterValidationReporter)
@ -555,6 +553,7 @@ func (s *Scheduler) forwardErrorToFrontend(ctx context.Context, req *schedulerRe
level.Warn(s.log).Log("msg", "failed to create gRPC options for the connection to frontend to report error", "frontend", req.frontendAddress, "err", err, "requestErr", requestErr)
return
}
opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
// nolint:staticcheck // grpc.DialContext() has been deprecated; we'll address it before upgrading to gRPC 2.
conn, err := grpc.DialContext(ctx, req.frontendAddress, opts...)

@ -6,7 +6,8 @@ import (
"time"
"github.com/c2h5oh/datasize"
"github.com/opentracing/opentracing-go"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
"github.com/grafana/loki/v3/pkg/logproto"
@ -96,9 +97,9 @@ func (a *AsyncStore) GetChunks(ctx context.Context,
ingesterChunks, err = a.ingesterQuerier.GetChunkIDs(ctx, from, through, predicate.Matchers...)
if err == nil {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("ingester-chunks-count", len(ingesterChunks))
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(attribute.Int("ingester-chunks-count", len(ingesterChunks)))
level.Debug(util_log.Logger).Log("msg", "got chunk ids from ingester", "count", len(ingesterChunks))
}
return err
@ -172,8 +173,8 @@ func (a *AsyncStore) Stats(ctx context.Context, userID string, from, through mod
}
func (a *AsyncStore) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "AsyncStore.Volume")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "AsyncStore.Volume")
defer sp.End()
logger := util_log.WithContext(ctx, util_log.Logger)
matchersStr := syntax.MatchersString(matchers)
@ -215,12 +216,12 @@ func (a *AsyncStore) Volume(ctx context.Context, userID string, from, through mo
return nil, err
}
sp.LogKV(
"user", userID,
"from", from.Time(),
"through", through.Time(),
"matchers", syntax.MatchersString(matchers),
"limit", limit,
sp.SetAttributes(
attribute.String("user", userID),
attribute.String("from", from.Time().String()),
attribute.String("through", through.Time().String()),
attribute.String("matchers", syntax.MatchersString(matchers)),
attribute.Int("limit", int(limit)),
)
merged := seriesvolume.Merge(resps, limit)

@ -6,10 +6,10 @@ import (
"sync"
"github.com/go-kit/log/level"
opentracing "github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"github.com/grafana/loki/v3/pkg/util/constants"
@ -175,10 +175,12 @@ func (c *backgroundCache) Store(ctx context.Context, keys []string, bufs [][]byt
func (c *backgroundCache) failStore(ctx context.Context, size int, num int, reason string) {
c.droppedWriteBackBytes.Add(float64(size))
c.droppedWriteBack.Add(float64(num))
sp := opentracing.SpanFromContext(ctx)
if sp != nil {
sp.LogFields(otlog.String("reason", reason), otlog.Int("dropped", num), otlog.Int("dropped_bytes", size))
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(
attribute.String("reason", reason),
attribute.Int("dropped", num),
attribute.Int("dropped_bytes", size),
)
}
func (c *backgroundCache) writeBackLoop() {

@ -4,11 +4,11 @@ import (
"context"
instr "github.com/grafana/dskit/instrument"
ot "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/util/constants"
)
@ -74,14 +74,12 @@ func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]b
method := i.name + ".store"
return instr.CollectedRequest(ctx, method, i.requestDuration, instr.ErrorCode, func(ctx context.Context) error {
sp := ot.SpanFromContext(ctx)
if sp != nil {
sp.LogFields(otlog.Int("keys", len(keys)))
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(attribute.Int("keys", len(keys)))
storeErr := i.Cache.Store(ctx, keys, bufs)
if sp != nil && storeErr != nil {
ext.Error.Set(sp, true)
sp.LogFields(otlog.String("event", "error"), otlog.String("message", storeErr.Error()))
if storeErr != nil {
sp.SetStatus(codes.Error, storeErr.Error())
sp.RecordError(storeErr)
}
return storeErr
})
@ -97,19 +95,20 @@ func (i *instrumentedCache) Fetch(ctx context.Context, keys []string) ([]string,
)
err := instr.CollectedRequest(ctx, method, i.requestDuration, instr.ErrorCode, func(ctx context.Context) error {
sp := ot.SpanFromContext(ctx)
if sp != nil {
sp.LogFields(otlog.Int("keys requested", len(keys)))
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(attribute.Int("keys requested", len(keys)))
found, bufs, missing, fetchErr = i.Cache.Fetch(ctx, keys)
if sp != nil {
if fetchErr != nil {
ext.Error.Set(sp, true)
sp.LogFields(otlog.String("event", "error"), otlog.String("message", fetchErr.Error()))
}
sp.LogFields(otlog.Int("keys found", len(found)), otlog.Int("keys missing", len(keys)-len(found)))
if fetchErr != nil {
sp.SetStatus(codes.Error, fetchErr.Error())
sp.RecordError(fetchErr)
return fetchErr
}
return fetchErr
sp.SetAttributes(
attribute.Int("keys found", len(found)),
attribute.Int("keys missing", len(keys)-len(found)),
)
return nil
})
i.fetchedKeys.Add(float64(len(keys)))

@ -12,10 +12,10 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
"github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/common/model"
"github.com/uber/jaeger-client-go"
"go.opentelemetry.io/otel"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/dskit/tenant"
@ -25,6 +25,8 @@ import (
"github.com/grafana/loki/v3/pkg/util/validation"
)
var tracer = otel.Tracer("pkg/storage/chunk/cache/resultscache")
// ConstSplitter is a utility for using a constant split interval when determining cache keys
type ConstSplitter time.Duration
@ -100,8 +102,9 @@ func NewResultsCache(
}
func (s ResultsCache) Do(ctx context.Context, r Request) (Response, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "resultsCache.Do")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "resultsCache.Do")
defer sp.End()
tenantIDs, err := tenant.TenantIDs(ctx)
if err != nil {
return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
@ -121,12 +124,12 @@ func (s ResultsCache) Do(ctx context.Context, r Request) (Response, error) {
response Response
)
sp.LogKV(
"query", r.GetQuery(),
"step", time.UnixMilli(r.GetStep()),
"start", r.GetStart(),
"end", r.GetEnd(),
"key", key,
sp.SetAttributes(
attribute.String("query", r.GetQuery()),
attribute.String("step", time.UnixMilli(r.GetStep()).String()),
attribute.String("start", r.GetStart().String()),
attribute.String("end", r.GetEnd().String()),
attribute.String("key", key),
)
cacheFreshnessCapture := func(id string) time.Duration { return s.limits.MaxCacheFreshness(ctx, id) }
@ -180,8 +183,8 @@ func (s ResultsCache) handleHit(ctx context.Context, r Request, extents []Extent
reqResps []RequestResponse
err error
)
sp, ctx := opentracing.StartSpanFromContext(ctx, "handleHit")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "handleHit")
defer sp.End()
requests, responses, err := s.partition(r, extents)
if err != nil {
@ -254,7 +257,9 @@ func (s ResultsCache) handleHit(ctx context.Context, r Request, extents []Extent
continue
}
accumulator.TraceId = jaegerTraceID(ctx)
if spanContext := trace.SpanFromContext(ctx).SpanContext(); spanContext.IsValid() {
accumulator.TraceId = spanContext.TraceID().String()
}
accumulator.End = extents[i].End
currentRes, err := extents[i].toResponse()
if err != nil {
@ -311,11 +316,15 @@ func toExtent(ctx context.Context, req Request, res Response) (Extent, error) {
if err != nil {
return Extent{}, err
}
traceID := ""
if spanContext := trace.SpanFromContext(ctx).SpanContext(); spanContext.IsValid() {
traceID = spanContext.TraceID().String()
}
return Extent{
Start: req.GetStart().UnixMilli(),
End: req.GetEnd().UnixMilli(),
Response: anyResp,
TraceId: jaegerTraceID(ctx),
TraceId: traceID,
}, nil
}
@ -421,10 +430,10 @@ func (s ResultsCache) get(ctx context.Context, key string) ([]Extent, bool) {
}
var resp CachedResponse
sp, ctx := opentracing.StartSpanFromContext(ctx, "unmarshal-extent") //nolint:ineffassign,staticcheck
defer sp.Finish()
_, sp := tracer.Start(ctx, "unmarshal-extent")
defer sp.End() //nolint:ineffassign,staticcheck
sp.LogFields(otlog.Int("bytes", len(bufs[0])))
sp.SetAttributes(attribute.Int("bytes", len(bufs[0])))
if err := proto.Unmarshal(bufs[0], &resp); err != nil {
level.Error(util_log.Logger).Log("msg", "error unmarshalling cached value", "err", err)
@ -458,20 +467,6 @@ func (s ResultsCache) put(ctx context.Context, key string, extents []Extent) {
_ = s.cache.Store(ctx, []string{cache.HashKey(key)}, [][]byte{buf})
}
func jaegerTraceID(ctx context.Context) string {
span := opentracing.SpanFromContext(ctx)
if span == nil {
return ""
}
spanContext, ok := span.Context().(jaeger.SpanContext)
if !ok {
return ""
}
return spanContext.TraceID().String()
}
func (e *Extent) toResponse() (Response, error) {
msg, err := types.EmptyAny(e.Response)
if err != nil {

@ -21,10 +21,10 @@ import (
"github.com/grafana/dskit/backoff"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/instrument"
ot "github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/time/rate"
"github.com/grafana/loki/v3/pkg/storage/chunk"
@ -301,15 +301,15 @@ func (a dynamoDBStorageClient) query(ctx context.Context, query index.Query, cal
retryer := newRetryer(ctx, a.cfg.BackoffConfig)
err := instrument.CollectedRequest(ctx, "DynamoDB.QueryPages", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(innerCtx context.Context) error {
if sp := ot.SpanFromContext(innerCtx); sp != nil {
sp.SetTag("tableName", query.TableName)
sp.SetTag("hashValue", query.HashValue)
}
span := trace.SpanFromContext(innerCtx)
span.SetAttributes(
attribute.String("tableName", query.TableName),
attribute.String("hashValue", query.HashValue),
)
return a.DynamoDB.QueryPagesWithContext(innerCtx, input, func(output *dynamodb.QueryOutput, _ bool) bool {
pageCount++
if sp := ot.SpanFromContext(innerCtx); sp != nil {
sp.LogFields(otlog.Int("page", pageCount))
}
span.SetAttributes(attribute.Int("page", pageCount))
if cc := output.ConsumedCapacity; cc != nil {
a.metrics.dynamoConsumedCapacity.WithLabelValues("DynamoDB.QueryPages", *cc.TableName).
@ -371,7 +371,9 @@ type chunksPlusError struct {
// GetChunks implements chunk.Client.
func (a dynamoDBStorageClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) {
log, ctx := spanlogger.New(ctx, log.Logger, "GetChunks.DynamoDB", ot.Tag{Key: "numChunks", Value: len(chunks)})
log, ctx := spanlogger.NewOTel(ctx, log.Logger, tracer, "GetChunks.DynamoDB",
"numChunks", len(chunks),
)
defer log.Finish()
level.Debug(log).Log("chunks requested", len(chunks))
@ -420,7 +422,9 @@ var placeholder = []byte{'c'}
// Structure is identical to BatchWrite(), but operating on different datatypes
// so cannot share implementation. If you fix a bug here fix it there too.
func (a dynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) {
log, ctx := spanlogger.New(ctx, log.Logger, "getDynamoDBChunks", ot.Tag{Key: "numChunks", Value: len(chunks)})
log, ctx := spanlogger.NewOTel(ctx, log.Logger, tracer, "getDynamoDBChunks",
"numChunks", len(chunks),
)
defer log.Finish()
outstanding := dynamoDBReadRequest{}
chunksByKey := map[string]chunk.Chunk{}

@ -6,8 +6,8 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/grafana/dskit/backoff"
ot "github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// Map Cortex Backoff into AWS Retryer interface
@ -33,9 +33,7 @@ func (r *retryer) withRetries(req *request.Request) {
// making another request attempt for the failed request.
func (r *retryer) RetryRules(req *request.Request) time.Duration {
duration := r.Backoff.NextDelay()
if sp := ot.SpanFromContext(req.Context()); sp != nil {
sp.LogFields(otlog.Int("retry", r.NumRetries()))
}
trace.SpanFromContext(req.Context()).SetAttributes(attribute.Int("retry", r.NumRetries()))
return duration
}

@ -25,6 +25,7 @@ import (
"github.com/grafana/dskit/instrument"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
amnet "k8s.io/apimachinery/pkg/util/net"
bucket_s3 "github.com/grafana/loki/v3/pkg/storage/bucket/s3"
@ -37,6 +38,8 @@ import (
loki_instrument "github.com/grafana/loki/v3/pkg/util/instrument"
)
var tracer = otel.Tracer("pkg/storage/chunk/client/awsd")
const (
SignatureVersionV4 = "v4"
)

@ -13,8 +13,9 @@ import (
"cloud.google.com/go/bigtable"
"github.com/grafana/dskit/grpcclient"
"github.com/grafana/dskit/middleware"
ot "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
"github.com/grafana/loki/v3/pkg/storage/config"
@ -81,6 +82,7 @@ func NewStorageClientV1(ctx context.Context, cfg Config, schemaCfg config.Schema
if err != nil {
return nil, err
}
dialOpts = append(dialOpts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...)
if err != nil {
return nil, err
@ -109,6 +111,7 @@ func NewStorageClientColumnKey(ctx context.Context, cfg Config, schemaCfg config
if err != nil {
return nil, err
}
dialOpts = append(dialOpts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...)
if err != nil {
return nil, err
@ -219,8 +222,8 @@ func (s *storageClientColumnKey) BatchWrite(ctx context.Context, batch index.Wri
}
func (s *storageClientColumnKey) QueryPages(ctx context.Context, queries []index.Query, callback index.QueryPagesCallback) error {
sp, ctx := ot.StartSpanFromContext(ctx, "QueryPages")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "QueryPages")
defer sp.End()
// A limitation of this approach is that this only fetches whole rows; but
// whatever, we filter them in the cache on the client. But for unit tests to
@ -333,7 +336,10 @@ func (s *storageClientV1) QueryPages(ctx context.Context, queries []index.Query,
func (s *storageClientV1) query(ctx context.Context, query index.Query, callback index.QueryPagesCallback) error {
const null = string('\xff')
log, ctx := spanlogger.New(ctx, util_log.Logger, "QueryPages", ot.Tag{Key: "tableName", Value: query.TableName}, ot.Tag{Key: "hashValue", Value: query.HashValue})
log, ctx := spanlogger.NewOTel(ctx, util_log.Logger, tracer, "QueryPages",
"tableName", query.TableName,
"hashValue", query.HashValue,
)
defer log.Finish()
table := s.client.Open(query.TableName)

@ -6,9 +6,10 @@ import (
"cloud.google.com/go/bigtable"
"github.com/grafana/dskit/middleware"
ot "github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
attribute "go.opentelemetry.io/otel/attribute"
"google.golang.org/grpc"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
@ -29,6 +30,7 @@ func NewBigtableObjectClient(ctx context.Context, cfg Config, schemaCfg config.S
if err != nil {
return nil, err
}
dialOpts = append(dialOpts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...)
if err != nil {
return nil, err
@ -85,9 +87,9 @@ func (s *bigtableObjectClient) PutChunks(ctx context.Context, chunks []chunk.Chu
}
func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) {
sp, ctx := ot.StartSpanFromContext(ctx, "GetChunks")
defer sp.Finish()
sp.LogFields(otlog.Int("chunks requested", len(input)))
ctx, sp := tracer.Start(ctx, "GetChunks")
defer sp.End()
sp.SetAttributes(attribute.Int("chunks requested", len(input)))
chunks := map[string]map[string]chunk.Chunk{}
keys := map[string]bigtable.RowList{}

@ -14,6 +14,7 @@ import (
"github.com/grafana/dskit/flagext"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
@ -25,6 +26,8 @@ import (
"github.com/grafana/loki/v3/pkg/storage/chunk/client/util"
)
var tracer = otel.Tracer("pkg/storage/chunk/client/gcp")
type ClientFactory func(ctx context.Context, opts ...option.ClientOption) (*storage.Client, error)
type GCSObjectClient struct {

@ -6,8 +6,6 @@ import (
"time"
"github.com/grafana/dskit/middleware"
otgrpc "github.com/opentracing-contrib/go-grpc"
opentracing "github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"google.golang.org/api/option"
@ -39,11 +37,9 @@ var (
func bigtableInstrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) {
return []grpc.UnaryClientInterceptor{
otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
middleware.UnaryClientInstrumentInterceptor(bigtableRequestDuration),
},
[]grpc.StreamClientInterceptor{
otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()),
middleware.StreamClientInstrumentInterceptor(bigtableRequestDuration),
}
}

@ -4,6 +4,8 @@ import (
"context"
"time"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"cloud.google.com/go/bigtable"
@ -32,6 +34,7 @@ func NewTableClient(ctx context.Context, cfg Config) (index.TableClient, error)
if err != nil {
return nil, err
}
dialOpts = append(dialOpts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
client, err := bigtable.NewAdminClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...)
if err != nil {
return nil, err

@ -5,13 +5,15 @@ import (
"sync"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel"
attribute "go.opentelemetry.io/otel/attribute"
"github.com/grafana/loki/v3/pkg/storage/chunk"
util_log "github.com/grafana/loki/v3/pkg/util/log"
)
var tracer = otel.Tracer("pkg/storage/chunk/client/util")
var decodeContextPool = sync.Pool{
New: func() interface{} {
return chunk.NewDecodeContext()
@ -20,9 +22,10 @@ var decodeContextPool = sync.Pool{
// GetParallelChunks fetches chunks in parallel (up to maxParallel).
func GetParallelChunks(ctx context.Context, maxParallel int, chunks []chunk.Chunk, f func(context.Context, *chunk.DecodeContext, chunk.Chunk) (chunk.Chunk, error)) ([]chunk.Chunk, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "GetParallelChunks")
defer sp.Finish()
sp.LogFields(otlog.Int("requested", len(chunks)))
ctx, sp := tracer.Start(ctx, "GetParallelChunks")
defer sp.End()
sp.SetAttributes(attribute.Int("requested", len(chunks)))
if ctx.Err() != nil {
return nil, ctx.Err()
@ -66,7 +69,7 @@ func GetParallelChunks(ctx context.Context, maxParallel int, chunks []chunk.Chun
}
}
sp.LogFields(otlog.Int("fetched", len(result)))
sp.SetAttributes(attribute.Int("fetched", len(result)))
if lastErr != nil {
level.Error(util_log.Logger).Log("msg", "error fetching chunks", "err", lastErr)
}

@ -7,8 +7,6 @@ import (
"io/fs"
"os"
ot "github.com/opentracing/opentracing-go"
"github.com/grafana/loki/v3/pkg/storage/stores/series/index"
)
@ -35,8 +33,8 @@ func DoParallelQueries(
// Run n parallel goroutines fetching queries from the queue
for i := 0; i < n; i++ {
go func() {
sp, ctx := ot.StartSpanFromContext(ctx, "DoParallelQueries-worker")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "DoParallelQueries-worker")
defer sp.End()
for {
query, ok := <-queue
if !ok {

@ -6,9 +6,9 @@ import (
"time"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/otel"
"github.com/grafana/loki/v3/pkg/logqlmodel/stats"
"github.com/grafana/loki/v3/pkg/storage/chunk"
@ -20,6 +20,8 @@ import (
"github.com/grafana/loki/v3/pkg/util/spanlogger"
)
var tracer = otel.Tracer("pkg/storage/chunk/fetcher")
var (
cacheCorrupt = promauto.NewCounter(prometheus.CounterOpts{
Namespace: constants.Loki,
@ -127,8 +129,9 @@ func (c *Fetcher) FetchChunks(ctx context.Context, chunks []chunk.Chunk) ([]chun
if ctx.Err() != nil {
return nil, ctx.Err()
}
sp, ctx := opentracing.StartSpanFromContext(ctx, "ChunkStore.FetchChunks")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "ChunkStore.FetchChunks")
defer sp.End()
log := spanlogger.FromContext(ctx, util_log.Logger)
defer log.Finish()

@ -6,6 +6,8 @@ import (
"math"
"time"
"go.opentelemetry.io/otel"
"github.com/grafana/loki/v3/pkg/storage/types"
"github.com/grafana/loki/v3/pkg/util/httpreq"
@ -43,6 +45,8 @@ import (
"github.com/grafana/loki/v3/pkg/util/deletion"
)
var tracer = otel.Tracer("pkg/storage")
var (
indexTypeStats = analytics.NewString("store_index_type")
objectTypeStats = analytics.NewString("store_object_type")

@ -5,12 +5,14 @@ import (
"fmt"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
@ -111,8 +113,10 @@ func (c *storeEntry) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
// LabelNamesForMetricName retrieves all label names for a metric name.
func (c *storeEntry) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, matchers ...*labels.Matcher) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.LabelNamesForMetricName")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.LabelNamesForMetricName", trace.WithAttributes(
attribute.String("metric", metricName),
))
defer sp.End()
shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through)
if err != nil {
@ -120,14 +124,13 @@ func (c *storeEntry) LabelNamesForMetricName(ctx context.Context, userID string,
} else if shortcut {
return nil, nil
}
sp.LogKV("metric", metricName)
return c.indexReader.LabelNamesForMetricName(ctx, userID, from, through, metricName, matchers...)
}
func (c *storeEntry) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.LabelValuesForMetricName")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.LabelValuesForMetricName")
defer sp.End()
shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through)
if err != nil {
@ -151,8 +154,8 @@ func (c *storeEntry) Stats(ctx context.Context, userID string, from, through mod
}
func (c *storeEntry) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.Volume")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.Volume")
defer sp.End()
shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through)
if err != nil {
@ -161,14 +164,14 @@ func (c *storeEntry) Volume(ctx context.Context, userID string, from, through mo
return nil, nil
}
sp.LogKV(
"user", userID,
"from", from.Time(),
"through", through.Time(),
"matchers", syntax.MatchersString(matchers),
"err", err,
"limit", limit,
"aggregateBy", aggregateBy,
sp.SetAttributes(
attribute.String("user", userID),
attribute.String("from", from.Time().String()),
attribute.String("through", through.Time().String()),
attribute.String("matchers", syntax.MatchersString(matchers)),
attribute.String("err", err.Error()),
attribute.Int("limit", int(limit)),
attribute.String("aggregateBy", aggregateBy),
)
return c.indexReader.Volume(ctx, userID, from, through, limit, targetLabels, aggregateBy, matchers...)

@ -9,11 +9,13 @@ import (
"github.com/go-kit/log/level"
jsoniter "github.com/json-iterator/go"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/dskit/concurrency"
@ -34,6 +36,8 @@ import (
util_log "github.com/grafana/loki/v3/pkg/util/log"
)
var tracer = otel.Tracer("pkg/storage/stores/series")
var (
indexLookupsPerQuery = promauto.NewHistogram(prometheus.HistogramOpts{
Namespace: constants.Loki,
@ -316,15 +320,15 @@ func (c *IndexReaderWriter) chunksToSeries(ctx context.Context, in []logproto.Ch
// LabelNamesForMetricName retrieves all label names for a metric name.
func (c *IndexReaderWriter) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, matchers ...*labels.Matcher) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.LabelNamesForMetricName")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.LabelNamesForMetricName")
defer sp.End()
// Fetch the series IDs from the index
seriesIDs, err := c.lookupSeriesByMetricNameMatchers(ctx, from, through, userID, metricName, matchers)
if err != nil {
return nil, err
}
sp.LogKV("series-ids", len(seriesIDs))
sp.SetAttributes(attribute.Int("series-ids", len(seriesIDs)))
// Lookup the series in the index to get label names.
labelNames, err := c.lookupLabelNamesBySeries(ctx, from, through, userID, seriesIDs)
@ -333,23 +337,30 @@ func (c *IndexReaderWriter) LabelNamesForMetricName(ctx context.Context, userID
if err == series_index.ErrNotSupported {
return c.lookupLabelNamesByChunks(ctx, from, through, userID, seriesIDs)
}
sp.LogKV("msg", "lookupLabelNamesBySeries", "err", err)
sp.RecordError(err, trace.WithAttributes(
attribute.String("method", "lookupLabelNamesBySeries"),
))
return nil, err
}
sp.LogKV("labelNames", len(labelNames))
sp.SetAttributes(attribute.Int("labelNames", len(labelNames)))
return labelNames, nil
}
func (c *IndexReaderWriter) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string, matchers ...*labels.Matcher) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.LabelValuesForMetricName")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.LabelValuesForMetricName")
defer sp.End()
if len(matchers) != 0 {
return c.labelValuesForMetricNameWithMatchers(ctx, userID, from, through, metricName, labelName, matchers...)
}
sp.LogKV("from", from, "through", through, "metricName", metricName, "labelName", labelName)
sp.SetAttributes(
attribute.String("from", from.String()),
attribute.String("through", through.String()),
attribute.String("metricName", metricName),
attribute.String("labelName", labelName),
)
queries, err := c.schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName)
if err != nil {
@ -627,10 +638,11 @@ func (c *IndexReaderWriter) lookupEntriesByQueries(ctx context.Context, queries
}
func (c *IndexReaderWriter) lookupLabelNamesBySeries(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.lookupLabelNamesBySeries")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.lookupLabelNamesBySeries", trace.WithAttributes(
attribute.Int("seriesIDs", len(seriesIDs)),
))
defer sp.End()
sp.LogKV("seriesIDs", len(seriesIDs))
queries := make([]series_index.Query, 0, len(seriesIDs))
for _, seriesID := range seriesIDs {
qs, err := c.schema.GetLabelNamesForSeries(from, through, userID, []byte(seriesID))
@ -639,7 +651,7 @@ func (c *IndexReaderWriter) lookupLabelNamesBySeries(ctx context.Context, from,
}
queries = append(queries, qs...)
}
sp.LogKV("queries", len(queries))
sp.SetAttributes(attribute.Int("queries", len(queries)))
entries := entriesPool.Get().(*[]series_index.Entry)
defer entriesPool.Put(entries)
err := c.lookupEntriesByQueries(ctx, queries, entries)
@ -647,7 +659,7 @@ func (c *IndexReaderWriter) lookupLabelNamesBySeries(ctx context.Context, from,
return nil, err
}
sp.LogKV("entries", len(*entries))
sp.SetAttributes(attribute.Int("entries", len(*entries)))
var result util.UniqueStrings
for _, entry := range *entries {
@ -662,34 +674,40 @@ func (c *IndexReaderWriter) lookupLabelNamesBySeries(ctx context.Context, from,
}
func (c *IndexReaderWriter) lookupLabelNamesByChunks(ctx context.Context, from, through model.Time, userID string, seriesIDs []string) ([]string, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.lookupLabelNamesByChunks")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.lookupLabelNamesByChunks")
defer sp.End()
// Lookup the series in the index to get the chunks.
chunkIDs, err := c.lookupChunksBySeries(ctx, from, through, userID, seriesIDs)
if err != nil {
sp.LogKV("msg", "lookupChunksBySeries", "err", err)
sp.RecordError(err, trace.WithAttributes(
attribute.String("method", "lookupChunksBySeries"),
))
return nil, err
}
sp.LogKV("chunk-ids", len(chunkIDs))
sp.SetAttributes(attribute.Int("chunk-ids", len(chunkIDs)))
chunks, err := c.convertChunkIDsToChunks(ctx, userID, chunkIDs)
if err != nil {
sp.LogKV("err", "convertChunkIDsToChunks", "err", err)
sp.RecordError(err, trace.WithAttributes(
attribute.String("method", "convertChunkIDsToChunks"),
))
return nil, err
}
// Filter out chunks that are not in the selected time range and keep a single chunk per fingerprint
filtered := filterChunksByTime(from, through, chunks)
filtered = filterChunksByUniqueFingerprint(filtered)
sp.LogKV("Chunks post filtering", len(chunks))
sp.SetAttributes(attribute.Int("Chunks post filtering", len(chunks)))
chunksPerQuery.Observe(float64(len(filtered)))
// Now fetch the actual chunk data from Memcache / S3
allChunks, err := c.fetcher.FetchChunks(ctx, filtered)
if err != nil {
sp.LogKV("msg", "FetchChunks", "err", err)
sp.RecordError(err, trace.WithAttributes(
attribute.String("method", "FetchChunks"),
))
return nil, err
}
return labelNamesFromChunks(allChunks), nil

@ -4,10 +4,10 @@ import (
"context"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"go.opentelemetry.io/otel"
"github.com/grafana/loki/v3/pkg/storage/chunk"
"github.com/grafana/loki/v3/pkg/storage/chunk/fetcher"
@ -18,6 +18,8 @@ import (
"github.com/grafana/loki/v3/pkg/util/spanlogger"
)
var tracer = otel.Tracer("pkg/storage/stores")
var (
DedupedChunksTotal = promauto.NewCounter(prometheus.CounterOpts{
Namespace: constants.Loki,
@ -68,8 +70,9 @@ func (c *Writer) Put(ctx context.Context, chunks []chunk.Chunk) error {
// PutOne implements Store
func (c *Writer) PutOne(ctx context.Context, from, through model.Time, chk chunk.Chunk) error {
sp, ctx := opentracing.StartSpanFromContext(ctx, "SeriesStore.PutOne")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "SeriesStore.PutOne")
defer sp.End()
log := spanlogger.FromContext(ctx, util_log.Logger)
defer log.Finish()

@ -13,9 +13,10 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/compression"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
@ -503,12 +504,12 @@ func (c *cachedListOpObjectClient) List(ctx context.Context, prefix string, deli
cacheDur time.Duration
)
defer func() {
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(
"cache_duration", cacheDur,
"total_duration", time.Since(start),
)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(
attribute.String("cache_duration", cacheDur.String()),
attribute.String("total_duration", time.Since(start).String()),
)
}()
if delimiter != "" {

@ -9,7 +9,8 @@ import (
"time"
"github.com/go-kit/log/level"
"github.com/opentracing/opentracing-go"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/singleflight"
"github.com/grafana/loki/v3/pkg/storage/chunk/client"
@ -190,13 +191,14 @@ func (c *cachedObjectClient) buildTableNamesCache(ctx context.Context) (err erro
}
}()
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("msg", "building table names cache")
now := time.Now()
defer func() {
sp.LogKV("msg", "table names cache built", "duration", time.Since(now))
}()
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("building table names cache")
now := time.Now()
defer func() {
sp.AddEvent("table names cache built", trace.WithAttributes(
attribute.String("duration", time.Since(now).String()),
))
}()
_, tableNames, err := c.ObjectClient.List(ctx, "", delimiter)
if err != nil {
@ -277,13 +279,14 @@ func (t *table) buildCache(ctx context.Context, objectClient client.ObjectClient
}
}()
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV("msg", "building table cache")
now := time.Now()
defer func() {
sp.LogKV("msg", "table cache built", "duration", time.Since(now))
}()
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("building table cache")
now := time.Now()
defer func() {
sp.AddEvent("table cache built", trace.WithAttributes(
attribute.String("duration", time.Since(now).String()),
))
}()
objects, _, err := objectClient.List(ctx, t.name+delimiter, "")
if err != nil {

@ -2,15 +2,19 @@ package tsdb
import (
"context"
"fmt"
"sort"
"sync"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/loki/v3/pkg/logql"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/stores/index/seriesvolume"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
@ -25,6 +29,8 @@ import (
"github.com/grafana/loki/v3/pkg/util"
)
var tracer = otel.Tracer("pkg/storage/stores/shipper/indexshipper/tsdb")
// implements stores.Index
type IndexClient struct {
idx Index
@ -221,26 +227,26 @@ func (c *IndexClient) Stats(ctx context.Context, userID string, from, through mo
}
res := acc.Stats()
if sp := opentracing.SpanFromContext(ctx); sp != nil {
sp.LogKV(
"function", "IndexClient.Stats",
"from", from.Time(),
"through", through.Time(),
"matchers", syntax.MatchersString(matchers),
"shard", shard,
"intervals", len(intervals),
"streams", res.Streams,
"chunks", res.Chunks,
"bytes", res.Bytes,
"entries", res.Entries,
)
}
sp := trace.SpanFromContext(ctx)
sp.SetAttributes(
attribute.String("function", "IndexClient.Stats"),
attribute.String("from", from.Time().String()),
attribute.String("through", through.Time().String()),
attribute.String("matchers", syntax.MatchersString(matchers)),
attribute.String("shard", fmt.Sprintf("%+v", shard)),
attribute.Int("intervals", len(intervals)),
attribute.Int64("streams", int64(res.Streams)),
attribute.Int64("chunks", int64(res.Chunks)),
attribute.Int64("bytes", int64(res.Bytes)),
attribute.Int64("entries", int64(res.Entries)),
)
return &res, nil
}
func (c *IndexClient) Volume(ctx context.Context, userID string, from, through model.Time, limit int32, targetLabels []string, aggregateBy string, matchers ...*labels.Matcher) (*logproto.VolumeResponse, error) {
sp, ctx := opentracing.StartSpanFromContext(ctx, "IndexClient.Volume")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "IndexClient.Volume")
defer sp.End()
matchers, shard, err := cleanMatchers(matchers...)
if err != nil {
@ -263,14 +269,14 @@ func (c *IndexClient) Volume(ctx context.Context, userID string, from, through m
}
}
sp.LogKV(
"from", from.Time(),
"through", through.Time(),
"matchers", syntax.MatchersString(matchers),
"shard", shard,
"intervals", len(intervals),
"limit", limit,
"aggregateBy", aggregateBy,
sp.SetAttributes(
attribute.String("from", from.Time().String()),
attribute.String("through", through.Time().String()),
attribute.String("matchers", syntax.MatchersString(matchers)),
attribute.String("shard", fmt.Sprintf("%+v", shard)),
attribute.Int("intervals", len(intervals)),
attribute.Int("limit", int(limit)),
attribute.String("aggregateBy", aggregateBy),
)
if err != nil {

@ -10,8 +10,6 @@ import (
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/go-kit/log/level"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
@ -367,8 +365,8 @@ func (i *TSDBIndex) Volume(
aggregateBy string,
matchers ...*labels.Matcher,
) error {
sp, ctx := opentracing.StartSpanFromContext(ctx, "Index.Volume")
defer sp.Finish()
ctx, sp := tracer.Start(ctx, "Index.Volume")
defer sp.End()
labelsToMatch, matchers, includeAll := util.PrepareLabelsAndMatchers(targetLabels, matchers, TenantLabel)

@ -5,16 +5,13 @@ import (
)
type Config struct {
Enabled bool `yaml:"enabled"`
ProfilingEnabled bool `yaml:"profiling_enabled" category:"experimental" doc:"hidden"`
Enabled bool `yaml:"enabled"`
}
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.BoolVar(&cfg.Enabled, "tracing.enabled", true, "Set to false to disable tracing.")
f.BoolVar(&cfg.ProfilingEnabled, "tracing.profiling-enabled", true, "Set to true to enable profiling integration.")
}
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.BoolVar(&cfg.Enabled, prefix+"tracing.enabled", true, "Set to false to disable tracing.")
f.BoolVar(&cfg.ProfilingEnabled, prefix+"tracing.profiling-enabled", true, "Set to true to enable profiling integration.")
}
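A hedged sketch of how the remaining flag can be registered and parsed; the import path, flag-set name, and arguments are illustrative assumptions, not part of this change:

```go
package example

import (
	"flag"

	"github.com/grafana/loki/v3/pkg/tracing" // assumed import path of this config package
)

// parseTracingFlags registers the remaining tracing option and parses it from args.
func parseTracingFlags(args []string) (tracing.Config, error) {
	var cfg tracing.Config
	fs := flag.NewFlagSet("loki", flag.ContinueOnError)
	cfg.RegisterFlags(fs)
	err := fs.Parse(args) // e.g. []string{"-tracing.enabled=false"}
	return cfg, err
}
```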

@ -0,0 +1,22 @@
package tracing
import (
"fmt"
"github.com/grafana/dskit/tracing"
"go.opentelemetry.io/otel/attribute"
)
func KeyValuesToOTelAttributes(kvps ...any) []attribute.KeyValue {
attrs := make([]attribute.KeyValue, 0, len(kvps)/2)
for i := 0; i < len(kvps); i += 2 {
if i+1 < len(kvps) {
key, ok := kvps[i].(string)
if !ok {
key = fmt.Sprintf("not_string_key:%v", kvps[i])
}
attrs = append(attrs, tracing.KeyValueToOTelAttribute(key, kvps[i+1]))
}
}
return attrs
}
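A minimal usage sketch for the new helper; the caller, attribute values, and import path are illustrative assumptions. It bridges LogKV-style variadic key/value pairs to OTel attributes on the span carried by the context:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"

	"github.com/grafana/loki/v3/pkg/util/tracing" // assumed import path of the helper
)

// annotateSpan attaches loosely-typed key/value pairs to the span in ctx.
func annotateSpan(ctx context.Context, orgID string, shards int) {
	sp := trace.SpanFromContext(ctx)
	sp.SetAttributes(tracing.KeyValuesToOTelAttributes(
		"org_id", orgID,
		"shards", shards,
	)...)
}
```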

@ -17,8 +17,8 @@ import (
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/opentracing/opentracing-go"
otlog "github.com/opentracing/opentracing-go/log"
attribute "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"gopkg.in/yaml.v2"
)
@ -165,18 +165,14 @@ const (
// ParseProtoReader parses a compressed proto from an io.Reader.
func ParseProtoReader(ctx context.Context, reader io.Reader, expectedSize, maxSize int, req proto.Message, compression CompressionType) error {
sp := opentracing.SpanFromContext(ctx)
if sp != nil {
sp.LogFields(otlog.String("event", "util.ParseProtoRequest[start reading]"))
}
sp := trace.SpanFromContext(ctx)
sp.AddEvent("util.ParseProtoRequest[start reading]")
body, err := decompressRequest(reader, expectedSize, maxSize, compression, sp)
if err != nil {
return err
}
if sp != nil {
sp.LogFields(otlog.String("event", "util.ParseProtoRequest[unmarshal]"), otlog.Int("size", len(body)))
}
sp.AddEvent("util.ParseProtoRequest[unmarshal]", trace.WithAttributes(attribute.Int("size", len(body))))
// We re-implement proto.Unmarshal here as it calls XXX_Unmarshal first,
// which we can't override without upsetting golint.
@ -193,7 +189,7 @@ func ParseProtoReader(ctx context.Context, reader io.Reader, expectedSize, maxSi
return nil
}
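A hedged caller sketch for the OTel variant of ParseProtoReader; the handler, tracer name, size limit, and import paths are assumptions for illustration. With a span already active on the context, the events recorded above land on that span:

```go
package example

import (
	"net/http"

	"go.opentelemetry.io/otel"

	"github.com/grafana/loki/v3/pkg/logproto"
	"github.com/grafana/loki/v3/pkg/util" // assumed import path of ParseProtoReader
)

var tracer = otel.Tracer("example/push") // illustrative tracer name

const maxRecvMsgSize = 10 << 20 // assumed limit, for illustration only

// handlePush decodes a snappy-compressed protobuf body while tracing the work.
func handlePush(w http.ResponseWriter, r *http.Request) {
	ctx, sp := tracer.Start(r.Context(), "handlePush")
	defer sp.End()

	var req logproto.PushRequest
	if err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	_ = req // ... handle the decoded request ...
}
```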
func decompressRequest(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp opentracing.Span) (body []byte, err error) {
func decompressRequest(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp trace.Span) (body []byte, err error) {
defer func() {
if err != nil && len(body) > maxSize {
err = fmt.Errorf(messageSizeLargerErrFmt, ErrMessageSizeTooLarge, len(body), maxSize)
@ -211,7 +207,7 @@ func decompressRequest(reader io.Reader, expectedSize, maxSize int, compression
return
}
func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp opentracing.Span) ([]byte, error) {
func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp trace.Span) ([]byte, error) {
var (
buf bytes.Buffer
body []byte
@ -237,7 +233,7 @@ func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compressi
return body, err
}
func decompressFromBuffer(buffer *bytes.Buffer, maxSize int, compression CompressionType, sp opentracing.Span) ([]byte, error) {
func decompressFromBuffer(buffer *bytes.Buffer, maxSize int, compression CompressionType, sp trace.Span) ([]byte, error) {
bufBytes := buffer.Bytes()
if len(bufBytes) > maxSize {
return nil, fmt.Errorf(messageSizeLargerErrFmt, ErrMessageSizeTooLarge, len(bufBytes), maxSize)
@ -246,10 +242,9 @@ func decompressFromBuffer(buffer *bytes.Buffer, maxSize int, compression Compres
case NoCompression:
return bufBytes, nil
case RawSnappy:
if sp != nil {
sp.LogFields(otlog.String("event", "util.ParseProtoRequest[decompress]"),
otlog.Int("size", len(bufBytes)))
}
sp.AddEvent("util.ParseProtoRequest[decompress]", trace.WithAttributes(
attribute.Int("size", len(bufBytes)),
))
size, err := snappy.DecodedLen(bufBytes)
if err != nil {
return nil, err

@ -1,8 +1,11 @@
package httpgrpc
import (
"context"
weaveworks_httpgrpc "github.com/grafana/dskit/httpgrpc"
"github.com/opentracing/opentracing-go"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/propagation"
"github.com/grafana/loki/v3/pkg/querier/queryrange"
)
@ -15,6 +18,31 @@ type Request interface {
// Used to transfer trace information from/to HTTP request.
type HeadersCarrier weaveworks_httpgrpc.HTTPRequest
func (c *HeadersCarrier) Get(key string) string {
// Check if the key exists in the headers
for _, h := range c.Headers {
if h.Key == key {
// Return the first value for the key
if len(h.Values) > 0 {
return h.Values[0]
}
break
}
}
return ""
}
func (c *HeadersCarrier) Keys() []string {
// Collect all non-empty keys from the headers
keys := make([]string, 0, len(c.Headers))
for _, h := range c.Headers {
if h.Key != "" {
keys = append(keys, h.Key)
}
}
return keys
}
func (c *HeadersCarrier) Set(key, val string) {
c.Headers = append(c.Headers, &weaveworks_httpgrpc.Header{
Key: key,
@ -33,32 +61,22 @@ func (c *HeadersCarrier) ForeachKey(handler func(key, val string) error) error {
return nil
}
func GetParentSpanForHTTPRequest(tracer opentracing.Tracer, req *weaveworks_httpgrpc.HTTPRequest) (opentracing.SpanContext, error) {
if tracer == nil {
return nil, nil
}
carrier := (*HeadersCarrier)(req)
return tracer.Extract(opentracing.HTTPHeaders, carrier)
func ExtractSpanFromHTTPRequest(ctx context.Context, req *weaveworks_httpgrpc.HTTPRequest) context.Context {
return otel.GetTextMapPropagator().Extract(ctx, (*HeadersCarrier)(req))
}
func GetParentSpanForQueryRequest(tracer opentracing.Tracer, req *queryrange.QueryRequest) (opentracing.SpanContext, error) {
if tracer == nil {
return nil, nil
}
carrier := opentracing.TextMapCarrier(req.Metadata)
return tracer.Extract(opentracing.TextMap, carrier)
func ExtractSpanFromQueryRequest(ctx context.Context, req *queryrange.QueryRequest) context.Context {
return otel.GetTextMapPropagator().Extract(ctx, propagation.MapCarrier(req.Metadata))
}
func GetParentSpanForRequest(tracer opentracing.Tracer, req Request) (opentracing.SpanContext, error) {
func ExtractSpanFromRequest(ctx context.Context, req Request) context.Context {
if r := req.GetQueryRequest(); r != nil {
return GetParentSpanForQueryRequest(tracer, r)
return ExtractSpanFromQueryRequest(ctx, r)
}
if r := req.GetHttpRequest(); r != nil {
return GetParentSpanForHTTPRequest(tracer, r)
return ExtractSpanFromHTTPRequest(ctx, r)
}
return nil, nil
return ctx
}
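A hedged sketch of how a handler might use the new extraction helpers; the handler, tracer name, and import path are illustrative assumptions. The propagated context is restored first, then the server-side span is started as a child of the remote one:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"

	"github.com/grafana/loki/v3/pkg/util/httpgrpc" // assumed import path of this package
)

var tracer = otel.Tracer("example/handler") // illustrative tracer name

// handle restores the remote trace context from the request before tracing its own work.
func handle(ctx context.Context, req httpgrpc.Request) error {
	ctx = httpgrpc.ExtractSpanFromRequest(ctx, req)
	ctx, sp := tracer.Start(ctx, "handle")
	defer sp.End()

	_ = ctx // ... process the request ...
	return nil
}
```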

@ -1,52 +0,0 @@
package spanlogger
import (
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/log"
)
type noopTracer struct{}
type noopSpan struct{}
type noopSpanContext struct{}
var (
defaultNoopSpanContext = noopSpanContext{}
defaultNoopSpan = noopSpan{}
defaultNoopTracer = noopTracer{}
)
const (
emptyString = ""
)
func (n noopSpanContext) ForeachBaggageItem(_ func(k, v string) bool) {}
func (n noopSpan) Context() opentracing.SpanContext { return defaultNoopSpanContext }
func (n noopSpan) SetBaggageItem(_, _ string) opentracing.Span { return defaultNoopSpan }
func (n noopSpan) BaggageItem(_ string) string { return emptyString }
func (n noopSpan) SetTag(_ string, _ interface{}) opentracing.Span { return n }
func (n noopSpan) LogFields(_ ...log.Field) {}
func (n noopSpan) LogKV(_ ...interface{}) {}
func (n noopSpan) Finish() {}
func (n noopSpan) FinishWithOptions(_ opentracing.FinishOptions) {}
func (n noopSpan) SetOperationName(_ string) opentracing.Span { return n }
func (n noopSpan) Tracer() opentracing.Tracer { return defaultNoopTracer }
func (n noopSpan) LogEvent(_ string) {}
func (n noopSpan) LogEventWithPayload(_ string, _ interface{}) {}
func (n noopSpan) Log(_ opentracing.LogData) {}
// StartSpan belongs to the Tracer interface.
func (n noopTracer) StartSpan(_ string, _ ...opentracing.StartSpanOption) opentracing.Span {
return defaultNoopSpan
}
// Inject belongs to the Tracer interface.
func (n noopTracer) Inject(_ opentracing.SpanContext, _ interface{}, _ interface{}) error {
return nil
}
// Extract belongs to the Tracer interface.
func (n noopTracer) Extract(_ interface{}, _ interface{}) (opentracing.SpanContext, error) {
return nil, opentracing.ErrSpanContextNotFound
}

@ -6,6 +6,7 @@ import (
"github.com/go-kit/log"
"github.com/grafana/dskit/spanlogger" //lint:ignore faillint // This package is the wrapper that should be used.
"github.com/grafana/dskit/tenant"
"go.opentelemetry.io/otel/trace"
)
const (
@ -30,10 +31,10 @@ var (
// SpanLogger unifies tracing and logging, to reduce repetition.
type SpanLogger = spanlogger.SpanLogger
// New makes a new SpanLogger with a log.Logger to send logs to. The provided context will have the logger attached
// NewOTel makes a new OTel SpanLogger with a log.Logger to send logs to. The provided context will have the logger attached
// to it and can be retrieved with FromContext.
func New(ctx context.Context, logger log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) {
return spanlogger.New(ctx, logger, method, resolver, kvps...)
func NewOTel(ctx context.Context, logger log.Logger, tracer trace.Tracer, method string, kvps ...interface{}) (*SpanLogger, context.Context) {
return spanlogger.NewOTel(ctx, logger, tracer, method, resolver, kvps...)
}
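A hedged caller sketch for the new constructor; the tracer name, function, and import paths are illustrative assumptions, and Finish comes from dskit's SpanLogger API. Callers now pass an explicit OTel tracer instead of relying on the global OpenTracing tracer:

```go
package example

import (
	"context"

	"github.com/go-kit/log"
	"go.opentelemetry.io/otel"

	"github.com/grafana/loki/v3/pkg/util/spanlogger" // assumed import path of this wrapper
)

var tracer = otel.Tracer("example/querier") // illustrative tracer name

// labelValues shows the NewOTel call shape: it returns a logger tied to a new span.
func labelValues(ctx context.Context, logger log.Logger, name string) error {
	sl, ctx := spanlogger.NewOTel(ctx, logger, tracer, "labelValues", "label", name)
	defer sl.Finish()

	_ = ctx // ... do the work, logging via sl ...
	return nil
}
```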
// FromContext returns a span logger using the current parent span.

@ -9,10 +9,9 @@ import (
"github.com/grafana/dskit/grpcclient"
"github.com/grafana/dskit/middleware"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/keepalive"
@ -70,6 +69,7 @@ func New(cfg Config) (*Client, error) {
if err != nil {
return nil, err
}
dialOpts = append(dialOpts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
serviceConfig := fmt.Sprintf(grpcServiceConfigTemplate, GRPCLoadBalancingPolicyRoundRobin)
@ -108,7 +108,6 @@ func getGRPCInterceptors(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.Str
unaryInterceptors = append(unaryInterceptors, cfg.GRPCUnaryClientInterceptors...)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientQueryTagsInterceptor)
unaryInterceptors = append(unaryInterceptors, server.UnaryClientHTTPHeadersInterceptor)
unaryInterceptors = append(unaryInterceptors, otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
unaryInterceptors = append(unaryInterceptors, middleware.ClientUserHeaderInterceptor)
}
@ -117,7 +116,6 @@ func getGRPCInterceptors(cfg *Config) ([]grpc.UnaryClientInterceptor, []grpc.Str
streamInterceptors = append(streamInterceptors, cfg.GRCPStreamClientInterceptors...)
streamInterceptors = append(streamInterceptors, server.StreamClientQueryTagsInterceptor)
streamInterceptors = append(streamInterceptors, server.StreamClientHTTPHeadersInterceptor)
streamInterceptors = append(streamInterceptors, otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()))
if !cfg.Internal {
streamInterceptors = append(streamInterceptors, middleware.StreamClientUserHeaderInterceptor)
}

@ -1,104 +0,0 @@
# Span Profiler for OpenTracing-Go
## Overview
The Span Profiler for OpenTracing-Go is a package that seamlessly integrates `opentracing-go` instrumentation with
profiling through the use of pprof labels.
Accessing trace span profiles is made convenient through the Grafana Explore view. You can find a complete example setup
with Grafana Tempo in the [Pyroscope repository](https://github.com/grafana/pyroscope/tree/main/examples/tracing/golang-push):
![image](https://github.com/grafana/otel-profiling-go/assets/12090599/31e33cd1-818b-4116-b952-c9ec7b1fb593)
## Usage
There are two primary ways to use the Span Profiler:
### 1. Wrap the Global Tracer.
You can wrap the global tracer using `spanprofiler.NewTracer`:
```go
import (
"github.com/opentracing/opentracing-go"
"github.com/grafana/dskit/spanprofiler"
)
func main() {
// Initialize your OpenTracing tracer
tracer := opentracing.GlobalTracer()
// Wrap it with the tracer-profiler
wrappedTracer := spanprofiler.NewTracer(tracer)
// Use the wrapped tracer in your application
opentracing.SetGlobalTracer(wrappedTracer)
// Or, as an oneliner:
// opentracing.SetGlobalTracer(spanprofiler.NewTracer(opentracing.GlobalTracer()))
// Your application logic here
}
```
For efficiency, the tracer selectively records profiles for _root_ spans — the initial _local_ span in a process — since
a trace may encompass thousands of spans. All stack trace samples accumulated during the execution of their child spans
contribute to the root span's profile. In practical terms, this signifies that, for instance, an HTTP request results
in a singular profile, irrespective of the numerous spans within the trace. It's important to note that these profiles
don't extend beyond the boundaries of a single process.
The limitation of this approach is that only spans created within the same goroutine, or its children, as the parent are
taken into account. Consequently, in scenarios involving asynchronous execution, where the parent span context is passed
to another goroutine, explicit profiling becomes necessary using `spanprofiler.StartSpanFromContext`.
### 2. Profile individual spans.
The `spanprofiler.StartSpanFromContext` function allows you to granularly control which spans to profile:
```go
func YourOperationName(ctx context.Background()) {
// Start a span and enable profiling for it
span, ctx := spanprofiler.StartSpanFromContext(ctx, "YourOperationName", tracer)
defer span.Finish() // Finish the span when done
// Use the span in your application logic
}
```
The function guarantees that the returned span will be profiled.
Both methods can be employed either in conjunction or independently. Our recommendation is to utilize the tracer for
seamless integration, reserving explicit span profiling only for cases where spans are spawned in detached goroutines.
## Implementation details
When a new trace span is created, and is eligible for profiling, the tracer sets `span_id` and `span_name` [pprof labels](https://github.com/google/pprof/blob/master/doc/README.md#tag-filtering)
that point to the respective span. These labels are stored in the goroutine's local storage and inherited by any
subsequent child goroutines.
`span_name` is available as a regular label and can be used in the query expressions. For example, the following query
will show you profile for the code that is not covered with traces:
```
{service_name="my-service",span_name=""}
```
Additionally, trace spans are identified by the `pyroscope.profile.id` attribute, indicating the associated profile.
This allows finding such spans in the trace view (in the screenshot) and fetching profiles for specific spans.
It's important to note that the presence of this attribute does not guarantee profile availability; stack trace samples
might not be collected if the CPU time utilized falls below the sample interval (10ms).
It is crucial to understand that this module doesn't directly control the pprof profiler; its initialization is still
necessary for profile collection. This initialization can be achieved through the `runtime/pprof` package, or using the
[Pyroscope client](https://github.com/grafana/pyroscope-go).
Limitations:
- Only CPU profiling is fully supported at the moment.
- Only [Jaeger tracer](https://github.com/jaegertracing/jaeger-client-go) implementation is supported.
## Performance implications
The typical performance impact is generally imperceptible and primarily arises from the cost of pprof labeling. However,
intensive use of pprof labels may have a negative impact on the profiled application.
In the case of the tracer provided by this package, the `StartSpan` method wrapper introduces an approximate 20% increase
in CPU time compared to the original call. In the vast majority of cases, the overhead constitutes less than 0.01% of the total
CPU time and is considered safe for deployment in production systems.

@ -1,107 +0,0 @@
package spanprofiler
import (
"context"
"runtime/pprof"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
)
// StartSpanFromContext starts and returns a Span with `operationName`, using
// any Span found within `ctx` as a ChildOfRef. If no such parent could be
// found, StartSpanFromContext creates a root (parentless) Span.
//
// The call sets `operationName` as `span_name` pprof label, and the new span
// identifier as `span_id` pprof label, if the trace is sampled.
//
// The second return value is a context.Context object built around the
// returned Span.
//
// Example usage:
//
// SomeFunction(ctx context.Context, ...) {
// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
// defer sp.Finish()
// ...
// }
func StartSpanFromContext(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
return StartSpanFromContextWithTracer(ctx, opentracing.GlobalTracer(), operationName, opts...)
}
// StartSpanFromContextWithTracer starts and returns a span with `operationName`
// using a span found within the context as a ChildOfRef. If that doesn't exist
// it creates a root span. It also returns a context.Context object built
// around the returned span.
//
// The call sets `operationName` as `span_name` pprof label, and the new span
// identifier as `span_id` pprof label, if the trace is sampled.
//
// Its behavior is identical to StartSpanFromContext except that it takes an explicit
// tracer as opposed to using the global tracer.
func StartSpanFromContextWithTracer(ctx context.Context, tracer opentracing.Tracer, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, operationName, opts...)
spanCtx, ok := span.Context().(jaeger.SpanContext)
if ok {
span = wrapJaegerSpanWithGoroutineLabels(ctx, span, operationName, sampledSpanID(spanCtx))
}
return span, ctx
}
func wrapJaegerSpanWithGoroutineLabels(
parentCtx context.Context,
span opentracing.Span,
operationName string,
spanID string,
) *spanWrapper {
// Note that pprof labels are propagated through the goroutine's local
// storage and are always copied to child goroutines. This way, stack
// trace samples collected during execution of child spans will be taken
// into account at the root.
var ctx context.Context
if spanID != "" {
ctx = pprof.WithLabels(parentCtx, pprof.Labels(
spanNameLabelName, operationName,
spanIDLabelName, spanID))
} else {
// Even if the trace has not been sampled, we still need to keep track
// of samples that belong to the span (all spans with the given name).
ctx = pprof.WithLabels(parentCtx, pprof.Labels(
spanNameLabelName, operationName))
}
// Goroutine labels should be set as early as possible,
// in order to capture the overhead of the function call.
pprof.SetGoroutineLabels(ctx)
// We create a span wrapper to ensure we remove the newly attached pprof
// labels when span finishes. The need of this wrapper is questioned:
// as we do not have the original context, we could leave the goroutine
// labels – normally, span is finished at the very end of the goroutine's
// lifetime, so no significant side effects should take place.
w := spanWrapper{
parentPprofCtx: parentCtx,
currentPprofCtx: ctx,
}
w.Span = span.SetTag(profileIDTagKey, spanID)
return &w
}
type spanWrapper struct {
parentPprofCtx context.Context
currentPprofCtx context.Context
opentracing.Span
}
func (s *spanWrapper) Finish() {
s.Span.Finish()
pprof.SetGoroutineLabels(s.parentPprofCtx)
s.currentPprofCtx = s.parentPprofCtx
}
// sampledSpanID returns the span ID, if the span is sampled,
// otherwise an empty string is returned.
func sampledSpanID(spanCtx jaeger.SpanContext) string {
if spanCtx.IsSampled() {
return spanCtx.SpanID().String()
}
return ""
}

@ -1,112 +0,0 @@
package spanprofiler
import (
"context"
"unsafe"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
)
const (
profileIDTagKey = "pyroscope.profile.id"
spanIDLabelName = "span_id"
spanNameLabelName = "span_name"
)
type tracer struct{ opentracing.Tracer }
// NewTracer creates a new opentracing.Tracer with the span profiler integrated.
//
// For efficiency, the tracer selectively records profiles for _root_ spans
// — the initial _local_ span in a process — since a trace may encompass
// thousands of spans. All stack trace samples accumulated during the execution
// of their child spans contribute to the root span's profile. In practical
// terms, this signifies that, for instance, an HTTP request results in a
// singular profile, irrespective of the numerous spans within the trace. It's
// important to note that these profiles don't extend beyond the boundaries of
// a single process.
//
// The limitation of this approach is that only spans created within the same
// goroutine, or its children, as the parent are taken into account.
// Consequently, in scenarios involving asynchronous execution, where the parent
// span context is passed to another goroutine, explicit profiling becomes
// necessary using `spanprofiler.StartSpanFromContext`.
func NewTracer(tr opentracing.Tracer) opentracing.Tracer { return &tracer{tr} }
func (t *tracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
span := t.Tracer.StartSpan(operationName, opts...)
spanCtx, ok := span.Context().(jaeger.SpanContext)
if !ok {
return span
}
if !spanCtx.IsSampled() {
return span
}
// pprof labels are attached only once, at the span root level.
if !isRootSpan(opts...) {
return span
}
// The pprof label API assumes that pairs of labels are passed through the
// context. Unfortunately, the opentracing Tracer API doesn't match this
// concept: this makes it impossible to save an existing pprof context and
// all the original pprof labels associated with the goroutine.
ctx := context.Background()
return wrapJaegerSpanWithGoroutineLabels(ctx, span, operationName, sampledSpanID(spanCtx))
}
// isRootSpan reports whether the span is a root span.
//
// There are only two valid cases: if the span is the first span in the trace,
// or is the first _local_ span in the trace.
//
// An exception is made for FollowsFrom reference: spans without an explicit
// parent are considered as root ones.
func isRootSpan(opts ...opentracing.StartSpanOption) bool {
parent, ok := parentSpanContextFromRef(opts...)
return !ok || isRemoteSpan(parent)
}
// parentSpanContextFromRef returns the first parent reference.
func parentSpanContextFromRef(options ...opentracing.StartSpanOption) (sc jaeger.SpanContext, ok bool) {
var sso opentracing.StartSpanOptions
for _, option := range options {
option.Apply(&sso)
}
for _, ref := range sso.References {
if ref.Type == opentracing.ChildOfRef && ref.ReferencedContext != nil {
sc, ok = ref.ReferencedContext.(jaeger.SpanContext)
return sc, ok
}
}
return sc, ok
}
// isRemoteSpan reports whether the span context represents a remote parent.
//
// NOTE(kolesnikovae): this is ugly, but the only reliable method I found.
// The opentracing-go package and Jaeger client are not meant to change as
// both are deprecated.
func isRemoteSpan(c jaeger.SpanContext) bool {
jaegerCtx := *(*jaegerSpanCtx)(unsafe.Pointer(&c))
return jaegerCtx.remote
}
// jaegerSpanCtx represents memory layout of the jaeger.SpanContext type.
type jaegerSpanCtx struct {
traceID [16]byte // TraceID
spanID [8]byte // SpanID
parentID [8]byte // SpanID
baggage uintptr // map[string]string
debugID [2]uintptr // string
// samplingState is a pointer to a struct that has "localRootSpan" member,
// which we could probably use: that would allow omitting quite expensive
// parentSpanContextFromRef call. However, interpreting the pointer and
// the complex struct memory layout is more complicated and dangerous.
samplingState uintptr
// remote indicates that span context represents a remote parent
remote bool
}

@ -1,27 +0,0 @@
Copyright (c) 2016, gRPC Ecosystem
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of grpc-opentracing nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -1,23 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the GRPC project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of GRPC, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of GRPC. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of GRPC or any code incorporated within this
implementation of GRPC constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of GRPC
shall terminate as of the date such litigation is filed.

@ -1,57 +0,0 @@
# OpenTracing support for gRPC in Go
The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based
systems in Go.
## Installation
```
go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc
```
## Documentation
See the basic usage examples below and the [package documentation on
godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc).
## Client-side usage example
Wherever you call `grpc.Dial`:
```go
// You must have some sort of OpenTracing Tracer instance on hand.
var tracer opentracing.Tracer = ...
...
// Set up a connection to the server peer.
conn, err := grpc.Dial(
address,
... // other options
grpc.WithUnaryInterceptor(
otgrpc.OpenTracingClientInterceptor(tracer)),
grpc.WithStreamInterceptor(
otgrpc.OpenTracingStreamClientInterceptor(tracer)))
// All future RPC activity involving `conn` will be automatically traced.
```
## Server-side usage example
Wherever you call `grpc.NewServer`:
```go
// You must have some sort of OpenTracing Tracer instance on hand.
var tracer opentracing.Tracer = ...
...
// Initialize the gRPC server.
s := grpc.NewServer(
... // other options
grpc.UnaryInterceptor(
otgrpc.OpenTracingServerInterceptor(tracer)),
grpc.StreamInterceptor(
otgrpc.OpenTracingStreamServerInterceptor(tracer)))
// All future RPC activity involving `s` will be automatically traced.
```

@ -1,239 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"io"
"runtime"
"sync/atomic"
)
// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable
// for use in a grpc.Dial call.
//
// For example:
//
// conn, err := grpc.Dial(
// address,
// ..., // (existing DialOptions)
// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)))
//
// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC
// metadata; they will also look in the context.Context for an active
// in-process parent Span and establish a ChildOf reference if such a parent
// Span could be found.
func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
method string,
req, resp interface{},
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,
) error {
var err error
var parentCtx opentracing.SpanContext
if parent := opentracing.SpanFromContext(ctx); parent != nil {
parentCtx = parent.Context()
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) {
return invoker(ctx, method, req, resp, cc, opts...)
}
clientSpan := tracer.StartSpan(
method,
opentracing.ChildOf(parentCtx),
ext.SpanKindRPCClient,
gRPCComponentTag,
)
defer clientSpan.Finish()
ctx = injectSpanContext(ctx, tracer, clientSpan)
if otgrpcOpts.logPayloads {
clientSpan.LogFields(log.Object("gRPC request", req))
}
err = invoker(ctx, method, req, resp, cc, opts...)
if err == nil {
if otgrpcOpts.logPayloads {
clientSpan.LogFields(log.Object("gRPC response", resp))
}
} else {
SetSpanTags(clientSpan, err, true)
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(clientSpan, method, req, resp, err)
}
return err
}
}
// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating
// a single span to correspond to the lifetime of the RPC's stream.
//
// For example:
//
// conn, err := grpc.Dial(
// address,
// ..., // (existing DialOptions)
// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)))
//
// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC
// metadata; they will also look in the context.Context for an active
// in-process parent Span and establish a ChildOf reference if such a parent
// Span could be found.
func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor {
otgrpcOpts := newOptions()
otgrpcOpts.apply(optFuncs...)
return func(
ctx context.Context,
desc *grpc.StreamDesc,
cc *grpc.ClientConn,
method string,
streamer grpc.Streamer,
opts ...grpc.CallOption,
) (grpc.ClientStream, error) {
var err error
var parentCtx opentracing.SpanContext
if parent := opentracing.SpanFromContext(ctx); parent != nil {
parentCtx = parent.Context()
}
if otgrpcOpts.inclusionFunc != nil &&
!otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) {
return streamer(ctx, desc, cc, method, opts...)
}
clientSpan := tracer.StartSpan(
method,
opentracing.ChildOf(parentCtx),
ext.SpanKindRPCClient,
gRPCComponentTag,
)
ctx = injectSpanContext(ctx, tracer, clientSpan)
cs, err := streamer(ctx, desc, cc, method, opts...)
if err != nil {
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
SetSpanTags(clientSpan, err, true)
clientSpan.Finish()
return cs, err
}
return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil
}
}
func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream {
finishChan := make(chan struct{})
isFinished := new(int32)
*isFinished = 0
finishFunc := func(err error) {
// The current OpenTracing specification forbids finishing a span more than
// once. Since we have multiple code paths that could concurrently call
// `finishFunc`, we need to add some sort of synchronization to guard against
// multiple finishing.
if !atomic.CompareAndSwapInt32(isFinished, 0, 1) {
return
}
close(finishChan)
defer clientSpan.Finish()
if err != nil {
clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
SetSpanTags(clientSpan, err, true)
}
if otgrpcOpts.decorator != nil {
otgrpcOpts.decorator(clientSpan, method, nil, nil, err)
}
}
go func() {
select {
case <-finishChan:
// The client span is being finished by another code path; hence, no
// action is necessary.
case <-cs.Context().Done():
finishFunc(cs.Context().Err())
}
}()
otcs := &openTracingClientStream{
ClientStream: cs,
desc: desc,
finishFunc: finishFunc,
}
// The `ClientStream` interface allows one to omit calling `Recv` if it's
// known that the result will be `io.EOF`. See
// http://stackoverflow.com/q/42915337
// In such cases, there's nothing that triggers the span to finish. We,
// therefore, set a finalizer so that the span and the context goroutine will
// at least be cleaned up when the garbage collector is run.
runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) {
otcs.finishFunc(nil)
})
return otcs
}
type openTracingClientStream struct {
grpc.ClientStream
desc *grpc.StreamDesc
finishFunc func(error)
}
func (cs *openTracingClientStream) Header() (metadata.MD, error) {
md, err := cs.ClientStream.Header()
if err != nil {
cs.finishFunc(err)
}
return md, err
}
func (cs *openTracingClientStream) SendMsg(m interface{}) error {
err := cs.ClientStream.SendMsg(m)
if err != nil {
cs.finishFunc(err)
}
return err
}
func (cs *openTracingClientStream) RecvMsg(m interface{}) error {
err := cs.ClientStream.RecvMsg(m)
if err == io.EOF {
cs.finishFunc(nil)
return err
} else if err != nil {
cs.finishFunc(err)
return err
}
if !cs.desc.ServerStreams {
cs.finishFunc(nil)
}
return err
}
func (cs *openTracingClientStream) CloseSend() error {
err := cs.ClientStream.CloseSend()
if err != nil {
cs.finishFunc(err)
}
return err
}
func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
md = metadata.New(nil)
} else {
md = md.Copy()
}
mdWriter := metadataReaderWriter{md}
err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter)
// We have no better place to record an error than the Span itself :-/
if err != nil {
clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err))
}
return metadata.NewOutgoingContext(ctx, md)
}

@ -1,69 +0,0 @@
package otgrpc
import (
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// A Class is a set of types of outcomes (including errors) that will often
// be handled in the same way.
type Class string
const (
Unknown Class = "0xx"
// Success represents outcomes that achieved the desired results.
Success Class = "2xx"
// ClientError represents errors that were the client's fault.
ClientError Class = "4xx"
// ServerError represents errors that were the server's fault.
ServerError Class = "5xx"
)
// ErrorClass returns the class of the given error
func ErrorClass(err error) Class {
if s, ok := status.FromError(err); ok {
switch s.Code() {
// Success or "success"
case codes.OK, codes.Canceled:
return Success
// Client errors
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition,
codes.OutOfRange:
return ClientError
// Server errors
case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted,
codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss:
return ServerError
// Not sure
case codes.Unknown:
fallthrough
default:
return Unknown
}
}
return Unknown
}
// SetSpanTags sets one or more tags on the given span according to the
// error.
func SetSpanTags(span opentracing.Span, err error, client bool) {
c := ErrorClass(err)
code := codes.Unknown
if s, ok := status.FromError(err); ok {
code = s.Code()
}
span.SetTag("response_code", code)
span.SetTag("response_class", c)
if err == nil {
return
}
if client || c == ServerError {
ext.Error.Set(span, true)
}
}

@ -1,76 +0,0 @@
package otgrpc
import "github.com/opentracing/opentracing-go"
// Option instances may be used in OpenTracing(Server|Client)Interceptor
// initialization.
//
// See this post about the "functional options" pattern:
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
type Option func(o *options)
// LogPayloads returns an Option that tells the OpenTracing instrumentation to
// try to log application payloads in both directions.
func LogPayloads() Option {
return func(o *options) {
o.logPayloads = true
}
}
// SpanInclusionFunc provides an optional mechanism to decide whether or not
// to trace a given gRPC call. Return true to create a Span and initiate
// tracing, false to not create a Span and not trace.
//
// parentSpanCtx may be nil if no parent could be extracted from either the Go
// context.Context (on the client) or the RPC (on the server).
type SpanInclusionFunc func(
parentSpanCtx opentracing.SpanContext,
method string,
req, resp interface{}) bool
// IncludingSpans binds a SpanInclusionFunc to the options
func IncludingSpans(inclusionFunc SpanInclusionFunc) Option {
return func(o *options) {
o.inclusionFunc = inclusionFunc
}
}
// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add
// arbitrary tags/logs/etc to the opentracing.Span associated with client
// and/or server RPCs.
type SpanDecoratorFunc func(
span opentracing.Span,
method string,
req, resp interface{},
grpcError error)
// SpanDecorator binds a function that decorates gRPC Spans.
func SpanDecorator(decorator SpanDecoratorFunc) Option {
return func(o *options) {
o.decorator = decorator
}
}
// The internal-only options struct. Obviously overkill at the moment; but will
// scale well as production use dictates other configuration and tuning
// parameters.
type options struct {
logPayloads bool
decorator SpanDecoratorFunc
// May be nil.
inclusionFunc SpanInclusionFunc
}
// newOptions returns the default options.
func newOptions() *options {
return &options{
logPayloads: false,
inclusionFunc: nil,
}
}
func (o *options) apply(opts ...Option) {
for _, opt := range opts {
opt(o)
}
}

@ -1,5 +0,0 @@
// Package otgrpc provides OpenTracing support for any gRPC client or server.
//
// See the README for simple usage examples:
// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md
package otgrpc
