renamed `non indexed labels` feature to `structured metadata` (#10432)

Loki team has made a decision to rename the feature because the new name
is more precise.

---------

Signed-off-by: Vladyslav Diachenko <vlad.diachenko@grafana.com>
Co-authored-by: Salva Corts <salva.corts@grafana.com>
pull/10437/head
Vladyslav Diachenko 2 years ago committed by GitHub
parent 37ec68eebb
commit 00a3c5b026
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 4
      CHANGELOG.md
  2. 132
      clients/pkg/logentry/stages/nonindexedlabels_test.go
  3. 57
      clients/pkg/logentry/stages/stage.go
  4. 14
      clients/pkg/logentry/stages/structuredmetadata.go
  5. 146
      clients/pkg/logentry/stages/structuredmetadata_test.go
  6. 8
      clients/pkg/promtail/client/batch.go
  7. 2
      clients/pkg/promtail/client/client_test.go
  8. 82
      docs/sources/get-started/architecture.md
  9. 4
      docs/sources/get-started/labels/_index.md
  10. 2
      docs/sources/get-started/labels/bp-labels.md
  11. 30
      docs/sources/get-started/labels/structured-metadata.md
  12. 4
      docs/sources/reference/api.md
  13. 2
      docs/sources/send-data/promtail/stages/_index.md
  14. 21
      docs/sources/send-data/promtail/stages/structured_metadata.md
  15. 20
      integration/client/client.go
  16. 20
      integration/loki_micro_services_delete_test.go
  17. 8
      integration/loki_micro_services_test.go
  18. 124
      pkg/chunkenc/memchunk.go
  19. 192
      pkg/chunkenc/memchunk_test.go
  20. 10
      pkg/chunkenc/symbols.go
  21. 84
      pkg/chunkenc/unordered.go
  22. 88
      pkg/chunkenc/unordered_test.go
  23. 10
      pkg/chunkenc/util_test.go
  24. 2
      pkg/ingester/chunk_test.go
  25. 4
      pkg/ingester/encoding_test.go
  26. 2
      pkg/ingester/flush_test.go
  27. 14
      pkg/ingester/recovery_test.go
  28. 26
      pkg/ingester/wal/encoding.go
  29. 44
      pkg/ingester/wal/encoding_test.go
  30. 6
      pkg/iter/entry_iterator.go
  31. 26
      pkg/loghttp/entry.go
  32. 34
      pkg/loghttp/push/push.go
  33. 48
      pkg/loghttp/push/push_test.go
  34. 8
      pkg/loghttp/query.go
  35. 14
      pkg/loghttp/query_test.go
  36. 28
      pkg/logql/log/metrics_extraction.go
  37. 126
      pkg/logql/log/metrics_extraction_test.go
  38. 32
      pkg/logql/log/pipeline.go
  39. 60
      pkg/logql/log/pipeline_test.go
  40. 2
      pkg/logql/metrics.go
  41. 16
      pkg/logqlmodel/stats/context.go
  42. 226
      pkg/logqlmodel/stats/stats.pb.go
  43. 12
      pkg/logqlmodel/stats/stats.proto
  44. 3
      pkg/push/go.sum
  45. 116
      pkg/push/push.pb.go
  46. 4
      pkg/push/push.proto
  47. 28
      pkg/push/types.go
  48. 16
      pkg/push/types_test.go
  49. 10
      pkg/querier/http_test.go
  50. 10
      pkg/querier/queryrange/codec_test.go
  51. 10
      pkg/querier/queryrange/prometheus_test.go
  52. 2
      pkg/storage/chunk/cache/cache_test.go
  53. 2
      pkg/storage/chunk/client/grpc/grpc_client_test.go
  54. 2
      pkg/storage/chunk/client/testutils/testutils.go
  55. 2
      pkg/storage/chunk/fetcher/fetcher_test.go
  56. 2
      pkg/storage/hack/main.go
  57. 4
      pkg/storage/stores/indexshipper/compactor/deletion/delete_request.go
  58. 50
      pkg/storage/stores/indexshipper/compactor/deletion/delete_request_test.go
  59. 4
      pkg/storage/stores/indexshipper/compactor/deletion/delete_requests_manager.go
  60. 62
      pkg/storage/stores/indexshipper/compactor/deletion/delete_requests_manager_test.go
  61. 4
      pkg/storage/stores/indexshipper/compactor/retention/retention.go
  62. 26
      pkg/storage/stores/indexshipper/compactor/retention/retention_test.go
  63. 2
      pkg/storage/util_test.go
  64. 2
      pkg/util/filter/filter_function.go
  65. 18
      pkg/util/marshal/legacy/marshal_test.go
  66. 34
      pkg/util/marshal/marshal_test.go
  67. 4
      pkg/util/marshal/query.go
  68. 4
      pkg/util/unmarshal/legacy/unmarshal_test.go
  69. 2
      pkg/util/unmarshal/unmarshal_test.go
  70. 116
      vendor/github.com/grafana/loki/pkg/push/push.pb.go
  71. 4
      vendor/github.com/grafana/loki/pkg/push/push.proto
  72. 28
      vendor/github.com/grafana/loki/pkg/push/types.go

@ -64,7 +64,7 @@
* [10344](https://github.com/grafana/loki/pull/10344) **ashwanthgoli** Compactor: deprecate `-boltdb.shipper.compactor.` prefix in favor of `-compactor.`.
* [10373](https://github.com/grafana/loki/pull/10373) **jeschkies** Loki: Shard `avg_over_time` range aggregations.
* [10377](https://github.com/grafana/loki/pull/10377) **shantanualsi** Remove deprecated config `-s3.sse-encryption` in favor or `-s3.sse.*` settings.
* [10073](https://github.com/grafana/loki/pull/10073) **sandeepsukhani,salvacorts,vlad-diachenko** Support attaching non-indexed labels to log lines.
* [10073](https://github.com/grafana/loki/pull/10073) **sandeepsukhani,salvacorts,vlad-diachenko** Support attaching structured metadata to log lines.
* [10378](https://github.com/grafana/loki/pull/10378) **shantanualsi** Remove deprecated `ruler.wal-cleaer.period`
* [10380](https://github.com/grafana/loki/pull/10380) **shantanualsi** Remove `experimental.ruler.enable-api` in favour of `ruler.enable-api`
* [10395](https://github.com/grafana/loki/pull/10395/) **shantanualshi** Remove deprecated `split_queries_by_interval` and `forward_headers_list` configuration options in the `query_range` section
@ -103,7 +103,7 @@
* [8474](https://github.com/grafana/loki/pull/8787) **andriikushch**: Promtail: Add a new target for the Azure Event Hubs
* [8874](https://github.com/grafana/loki/pull/8874) **rfratto**: Promtail: Support exponential backoff when polling unchanged files for logs.
* [9508](https://github.com/grafana/loki/pull/9508) **farodin91**: Promtail: improve behavior of partial lines.
* [9986](https://github.com/grafana/loki/pull/9986) **vlad-diachenko**: Promtail: Add `non_indexed_labels` stage to attach non-indexed labels to each log line.
* [9986](https://github.com/grafana/loki/pull/9986) **vlad-diachenko**: Promtail: Add `structured_metadata` stage to attach metadata to each log line.
##### Fixes

@ -1,132 +0,0 @@
package stages
import (
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/push"
util_log "github.com/grafana/loki/pkg/util/log"
)
var pipelineStagesNonIndexedLabelsUsingMatch = `
pipeline_stages:
- match:
selector: '{source="test"}'
stages:
- logfmt:
mapping:
app:
- non_indexed_labels:
app:
`
var pipelineStagesNonIndexedLabelsFromLogfmt = `
pipeline_stages:
- logfmt:
mapping:
app:
- non_indexed_labels:
app:
`
var pipelineStagesNonIndexedLabelsFromJSON = `
pipeline_stages:
- json:
expressions:
app:
- non_indexed_labels:
app:
`
var pipelineStagesNonIndexedLabelsWithRegexParser = `
pipeline_stages:
- regex:
expression: "^(?s)(?P<time>\\S+?) (?P<stream>stdout|stderr) (?P<flags>\\S+?) (?P<content>.*)$"
- non_indexed_labels:
stream:
`
var pipelineStagesNonIndexedLabelsFromJSONWithTemplate = `
pipeline_stages:
- json:
expressions:
app:
- template:
source: app
template: '{{ ToUpper .Value }}'
- non_indexed_labels:
app:
`
var pipelineStagesNonIndexedAndRegularLabelsFromJSON = `
pipeline_stages:
- json:
expressions:
app:
component:
- non_indexed_labels:
app:
- labels:
component:
`
func Test_NonIndexedLabelsStage(t *testing.T) {
tests := map[string]struct {
pipelineStagesYaml string
logLine string
streamLabels model.LabelSet
expectedNonIndexedLabels push.LabelsAdapter
expectedLabels model.LabelSet
}{
"expected non-indexed labels to be extracted with logfmt parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesNonIndexedLabelsFromLogfmt,
logLine: "app=loki component=ingester",
expectedNonIndexedLabels: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
},
"expected non-indexed labels to be extracted with json parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesNonIndexedLabelsFromJSON,
logLine: `{"app":"loki" ,"component":"ingester"}`,
expectedNonIndexedLabels: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
},
"expected non-indexed labels to be extracted with regexp parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesNonIndexedLabelsWithRegexParser,
logLine: `2019-01-01T01:00:00.000000001Z stderr P i'm a log message!`,
expectedNonIndexedLabels: push.LabelsAdapter{push.LabelAdapter{Name: "stream", Value: "stderr"}},
},
"expected non-indexed labels to be extracted with json parser and to be added to entry after rendering the template": {
pipelineStagesYaml: pipelineStagesNonIndexedLabelsFromJSONWithTemplate,
logLine: `{"app":"loki" ,"component":"ingester"}`,
expectedNonIndexedLabels: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "LOKI"}},
},
"expected non-indexed and regular labels to be extracted with json parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesNonIndexedAndRegularLabelsFromJSON,
logLine: `{"app":"loki" ,"component":"ingester"}`,
expectedNonIndexedLabels: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
expectedLabels: model.LabelSet{model.LabelName("component"): model.LabelValue("ingester")},
},
"expected non-indexed to be extracted using match stage": {
pipelineStagesYaml: pipelineStagesNonIndexedLabelsUsingMatch,
logLine: `app=loki component=ingester`,
expectedNonIndexedLabels: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
expectedLabels: model.LabelSet{model.LabelName("source"): model.LabelValue("test")},
streamLabels: model.LabelSet{model.LabelName("source"): model.LabelValue("test")},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
pl, err := NewPipeline(util_log.Logger, loadConfig(test.pipelineStagesYaml), nil, prometheus.DefaultRegisterer)
require.NoError(t, err)
result := processEntries(pl, newEntry(nil, test.streamLabels, test.logLine, time.Now()))[0]
require.Equal(t, test.expectedNonIndexedLabels, result.NonIndexedLabels)
if test.expectedLabels != nil {
require.Equal(t, test.expectedLabels, result.Labels)
} else {
require.Empty(t, result.Labels)
}
})
}
}

@ -16,32 +16,34 @@ import (
)
const (
StageTypeJSON = "json"
StageTypeLogfmt = "logfmt"
StageTypeRegex = "regex"
StageTypeReplace = "replace"
StageTypeMetric = "metrics"
StageTypeLabel = "labels"
StageTypeLabelDrop = "labeldrop"
StageTypeTimestamp = "timestamp"
StageTypeOutput = "output"
StageTypeDocker = "docker"
StageTypeCRI = "cri"
StageTypeMatch = "match"
StageTypeTemplate = "template"
StageTypePipeline = "pipeline"
StageTypeTenant = "tenant"
StageTypeDrop = "drop"
StageTypeSampling = "sampling"
StageTypeLimit = "limit"
StageTypeMultiline = "multiline"
StageTypePack = "pack"
StageTypeLabelAllow = "labelallow"
StageTypeStaticLabels = "static_labels"
StageTypeDecolorize = "decolorize"
StageTypeEventLogMessage = "eventlogmessage"
StageTypeGeoIP = "geoip"
StageTypeNonIndexedLabels = "non_indexed_labels"
StageTypeJSON = "json"
StageTypeLogfmt = "logfmt"
StageTypeRegex = "regex"
StageTypeReplace = "replace"
StageTypeMetric = "metrics"
StageTypeLabel = "labels"
StageTypeLabelDrop = "labeldrop"
StageTypeTimestamp = "timestamp"
StageTypeOutput = "output"
StageTypeDocker = "docker"
StageTypeCRI = "cri"
StageTypeMatch = "match"
StageTypeTemplate = "template"
StageTypePipeline = "pipeline"
StageTypeTenant = "tenant"
StageTypeDrop = "drop"
StageTypeSampling = "sampling"
StageTypeLimit = "limit"
StageTypeMultiline = "multiline"
StageTypePack = "pack"
StageTypeLabelAllow = "labelallow"
StageTypeStaticLabels = "static_labels"
StageTypeDecolorize = "decolorize"
StageTypeEventLogMessage = "eventlogmessage"
StageTypeGeoIP = "geoip"
// Deprecated. Renamed to `structured_metadata`. Will be removed after the migration.
StageTypeNonIndexedLabels = "non_indexed_labels"
StageTypeStructuredMetadata = "structured_metadata"
)
// Processor takes an existing set of labels, timestamp and log entry and returns either a possibly mutated
@ -205,7 +207,8 @@ func initCreators() {
StageTypeGeoIP: func(params StageCreationParams) (Stage, error) {
return newGeoIPStage(params.logger, params.config)
},
StageTypeNonIndexedLabels: newNonIndexedLabelsStage,
StageTypeNonIndexedLabels: newStructuredMetadataStage,
StageTypeStructuredMetadata: newStructuredMetadataStage,
}
}

@ -8,7 +8,7 @@ import (
"github.com/grafana/loki/pkg/logproto"
)
func newNonIndexedLabelsStage(params StageCreationParams) (Stage, error) {
func newStructuredMetadataStage(params StageCreationParams) (Stage, error) {
cfgs := &LabelsConfig{}
err := mapstructure.Decode(params.config, cfgs)
if err != nil {
@ -18,25 +18,25 @@ func newNonIndexedLabelsStage(params StageCreationParams) (Stage, error) {
if err != nil {
return nil, err
}
return &nonIndexedLabelsStage{
return &structuredMetadataStage{
cfgs: *cfgs,
logger: params.logger,
}, nil
}
type nonIndexedLabelsStage struct {
type structuredMetadataStage struct {
cfgs LabelsConfig
logger log.Logger
}
func (s *nonIndexedLabelsStage) Name() string {
return StageTypeNonIndexedLabels
func (s *structuredMetadataStage) Name() string {
return StageTypeStructuredMetadata
}
func (s *nonIndexedLabelsStage) Run(in chan Entry) chan Entry {
func (s *structuredMetadataStage) Run(in chan Entry) chan Entry {
return RunWith(in, func(e Entry) Entry {
processLabelsConfigs(s.logger, e.Extracted, s.cfgs, func(labelName model.LabelName, labelValue model.LabelValue) {
e.NonIndexedLabels = append(e.NonIndexedLabels, logproto.LabelAdapter{Name: string(labelName), Value: string(labelValue)})
e.StructuredMetadata = append(e.StructuredMetadata, logproto.LabelAdapter{Name: string(labelName), Value: string(labelValue)})
})
return e
})

@ -0,0 +1,146 @@
package stages
import (
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/pkg/push"
util_log "github.com/grafana/loki/pkg/util/log"
)
var pipelineStagesStructuredMetadataUsingMatch = `
pipeline_stages:
- match:
selector: '{source="test"}'
stages:
- logfmt:
mapping:
app:
- structured_metadata:
app:
`
var pipelineStagesStructuredMetadataFromLogfmt = `
pipeline_stages:
- logfmt:
mapping:
app:
- structured_metadata:
app:
`
var pipelineStagesStructuredMetadataFromJSON = `
pipeline_stages:
- json:
expressions:
app:
- structured_metadata:
app:
`
var pipelineStagesStructuredMetadataWithRegexParser = `
pipeline_stages:
- regex:
expression: "^(?s)(?P<time>\\S+?) (?P<stream>stdout|stderr) (?P<flags>\\S+?) (?P<content>.*)$"
- structured_metadata:
stream:
`
var pipelineStagesStructuredMetadataFromJSONWithTemplate = `
pipeline_stages:
- json:
expressions:
app:
- template:
source: app
template: '{{ ToUpper .Value }}'
- structured_metadata:
app:
`
var pipelineStagesStructuredMetadataAndRegularLabelsFromJSON = `
pipeline_stages:
- json:
expressions:
app:
component:
- structured_metadata:
app:
- labels:
component:
`
var deprecatedPipelineStagesStructuredMetadataFromJSON = `
pipeline_stages:
- json:
expressions:
app:
- non_indexed_labels:
app:
`
func Test_StructuredMetadataStage(t *testing.T) {
tests := map[string]struct {
pipelineStagesYaml string
logLine string
streamLabels model.LabelSet
expectedStructuredMetadata push.LabelsAdapter
expectedLabels model.LabelSet
}{
"expected structured metadata to be extracted with logfmt parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesStructuredMetadataFromLogfmt,
logLine: "app=loki component=ingester",
expectedStructuredMetadata: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
},
"expected structured metadata to be extracted with json parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesStructuredMetadataFromJSON,
logLine: `{"app":"loki" ,"component":"ingester"}`,
expectedStructuredMetadata: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
},
"expected structured metadata to be extracted with json parser and to be added to entry even if deprecated stage name is used": {
pipelineStagesYaml: deprecatedPipelineStagesStructuredMetadataFromJSON,
logLine: `{"app":"loki" ,"component":"ingester"}`,
expectedStructuredMetadata: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
},
"expected structured metadata to be extracted with regexp parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesStructuredMetadataWithRegexParser,
logLine: `2019-01-01T01:00:00.000000001Z stderr P i'm a log message!`,
expectedStructuredMetadata: push.LabelsAdapter{push.LabelAdapter{Name: "stream", Value: "stderr"}},
},
"expected structured metadata to be extracted with json parser and to be added to entry after rendering the template": {
pipelineStagesYaml: pipelineStagesStructuredMetadataFromJSONWithTemplate,
logLine: `{"app":"loki" ,"component":"ingester"}`,
expectedStructuredMetadata: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "LOKI"}},
},
"expected structured metadata and regular labels to be extracted with json parser and to be added to entry": {
pipelineStagesYaml: pipelineStagesStructuredMetadataAndRegularLabelsFromJSON,
logLine: `{"app":"loki" ,"component":"ingester"}`,
expectedStructuredMetadata: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
expectedLabels: model.LabelSet{model.LabelName("component"): model.LabelValue("ingester")},
},
"expected structured metadata to be extracted using match stage": {
pipelineStagesYaml: pipelineStagesStructuredMetadataUsingMatch,
logLine: `app=loki component=ingester`,
expectedStructuredMetadata: push.LabelsAdapter{push.LabelAdapter{Name: "app", Value: "loki"}},
expectedLabels: model.LabelSet{model.LabelName("source"): model.LabelValue("test")},
streamLabels: model.LabelSet{model.LabelName("source"): model.LabelValue("test")},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
pl, err := NewPipeline(util_log.Logger, loadConfig(test.pipelineStagesYaml), nil, prometheus.DefaultRegisterer)
require.NoError(t, err)
result := processEntries(pl, newEntry(nil, test.streamLabels, test.logLine, time.Now()))[0]
require.Equal(t, test.expectedStructuredMetadata, result.StructuredMetadata)
if test.expectedLabels != nil {
require.Equal(t, test.expectedLabels, result.Labels)
} else {
require.Empty(t, result.Labels)
}
})
}
}

@ -148,9 +148,9 @@ func (b *batch) createPushRequest() (*logproto.PushRequest, int) {
}
func entrySize(entry api.Entry) int {
nonIndexedLabelsSize := 0
for _, label := range entry.NonIndexedLabels {
nonIndexedLabelsSize += label.Size()
structuredMetadataSize := 0
for _, label := range entry.StructuredMetadata {
structuredMetadataSize += label.Size()
}
return len(entry.Line) + nonIndexedLabelsSize
return len(entry.Line) + structuredMetadataSize
}

@ -39,7 +39,7 @@ var logEntries = []api.Entry{
Entry: logproto.Entry{
Timestamp: time.Unix(7, 0).UTC(),
Line: "line7",
NonIndexedLabels: push.LabelsAdapter{
StructuredMetadata: push.LabelsAdapter{
{Name: "trace_id", Value: "12345"},
},
},

@ -20,51 +20,51 @@ index and in stored chunks.
## Chunk Format
```
-----------------------------------------------------------------------
| | | |
| MagicNumber(4b) | version(1b) | encoding (1b) |
| | | |
-----------------------------------------------------------------------
| #nonIndexedLabels (uvarint) |
-----------------------------------------------------------------------
| len(label-1) (uvarint) | label-1 (bytes) |
-----------------------------------------------------------------------
| len(label-2) (uvarint) | label-2 (bytes) |
-----------------------------------------------------------------------
| len(label-n) (uvarint) | label-n (bytes) |
-----------------------------------------------------------------------
| checksum(from #nonIndexedLabels) |
-----------------------------------------------------------------------
| block-1 bytes | checksum (4b) |
-----------------------------------------------------------------------
| block-2 bytes | checksum (4b) |
-----------------------------------------------------------------------
| block-n bytes | checksum (4b) |
-----------------------------------------------------------------------
| #blocks (uvarint) |
-----------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
-----------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
-----------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
-----------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
-----------------------------------------------------------------------
| checksum(from #blocks) |
-----------------------------------------------------------------------
| #nonIndexedLabels len (uvarint) | #nonIndexedLabels offset (uvarint)|
-----------------------------------------------------------------------
| #blocks len (uvarint) | #blocks offset (uvarint) |
-----------------------------------------------------------------------
----------------------------------------------------------------------------
| | | |
| MagicNumber(4b) | version(1b) | encoding (1b) |
| | | |
----------------------------------------------------------------------------
| #structuredMetadata (uvarint) |
----------------------------------------------------------------------------
| len(label-1) (uvarint) | label-1 (bytes) |
----------------------------------------------------------------------------
| len(label-2) (uvarint) | label-2 (bytes) |
----------------------------------------------------------------------------
| len(label-n) (uvarint) | label-n (bytes) |
----------------------------------------------------------------------------
| checksum(from #structuredMetadata) |
----------------------------------------------------------------------------
| block-1 bytes | checksum (4b) |
----------------------------------------------------------------------------
| block-2 bytes | checksum (4b) |
----------------------------------------------------------------------------
| block-n bytes | checksum (4b) |
----------------------------------------------------------------------------
| #blocks (uvarint) |
----------------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
----------------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
----------------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
----------------------------------------------------------------------------
| #entries(uvarint) | mint, maxt (varint) | offset, len (uvarint) |
----------------------------------------------------------------------------
| checksum(from #blocks) |
----------------------------------------------------------------------------
| #structuredMetadata len (uvarint) | #structuredMetadata offset (uvarint) |
----------------------------------------------------------------------------
| #blocks len (uvarint) | #blocks offset (uvarint) |
----------------------------------------------------------------------------
```
`mint` and `maxt` describe the minimum and maximum Unix nanosecond timestamp,
respectively.
The `nonIndexedLabels` section stores non-repeated strings. It is used to store label names and label values from
[non-indexed labels]({{< relref "./labels/non-indexed-labels" >}}).
Note that the labels strings and lengths within the `nonIndexedLabels` section are stored compressed.
The `structuredMetadata` section stores non-repeated strings. It is used to store label names and label values from
[structured metadata]({{< relref "./labels/structured-metadata" >}}).
Note that the labels strings and lengths within the `structuredMetadata` section are stored compressed.
### Block Format
@ -89,7 +89,7 @@ Note that the bytes of a block are stored compressed. The following is their for
bytes of the log entry.
Symbols store references to the actual strings containing label names and values in the
`nonIndexedLabels` section of the chunk.
`structuredMetadata` section of the chunk.
## Storage

@ -20,8 +20,8 @@ Labels in Loki perform a very important task: They define a stream. More specifi
If you are familiar with Prometheus, the term used there is series; however, Prometheus has an additional dimension: metric name. Loki simplifies this in that there are no metric names, just labels, and we decided to use streams instead of series.
{{% admonition type="note" %}}
Non-indexed labels do not define a stream, but are metadata attached to a log line.
See [Non-indexed labels]({{< relref "./non-indexed-labels" >}}) for more information.
Structured metadata does not define a stream, but is metadata attached to a log line.
See [structured metadata]({{< relref "./structured-metadata" >}}) for more information.
{{% /admonition %}}
## Format

@ -21,7 +21,7 @@ Too many label value combinations leads to too many streams. The penalties for t
To avoid those issues, don't add a label for something until you know you need it! Use filter expressions (`|= "text"`, `|~ "regex"`, …) and brute force those logs. It works -- and it's fast.
If you often parse a label from a log line at query time, the label has a high cardinality, and extracting that label is expensive in terms of performance; consider extracting the label on the client side
attaching it as metadata to log lines using a [non-indexed label]({{< relref "./non-indexed-labels" >}}).
attaching it as [structured metadata]({{< relref "./structured-metadata" >}}) to log lines.
From early on, we have set a label dynamically using Promtail pipelines for `level`. This seemed intuitive for us as we often wanted to only show logs for `level="error"`; however, we are re-evaluating this now as writing a query. `{app="loki"} |= "level=error"` is proving to be just as fast for many of our applications as `{app="loki",level="error"}`.

@ -1,12 +1,12 @@
---
menuTitle: Non-indexed labels
title: What are non-indexed labels
menuTitle: Structured metadata
title: What is structured metadata
description: Attaching metadata to logs.
---
# What are non-indexed labels
# What is structured metadata
{{% admonition type="warning" %}}
Non-indexed labels is an experimental feature and is subject to change in future releases of Grafana Loki.
Structured metadata is an experimental feature and is subject to change in future releases of Grafana Loki.
{{% /admonition %}}
One of the powerful features of Loki is parsing logs at query time to extract metadata and build labels out of it.
@ -16,36 +16,36 @@ large json blobs or a poorly written query using complex regex patterns.
In addition, the data extracted from logs at query time is usually high cardinality, which can’t be stored
in the index as it would increase the cardinality too much, and therefore reduce the performance of the index.
Non-indexed labels are a way to attach metadata to logs without indexing them. Examples of useful metadata are
Structured metadata is a way to attach metadata to logs without indexing them. Examples of useful metadata are
trace IDs, user IDs, and any other label that is often used in queries but has high cardinality and is expensive
to extract at query time.
## Attaching non-indexed labels to log lines
## Attaching structured metadata to log lines
You have the option to attach non-indexed labels to log lines in the push payload along with each log line and the timestamp.
You have the option to attach structured metadata to log lines in the push payload along with each log line and the timestamp.
For more information on how to push logs to Loki via the HTTP endpoint, refer to the [HTTP API documentation]({{< relref "../../reference/api#push-log-entries-to-loki" >}}).
Alternatively, you can use the Grafana Agent or Promtail to extract and attach non-indexed labels to your log lines.
See the [Promtail: Non-indexed labels stage]({{< relref "../../send-data/promtail/stages/non_indexed_labels" >}}) for more information.
Alternatively, you can use the Grafana Agent or Promtail to extract and attach structured metadata to your log lines.
See the [Promtail: Structured metadata stage]({{< relref "../../send-data/promtail/stages/structured_metadata" >}}) for more information.
## Querying non-indexed labels
## Querying structured metadata
Non-indexed labels are extracted automatically for each returned log line and added to the labels returned for the query.
You can use non-indexed labels to filter log line using a [label filter expression]({{< relref "../../query/log_queries#label-filter-expression" >}}).
Structured metadata is extracted automatically for each returned log line and added to the labels returned for the query.
You can use labels of structured metadata to filter log lines using a [label filter expression]({{< relref "../../query/log_queries#label-filter-expression" >}}).
For example, if you have a non-indexed label `trace_id` attached to some of your log lines, you can filter log lines using:
For example, if you have a label `trace_id` attached to some of your log lines as structured metadata, you can filter log lines using:
```logql
{job="example"} | trace_id="0242ac120002"`
```
Of course, you can filter by multiple non-indexed labels at the same time:
Of course, you can filter by multiple labels of structured metadata at the same time:
```logql
{job="example"} | trace_id="0242ac120002" | user_id="superUser123"
```
Note that since non-indexed labels are extracted automatically to the results labels, some metric queries might return
Note that since structured metadata is extracted automatically to the results labels, some metric queries might return
an error like `maximum of series (50000) reached for a single query`. You can use the [Keep]({{< relref "../../query/log_queries#keep-labels-expression" >}}) and [Drop]({{< relref "../../query/log_queries#drop-labels-expression" >}}) stages to filter out labels that you don't need.
For example:

@ -592,9 +592,9 @@ JSON post body can be sent in the following format:
}
```
You can optionally attach [non-indexed labels]({{< relref "../get-started/labels/non-indexed-labels" >}}) to each log line by adding a JSON object to the end of the log line array.
You can optionally attach [structured metadata]({{< relref "../get-started/labels/structured-metadata" >}}) to each log line by adding a JSON object to the end of the log line array.
The JSON object must be a valid JSON object with string keys and string values. The JSON object should not contain any nested object.
The JSON object must be set immediately after the log line. Here is an example of a log entry with some non-indexed labels attached:
The JSON object must be set immediately after the log line. Here is an example of a log entry with some structured metadata attached:
```
"values": [

@ -41,7 +41,7 @@ Action stages:
- [static_labels]({{< relref "./static_labels" >}}): Add static-labels to the log entry.
- [metrics]({{< relref "./metrics" >}}): Calculate metrics based on extracted data.
- [tenant]({{< relref "./tenant" >}}): Set the tenant ID value to use for the log entry.
- [non_indexed_labels]({{< relref "./non_indexed_labels" >}}): Add non-indexed labels to the log entry.
- [structured_metadata]({{< relref "./structured_metadata" >}}): Add structured metadata to the log entry.
Filtering stages:

@ -1,21 +1,22 @@
---
title: non_indexed_labels
description: non_indexed_labels stage
title: structured_metadata
description: The 'structured_metadata' Promtail pipeline stage
---
# non_indexed_labels
# structured_metadata
{{% admonition type="warning" %}}
Non-indexed labels is an experimental feature and is subject to change in future releases of Grafana Loki.
Structured metadata is an experimental feature and is subject to change in future releases of Grafana Loki.
{{% /admonition %}}
The labels stage is an action stage that takes data from the extracted map and
modifies the [non-indexed labels]({{< relref "../../../get-started/labels/non-indexed-labels" >}}) set that is sent to Loki with the log entry.
The `structured_metadata` stage is an action stage that takes data from the extracted map and
modifies the [structured metadata]({{< relref "../../../get-started/labels/structured-metadata" >}}) that is sent to Loki with the log entry.
## Schema
```yaml
non_indexed_labels:
# Key is REQUIRED and the name for the non-indexed label that will be created.
structured-metadata:
# Key is REQUIRED and the name for the label of structured metadata that will be created.
# Value is optional and will be the name from extracted data whose value
# will be used for the value of the label. If empty, the value will be
# inferred to be the same as the key.
@ -33,7 +34,7 @@ For the given pipeline:
traceID: traceID
- labels:
stream:
- non_indexed_labels:
- structured-metadata:
traceID:
```
@ -45,4 +46,4 @@ Given the following log line:
The first stage would extract `stream` with a value of `stderr` and `traceID` with a value of `0242ac120002` into
the extracted data set. The `labels` stage would turn that `stream` and `stderr` key-value pair into a stream label.
The `non_indexed_labels` stage would attach the `traceID` and `0242ac120002` key-value pair as a non-indexed label metadata to the log line.
The `structured-metadata` stage would attach the `traceID` and `0242ac120002` key-value pair as a structured metadata to the log line.

@ -90,8 +90,8 @@ func (c *Client) PushLogLine(line string, extraLabels ...map[string]string) erro
return c.pushLogLine(line, c.Now, nil, extraLabels...)
}
func (c *Client) PushLogLineWithNonIndexedLabels(line string, logLabels map[string]string, extraLabels ...map[string]string) error {
return c.PushLogLineWithTimestampAndNonIndexedLabels(line, c.Now, logLabels, extraLabels...)
func (c *Client) PushLogLineWithStructuredMetadata(line string, structuredMetadata map[string]string, extraLabels ...map[string]string) error {
return c.PushLogLineWithTimestampAndStructuredMetadata(line, c.Now, structuredMetadata, extraLabels...)
}
// PushLogLineWithTimestamp creates a new logline at the given timestamp
@ -100,15 +100,15 @@ func (c *Client) PushLogLineWithTimestamp(line string, timestamp time.Time, extr
return c.pushLogLine(line, timestamp, nil, extraLabels...)
}
func (c *Client) PushLogLineWithTimestampAndNonIndexedLabels(line string, timestamp time.Time, logLabels map[string]string, extraLabelList ...map[string]string) error {
// If the logLabels map is empty, labels.FromMap will allocate some empty slices.
func (c *Client) PushLogLineWithTimestampAndStructuredMetadata(line string, timestamp time.Time, structuredMetadata map[string]string, extraLabelList ...map[string]string) error {
// If the structuredMetadata map is empty, labels.FromMap will allocate some empty slices.
// Since this code is executed for every log line we receive, as an optimization
// to avoid those allocations we'll call labels.FromMap only if the map is not empty.
var lbls labels.Labels
if len(logLabels) > 0 {
lbls = labels.FromMap(logLabels)
var metadata labels.Labels
if len(structuredMetadata) > 0 {
metadata = labels.FromMap(structuredMetadata)
}
return c.pushLogLine(line, timestamp, lbls, extraLabelList...)
return c.pushLogLine(line, timestamp, metadata, extraLabelList...)
}
func formatTS(ts time.Time) string {
@ -121,7 +121,7 @@ type stream struct {
}
// pushLogLine creates a new logline
func (c *Client) pushLogLine(line string, timestamp time.Time, logLabels labels.Labels, extraLabelList ...map[string]string) error {
func (c *Client) pushLogLine(line string, timestamp time.Time, structuredMetadata labels.Labels, extraLabelList ...map[string]string) error {
apiEndpoint := fmt.Sprintf("%s/loki/api/v1/push", c.baseURL)
s := stream{
@ -132,7 +132,7 @@ func (c *Client) pushLogLine(line string, timestamp time.Time, logLabels labels.
{
formatTS(timestamp),
line,
logLabels,
structuredMetadata,
},
},
}

@ -131,13 +131,13 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
pushRequests = append(pushRequests, pushRequest{
stream: map[string]string{
"job": "fake",
"deletion_type": "with_non_indexed_labels",
"deletion_type": "with_structured_metadata",
},
entries: []logproto.Entry{
{
Timestamp: now.Add(-48 * time.Hour),
Line: "AlineA",
NonIndexedLabels: push.LabelsAdapter{
StructuredMetadata: push.LabelsAdapter{
{
Name: "line",
Value: "A",
@ -147,7 +147,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
{
Timestamp: now.Add(-48 * time.Hour),
Line: "AlineB",
NonIndexedLabels: push.LabelsAdapter{
StructuredMetadata: push.LabelsAdapter{
{
Name: "line",
Value: "B",
@ -157,7 +157,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
{
Timestamp: now.Add(-time.Minute),
Line: "AlineC",
NonIndexedLabels: push.LabelsAdapter{
StructuredMetadata: push.LabelsAdapter{
{
Name: "line",
Value: "C",
@ -167,7 +167,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
{
Timestamp: now.Add(-time.Minute),
Line: "AlineD",
NonIndexedLabels: push.LabelsAdapter{
StructuredMetadata: push.LabelsAdapter{
{
Name: "line",
Value: "D",
@ -209,7 +209,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
{
StartTime: now.Add(-48 * time.Hour).Unix(),
EndTime: now.Unix(),
Query: `{deletion_type="with_non_indexed_labels"} | line="A"`,
Query: `{deletion_type="with_structured_metadata"} | line="A"`,
Status: "received",
},
}
@ -234,10 +234,10 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
// ingest some log lines
for _, pr := range pushRequests {
for _, entry := range pr.entries {
require.NoError(t, cliDistributor.PushLogLineWithTimestampAndNonIndexedLabels(
require.NoError(t, cliDistributor.PushLogLineWithTimestampAndStructuredMetadata(
entry.Line,
entry.Timestamp,
logproto.FromLabelAdaptersToLabels(entry.NonIndexedLabels).Map(),
logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata).Map(),
pr.stream,
))
}
@ -358,7 +358,7 @@ func TestMicroServicesDeleteRequest(t *testing.T) {
require.NoError(t, err)
checkUserLabelAndMetricValue(t, "loki_compactor_delete_requests_processed_total", metrics, tenantID, float64(len(expectedDeleteRequests)))
// ideally this metric should be equal to 2 given that a single line matches the line filter and non-indexed labels filter
// ideally this metric should be equal to 2 given that a single line matches the line filter and structured metadata filter
// but the same chunks are indexed in 3 tables
checkUserLabelAndMetricValue(t, "loki_compactor_deleted_lines", metrics, tenantID, 6)
})
@ -411,7 +411,7 @@ func pushRequestToClientStreamValues(t *testing.T, p pushRequest) []client.Strea
logsByStream := map[string][][]string{}
for _, entry := range p.entries {
lb := labels.NewBuilder(labels.FromMap(p.stream))
for _, l := range entry.NonIndexedLabels {
for _, l := range entry.StructuredMetadata {
lb.Set(l.Name, l.Value)
}
stream := lb.Labels().String()

@ -400,12 +400,12 @@ func TestMicroServicesIngestQueryOverMultipleBucketSingleProvider(t *testing.T)
cliQueryFrontend.Now = now
t.Run("ingest-logs", func(t *testing.T) {
require.NoError(t, cliDistributor.PushLogLineWithTimestampAndNonIndexedLabels("lineA", time.Now().Add(-48*time.Hour), map[string]string{"traceID": "123"}, map[string]string{"job": "fake"}))
require.NoError(t, cliDistributor.PushLogLineWithTimestampAndNonIndexedLabels("lineB", time.Now().Add(-36*time.Hour), map[string]string{"traceID": "456"}, map[string]string{"job": "fake"}))
require.NoError(t, cliDistributor.PushLogLineWithTimestampAndStructuredMetadata("lineA", time.Now().Add(-48*time.Hour), map[string]string{"traceID": "123"}, map[string]string{"job": "fake"}))
require.NoError(t, cliDistributor.PushLogLineWithTimestampAndStructuredMetadata("lineB", time.Now().Add(-36*time.Hour), map[string]string{"traceID": "456"}, map[string]string{"job": "fake"}))
// ingest logs to the current period
require.NoError(t, cliDistributor.PushLogLineWithNonIndexedLabels("lineC", map[string]string{"traceID": "789"}, map[string]string{"job": "fake"}))
require.NoError(t, cliDistributor.PushLogLineWithNonIndexedLabels("lineD", map[string]string{"traceID": "123"}, map[string]string{"job": "fake"}))
require.NoError(t, cliDistributor.PushLogLineWithStructuredMetadata("lineC", map[string]string{"traceID": "789"}, map[string]string{"job": "fake"}))
require.NoError(t, cliDistributor.PushLogLineWithStructuredMetadata("lineD", map[string]string{"traceID": "123"}, map[string]string{"job": "fake"}))
})

@ -41,11 +41,11 @@ const (
// different block size in the new chunk which should be fine.
defaultBlockSize = 256 * 1024
chunkMetasSectionIdx = 1
chunkNonIndexedLabelsSectionIdx = 2
chunkMetasSectionIdx = 1
chunkStructuredMetadataSectionIdx = 2
)
var HeadBlockFmts = []HeadBlockFmt{OrderedHeadBlockFmt, UnorderedHeadBlockFmt, UnorderedWithNonIndexedLabelsHeadBlockFmt}
var HeadBlockFmts = []HeadBlockFmt{OrderedHeadBlockFmt, UnorderedHeadBlockFmt, UnorderedWithStructuredMetadataHeadBlockFmt}
type HeadBlockFmt byte
@ -57,8 +57,8 @@ func (f HeadBlockFmt) String() string {
return "ordered"
case f == UnorderedHeadBlockFmt:
return "unordered"
case f == UnorderedWithNonIndexedLabelsHeadBlockFmt:
return "unordered with non-indexed labels"
case f == UnorderedWithStructuredMetadataHeadBlockFmt:
return "unordered with structured metadata"
default:
return fmt.Sprintf("unknown: %v", byte(f))
}
@ -81,7 +81,7 @@ const (
_
OrderedHeadBlockFmt
UnorderedHeadBlockFmt
UnorderedWithNonIndexedLabelsHeadBlockFmt
UnorderedWithStructuredMetadataHeadBlockFmt
)
// ChunkHeadFormatFor returns corresponding head block format for the given `chunkfmt`.
@ -95,7 +95,7 @@ func ChunkHeadFormatFor(chunkfmt byte) HeadBlockFmt {
}
// return the latest head format for all chunkformat >v3
return UnorderedWithNonIndexedLabelsHeadBlockFmt
return UnorderedWithStructuredMetadataHeadBlockFmt
}
var magicNumber = uint32(0x12EE56A)
@ -340,7 +340,7 @@ func (hb *headBlock) Convert(version HeadBlockFmt, symbolizer *symbolizer) (Head
out := version.NewBlock(symbolizer)
for _, e := range hb.entries {
if err := out.Append(e.t, e.s, e.nonIndexedLabels); err != nil {
if err := out.Append(e.t, e.s, e.structuredMetadata); err != nil {
return nil, err
}
}
@ -348,9 +348,9 @@ func (hb *headBlock) Convert(version HeadBlockFmt, symbolizer *symbolizer) (Head
}
type entry struct {
t int64
s string
nonIndexedLabels labels.Labels
t int64
s string
structuredMetadata labels.Labels
}
// NewMemChunk returns a new in-mem chunk.
@ -362,9 +362,9 @@ func panicIfInvalidFormat(chunkFmt byte, head HeadBlockFmt) {
if chunkFmt == ChunkFormatV2 && head != OrderedHeadBlockFmt {
panic("only OrderedHeadBlockFmt is supported for V2 chunks")
}
if chunkFmt == ChunkFormatV4 && head != UnorderedWithNonIndexedLabelsHeadBlockFmt {
if chunkFmt == ChunkFormatV4 && head != UnorderedWithStructuredMetadataHeadBlockFmt {
fmt.Println("received head fmt", head.String())
panic("only UnorderedWithNonIndexedLabelsHeadBlockFmt is supported for V4 chunks")
panic("only UnorderedWithStructuredMetadataHeadBlockFmt is supported for V4 chunks")
}
}
@ -496,11 +496,11 @@ func newByteChunk(b []byte, blockSize, targetSize int, fromCheckpoint bool) (*Me
}
if version >= ChunkFormatV4 {
nonIndexedLabelsLen, nonIndexedLabelsOffset := readSectionLenAndOffset(chunkNonIndexedLabelsSectionIdx)
lb := b[nonIndexedLabelsOffset : nonIndexedLabelsOffset+nonIndexedLabelsLen] // non-indexed labels Offset + checksum
structuredMetadataLength, structuredMetadataOffset := readSectionLenAndOffset(chunkStructuredMetadataSectionIdx)
lb := b[structuredMetadataOffset : structuredMetadataOffset+structuredMetadataLength] // structured metadata offset + checksum
db = decbuf{b: lb}
expCRC := binary.BigEndian.Uint32(b[nonIndexedLabelsOffset+nonIndexedLabelsLen:])
expCRC := binary.BigEndian.Uint32(b[structuredMetadataOffset+structuredMetadataLength:])
if expCRC != db.crc32() {
return nil, ErrInvalidChecksum
}
@ -567,10 +567,10 @@ func (c *MemChunk) BytesSize() int {
if c.format >= ChunkFormatV4 {
size += 8 // metablock length
size += c.symbolizer.CheckpointSize() // non-indexed labels block
size += crc32.Size // non-indexed labels block crc
size += c.symbolizer.CheckpointSize() // structured metadata block
size += crc32.Size // structured metadata block crc
size += 8 + 8 // non-indexed labels offset and length
size += 8 + 8 // structured metadata offset and length
}
return size
}
@ -610,8 +610,8 @@ func (c *MemChunk) writeTo(w io.Writer, forCheckpoint bool) (int64, error) {
return offset, errors.Wrap(err, "write blockMeta #entries")
}
offset += int64(n)
nonIndexedLabelsOffset := offset
nonIndexedLabelsLen := 0
structuredMetadataOffset := offset
structuredMetadataLength := 0
if c.format >= ChunkFormatV4 {
var (
@ -622,21 +622,21 @@ func (c *MemChunk) writeTo(w io.Writer, forCheckpoint bool) (int64, error) {
var err error
n, crcHash, err = c.symbolizer.CheckpointTo(w)
if err != nil {
return offset, errors.Wrap(err, "write non-indexed labels")
return offset, errors.Wrap(err, "write structured metadata")
}
} else {
var err error
n, crcHash, err = c.symbolizer.SerializeTo(w, getWriterPool(c.encoding))
if err != nil {
return offset, errors.Wrap(err, "write non-indexed labels")
return offset, errors.Wrap(err, "write structured metadata")
}
}
offset += int64(n)
nonIndexedLabelsLen = n
structuredMetadataLength = n
n, err = w.Write(crcHash)
if err != nil {
return offset, errors.Wrap(err, "write crc32 hash for non-indexed labels")
return offset, errors.Wrap(err, "write crc32 hash for structured metadata")
}
offset += int64(n)
}
@ -684,13 +684,13 @@ func (c *MemChunk) writeTo(w io.Writer, forCheckpoint bool) (int64, error) {
offset += int64(n)
if c.format >= ChunkFormatV4 {
// Write non-indexed labels offset and length
// Write structured metadata offset and length
eb.reset()
eb.putBE64int(nonIndexedLabelsLen)
eb.putBE64int(int(nonIndexedLabelsOffset))
eb.putBE64int(structuredMetadataLength)
eb.putBE64int(int(structuredMetadataOffset))
n, err = w.Write(eb.get())
if err != nil {
return offset, errors.Wrap(err, "write non-indexed labels offset and length")
return offset, errors.Wrap(err, "write structured metadata offset and length")
}
offset += int64(n)
}
@ -716,10 +716,10 @@ func (c *MemChunk) writeTo(w io.Writer, forCheckpoint bool) (int64, error) {
// In turn this allows us to maintain a more effective dedupe ratio in storage.
func (c *MemChunk) SerializeForCheckpointTo(chk, head io.Writer) error {
// serialize the head before the MemChunk because:
// * We store non-indexed labels with chunks(using symbolizer) which are then referenced by blocks and head.
// * When a write request is received with some new non-indexed labels, we update symbolizer first and then append log entry to head.
// * We store structured metadata with chunks(using symbolizer) which are then referenced by blocks and head.
// * When a write request is received with some new labels of structured metadata, we update symbolizer first and then append log entry to head.
// * Labels stored in symbolizer are serialized with MemChunk.
// This means if we serialize the MemChunk before the head, we might miss writing some newly added non-indexed labels which are referenced by head.
// This means if we serialize the MemChunk before the head, we might miss writing some newly added structured metadata labels which are referenced by head.
err := c.head.CheckpointTo(head)
if err != nil {
return err
@ -776,15 +776,15 @@ func (c *MemChunk) SpaceFor(e *logproto.Entry) bool {
// This is looking to see if the uncompressed lines will fit which is not
// a great check, but it will guarantee we are always under the target size
newHBSize := c.head.UncompressedSize() + len(e.Line)
nonIndexedLabelsSize := 0
structuredMetadataSize := 0
if c.format >= ChunkFormatV4 {
newHBSize += metaLabelsLen(logproto.FromLabelAdaptersToLabels(e.NonIndexedLabels))
// non-indexed labels are compressed while serializing the chunk so we don't know what their size would be after compression.
newHBSize += metaLabelsLen(logproto.FromLabelAdaptersToLabels(e.StructuredMetadata))
// structured metadata is compressed while serializing the chunk so we don't know what their size would be after compression.
// As adoption increases, their overall size can be non-trivial so we can't ignore them while calculating chunk size.
// ToDo(Sandeep): See if we can just use some average compression ratio for each compression format we support and use it here
nonIndexedLabelsSize = c.symbolizer.UncompressedSize()
structuredMetadataSize = c.symbolizer.UncompressedSize()
}
return (nonIndexedLabelsSize + c.cutBlockSize + newHBSize) < c.targetSize
return (structuredMetadataSize + c.cutBlockSize + newHBSize) < c.targetSize
}
// if targetSize is not defined, default to the original behavior of fixed blocks per chunk
return len(c.blocks) < blocksPerChunk
@ -844,9 +844,9 @@ func (c *MemChunk) Append(entry *logproto.Entry) error {
}
if c.format < ChunkFormatV4 {
entry.NonIndexedLabels = nil
entry.StructuredMetadata = nil
}
if err := c.head.Append(entryTimestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.NonIndexedLabels)); err != nil {
if err := c.head.Append(entryTimestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)); err != nil {
return err
}
@ -959,7 +959,7 @@ func (c *MemChunk) Iterator(ctx context.Context, mintT, maxtT time.Time, directi
stats.AddCompressedBytes(int64(c.symbolizer.CompressedSize()))
decompressedSize := int64(c.symbolizer.DecompressedSize())
stats.AddDecompressedBytes(decompressedSize)
stats.AddDecompressedNonIndexedLabelsBytes(decompressedSize)
stats.AddDecompressedStructuredMetadataBytes(decompressedSize)
}
var headIterator iter.EntryIterator
@ -1044,7 +1044,7 @@ func (c *MemChunk) SampleIterator(ctx context.Context, from, through time.Time,
stats.AddCompressedBytes(int64(c.symbolizer.CompressedSize()))
decompressedSize := int64(c.symbolizer.DecompressedSize())
stats.AddDecompressedBytes(decompressedSize)
stats.AddDecompressedNonIndexedLabelsBytes(decompressedSize)
stats.AddDecompressedStructuredMetadataBytes(decompressedSize)
}
var lastMax int64 // placeholder to check order across blocks
@ -1100,7 +1100,7 @@ func (c *MemChunk) Blocks(mintT, maxtT time.Time) []Block {
// Rebound builds a smaller chunk with logs having timestamp from start and end(both inclusive)
func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, error) {
// add a millisecond to end time because the Chunk.Iterator considers end time to be non-inclusive.
itr, err := c.Iterator(context.Background(), start, end.Add(time.Millisecond), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}), iter.WithKeepNonIndexedLabels())
itr, err := c.Iterator(context.Background(), start, end.Add(time.Millisecond), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}), iter.WithKeepStructuredMetadata())
if err != nil {
return nil, err
}
@ -1119,7 +1119,7 @@ func (c *MemChunk) Rebound(start, end time.Time, filter filter.Func) (Chunk, err
for itr.Next() {
entry := itr.Entry()
if filter != nil && filter(entry.Timestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.NonIndexedLabels)...) {
if filter != nil && filter(entry.Timestamp, entry.Line, logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata)...) {
continue
}
if err := newChunk.Append(&entry); err != nil {
@ -1199,7 +1199,7 @@ func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction,
return
}
stats.AddHeadChunkBytes(int64(len(e.s)))
newLine, parsedLbs, matches := pipeline.ProcessString(e.t, e.s, e.nonIndexedLabels...)
newLine, parsedLbs, matches := pipeline.ProcessString(e.t, e.s, e.structuredMetadata...)
if !matches {
return
}
@ -1215,9 +1215,9 @@ func (hb *headBlock) Iterator(ctx context.Context, direction logproto.Direction,
streams[labels] = stream
}
stream.Entries = append(stream.Entries, logproto.Entry{
Timestamp: time.Unix(0, e.t),
Line: newLine,
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(e.nonIndexedLabels),
Timestamp: time.Unix(0, e.t),
Line: newLine,
StructuredMetadata: logproto.FromLabelsToLabelAdapters(e.structuredMetadata),
})
}
@ -1252,7 +1252,7 @@ func (hb *headBlock) SampleIterator(ctx context.Context, mint, maxt int64, extra
for _, e := range hb.entries {
stats.AddHeadChunkBytes(int64(len(e.s)))
value, parsedLabels, ok := extractor.ProcessString(e.t, e.s, e.nonIndexedLabels...)
value, parsedLabels, ok := extractor.ProcessString(e.t, e.s, e.structuredMetadata...)
if !ok {
continue
}
@ -1320,8 +1320,8 @@ type bufferedIterator struct {
currLine []byte // the current line, this is the same as the buffer but sliced the line size.
currTs int64
symbolsBuf []symbol // The buffer for a single entry's symbols.
currNonIndexedLabels labels.Labels // The current labels.
symbolsBuf []symbol // The buffer for a single entry's symbols.
currStructuredMetadata labels.Labels // The current labels.
closed bool
}
@ -1354,7 +1354,7 @@ func (si *bufferedIterator) Next() bool {
}
}
ts, line, nonIndexedLabels, ok := si.moveNext()
ts, line, structuredMetadata, ok := si.moveNext()
if !ok {
si.Close()
return false
@ -1362,14 +1362,14 @@ func (si *bufferedIterator) Next() bool {
si.currTs = ts
si.currLine = line
si.currNonIndexedLabels = nonIndexedLabels
si.currStructuredMetadata = structuredMetadata
return true
}
// moveNext moves the buffer to the next entry
func (si *bufferedIterator) moveNext() (int64, []byte, labels.Labels, bool) {
var decompressedBytes int64
var decompressedNonIndexedLabelsBytes int64
var decompressedStructuredMetadataBytes int64
var ts int64
var tWidth, lWidth, lineSize, lastAttempt int
for lWidth == 0 { // Read until both varints have enough bytes.
@ -1469,9 +1469,9 @@ func (si *bufferedIterator) moveNext() (int64, []byte, labels.Labels, bool) {
}
// Number of labels
decompressedNonIndexedLabelsBytes += binary.MaxVarintLen64
decompressedStructuredMetadataBytes += binary.MaxVarintLen64
// Label symbols
decompressedNonIndexedLabelsBytes += int64(nSymbols * 2 * binary.MaxVarintLen64)
decompressedStructuredMetadataBytes += int64(nSymbols * 2 * binary.MaxVarintLen64)
// Shift down what is still left in the fixed-size read buffer, if any.
si.readBufValid = copy(si.readBuf[:], si.readBuf[symbolsSectionLengthWidth+nSymbolsWidth:si.readBufValid])
@ -1547,8 +1547,8 @@ func (si *bufferedIterator) moveNext() (int64, []byte, labels.Labels, bool) {
}
si.stats.AddDecompressedLines(1)
si.stats.AddDecompressedNonIndexedLabelsBytes(decompressedNonIndexedLabelsBytes)
si.stats.AddDecompressedBytes(decompressedBytes + decompressedNonIndexedLabelsBytes)
si.stats.AddDecompressedStructuredMetadataBytes(decompressedStructuredMetadataBytes)
si.stats.AddDecompressedBytes(decompressedBytes + decompressedStructuredMetadataBytes)
return ts, si.buf[:lineSize], si.symbolizer.Lookup(si.symbolsBuf[:nSymbols]), true
}
@ -1614,7 +1614,7 @@ func (e *entryBufferedIterator) StreamHash() uint64 { return e.pipeline.BaseLabe
func (e *entryBufferedIterator) Next() bool {
for e.bufferedIterator.Next() {
newLine, lbs, matches := e.pipeline.Process(e.currTs, e.currLine, e.currNonIndexedLabels...)
newLine, lbs, matches := e.pipeline.Process(e.currTs, e.currLine, e.currStructuredMetadata...)
if !matches {
continue
}
@ -1624,10 +1624,10 @@ func (e *entryBufferedIterator) Next() bool {
e.cur.Timestamp = time.Unix(0, e.currTs)
e.cur.Line = string(newLine)
// Most of the time, there is no need to send back the non-indexed labels, as they are already part of the labels results.
// Most of the time, there is no need to send back the labels of structured metadata, as they are already part of the labels results.
// Still it might be needed for example when appending entries from one chunk into another one.
if e.iterOptions.KeepNonIndexedLabels {
e.cur.NonIndexedLabels = logproto.FromLabelsToLabelAdapters(e.currNonIndexedLabels)
if e.iterOptions.KeepStructuredMetdata {
e.cur.StructuredMetadata = logproto.FromLabelsToLabelAdapters(e.currStructuredMetadata)
}
return true
}
@ -1653,7 +1653,7 @@ type sampleBufferedIterator struct {
func (e *sampleBufferedIterator) Next() bool {
for e.bufferedIterator.Next() {
val, labels, ok := e.extractor.Process(e.currTs, e.currLine, e.currNonIndexedLabels...)
val, labels, ok := e.extractor.Process(e.currTs, e.currLine, e.currStructuredMetadata...)
if !ok {
continue
}

@ -69,14 +69,14 @@ var (
chunkFormat: ChunkFormatV3,
},
{
headBlockFmt: UnorderedWithNonIndexedLabelsHeadBlockFmt,
headBlockFmt: UnorderedWithStructuredMetadataHeadBlockFmt,
chunkFormat: ChunkFormatV4,
},
}
)
const (
DefaultTestHeadBlockFmt = UnorderedWithNonIndexedLabelsHeadBlockFmt
DefaultTestHeadBlockFmt = UnorderedWithStructuredMetadataHeadBlockFmt
lblPing = "ping"
lblPong = "pong"
)
@ -177,7 +177,7 @@ func TestBlock(t *testing.T) {
}
for _, c := range cases {
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(c.ts, c.str, c.lbs)))
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(c.ts, c.str, c.lbs)))
if c.cut {
require.NoError(t, chk.cut())
}
@ -193,7 +193,7 @@ func TestBlock(t *testing.T) {
e := it.Entry()
require.Equal(t, cases[idx].ts, e.Timestamp.UnixNano())
require.Equal(t, cases[idx].str, e.Line)
require.Empty(t, e.NonIndexedLabels)
require.Empty(t, e.StructuredMetadata)
if chunkFormat < ChunkFormatV4 {
require.Equal(t, labels.EmptyLabels().String(), it.Labels())
} else {
@ -412,14 +412,14 @@ func TestSerialization(t *testing.T) {
for _, testData := range allPossibleFormats {
for _, enc := range testEncoding {
enc := enc
// run tests with and without non-indexed labels set since it is optional
for _, appendWithNonIndexedLabels := range []bool{false, true} {
appendWithNonIndexedLabels := appendWithNonIndexedLabels
// run tests with and without structured metadata since it is optional
for _, appendWithStructuredMetadata := range []bool{false, true} {
appendWithStructuredMetadata := appendWithStructuredMetadata
testName := testNameWithFormats(enc, testData.chunkFormat, testData.headBlockFmt)
if appendWithNonIndexedLabels {
testName = fmt.Sprintf("%s - append non-indexed labels", testName)
if appendWithStructuredMetadata {
testName = fmt.Sprintf("%s - append structured metadata", testName)
} else {
testName = fmt.Sprintf("%s - without non-indexed labels", testName)
testName = fmt.Sprintf("%s - without structured metadata", testName)
}
t.Run(testName, func(t *testing.T) {
t.Parallel()
@ -431,8 +431,8 @@ func TestSerialization(t *testing.T) {
for i := 0; i < numSamples; i++ {
entry = logprotoEntry(int64(i), strconv.Itoa(i))
if appendWithNonIndexedLabels {
entry.NonIndexedLabels = []logproto.LabelAdapter{{Name: "foo", Value: strconv.Itoa(i)}}
if appendWithStructuredMetadata {
entry.StructuredMetadata = []logproto.LabelAdapter{{Name: "foo", Value: strconv.Itoa(i)}}
}
require.NoError(t, chk.Append(entry))
}
@ -452,8 +452,8 @@ func TestSerialization(t *testing.T) {
e := it.Entry()
require.Equal(t, int64(i), e.Timestamp.UnixNano())
require.Equal(t, strconv.Itoa(i), e.Line)
require.Nil(t, e.NonIndexedLabels)
if appendWithNonIndexedLabels && testData.chunkFormat >= ChunkFormatV4 {
require.Nil(t, e.StructuredMetadata)
if appendWithStructuredMetadata && testData.chunkFormat >= ChunkFormatV4 {
require.Equal(t, labels.FromStrings("foo", strconv.Itoa(i)).String(), it.Labels())
} else {
require.Equal(t, labels.EmptyLabels().String(), it.Labels())
@ -476,7 +476,7 @@ func TestSerialization(t *testing.T) {
s := sampleIt.Sample()
require.Equal(t, int64(i), s.Timestamp)
require.Equal(t, 1., s.Value)
if appendWithNonIndexedLabels && testData.chunkFormat >= ChunkFormatV4 {
if appendWithStructuredMetadata && testData.chunkFormat >= ChunkFormatV4 {
require.Equal(t, labels.FromStrings("foo", strconv.Itoa(i)).String(), sampleIt.Labels())
} else {
require.Equal(t, labels.EmptyLabels().String(), sampleIt.Labels())
@ -701,7 +701,7 @@ func TestChunkStats(t *testing.T) {
inserted++
entry.Timestamp = entry.Timestamp.Add(time.Nanosecond)
}
// For each entry: timestamp <varint>, line size <varint>, line <bytes>, num of non-indexed labels <varint>
// For each entry: timestamp <varint>, line size <varint>, line <bytes>, num of labels in structured metadata <varint>
expectedSize := inserted * (len(entry.Line) + 3*binary.MaxVarintLen64)
statsCtx, ctx := stats.NewContext(context.Background())
@ -801,10 +801,10 @@ func BenchmarkWrite(b *testing.B) {
for _, f := range HeadBlockFmts {
for _, enc := range testEncoding {
for _, withNonIndexedLabels := range []bool{false, true} {
for _, withStructuredMetadata := range []bool{false, true} {
name := fmt.Sprintf("%v-%v", f, enc)
if withNonIndexedLabels {
name += "-withNonIndexedLabels"
if withStructuredMetadata {
name += "-withStructuredMetadata"
}
b.Run(name, func(b *testing.B) {
uncompressedBytes, compressedBytes := 0, 0
@ -815,8 +815,8 @@ func BenchmarkWrite(b *testing.B) {
_ = c.Append(entry)
entry.Timestamp = time.Unix(0, i)
entry.Line = testdata.LogString(i)
if withNonIndexedLabels {
entry.NonIndexedLabels = []logproto.LabelAdapter{
if withStructuredMetadata {
entry.StructuredMetadata = []logproto.LabelAdapter{
{Name: "foo", Value: fmt.Sprint(i)},
}
}
@ -948,17 +948,17 @@ func TestGenerateDataSize(t *testing.T) {
func BenchmarkHeadBlockIterator(b *testing.B) {
for _, j := range []int{100000, 50000, 15000, 10000} {
for _, withNonIndexedLabels := range []bool{false, true} {
b.Run(fmt.Sprintf("size=%d nonIndexedLabels=%v", j, withNonIndexedLabels), func(b *testing.B) {
for _, withStructuredMetadata := range []bool{false, true} {
b.Run(fmt.Sprintf("size=%d structuredMetadata=%v", j, withStructuredMetadata), func(b *testing.B) {
h := headBlock{}
var nonIndexedLabels labels.Labels
if withNonIndexedLabels {
nonIndexedLabels = labels.Labels{{Name: "foo", Value: "foo"}}
var structuredMetadata labels.Labels
if withStructuredMetadata {
structuredMetadata = labels.Labels{{Name: "foo", Value: "foo"}}
}
for i := 0; i < j; i++ {
if err := h.Append(int64(i), "this is the append string", nonIndexedLabels); err != nil {
if err := h.Append(int64(i), "this is the append string", structuredMetadata); err != nil {
b.Fatal(err)
}
}
@ -979,17 +979,17 @@ func BenchmarkHeadBlockIterator(b *testing.B) {
func BenchmarkHeadBlockSampleIterator(b *testing.B) {
for _, j := range []int{20000, 10000, 8000, 5000} {
for _, withNonIndexedLabels := range []bool{false, true} {
b.Run(fmt.Sprintf("size=%d nonIndexedLabels=%v", j, withNonIndexedLabels), func(b *testing.B) {
for _, withStructuredMetadata := range []bool{false, true} {
b.Run(fmt.Sprintf("size=%d structuredMetadata=%v", j, withStructuredMetadata), func(b *testing.B) {
h := headBlock{}
var nonIndexedLabels labels.Labels
if withNonIndexedLabels {
nonIndexedLabels = labels.Labels{{Name: "foo", Value: "foo"}}
var structuredMetadata labels.Labels
if withStructuredMetadata {
structuredMetadata = labels.Labels{{Name: "foo", Value: "foo"}}
}
for i := 0; i < j; i++ {
if err := h.Append(int64(i), "this is the append string", nonIndexedLabels); err != nil {
if err := h.Append(int64(i), "this is the append string", structuredMetadata); err != nil {
b.Fatal(err)
}
}
@ -1115,7 +1115,7 @@ func TestCheckpointEncoding(t *testing.T) {
entry := &logproto.Entry{
Timestamp: time.Unix(int64(i), 0),
Line: fmt.Sprintf("hi there - %d", i),
NonIndexedLabels: push.LabelsAdapter{{
StructuredMetadata: push.LabelsAdapter{{
Name: fmt.Sprintf("name%d", i),
Value: fmt.Sprintf("val%d", i),
}},
@ -1456,39 +1456,39 @@ func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) {
err: chunk.ErrSliceNoDataInRange,
},
// Test cases with non-indexed labels
// Test cases with structured metadata
{
name: "no matches - chunk without non-indexed labels",
name: "no matches - chunk without structured metadata",
testMemChunk: buildFilterableTestMemChunk(t, chkFrom, chkThrough, &chkFrom, &chkThroughPlus1, false),
filterFunc: func(_ time.Time, in string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong
filterFunc: func(_ time.Time, in string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
},
nrMatching: 0,
nrNotMatching: 10,
},
{
name: "non-indexed labels not matching",
name: "structured metadata not matching",
testMemChunk: buildFilterableTestMemChunk(t, chkFrom, chkThrough, &chkFrom, &chkThroughPlus1, true),
filterFunc: func(_ time.Time, in string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get("ding") == "dong"
filterFunc: func(_ time.Time, in string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get("ding") == "dong"
},
nrMatching: 0,
nrNotMatching: 10,
},
{
name: "some lines removed - with non-indexed labels",
name: "some lines removed - with structured metadata",
testMemChunk: buildFilterableTestMemChunk(t, chkFrom, chkThrough, &chkFrom, &chkFromPlus5, true),
filterFunc: func(_ time.Time, in string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong
filterFunc: func(_ time.Time, in string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
},
nrMatching: 5,
nrNotMatching: 5,
},
{
name: "all lines match - with non-indexed labels",
name: "all lines match - with structured metadata",
testMemChunk: buildFilterableTestMemChunk(t, chkFrom, chkThrough, &chkFrom, &chkThroughPlus1, true),
filterFunc: func(_ time.Time, in string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong && strings.HasPrefix(in, "matching")
filterFunc: func(_ time.Time, in string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong && strings.HasPrefix(in, "matching")
},
err: chunk.ErrSliceNoDataInRange,
},
@ -1523,13 +1523,13 @@ func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) {
}
}
func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matchingFrom, matchingTo *time.Time, withNonIndexedLabels bool) *MemChunk {
func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matchingFrom, matchingTo *time.Time, withStructuredMetadata bool) *MemChunk {
chk := NewMemChunk(ChunkFormatV4, EncGZIP, DefaultTestHeadBlockFmt, defaultBlockSize, 0)
t.Logf("from : %v", from.String())
t.Logf("through: %v", through.String())
var nonIndexedLabels push.LabelsAdapter
if withNonIndexedLabels {
nonIndexedLabels = push.LabelsAdapter{{Name: lblPing, Value: lblPong}}
var structuredMetadata push.LabelsAdapter
if withStructuredMetadata {
structuredMetadata = push.LabelsAdapter{{Name: lblPing, Value: lblPong}}
}
for from.Before(through) {
// If a line is between matchingFrom and matchingTo add the prefix "matching"
@ -1537,21 +1537,21 @@ func buildFilterableTestMemChunk(t *testing.T, from, through time.Time, matching
(from.Equal(*matchingFrom) || (from.After(*matchingFrom) && (from.Before(*matchingTo)))) {
t.Logf("%v matching line", from.String())
err := chk.Append(&logproto.Entry{
Line: fmt.Sprintf("matching %v", from.String()),
Timestamp: from,
NonIndexedLabels: nonIndexedLabels,
Line: fmt.Sprintf("matching %v", from.String()),
Timestamp: from,
StructuredMetadata: structuredMetadata,
})
require.NoError(t, err)
} else {
t.Logf("%v non-match line", from.String())
var nonIndexedLabels push.LabelsAdapter
if withNonIndexedLabels {
nonIndexedLabels = push.LabelsAdapter{{Name: "ding", Value: "dong"}}
var structuredMetadata push.LabelsAdapter
if withStructuredMetadata {
structuredMetadata = push.LabelsAdapter{{Name: "ding", Value: "dong"}}
}
err := chk.Append(&logproto.Entry{
Line: from.String(),
Timestamp: from,
NonIndexedLabels: nonIndexedLabels,
Line: from.String(),
Timestamp: from,
StructuredMetadata: structuredMetadata,
})
require.NoError(t, err)
}
@ -1626,14 +1626,14 @@ func TestMemChunk_SpaceFor(t *testing.T) {
expect: true,
},
{
desc: "entry fits with non-indexed labels",
desc: "entry fits with structured metadata",
targetSize: 10,
headSize: 0,
cutBlockSize: 0,
entry: logproto.Entry{
Timestamp: time.Unix(0, 0),
Line: strings.Repeat("a", 2),
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: strings.Repeat("a", 2)},
},
},
@ -1651,21 +1651,21 @@ func TestMemChunk_SpaceFor(t *testing.T) {
expect: false,
},
{
desc: "entry too big because non-indexed labels",
desc: "entry too big because structured metadata",
targetSize: 10,
headSize: 0,
cutBlockSize: 0,
entry: logproto.Entry{
Timestamp: time.Unix(0, 0),
Line: strings.Repeat("a", 5),
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: strings.Repeat("a", 5)},
},
},
expectFunc: func(chunkFormat byte, _ HeadBlockFmt) bool {
// Succeed unless we're using chunk format v4, which should
// take the non-indexed labels into account.
// take the structured metadata into account.
return chunkFormat < ChunkFormatV4
},
},
@ -1694,45 +1694,45 @@ func TestMemChunk_SpaceFor(t *testing.T) {
}
}
func TestMemChunk_IteratorWithNonIndexedLabels(t *testing.T) {
func TestMemChunk_IteratorWithStructuredMetadata(t *testing.T) {
for _, enc := range testEncoding {
enc := enc
t.Run(enc.String(), func(t *testing.T) {
streamLabels := labels.Labels{
{Name: "job", Value: "fake"},
}
chk := newMemChunkWithFormat(ChunkFormatV4, enc, UnorderedWithNonIndexedLabelsHeadBlockFmt, testBlockSize, testTargetSize)
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(1, "lineA", []logproto.LabelAdapter{
chk := newMemChunkWithFormat(ChunkFormatV4, enc, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(1, "lineA", []logproto.LabelAdapter{
{Name: "traceID", Value: "123"},
{Name: "user", Value: "a"},
})))
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(2, "lineB", []logproto.LabelAdapter{
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(2, "lineB", []logproto.LabelAdapter{
{Name: "traceID", Value: "456"},
{Name: "user", Value: "b"},
})))
require.NoError(t, chk.cut())
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(3, "lineC", []logproto.LabelAdapter{
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(3, "lineC", []logproto.LabelAdapter{
{Name: "traceID", Value: "789"},
{Name: "user", Value: "c"},
})))
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(4, "lineD", []logproto.LabelAdapter{
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(4, "lineD", []logproto.LabelAdapter{
{Name: "traceID", Value: "123"},
{Name: "user", Value: "d"},
})))
// The expected bytes is the sum of bytes decompressed and bytes read from the head chunk.
// First we add the bytes read from the store (aka decompressed). That's
// nonIndexedLabelsBytes = n. lines * (n. labels <int> + (2 * n. nonIndexedLabelsSymbols * symbol <int>))
// structuredMetadataBytes = n. lines * (n. labels <int> + (2 * n. structuredMetadataSymbols * symbol <int>))
// lineBytes = n. lines * (ts <int> + line length <int> + line)
expectedNonIndexedLabelsBytes := 2 * (binary.MaxVarintLen64 + (2 * 2 * binary.MaxVarintLen64))
expectedStructuredMetadataBytes := 2 * (binary.MaxVarintLen64 + (2 * 2 * binary.MaxVarintLen64))
lineBytes := 2 * (2*binary.MaxVarintLen64 + len("lineA"))
// Now we add the bytes read from the head chunk. That's
// nonIndexedLabelsBytes = n. lines * (2 * n. nonIndexedLabelsSymbols * symbol <uint32>)
// structuredMetadataBytes = n. lines * (2 * n. structuredMetadataSymbols * symbol <uint32>)
// lineBytes = n. lines * (line)
expectedNonIndexedLabelsBytes += 2 * (2 * 2 * 4)
expectedStructuredMetadataBytes += 2 * (2 * 2 * 4)
lineBytes += 2 * (len("lineC"))
// Finally, the expected total bytes is the line bytes + non-indexed labels bytes
expectedBytes := lineBytes + expectedNonIndexedLabelsBytes
// Finally, the expected total bytes is the line bytes + structured metadata bytes
expectedBytes := lineBytes + expectedStructuredMetadataBytes
for _, tc := range []struct {
name string
@ -1861,16 +1861,16 @@ func TestMemChunk_IteratorWithNonIndexedLabels(t *testing.T) {
lines = append(lines, e.Line)
streams = append(streams, it.Labels())
// We don't want to send back the non-indexed labels since
// We don't want to send back the structured metadata since
// they are already part of the returned labels.
require.Empty(t, e.NonIndexedLabels)
require.Empty(t, e.StructuredMetadata)
}
assert.ElementsMatch(t, tc.expectedLines, lines)
assert.ElementsMatch(t, tc.expectedStreams, streams)
resultStats := sts.Result(0, 0, len(lines))
require.Equal(t, int64(expectedBytes), resultStats.Summary.TotalBytesProcessed)
require.Equal(t, int64(expectedNonIndexedLabelsBytes), resultStats.Summary.TotalNonIndexedLabelsBytesProcessed)
require.Equal(t, int64(expectedStructuredMetadataBytes), resultStats.Summary.TotalStructuredMetadataBytesProcessed)
}
})
@ -1901,7 +1901,7 @@ func TestMemChunk_IteratorWithNonIndexedLabels(t *testing.T) {
resultStats := sts.Result(0, 0, 0)
require.Equal(t, int64(expectedBytes), resultStats.Summary.TotalBytesProcessed)
require.Equal(t, int64(expectedNonIndexedLabelsBytes), resultStats.Summary.TotalNonIndexedLabelsBytesProcessed)
require.Equal(t, int64(expectedStructuredMetadataBytes), resultStats.Summary.TotalStructuredMetadataBytesProcessed)
}
})
})
@ -1911,37 +1911,37 @@ func TestMemChunk_IteratorWithNonIndexedLabels(t *testing.T) {
}
func TestMemChunk_IteratorOptions(t *testing.T) {
chk := newMemChunkWithFormat(ChunkFormatV4, EncNone, UnorderedWithNonIndexedLabelsHeadBlockFmt, testBlockSize, testTargetSize)
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(0, "0", logproto.FromLabelsToLabelAdapters(
chk := newMemChunkWithFormat(ChunkFormatV4, EncNone, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(0, "0", logproto.FromLabelsToLabelAdapters(
labels.FromStrings("a", "0"),
))))
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(1, "1", logproto.FromLabelsToLabelAdapters(
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(1, "1", logproto.FromLabelsToLabelAdapters(
labels.FromStrings("a", "1"),
))))
require.NoError(t, chk.cut())
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(2, "2", logproto.FromLabelsToLabelAdapters(
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(2, "2", logproto.FromLabelsToLabelAdapters(
labels.FromStrings("a", "2"),
))))
require.NoError(t, chk.Append(logprotoEntryWithNonIndexedLabels(3, "3", logproto.FromLabelsToLabelAdapters(
require.NoError(t, chk.Append(logprotoEntryWithStructuredMetadata(3, "3", logproto.FromLabelsToLabelAdapters(
labels.FromStrings("a", "3"),
))))
for _, tc := range []struct {
name string
options []iter.EntryIteratorOption
expectNonIndexedLabels bool
name string
options []iter.EntryIteratorOption
expectStructuredMetadata bool
}{
{
name: "No options",
expectNonIndexedLabels: false,
name: "No options",
expectStructuredMetadata: false,
},
{
name: "WithKeepNonIndexedLabels",
name: "WithKeepStructuredMetadata",
options: []iter.EntryIteratorOption{
iter.WithKeepNonIndexedLabels(),
iter.WithKeepStructuredMetadata(),
},
expectNonIndexedLabels: true,
expectStructuredMetadata: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
@ -1956,8 +1956,8 @@ func TestMemChunk_IteratorOptions(t *testing.T) {
Line: fmt.Sprintf("%d", idx),
}
if tc.expectNonIndexedLabels {
expectedEntry.NonIndexedLabels = logproto.FromLabelsToLabelAdapters(expectedLabels)
if tc.expectStructuredMetadata {
expectedEntry.StructuredMetadata = logproto.FromLabelsToLabelAdapters(expectedLabels)
}
require.Equal(t, expectedEntry, it.Entry())

@ -172,11 +172,11 @@ func (s *symbolizer) SerializeTo(w io.Writer, pool WriterPool) (int, []byte, err
_, err := crc32Hash.Write(eb.get())
if err != nil {
return 0, nil, errors.Wrap(err, "write num non-indexed labels to crc32hash")
return 0, nil, errors.Wrap(err, "write num of labels of structured metadata to crc32hash")
}
n, err := w.Write(eb.get())
if err != nil {
return 0, nil, errors.Wrap(err, "write num non-indexed labels to writer")
return 0, nil, errors.Wrap(err, "write num of labels of structured metadata to writer")
}
writtenBytes += n
@ -214,13 +214,13 @@ func (s *symbolizer) SerializeTo(w io.Writer, pool WriterPool) (int, []byte, err
// hash the labels block
_, err = crc32Hash.Write(b)
if err != nil {
return writtenBytes, nil, errors.Wrap(err, "build non-indexed labels hash")
return writtenBytes, nil, errors.Wrap(err, "build structured metadata hash")
}
// write the labels block to writer
n, err = w.Write(b)
if err != nil {
return writtenBytes, nil, errors.Wrap(err, "write non-indexed labels block")
return writtenBytes, nil, errors.Wrap(err, "write structured metadata block")
}
writtenBytes += n
return writtenBytes, crc32Hash.Sum(nil), nil
@ -350,7 +350,7 @@ func symbolizerFromEnc(b []byte, pool ReaderPool) (*symbolizer, error) {
return nil, fmt.Errorf("got unexpected EOF")
}
if readBufValid == lastAttempt { // Got EOF and could not parse same data last time.
return nil, fmt.Errorf("invalid non-indexed labels block in chunk")
return nil, fmt.Errorf("invalid structured metadata block in chunk")
}
}
var l uint64

@ -97,8 +97,8 @@ func (hb *unorderedHeadBlock) Reset() {
}
type nsEntry struct {
line string
nonIndexedLabelsSymbols symbols
line string
structuredMetadataSymbols symbols
}
// collection of entries belonging to the same nanosecond
@ -111,10 +111,10 @@ func (e *nsEntries) ValueAtDimension(_ uint64) int64 {
return e.ts
}
func (hb *unorderedHeadBlock) Append(ts int64, line string, nonIndexedLabels labels.Labels) error {
if hb.format < UnorderedWithNonIndexedLabelsHeadBlockFmt {
// nonIndexedLabels must be ignored for the previous head block formats
nonIndexedLabels = nil
func (hb *unorderedHeadBlock) Append(ts int64, line string, structuredMetadata labels.Labels) error {
if hb.format < UnorderedWithStructuredMetadataHeadBlockFmt {
// structuredMetadata must be ignored for the previous head block formats
structuredMetadata = nil
}
// This is an allocation hack. The rangetree lib does not
// support the ability to pass a "mutate" function during an insert
@ -139,9 +139,9 @@ func (hb *unorderedHeadBlock) Append(ts int64, line string, nonIndexedLabels lab
return nil
}
}
e.entries = append(displaced[0].(*nsEntries).entries, nsEntry{line, hb.symbolizer.Add(nonIndexedLabels)})
e.entries = append(displaced[0].(*nsEntries).entries, nsEntry{line, hb.symbolizer.Add(structuredMetadata)})
} else {
e.entries = []nsEntry{{line, hb.symbolizer.Add(nonIndexedLabels)}}
e.entries = []nsEntry{{line, hb.symbolizer.Add(structuredMetadata)}}
}
// Update hb metdata
@ -154,7 +154,7 @@ func (hb *unorderedHeadBlock) Append(ts int64, line string, nonIndexedLabels lab
}
hb.size += len(line)
hb.size += len(nonIndexedLabels) * 2 * 4 // 4 bytes per label and value pair as nonIndexedLabelsSymbols
hb.size += len(structuredMetadata) * 2 * 4 // 4 bytes per label and value pair as structuredMetadataSymbols
hb.lines++
return nil
@ -215,12 +215,12 @@ func (hb *unorderedHeadBlock) forEntries(
for ; i < len(es.entries) && i >= 0; next() {
line := es.entries[i].line
nonIndexedLabelsSymbols := es.entries[i].nonIndexedLabelsSymbols
nonIndexedLabelsBytes := int64(2 * len(nonIndexedLabelsSymbols) * 4) // 2 * num_symbols * 4 bytes(uint32)
chunkStats.AddHeadChunkNonIndexedLabelsBytes(nonIndexedLabelsBytes)
chunkStats.AddHeadChunkBytes(int64(len(line)) + nonIndexedLabelsBytes)
structuredMetadataSymbols := es.entries[i].structuredMetadataSymbols
structuredMetadataBytes := int64(2 * len(structuredMetadataSymbols) * 4) // 2 * num_symbols * 4 bytes(uint32)
chunkStats.AddHeadChunkStructuredMetadataBytes(structuredMetadataBytes)
chunkStats.AddHeadChunkBytes(int64(len(line)) + structuredMetadataBytes)
err = entryFn(chunkStats, es.ts, line, nonIndexedLabelsSymbols)
err = entryFn(chunkStats, es.ts, line, structuredMetadataSymbols)
}
}
@ -261,8 +261,8 @@ func (hb *unorderedHeadBlock) Iterator(ctx context.Context, direction logproto.D
direction,
mint,
maxt,
func(statsCtx *stats.Context, ts int64, line string, nonIndexedLabelsSymbols symbols) error {
newLine, parsedLbs, matches := pipeline.ProcessString(ts, line, hb.symbolizer.Lookup(nonIndexedLabelsSymbols)...)
func(statsCtx *stats.Context, ts int64, line string, structuredMetadataSymbols symbols) error {
newLine, parsedLbs, matches := pipeline.ProcessString(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols)...)
if !matches {
return nil
}
@ -283,10 +283,10 @@ func (hb *unorderedHeadBlock) Iterator(ctx context.Context, direction logproto.D
Line: newLine,
}
// Most of the time, there is no need to send back the non-indexed labels, as they are already part of the labels results.
// Most of the time, there is no need to send back the structured metadata, as they are already part of the labels results.
// Still it might be needed for example when appending entries from one chunk into another one.
if iterOptions.KeepNonIndexedLabels {
entry.NonIndexedLabels = logproto.FromLabelsToLabelAdapters(hb.symbolizer.Lookup(nonIndexedLabelsSymbols))
if iterOptions.KeepStructuredMetdata {
entry.StructuredMetadata = logproto.FromLabelsToLabelAdapters(hb.symbolizer.Lookup(structuredMetadataSymbols))
}
stream.Entries = append(stream.Entries, entry)
@ -318,8 +318,8 @@ func (hb *unorderedHeadBlock) SampleIterator(
logproto.FORWARD,
mint,
maxt,
func(statsCtx *stats.Context, ts int64, line string, nonIndexedLabelsSymbols symbols) error {
value, parsedLabels, ok := extractor.ProcessString(ts, line, hb.symbolizer.Lookup(nonIndexedLabelsSymbols)...)
func(statsCtx *stats.Context, ts int64, line string, structuredMetadataSymbols symbols) error {
value, parsedLabels, ok := extractor.ProcessString(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols)...)
if !ok {
return nil
}
@ -388,7 +388,7 @@ func (hb *unorderedHeadBlock) Serialise(pool WriterPool) ([]byte, error) {
logproto.FORWARD,
0,
math.MaxInt64,
func(_ *stats.Context, ts int64, line string, nonIndexedLabelsSymbols symbols) error {
func(_ *stats.Context, ts int64, line string, structuredMetadataSymbols symbols) error {
n := binary.PutVarint(encBuf, ts)
inBuf.Write(encBuf[:n])
@ -397,17 +397,17 @@ func (hb *unorderedHeadBlock) Serialise(pool WriterPool) ([]byte, error) {
inBuf.WriteString(line)
if hb.format >= UnorderedWithNonIndexedLabelsHeadBlockFmt {
if hb.format >= UnorderedWithStructuredMetadataHeadBlockFmt {
symbolsSectionBuf.Reset()
// Serialize non-indexed labels symbols to symbolsSectionBuf so that we can find and write its length before
// Serialize structured metadata symbols to symbolsSectionBuf so that we can find and write its length before
// writing symbols section to inbuf since we can't estimate its size beforehand due to variable length encoding.
// write the number of symbol pairs
n = binary.PutUvarint(encBuf, uint64(len(nonIndexedLabelsSymbols)))
n = binary.PutUvarint(encBuf, uint64(len(structuredMetadataSymbols)))
symbolsSectionBuf.Write(encBuf[:n])
// write the symbols
for _, l := range nonIndexedLabelsSymbols {
for _, l := range structuredMetadataSymbols {
n = binary.PutUvarint(encBuf, uint64(l.Name))
symbolsSectionBuf.Write(encBuf[:n])
@ -447,8 +447,8 @@ func (hb *unorderedHeadBlock) Convert(version HeadBlockFmt, symbolizer *symboliz
logproto.FORWARD,
0,
math.MaxInt64,
func(_ *stats.Context, ts int64, line string, nonIndexedLabelsSymbols symbols) error {
return out.Append(ts, line, hb.symbolizer.Lookup(nonIndexedLabelsSymbols))
func(_ *stats.Context, ts int64, line string, structuredMetadataSymbols symbols) error {
return out.Append(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols))
},
)
return out, err
@ -460,8 +460,8 @@ func (hb *unorderedHeadBlock) CheckpointSize() int {
size += binary.MaxVarintLen32 * 2 // total entries + total size
size += binary.MaxVarintLen64 * 2 // mint,maxt
size += (binary.MaxVarintLen64 + binary.MaxVarintLen32) * hb.lines // ts + len of log line.
if hb.format >= UnorderedWithNonIndexedLabelsHeadBlockFmt {
// number of non-indexed labels stored for each log entry
if hb.format >= UnorderedWithStructuredMetadataHeadBlockFmt {
// number of labels of structured metadata stored for each log entry
size += binary.MaxVarintLen32 * hb.lines
}
size += hb.size // uncompressed bytes of lines
@ -504,7 +504,7 @@ func (hb *unorderedHeadBlock) CheckpointTo(w io.Writer) error {
logproto.FORWARD,
0,
math.MaxInt64,
func(_ *stats.Context, ts int64, line string, nonIndexedLabelsSymbols symbols) error {
func(_ *stats.Context, ts int64, line string, structuredMetadataSymbols symbols) error {
eb.putVarint64(ts)
eb.putUvarint(len(line))
_, err = w.Write(eb.get())
@ -518,20 +518,20 @@ func (hb *unorderedHeadBlock) CheckpointTo(w io.Writer) error {
return errors.Wrap(err, "write headblock entry line")
}
if hb.format >= UnorderedWithNonIndexedLabelsHeadBlockFmt {
// non-indexed labels
eb.putUvarint(len(nonIndexedLabelsSymbols))
if hb.format >= UnorderedWithStructuredMetadataHeadBlockFmt {
// structured metadata
eb.putUvarint(len(structuredMetadataSymbols))
_, err = w.Write(eb.get())
if err != nil {
return errors.Wrap(err, "write headBlock entry meta labels length")
}
eb.reset()
for _, l := range nonIndexedLabelsSymbols {
for _, l := range structuredMetadataSymbols {
eb.putUvarint(int(l.Name))
eb.putUvarint(int(l.Value))
_, err = w.Write(eb.get())
if err != nil {
return errors.Wrap(err, "write headBlock entry nonIndexedLabelsSymbols")
return errors.Wrap(err, "write headBlock entry structuredMetadataSymbols")
}
eb.reset()
}
@ -574,13 +574,13 @@ func (hb *unorderedHeadBlock) LoadBytes(b []byte) error {
lineLn := db.uvarint()
line := string(db.bytes(lineLn))
var nonIndexedLabelsSymbols symbols
if version >= UnorderedWithNonIndexedLabelsHeadBlockFmt.Byte() {
var structuredMetadataSymbols symbols
if version >= UnorderedWithStructuredMetadataHeadBlockFmt.Byte() {
metaLn := db.uvarint()
if metaLn > 0 {
nonIndexedLabelsSymbols = make([]symbol, metaLn)
structuredMetadataSymbols = make([]symbol, metaLn)
for j := 0; j < metaLn && db.err() == nil; j++ {
nonIndexedLabelsSymbols[j] = symbol{
structuredMetadataSymbols[j] = symbol{
Name: uint32(db.uvarint()),
Value: uint32(db.uvarint()),
}
@ -588,7 +588,7 @@ func (hb *unorderedHeadBlock) LoadBytes(b []byte) error {
}
}
if err := hb.Append(ts, line, hb.symbolizer.Lookup(nonIndexedLabelsSymbols)); err != nil {
if err := hb.Append(ts, line, hb.symbolizer.Lookup(structuredMetadataSymbols)); err != nil {
return err
}
}
@ -615,7 +615,7 @@ func HeadFromCheckpoint(b []byte, desiredIfNotUnordered HeadBlockFmt, symbolizer
return nil, errors.Wrap(db.err(), "verifying headblock header")
}
format := HeadBlockFmt(version)
if format > UnorderedWithNonIndexedLabelsHeadBlockFmt {
if format > UnorderedWithStructuredMetadataHeadBlockFmt {
return nil, fmt.Errorf("unexpected head block version: %v", format)
}

@ -25,7 +25,7 @@ func iterEq(t *testing.T, exp []entry, got iter.EntryIterator) {
Timestamp: time.Unix(0, exp[i].t),
Line: exp[i].s,
}, got.Entry())
require.Equal(t, exp[i].nonIndexedLabels.String(), got.Labels())
require.Equal(t, exp[i].structuredMetadata.String(), got.Labels())
i++
}
require.Equal(t, i, len(exp))
@ -167,12 +167,12 @@ func Test_Unordered_InsertRetrieval(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
for _, format := range []HeadBlockFmt{
UnorderedHeadBlockFmt,
UnorderedWithNonIndexedLabelsHeadBlockFmt,
UnorderedWithStructuredMetadataHeadBlockFmt,
} {
t.Run(format.String(), func(t *testing.T) {
hb := newUnorderedHeadBlock(format, newSymbolizer())
for _, e := range tc.input {
require.Nil(t, hb.Append(e.t, e.s, e.nonIndexedLabels))
require.Nil(t, hb.Append(e.t, e.s, e.structuredMetadata))
}
itr := hb.Iterator(
@ -185,9 +185,9 @@ func Test_Unordered_InsertRetrieval(t *testing.T) {
expected := make([]entry, len(tc.exp))
copy(expected, tc.exp)
if format < UnorderedWithNonIndexedLabelsHeadBlockFmt {
if format < UnorderedWithStructuredMetadataHeadBlockFmt {
for i := range expected {
expected[i].nonIndexedLabels = nil
expected[i].structuredMetadata = nil
}
}
@ -244,12 +244,12 @@ func Test_UnorderedBoundedIter(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
for _, format := range []HeadBlockFmt{
UnorderedHeadBlockFmt,
UnorderedWithNonIndexedLabelsHeadBlockFmt,
UnorderedWithStructuredMetadataHeadBlockFmt,
} {
t.Run(format.String(), func(t *testing.T) {
hb := newUnorderedHeadBlock(format, newSymbolizer())
for _, e := range tc.input {
require.Nil(t, hb.Append(e.t, e.s, e.nonIndexedLabels))
require.Nil(t, hb.Append(e.t, e.s, e.structuredMetadata))
}
itr := hb.Iterator(
@ -262,9 +262,9 @@ func Test_UnorderedBoundedIter(t *testing.T) {
expected := make([]entry, len(tc.exp))
copy(expected, tc.exp)
if format < UnorderedWithNonIndexedLabelsHeadBlockFmt {
if format < UnorderedWithStructuredMetadataHeadBlockFmt {
for i := range expected {
expected[i].nonIndexedLabels = nil
expected[i].structuredMetadata = nil
}
}
@ -277,11 +277,11 @@ func Test_UnorderedBoundedIter(t *testing.T) {
func TestHeadBlockInterop(t *testing.T) {
unordered, ordered := newUnorderedHeadBlock(UnorderedHeadBlockFmt, nil), &headBlock{}
unorderedWithNonIndexedLabels := newUnorderedHeadBlock(UnorderedWithNonIndexedLabelsHeadBlockFmt, newSymbolizer())
unorderedWithStructuredMetadata := newUnorderedHeadBlock(UnorderedWithStructuredMetadataHeadBlockFmt, newSymbolizer())
for i := 0; i < 100; i++ {
metaLabels := labels.Labels{{Name: "foo", Value: fmt.Sprint(99 - i)}}
require.Nil(t, unordered.Append(int64(99-i), fmt.Sprint(99-i), metaLabels))
require.Nil(t, unorderedWithNonIndexedLabels.Append(int64(99-i), fmt.Sprint(99-i), metaLabels))
require.Nil(t, unorderedWithStructuredMetadata.Append(int64(99-i), fmt.Sprint(99-i), metaLabels))
require.Nil(t, ordered.Append(int64(i), fmt.Sprint(i), labels.Labels{{Name: "foo", Value: fmt.Sprint(i)}}))
}
@ -290,7 +290,7 @@ func TestHeadBlockInterop(t *testing.T) {
require.Nil(t, err)
unorderedCheckpointBytes, err := unordered.CheckpointBytes(nil)
require.Nil(t, err)
unorderedWithNonIndexedLabelsCheckpointBytes, err := unorderedWithNonIndexedLabels.CheckpointBytes(nil)
unorderedWithStructuredMetadataCheckpointBytes, err := unorderedWithStructuredMetadata.CheckpointBytes(nil)
require.Nil(t, err)
// Ensure we can recover ordered checkpoint into ordered headblock
@ -303,11 +303,11 @@ func TestHeadBlockInterop(t *testing.T) {
require.Nil(t, err)
require.Equal(t, unordered, recovered)
// Ensure we can recover ordered checkpoint into unordered headblock with non-indexed labels
recovered, err = HeadFromCheckpoint(orderedCheckpointBytes, UnorderedWithNonIndexedLabelsHeadBlockFmt, nil)
// Ensure we can recover ordered checkpoint into unordered headblock with structured metadata
recovered, err = HeadFromCheckpoint(orderedCheckpointBytes, UnorderedWithStructuredMetadataHeadBlockFmt, nil)
require.NoError(t, err)
require.Equal(t, &unorderedHeadBlock{
format: UnorderedWithNonIndexedLabelsHeadBlockFmt,
format: UnorderedWithStructuredMetadataHeadBlockFmt,
rt: unordered.rt,
lines: unordered.lines,
size: unordered.size,
@ -320,25 +320,25 @@ func TestHeadBlockInterop(t *testing.T) {
require.Nil(t, err)
require.Equal(t, unordered, recovered)
// Ensure trying to recover unordered checkpoint into unordered with non-indexed labels keeps it in unordered format
recovered, err = HeadFromCheckpoint(unorderedCheckpointBytes, UnorderedWithNonIndexedLabelsHeadBlockFmt, nil)
// Ensure trying to recover unordered checkpoint into unordered with structured metadata keeps it in unordered format
recovered, err = HeadFromCheckpoint(unorderedCheckpointBytes, UnorderedWithStructuredMetadataHeadBlockFmt, nil)
require.NoError(t, err)
require.Equal(t, unordered, recovered)
// Ensure trying to recover unordered with non-indexed labels checkpoint into ordered headblock keeps it in unordered with non-indexed labels format
recovered, err = HeadFromCheckpoint(unorderedWithNonIndexedLabelsCheckpointBytes, OrderedHeadBlockFmt, unorderedWithNonIndexedLabels.symbolizer)
// Ensure trying to recover unordered with structured metadata checkpoint into ordered headblock keeps it in unordered with structured metadata format
recovered, err = HeadFromCheckpoint(unorderedWithStructuredMetadataCheckpointBytes, OrderedHeadBlockFmt, unorderedWithStructuredMetadata.symbolizer)
require.Nil(t, err)
require.Equal(t, unorderedWithNonIndexedLabels, recovered) // we compare the data with unordered because unordered head block does not contain metaLabels.
require.Equal(t, unorderedWithStructuredMetadata, recovered) // we compare the data with unordered because unordered head block does not contain metaLabels.
// Ensure trying to recover unordered with non-indexed labels checkpoint into unordered headblock keeps it in unordered with non-indexed labels format
recovered, err = HeadFromCheckpoint(unorderedWithNonIndexedLabelsCheckpointBytes, UnorderedHeadBlockFmt, unorderedWithNonIndexedLabels.symbolizer)
// Ensure trying to recover unordered with structured metadata checkpoint into unordered headblock keeps it in unordered with structured metadata format
recovered, err = HeadFromCheckpoint(unorderedWithStructuredMetadataCheckpointBytes, UnorderedHeadBlockFmt, unorderedWithStructuredMetadata.symbolizer)
require.Nil(t, err)
require.Equal(t, unorderedWithNonIndexedLabels, recovered) // we compare the data with unordered because unordered head block does not contain metaLabels.
require.Equal(t, unorderedWithStructuredMetadata, recovered) // we compare the data with unordered because unordered head block does not contain metaLabels.
// Ensure we can recover unordered with non-indexed checkpoint into unordered with non-indexed headblock
recovered, err = HeadFromCheckpoint(unorderedWithNonIndexedLabelsCheckpointBytes, UnorderedWithNonIndexedLabelsHeadBlockFmt, unorderedWithNonIndexedLabels.symbolizer)
// Ensure we can recover unordered with structured metadata checkpoint into unordered with structured metadata headblock
recovered, err = HeadFromCheckpoint(unorderedWithStructuredMetadataCheckpointBytes, UnorderedWithStructuredMetadataHeadBlockFmt, unorderedWithStructuredMetadata.symbolizer)
require.Nil(t, err)
require.Equal(t, unorderedWithNonIndexedLabels, recovered)
require.Equal(t, unorderedWithStructuredMetadata, recovered)
}
// ensure backwards compatibility from when chunk format
@ -388,7 +388,7 @@ func BenchmarkHeadBlockWrites(b *testing.B) {
unorderedWrites: true,
},
} {
for _, withNonIndexedLabels := range []bool{false, true} {
for _, withStructuredMetadata := range []bool{false, true} {
// build writes before we start benchmarking so random number generation, etc,
// isn't included in our timing info
writes := make([]entry, 0, nWrites)
@ -399,27 +399,27 @@ func BenchmarkHeadBlockWrites(b *testing.B) {
ts = rnd.Int63()
}
var nonIndexedLabels labels.Labels
if withNonIndexedLabels {
nonIndexedLabels = labels.Labels{{Name: "foo", Value: fmt.Sprint(ts)}}
var structuredMetadata labels.Labels
if withStructuredMetadata {
structuredMetadata = labels.Labels{{Name: "foo", Value: fmt.Sprint(ts)}}
}
writes = append(writes, entry{
t: ts,
s: fmt.Sprint("line:", i),
nonIndexedLabels: nonIndexedLabels,
t: ts,
s: fmt.Sprint("line:", i),
structuredMetadata: structuredMetadata,
})
}
name := tc.desc
if withNonIndexedLabels {
name += " with non-indexed labels"
if withStructuredMetadata {
name += " with structured metadata"
}
b.Run(name, func(b *testing.B) {
for n := 0; n < b.N; n++ {
writeFn := tc.fn()
for _, w := range writes {
writeFn(w.t, w.s, w.nonIndexedLabels)
writeFn(w.t, w.s, w.structuredMetadata)
}
}
})
@ -428,7 +428,7 @@ func BenchmarkHeadBlockWrites(b *testing.B) {
}
func TestUnorderedChunkIterators(t *testing.T) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithNonIndexedLabelsHeadBlockFmt, testBlockSize, testTargetSize)
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for i := 0; i < 100; i++ {
// push in reverse order
require.Nil(t, c.Append(&logproto.Entry{
@ -546,7 +546,7 @@ func BenchmarkUnorderedRead(b *testing.B) {
}
func TestUnorderedIteratorCountsAllEntries(t *testing.T) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithNonIndexedLabelsHeadBlockFmt, testBlockSize, testTargetSize)
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
fillChunkRandomOrder(c, false)
ct := 0
@ -583,7 +583,7 @@ func TestUnorderedIteratorCountsAllEntries(t *testing.T) {
}
func chunkFrom(xs []logproto.Entry) ([]byte, error) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithNonIndexedLabelsHeadBlockFmt, testBlockSize, testTargetSize)
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, x := range xs {
if err := c.Append(&x); err != nil {
return nil, err
@ -643,7 +643,7 @@ func TestReorder(t *testing.T) {
},
} {
t.Run(tc.desc, func(t *testing.T) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithNonIndexedLabelsHeadBlockFmt, testBlockSize, testTargetSize)
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, x := range tc.input {
require.Nil(t, c.Append(&x))
}
@ -660,7 +660,7 @@ func TestReorder(t *testing.T) {
}
func TestReorderAcrossBlocks(t *testing.T) {
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithNonIndexedLabelsHeadBlockFmt, testBlockSize, testTargetSize)
c := NewMemChunk(ChunkFormatV4, EncSnappy, UnorderedWithStructuredMetadataHeadBlockFmt, testBlockSize, testTargetSize)
for _, batch := range [][]int{
// ensure our blocks have overlapping bounds and must be reordered
// before closing.
@ -711,9 +711,9 @@ func Test_HeadIteratorHash(t *testing.T) {
}
for name, b := range map[string]HeadBlock{
"unordered": newUnorderedHeadBlock(UnorderedHeadBlockFmt, nil),
"unordered with non-indexed labels": newUnorderedHeadBlock(UnorderedWithNonIndexedLabelsHeadBlockFmt, newSymbolizer()),
"ordered": &headBlock{},
"unordered": newUnorderedHeadBlock(UnorderedHeadBlockFmt, nil),
"unordered with structured metadata": newUnorderedHeadBlock(UnorderedWithStructuredMetadataHeadBlockFmt, newSymbolizer()),
"ordered": &headBlock{},
} {
t.Run(name, func(t *testing.T) {
require.NoError(t, b.Append(1, "foo", labels.Labels{{Name: "foo", Value: "bar"}}))

@ -15,11 +15,11 @@ func logprotoEntry(ts int64, line string) *logproto.Entry {
}
}
func logprotoEntryWithNonIndexedLabels(ts int64, line string, nonIndexedLabels []logproto.LabelAdapter) *logproto.Entry {
func logprotoEntryWithStructuredMetadata(ts int64, line string, structuredMetadata []logproto.LabelAdapter) *logproto.Entry {
return &logproto.Entry{
Timestamp: time.Unix(0, ts),
Line: line,
NonIndexedLabels: nonIndexedLabels,
Timestamp: time.Unix(0, ts),
Line: line,
StructuredMetadata: structuredMetadata,
}
}
@ -30,7 +30,7 @@ func generateData(enc Encoding, chunksCount, blockSize, targetSize int) ([]Chunk
for n := 0; n < chunksCount; n++ {
entry := logprotoEntry(0, testdata.LogString(0))
c := NewMemChunk(ChunkFormatV4, enc, UnorderedWithNonIndexedLabelsHeadBlockFmt, blockSize, targetSize)
c := NewMemChunk(ChunkFormatV4, enc, UnorderedWithStructuredMetadataHeadBlockFmt, blockSize, targetSize)
for c.SpaceFor(entry) {
size += uint64(len(entry.Line))
_ = c.Append(entry)

@ -49,7 +49,7 @@ func TestIterator(t *testing.T) {
}{
{"dumbChunk", chunkenc.NewDumbChunk},
{"gzipChunk", func() chunkenc.Chunk {
return chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, 256*1024, 0)
return chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
}},
} {
t.Run(chk.name, func(t *testing.T) {

@ -56,7 +56,7 @@ func Test_EncodingChunks(t *testing.T) {
t.Run(fmt.Sprintf("%v-%s", close, tc.desc), func(t *testing.T) {
conf := tc.conf
c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
fillChunk(t, c)
if close {
require.Nil(t, c.Close())
@ -119,7 +119,7 @@ func Test_EncodingChunks(t *testing.T) {
func Test_EncodingCheckpoint(t *testing.T) {
conf := dummyConf()
c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
c := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, conf.BlockSize, conf.TargetChunkSize)
require.Nil(t, c.Append(&logproto.Entry{
Timestamp: time.Unix(1, 0),
Line: "hi there",

@ -124,7 +124,7 @@ func buildChunkDecs(t testing.TB) []*chunkDesc {
for i := range res {
res[i] = &chunkDesc{
closed: true,
chunk: chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, dummyConf().BlockSize, dummyConf().TargetChunkSize),
chunk: chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, dummyConf().BlockSize, dummyConf().TargetChunkSize),
}
fillChunk(t, res[i].chunk)
require.NoError(t, res[i].chunk.Close())

@ -50,7 +50,7 @@ func (m *MemoryWALReader) Err() error { return nil }
func (m *MemoryWALReader) Record() []byte { return m.xs[0] }
func buildMemoryReader(users, totalStreams, entriesPerStream int, withNonIndexedLabels bool) (*MemoryWALReader, []*wal.Record) {
func buildMemoryReader(users, totalStreams, entriesPerStream int, withStructuredMetadata bool) (*MemoryWALReader, []*wal.Record) {
var recs []*wal.Record
reader := &MemoryWALReader{}
for i := 0; i < totalStreams; i++ {
@ -77,8 +77,8 @@ func buildMemoryReader(users, totalStreams, entriesPerStream int, withNonIndexed
Line: fmt.Sprintf("%d", j),
}
if withNonIndexedLabels {
entry.NonIndexedLabels = logproto.FromLabelsToLabelAdapters(labels.FromStrings(
if withStructuredMetadata {
entry.StructuredMetadata = logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", strings.Repeat(fmt.Sprintf("%d", j), 10),
"userID", strings.Repeat(fmt.Sprintf("%d", j), 10),
))
@ -172,8 +172,8 @@ func (r *MemRecoverer) Close() { close(r.done) }
func (r *MemRecoverer) Done() <-chan struct{} { return r.done }
func Test_InMemorySegmentRecover(t *testing.T) {
for _, withNonIndexedLabels := range []bool{true, false} {
t.Run(fmt.Sprintf("nonIndexedLabels=%t", withNonIndexedLabels), func(t *testing.T) {
for _, withStructuredMetadata := range []bool{true, false} {
t.Run(fmt.Sprintf("structuredMetadata=%t", withStructuredMetadata), func(t *testing.T) {
var (
users = 10
streamsCt = 1000
@ -182,9 +182,9 @@ func Test_InMemorySegmentRecover(t *testing.T) {
// TODO: remove once we set v3 as current
if wal.CurrentEntriesRec < wal.WALRecordEntriesV3 {
withNonIndexedLabels = false
withStructuredMetadata = false
}
reader, recs := buildMemoryReader(users, streamsCt, entriesPerStream, withNonIndexedLabels)
reader, recs := buildMemoryReader(users, streamsCt, entriesPerStream, withStructuredMetadata)
recoverer := NewMemRecoverer()

@ -25,7 +25,7 @@ const (
// WALRecordEntriesV2 is the type for the WAL record for samples with an
// additional counter value for use in replaying without the ordering constraint.
WALRecordEntriesV2
// WALRecordEntriesV3 is the type for the WAL record for samples with non-indexed labels.
// WALRecordEntriesV3 is the type for the WAL record for samples with structured metadata.
WALRecordEntriesV3
)
@ -133,9 +133,9 @@ outer:
buf.PutString(s.Line)
if version >= WALRecordEntriesV3 {
// non-indexed labels
buf.PutUvarint(len(s.NonIndexedLabels))
for _, l := range s.NonIndexedLabels {
// structured metadata
buf.PutUvarint(len(s.StructuredMetadata))
for _, l := range s.StructuredMetadata {
buf.PutUvarint(len(l.Name))
buf.PutString(l.Name)
buf.PutUvarint(len(l.Value))
@ -172,17 +172,17 @@ func DecodeEntries(b []byte, version RecordType, rec *Record) error {
lineLength := dec.Uvarint()
line := dec.Bytes(lineLength)
var nonIndexedLabels []logproto.LabelAdapter
var structuredMetadata []logproto.LabelAdapter
if version >= WALRecordEntriesV3 {
nNonIndexedLabels := dec.Uvarint()
if nNonIndexedLabels > 0 {
nonIndexedLabels = make([]logproto.LabelAdapter, 0, nNonIndexedLabels)
for i := 0; dec.Err() == nil && i < nNonIndexedLabels; i++ {
nStructuredMetadata := dec.Uvarint()
if nStructuredMetadata > 0 {
structuredMetadata = make([]logproto.LabelAdapter, 0, nStructuredMetadata)
for i := 0; dec.Err() == nil && i < nStructuredMetadata; i++ {
nameLength := dec.Uvarint()
name := dec.Bytes(nameLength)
valueLength := dec.Uvarint()
value := dec.Bytes(valueLength)
nonIndexedLabels = append(nonIndexedLabels, logproto.LabelAdapter{
structuredMetadata = append(structuredMetadata, logproto.LabelAdapter{
Name: string(name),
Value: string(value),
})
@ -191,9 +191,9 @@ func DecodeEntries(b []byte, version RecordType, rec *Record) error {
}
refEntries.Entries = append(refEntries.Entries, logproto.Entry{
Timestamp: time.Unix(0, baseTime+timeOffset),
Line: string(line),
NonIndexedLabels: nonIndexedLabels,
Timestamp: time.Unix(0, baseTime+timeOffset),
Line: string(line),
StructuredMetadata: structuredMetadata,
})
}

@ -71,7 +71,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(1000, 0),
Line: "first",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "123",
"userID", "a",
)),
@ -79,7 +79,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(2000, 0),
Line: "second",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "456",
"userID", "b",
)),
@ -92,7 +92,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(3000, 0),
Line: "third",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "789",
"userID", "c",
)),
@ -100,7 +100,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(4000, 0),
Line: "fourth",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "123",
"userID", "d",
)),
@ -124,7 +124,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(1000, 0),
Line: "first",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "123",
"userID", "a",
)),
@ -132,7 +132,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(2000, 0),
Line: "second",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "456",
"userID", "b",
)),
@ -146,7 +146,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(3000, 0),
Line: "third",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "789",
"userID", "c",
)),
@ -154,7 +154,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(4000, 0),
Line: "fourth",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "123",
"userID", "d",
)),
@ -178,7 +178,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(1000, 0),
Line: "first",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "123",
"userID", "a",
)),
@ -186,7 +186,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(2000, 0),
Line: "second",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "456",
"userID", "b",
)),
@ -200,7 +200,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(3000, 0),
Line: "third",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "789",
"userID", "c",
)),
@ -208,7 +208,7 @@ func Test_Encoding_Entries(t *testing.T) {
{
Timestamp: time.Unix(4000, 0),
Line: "fourth",
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", "123",
"userID", "d",
)),
@ -226,12 +226,12 @@ func Test_Encoding_Entries(t *testing.T) {
err := DecodeRecord(buf, decoded)
require.Nil(t, err)
// If the version is less than v3, we need to remove the non-indexed labels.
// If the version is less than v3, we need to remove the structured metadata.
expectedRecords := tc.rec
if tc.version < WALRecordEntriesV3 {
for i := range expectedRecords.RefEntries {
for j := range expectedRecords.RefEntries[i].Entries {
expectedRecords.RefEntries[i].Entries[j].NonIndexedLabels = nil
expectedRecords.RefEntries[i].Entries[j].StructuredMetadata = nil
}
}
}
@ -242,8 +242,8 @@ func Test_Encoding_Entries(t *testing.T) {
}
func Benchmark_EncodeEntries(b *testing.B) {
for _, withNonIndexedLabels := range []bool{true, false} {
b.Run(fmt.Sprintf("nonIndexedLabels=%t", withNonIndexedLabels), func(b *testing.B) {
for _, withStructuredMetadata := range []bool{true, false} {
b.Run(fmt.Sprintf("structuredMetadata=%t", withStructuredMetadata), func(b *testing.B) {
var entries []logproto.Entry
for i := int64(0); i < 10000; i++ {
entry := logproto.Entry{
@ -251,8 +251,8 @@ func Benchmark_EncodeEntries(b *testing.B) {
Line: fmt.Sprintf("long line with a lot of data like a log %d", i),
}
if withNonIndexedLabels {
entry.NonIndexedLabels = logproto.FromLabelsToLabelAdapters(labels.FromStrings(
if withStructuredMetadata {
entry.StructuredMetadata = logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", strings.Repeat(fmt.Sprintf("%d", i), 10),
"userID", strings.Repeat(fmt.Sprintf("%d", i), 10),
))
@ -287,8 +287,8 @@ func Benchmark_EncodeEntries(b *testing.B) {
}
func Benchmark_DecodeWAL(b *testing.B) {
for _, withNonIndexedLabels := range []bool{true, false} {
b.Run(fmt.Sprintf("nonIndexedLabels=%t", withNonIndexedLabels), func(b *testing.B) {
for _, withStructuredMetadata := range []bool{true, false} {
b.Run(fmt.Sprintf("structuredMetadata=%t", withStructuredMetadata), func(b *testing.B) {
var entries []logproto.Entry
for i := int64(0); i < 10000; i++ {
entry := logproto.Entry{
@ -296,8 +296,8 @@ func Benchmark_DecodeWAL(b *testing.B) {
Line: fmt.Sprintf("long line with a lot of data like a log %d", i),
}
if withNonIndexedLabels {
entry.NonIndexedLabels = logproto.FromLabelsToLabelAdapters(labels.FromStrings(
if withStructuredMetadata {
entry.StructuredMetadata = logproto.FromLabelsToLabelAdapters(labels.FromStrings(
"traceID", strings.Repeat(fmt.Sprintf("%d", i), 10),
"userID", strings.Repeat(fmt.Sprintf("%d", i), 10),
))

@ -20,14 +20,14 @@ type EntryIterator interface {
}
type EntryIteratorOptions struct {
KeepNonIndexedLabels bool
KeepStructuredMetdata bool
}
type EntryIteratorOption func(*EntryIteratorOptions)
func WithKeepNonIndexedLabels() EntryIteratorOption {
func WithKeepStructuredMetadata() EntryIteratorOption {
return func(o *EntryIteratorOptions) {
o.KeepNonIndexedLabels = true
o.KeepStructuredMetdata = true
}
}

@ -17,9 +17,9 @@ func init() {
// Entry represents a log entry. It includes a log message and the time it occurred at.
type Entry struct {
Timestamp time.Time
Line string
NonIndexedLabels labels.Labels
Timestamp time.Time
Line string
StructuredMetadata labels.Labels
}
func (e *Entry) UnmarshalJSON(data []byte) error {
@ -57,12 +57,12 @@ func (e *Entry) UnmarshalJSON(data []byte) error {
parseError = jsonparser.MalformedObjectError
return
}
var nonIndexedLabels labels.Labels
var structuredMetadata labels.Labels
if err := jsonparser.ObjectEach(value, func(key []byte, value []byte, dataType jsonparser.ValueType, _ int) error {
if dataType != jsonparser.String {
return jsonparser.MalformedStringError
}
nonIndexedLabels = append(nonIndexedLabels, labels.Label{
structuredMetadata = append(structuredMetadata, labels.Label{
Name: string(key),
Value: string(value),
})
@ -71,7 +71,7 @@ func (e *Entry) UnmarshalJSON(data []byte) error {
parseError = err
return
}
e.NonIndexedLabels = nonIndexedLabels
e.StructuredMetadata = structuredMetadata
}
i++
})
@ -93,7 +93,7 @@ func (sliceEntryDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
i := 0
var ts time.Time
var line string
var nonIndexedLabels labels.Labels
var structuredMetadata labels.Labels
ok := iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
var ok bool
switch i {
@ -111,7 +111,7 @@ func (sliceEntryDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
case 2:
iter.ReadMapCB(func(iter *jsoniter.Iterator, labelName string) bool {
labelValue := iter.ReadString()
nonIndexedLabels = append(nonIndexedLabels, labels.Label{
structuredMetadata = append(structuredMetadata, labels.Label{
Name: labelName,
Value: labelValue,
})
@ -129,9 +129,9 @@ func (sliceEntryDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
})
if ok {
*((*[]Entry)(ptr)) = append(*((*[]Entry)(ptr)), Entry{
Timestamp: ts,
Line: line,
NonIndexedLabels: nonIndexedLabels,
Timestamp: ts,
Line: line,
StructuredMetadata: structuredMetadata,
})
return true
}
@ -168,10 +168,10 @@ func (EntryEncoder) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
stream.WriteRaw(`"`)
stream.WriteMore()
stream.WriteStringWithHTMLEscaped(e.Line)
if len(e.NonIndexedLabels) > 0 {
if len(e.StructuredMetadata) > 0 {
stream.WriteMore()
stream.WriteObjectStart()
for i, lbl := range e.NonIndexedLabels {
for i, lbl := range e.StructuredMetadata {
if i > 0 {
stream.WriteMore()
}

@ -33,12 +33,12 @@ var (
bytesIngested = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "loki",
Name: "distributor_bytes_received_total",
Help: "The total number of uncompressed bytes received per tenant. Includes non-indexed labels bytes.",
Help: "The total number of uncompressed bytes received per tenant. Includes structured metadata bytes.",
}, []string{"tenant", "retention_hours"})
nonIndexedLabelsBytesIngested = promauto.NewCounterVec(prometheus.CounterOpts{
structuredMetadataBytesIngested = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "loki",
Name: "distributor_non_indexed_labels_bytes_received_total",
Help: "The total number of uncompressed bytes received per tenant for entries' non-indexed labels",
Name: "distributor_structured_metadata_bytes_received_total",
Help: "The total number of uncompressed bytes received per tenant for entries' structured metadata",
}, []string{"tenant", "retention_hours"})
linesIngested = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "loki",
@ -46,9 +46,9 @@ var (
Help: "The total number of lines received per tenant",
}, []string{"tenant"})
bytesReceivedStats = analytics.NewCounter("distributor_bytes_received")
nonIndexedLabelsBytesReceivedStats = analytics.NewCounter("distributor_non_indexed_labels_bytes_received")
linesReceivedStats = analytics.NewCounter("distributor_lines_received")
bytesReceivedStats = analytics.NewCounter("distributor_bytes_received")
structuredMetadataBytesReceivedStats = analytics.NewCounter("distributor_structured_metadata_bytes_received")
linesReceivedStats = analytics.NewCounter("distributor_lines_received")
)
const applicationJSON = "application/json"
@ -88,11 +88,11 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
contentType := r.Header.Get(contentType)
var (
entriesSize int64
nonIndexedLabelsSize int64
streamLabelsSize int64
totalEntries int64
req logproto.PushRequest
entriesSize int64
structuredMetadataSize int64
streamLabelsSize int64
totalEntries int64
req logproto.PushRequest
)
contentType, _ /* params */, err := mime.ParseMediaType(contentType)
@ -140,16 +140,16 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
for _, e := range s.Entries {
totalEntries++
var entryLabelsSize int64
for _, l := range e.NonIndexedLabels {
for _, l := range e.StructuredMetadata {
entryLabelsSize += int64(len(l.Name) + len(l.Value))
}
entrySize := int64(len(e.Line)) + entryLabelsSize
entriesSize += entrySize
nonIndexedLabelsSize += entryLabelsSize
structuredMetadataSize += entryLabelsSize
bytesIngested.WithLabelValues(userID, retentionHours).Add(float64(entrySize))
nonIndexedLabelsBytesIngested.WithLabelValues(userID, retentionHours).Add(float64(entryLabelsSize))
structuredMetadataBytesIngested.WithLabelValues(userID, retentionHours).Add(float64(entryLabelsSize))
bytesReceivedStats.Inc(entrySize)
nonIndexedLabelsBytesReceivedStats.Inc(entryLabelsSize)
structuredMetadataBytesReceivedStats.Inc(entryLabelsSize)
if e.Timestamp.After(mostRecentEntry) {
mostRecentEntry = e.Timestamp
}
@ -172,7 +172,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete
"entries", totalEntries,
"streamLabelsSize", humanize.Bytes(uint64(streamLabelsSize)),
"entriesSize", humanize.Bytes(uint64(entriesSize)),
"nonIndexedLabelsSize", humanize.Bytes(uint64(nonIndexedLabelsSize)),
"structuredMetadataSize", humanize.Bytes(uint64(structuredMetadataSize)),
"totalSize", humanize.Bytes(uint64(entriesSize+streamLabelsSize)),
"mostRecentLagMs", time.Since(mostRecentEntry).Milliseconds(),
)

@ -44,16 +44,16 @@ func deflateString(source string) string {
}
func TestParseRequest(t *testing.T) {
var previousBytesReceived, previousNonIndexedLabelsBytesReceived, previousLinesReceived int
var previousBytesReceived, previousStructuredMetadataBytesReceived, previousLinesReceived int
for index, test := range []struct {
path string
body string
contentType string
contentEncoding string
valid bool
expectedNonIndexedLabelsBytes int
expectedBytes int
expectedLines int
path string
body string
contentType string
contentEncoding string
valid bool
expectedStructuredMetadataBytes int
expectedBytes int
expectedLines int
}{
{
path: `/loki/api/v1/push`,
@ -177,18 +177,18 @@ func TestParseRequest(t *testing.T) {
valid: false,
},
{
path: `/loki/api/v1/push`,
body: deflateString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz", {"a": "a", "b": "b"} ] ] }]}`),
contentType: `application/json; charset=utf-8`,
contentEncoding: `deflate`,
valid: true,
expectedNonIndexedLabelsBytes: 2*len("a") + 2*len("b"),
expectedBytes: len("fizzbuzz") + 2*len("a") + 2*len("b"),
expectedLines: 1,
path: `/loki/api/v1/push`,
body: deflateString(`{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz", {"a": "a", "b": "b"} ] ] }]}`),
contentType: `application/json; charset=utf-8`,
contentEncoding: `deflate`,
valid: true,
expectedStructuredMetadataBytes: 2*len("a") + 2*len("b"),
expectedBytes: len("fizzbuzz") + 2*len("a") + 2*len("b"),
expectedLines: 1,
},
} {
t.Run(fmt.Sprintf("test %d", index), func(t *testing.T) {
nonIndexedLabelsBytesIngested.Reset()
structuredMetadataBytesIngested.Reset()
bytesIngested.Reset()
linesIngested.Reset()
@ -202,8 +202,8 @@ func TestParseRequest(t *testing.T) {
data, err := ParseRequest(util_log.Logger, "fake", request, nil)
nonIndexedLabelsBytesReceived := int(nonIndexedLabelsBytesReceivedStats.Value()["total"].(int64)) - previousNonIndexedLabelsBytesReceived
previousNonIndexedLabelsBytesReceived += nonIndexedLabelsBytesReceived
structuredMetadataBytesReceived := int(structuredMetadataBytesReceivedStats.Value()["total"].(int64)) - previousStructuredMetadataBytesReceived
previousStructuredMetadataBytesReceived += structuredMetadataBytesReceived
bytesReceived := int(bytesReceivedStats.Value()["total"].(int64)) - previousBytesReceived
previousBytesReceived += bytesReceived
linesReceived := int(linesReceivedStats.Value()["total"].(int64)) - previousLinesReceived
@ -212,19 +212,19 @@ func TestParseRequest(t *testing.T) {
if test.valid {
assert.Nil(t, err, "Should not give error for %d", index)
assert.NotNil(t, data, "Should give data for %d", index)
require.Equal(t, test.expectedNonIndexedLabelsBytes, nonIndexedLabelsBytesReceived)
require.Equal(t, test.expectedStructuredMetadataBytes, structuredMetadataBytesReceived)
require.Equal(t, test.expectedBytes, bytesReceived)
require.Equal(t, test.expectedLines, linesReceived)
require.Equal(t, float64(test.expectedNonIndexedLabelsBytes), testutil.ToFloat64(nonIndexedLabelsBytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(test.expectedStructuredMetadataBytes), testutil.ToFloat64(structuredMetadataBytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(test.expectedBytes), testutil.ToFloat64(bytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(test.expectedLines), testutil.ToFloat64(linesIngested.WithLabelValues("fake")))
} else {
assert.NotNil(t, err, "Should give error for %d", index)
assert.Nil(t, data, "Should not give data for %d", index)
require.Equal(t, 0, nonIndexedLabelsBytesReceived)
require.Equal(t, 0, structuredMetadataBytesReceived)
require.Equal(t, 0, bytesReceived)
require.Equal(t, 0, linesReceived)
require.Equal(t, float64(0), testutil.ToFloat64(nonIndexedLabelsBytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(0), testutil.ToFloat64(structuredMetadataBytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(0), testutil.ToFloat64(bytesIngested.WithLabelValues("fake", "")))
require.Equal(t, float64(0), testutil.ToFloat64(linesIngested.WithLabelValues("fake")))
}

@ -155,13 +155,13 @@ func unmarshalHTTPToLogProtoEntry(data []byte) (logproto.Entry, error) {
return
}
e.Line = v
case 2: // nonIndexedLabels
var nonIndexedLabels []logproto.LabelAdapter
case 2: // structuredMetadata
var structuredMetadata []logproto.LabelAdapter
err := jsonparser.ObjectEach(value, func(key, val []byte, dataType jsonparser.ValueType, _ int) error {
if dataType != jsonparser.String {
return jsonparser.MalformedStringError
}
nonIndexedLabels = append(nonIndexedLabels, logproto.LabelAdapter{
structuredMetadata = append(structuredMetadata, logproto.LabelAdapter{
Name: string(key),
Value: string(val),
})
@ -171,7 +171,7 @@ func unmarshalHTTPToLogProtoEntry(data []byte) (logproto.Entry, error) {
parseError = err
return
}
e.NonIndexedLabels = nonIndexedLabels
e.StructuredMetadata = structuredMetadata
}
i++
})

@ -154,7 +154,7 @@ func TestStreams_ToProto(t *testing.T) {
Labels: map[string]string{"foo": "bar"},
Entries: []Entry{
{Timestamp: time.Unix(0, 1), Line: "1"},
{Timestamp: time.Unix(0, 2), Line: "2", NonIndexedLabels: labels.Labels{
{Timestamp: time.Unix(0, 2), Line: "2", StructuredMetadata: labels.Labels{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
}},
@ -164,7 +164,7 @@ func TestStreams_ToProto(t *testing.T) {
Labels: map[string]string{"foo": "bar", "lvl": "error"},
Entries: []Entry{
{Timestamp: time.Unix(0, 3), Line: "3"},
{Timestamp: time.Unix(0, 4), Line: "4", NonIndexedLabels: labels.Labels{
{Timestamp: time.Unix(0, 4), Line: "4", StructuredMetadata: labels.Labels{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
}},
@ -176,7 +176,7 @@ func TestStreams_ToProto(t *testing.T) {
Labels: `{foo="bar"}`,
Entries: []logproto.Entry{
{Timestamp: time.Unix(0, 1), Line: "1"},
{Timestamp: time.Unix(0, 2), Line: "2", NonIndexedLabels: []logproto.LabelAdapter{
{Timestamp: time.Unix(0, 2), Line: "2", StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
}},
@ -186,7 +186,7 @@ func TestStreams_ToProto(t *testing.T) {
Labels: `{foo="bar", lvl="error"}`,
Entries: []logproto.Entry{
{Timestamp: time.Unix(0, 3), Line: "3"},
{Timestamp: time.Unix(0, 4), Line: "4", NonIndexedLabels: []logproto.LabelAdapter{
{Timestamp: time.Unix(0, 4), Line: "4", StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
}},
@ -223,7 +223,7 @@ func Test_QueryResponseUnmarshal(t *testing.T) {
Labels: LabelSet{"foo": "bar"},
Entries: []Entry{
{Timestamp: time.Unix(0, 1), Line: "1"},
{Timestamp: time.Unix(0, 2), Line: "2", NonIndexedLabels: labels.Labels{
{Timestamp: time.Unix(0, 2), Line: "2", StructuredMetadata: labels.Labels{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
}},
@ -246,7 +246,7 @@ func Test_QueryResponseUnmarshal(t *testing.T) {
Labels: LabelSet{"foo": "bar"},
Entries: []Entry{
{Timestamp: time.Unix(0, 1), Line: "log line 1"},
{Timestamp: time.Unix(0, 2), Line: "some log line 2", NonIndexedLabels: labels.Labels{
{Timestamp: time.Unix(0, 2), Line: "some log line 2", StructuredMetadata: labels.Labels{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
}},
@ -259,7 +259,7 @@ func Test_QueryResponseUnmarshal(t *testing.T) {
{Timestamp: time.Unix(0, 2), Line: "2"},
{Timestamp: time.Unix(0, 2), Line: "2"},
{Timestamp: time.Unix(0, 2), Line: "2"},
{Timestamp: time.Unix(0, 2), Line: "2", NonIndexedLabels: labels.Labels{
{Timestamp: time.Unix(0, 2), Line: "2", StructuredMetadata: labels.Labels{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
}},

@ -34,8 +34,8 @@ type SampleExtractor interface {
// A StreamSampleExtractor never mutate the received line.
type StreamSampleExtractor interface {
BaseLabels() LabelsResult
Process(ts int64, line []byte, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool)
ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool)
Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool)
ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool)
}
type lineSampleExtractor struct {
@ -80,9 +80,9 @@ type streamLineSampleExtractor struct {
builder *LabelsBuilder
}
func (l *streamLineSampleExtractor) Process(ts int64, line []byte, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool) {
func (l *streamLineSampleExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
l.builder.Reset()
l.builder.Add(nonIndexedLabels...)
l.builder.Add(structuredMetadata...)
// short circuit.
if l.Stage == NoopStage {
@ -96,9 +96,9 @@ func (l *streamLineSampleExtractor) Process(ts int64, line []byte, nonIndexedLab
return l.LineExtractor(line), l.builder.GroupedLabels(), true
}
func (l *streamLineSampleExtractor) ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool) {
func (l *streamLineSampleExtractor) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
// unsafe get bytes since we have the guarantee that the line won't be mutated.
return l.Process(ts, unsafeGetBytes(line), nonIndexedLabels...)
return l.Process(ts, unsafeGetBytes(line), structuredMetadata...)
}
func (l *streamLineSampleExtractor) BaseLabels() LabelsResult { return l.builder.currentResult }
@ -171,10 +171,10 @@ func (l *labelSampleExtractor) ForStream(labels labels.Labels) StreamSampleExtra
return res
}
func (l *streamLabelSampleExtractor) Process(ts int64, line []byte, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool) {
func (l *streamLabelSampleExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
// Apply the pipeline first.
l.builder.Reset()
l.builder.Add(nonIndexedLabels...)
l.builder.Add(structuredMetadata...)
line, ok := l.preStage.Process(ts, line, l.builder)
if !ok {
return 0, nil, false
@ -202,9 +202,9 @@ func (l *streamLabelSampleExtractor) Process(ts int64, line []byte, nonIndexedLa
return v, l.builder.GroupedLabels(), true
}
func (l *streamLabelSampleExtractor) ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool) {
func (l *streamLabelSampleExtractor) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
// unsafe get bytes since we have the guarantee that the line won't be mutated.
return l.Process(ts, unsafeGetBytes(line), nonIndexedLabels...)
return l.Process(ts, unsafeGetBytes(line), structuredMetadata...)
}
func (l *streamLabelSampleExtractor) BaseLabels() LabelsResult { return l.builder.currentResult }
@ -251,13 +251,13 @@ func (sp *filteringStreamExtractor) BaseLabels() LabelsResult {
return sp.extractor.BaseLabels()
}
func (sp *filteringStreamExtractor) Process(ts int64, line []byte, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool) {
func (sp *filteringStreamExtractor) Process(ts int64, line []byte, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
for _, filter := range sp.filters {
if ts < filter.start || ts > filter.end {
continue
}
_, _, matches := filter.pipeline.Process(ts, line, nonIndexedLabels...)
_, _, matches := filter.pipeline.Process(ts, line, structuredMetadata...)
if matches { // When the filter matches, don't run the next step
return 0, nil, false
}
@ -266,13 +266,13 @@ func (sp *filteringStreamExtractor) Process(ts int64, line []byte, nonIndexedLab
return sp.extractor.Process(ts, line)
}
func (sp *filteringStreamExtractor) ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (float64, LabelsResult, bool) {
func (sp *filteringStreamExtractor) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (float64, LabelsResult, bool) {
for _, filter := range sp.filters {
if ts < filter.start || ts > filter.end {
continue
}
_, _, matches := filter.pipeline.ProcessString(ts, line, nonIndexedLabels...)
_, _, matches := filter.pipeline.ProcessString(ts, line, structuredMetadata...)
if matches { // When the filter matches, don't run the next step
return 0, nil, false
}

@ -11,14 +11,14 @@ import (
func Test_labelSampleExtractor_Extract(t *testing.T) {
tests := []struct {
name string
ex SampleExtractor
in labels.Labels
nonIndexedLabels labels.Labels
want float64
wantLbs labels.Labels
wantOk bool
line string
name string
ex SampleExtractor
in labels.Labels
structuredMetadata labels.Labels
want float64
wantLbs labels.Labels
wantOk bool
line string
}{
{
name: "convert float",
@ -71,48 +71,48 @@ func Test_labelSampleExtractor_Extract(t *testing.T) {
wantOk: true,
},
{
name: "convert float with non-indexed labels",
name: "convert float with structured metadata",
ex: mustSampleExtractor(LabelExtractorWithStages(
"foo", ConvertFloat, nil, false, false, nil, NoopStage,
)),
in: labels.EmptyLabels(),
nonIndexedLabels: labels.FromStrings("foo", "15.0"),
want: 15,
wantLbs: labels.EmptyLabels(),
wantOk: true,
in: labels.EmptyLabels(),
structuredMetadata: labels.FromStrings("foo", "15.0"),
want: 15,
wantLbs: labels.EmptyLabels(),
wantOk: true,
},
{
name: "convert float as vector with non-indexed labels with no grouping",
name: "convert float as vector with structured metadata with no grouping",
ex: mustSampleExtractor(LabelExtractorWithStages(
"foo", ConvertFloat, nil, false, true, nil, NoopStage,
)),
in: labels.FromStrings("bar", "buzz"),
nonIndexedLabels: labels.FromStrings("foo", "15.0", "buzz", "blip"),
want: 15,
wantLbs: labels.EmptyLabels(),
wantOk: true,
in: labels.FromStrings("bar", "buzz"),
structuredMetadata: labels.FromStrings("foo", "15.0", "buzz", "blip"),
want: 15,
wantLbs: labels.EmptyLabels(),
wantOk: true,
},
{
name: "convert float with non-indexed labels and grouping",
name: "convert float with structured metadata and grouping",
ex: mustSampleExtractor(LabelExtractorWithStages(
"foo", ConvertFloat, []string{"bar", "buzz"}, false, false, nil, NoopStage,
)),
in: labels.FromStrings("bar", "buzz", "namespace", "dev"),
nonIndexedLabels: labels.FromStrings("foo", "15.0", "buzz", "blip"),
want: 15,
wantLbs: labels.FromStrings("bar", "buzz", "buzz", "blip"),
wantOk: true,
in: labels.FromStrings("bar", "buzz", "namespace", "dev"),
structuredMetadata: labels.FromStrings("foo", "15.0", "buzz", "blip"),
want: 15,
wantLbs: labels.FromStrings("bar", "buzz", "buzz", "blip"),
wantOk: true,
},
{
name: "convert float with non-indexed labels and grouping without",
name: "convert float with structured metadata and grouping without",
ex: mustSampleExtractor(LabelExtractorWithStages(
"foo", ConvertFloat, []string{"bar", "buzz"}, true, false, nil, NoopStage,
)),
in: labels.FromStrings("bar", "buzz", "namespace", "dev"),
nonIndexedLabels: labels.FromStrings("foo", "15.0", "buzz", "blip"),
want: 15,
wantLbs: labels.FromStrings("namespace", "dev"),
wantOk: true,
in: labels.FromStrings("bar", "buzz", "namespace", "dev"),
structuredMetadata: labels.FromStrings("foo", "15.0", "buzz", "blip"),
want: 15,
wantLbs: labels.FromStrings("namespace", "dev"),
wantOk: true,
},
{
name: "convert duration with",
@ -131,7 +131,7 @@ func Test_labelSampleExtractor_Extract(t *testing.T) {
wantOk: true,
},
{
name: "convert duration with non-indexed labels",
name: "convert duration with structured metadata",
ex: mustSampleExtractor(LabelExtractorWithStages(
"foo", ConvertDuration, []string{"bar", "buzz"}, false, false, nil, NoopStage,
)),
@ -139,8 +139,8 @@ func Test_labelSampleExtractor_Extract(t *testing.T) {
"bar", "foo",
"namespace", "dev",
),
nonIndexedLabels: labels.FromStrings("foo", "500ms", "buzz", "blip"),
want: 0.5,
structuredMetadata: labels.FromStrings("foo", "500ms", "buzz", "blip"),
want: 0.5,
wantLbs: labels.FromStrings("bar", "foo",
"buzz", "blip",
),
@ -163,7 +163,7 @@ func Test_labelSampleExtractor_Extract(t *testing.T) {
wantOk: true,
},
{
name: "convert bytes with non-indexed labels",
name: "convert bytes with structured metadata",
ex: mustSampleExtractor(LabelExtractorWithStages(
"foo", ConvertBytes, []string{"bar", "buzz"}, false, false, nil, NoopStage,
)),
@ -171,8 +171,8 @@ func Test_labelSampleExtractor_Extract(t *testing.T) {
"bar", "foo",
"namespace", "dev",
),
nonIndexedLabels: labels.FromStrings("foo", "13 MiB", "buzz", "blip"),
want: 13 * 1024 * 1024,
structuredMetadata: labels.FromStrings("foo", "13 MiB", "buzz", "blip"),
want: 13 * 1024 * 1024,
wantLbs: labels.FromStrings("bar", "foo",
"buzz", "blip",
),
@ -194,12 +194,12 @@ func Test_labelSampleExtractor_Extract(t *testing.T) {
wantOk: true,
},
{
name: "not convertable with non-indexed labels",
name: "not convertable with structured metadata",
ex: mustSampleExtractor(LabelExtractorWithStages(
"foo", ConvertFloat, []string{"bar", "buzz"}, false, false, nil, NoopStage,
)),
in: labels.FromStrings("bar", "foo"),
nonIndexedLabels: labels.FromStrings("foo", "not_a_number"),
in: labels.FromStrings("bar", "foo"),
structuredMetadata: labels.FromStrings("foo", "not_a_number"),
wantLbs: labels.FromStrings("__error__", "SampleExtractionErr",
"__error_details__", "strconv.ParseFloat: parsing \"not_a_number\": invalid syntax",
"bar", "foo",
@ -235,12 +235,12 @@ func Test_labelSampleExtractor_Extract(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
outval, outlbs, ok := tt.ex.ForStream(tt.in).Process(0, []byte(tt.line), tt.nonIndexedLabels...)
outval, outlbs, ok := tt.ex.ForStream(tt.in).Process(0, []byte(tt.line), tt.structuredMetadata...)
require.Equal(t, tt.wantOk, ok)
require.Equal(t, tt.want, outval)
require.Equal(t, tt.wantLbs, outlbs.Labels())
outval, outlbs, ok = tt.ex.ForStream(tt.in).ProcessString(0, tt.line, tt.nonIndexedLabels...)
outval, outlbs, ok = tt.ex.ForStream(tt.in).ProcessString(0, tt.line, tt.structuredMetadata...)
require.Equal(t, tt.wantOk, ok)
require.Equal(t, tt.want, outval)
require.Equal(t, tt.wantLbs, outlbs.Labels())
@ -361,10 +361,10 @@ func TestNewLineSampleExtractor(t *testing.T) {
require.False(t, ok)
}
func TestNewLineSampleExtractorWithNonIndexedLabels(t *testing.T) {
func TestNewLineSampleExtractorWithStructuredMetadata(t *testing.T) {
lbs := labels.FromStrings("foo", "bar")
nonIndexedLabels := labels.FromStrings("user", "bob")
expectedLabelsResults := append(lbs, nonIndexedLabels...)
structuredMetadata := labels.FromStrings("user", "bob")
expectedLabelsResults := append(lbs, structuredMetadata...)
se, err := NewLineSampleExtractor(CountExtractor, []Stage{
NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")),
NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, "user", "bob")),
@ -372,29 +372,29 @@ func TestNewLineSampleExtractorWithNonIndexedLabels(t *testing.T) {
require.NoError(t, err)
sse := se.ForStream(lbs)
f, l, ok := sse.Process(0, []byte(`foo`), nonIndexedLabels...)
f, l, ok := sse.Process(0, []byte(`foo`), structuredMetadata...)
require.True(t, ok)
require.Equal(t, 1., f)
assertLabelResult(t, expectedLabelsResults, l)
f, l, ok = sse.ProcessString(0, `foo`, nonIndexedLabels...)
f, l, ok = sse.ProcessString(0, `foo`, structuredMetadata...)
require.True(t, ok)
require.Equal(t, 1., f)
assertLabelResult(t, expectedLabelsResults, l)
// test duplicated non-indexed labels with stream labels
// test duplicated structured metadata with stream labels
expectedLabelsResults = append(lbs, labels.Label{
Name: "foo_extracted", Value: "baz",
})
expectedLabelsResults = append(expectedLabelsResults, nonIndexedLabels...)
f, l, ok = sse.Process(0, []byte(`foo`), append(nonIndexedLabels, labels.Label{
expectedLabelsResults = append(expectedLabelsResults, structuredMetadata...)
f, l, ok = sse.Process(0, []byte(`foo`), append(structuredMetadata, labels.Label{
Name: "foo", Value: "baz",
})...)
require.True(t, ok)
require.Equal(t, 1., f)
assertLabelResult(t, expectedLabelsResults, l)
f, l, ok = sse.ProcessString(0, `foo`, append(nonIndexedLabels, labels.Label{
f, l, ok = sse.ProcessString(0, `foo`, append(structuredMetadata, labels.Label{
Name: "foo", Value: "baz",
})...)
require.True(t, ok)
@ -409,7 +409,7 @@ func TestNewLineSampleExtractorWithNonIndexedLabels(t *testing.T) {
require.NoError(t, err)
sse = se.ForStream(lbs)
f, l, ok = sse.Process(0, []byte(`foo`), nonIndexedLabels...)
f, l, ok = sse.Process(0, []byte(`foo`), structuredMetadata...)
require.True(t, ok)
require.Equal(t, 3., f)
assertLabelResult(t, labels.FromStrings("foo", "bar"), l)
@ -427,12 +427,12 @@ func TestFilteringSampleExtractor(t *testing.T) {
}, newStubExtractor())
tt := []struct {
name string
ts int64
line string
labels labels.Labels
nonIndexedLabels labels.Labels
ok bool
name string
ts int64
line string
labels labels.Labels
structuredMetadata labels.Labels
ok bool
}{
{"it is after the timerange", 6, "line", labels.FromStrings("baz", "foo"), nil, true},
{"it is before the timerange", 1, "line", labels.FromStrings("baz", "foo"), nil, true},
@ -440,17 +440,17 @@ func TestFilteringSampleExtractor(t *testing.T) {
{"it doesn't match all the selectors", 3, "line", labels.FromStrings("foo", "bar"), nil, true},
{"it doesn't match any selectors", 3, "line", labels.FromStrings("beep", "boop"), nil, true},
{"it matches all selectors", 3, "line", labels.FromStrings("foo", "bar", "bar", "baz"), nil, false},
{"it doesn't match all non-indexed labels", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "alice"), true},
{"it matches all non-indexed labels", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "bob"), false},
{"it doesn't match all structured metadata", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "alice"), true},
{"it matches all structured metadata", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "bob"), false},
{"it tries all the filters", 5, "line", labels.FromStrings("baz", "foo"), nil, false},
}
for _, test := range tt {
t.Run(test.name, func(t *testing.T) {
_, _, ok := se.ForStream(test.labels).Process(test.ts, []byte(test.line), test.nonIndexedLabels...)
_, _, ok := se.ForStream(test.labels).Process(test.ts, []byte(test.line), test.structuredMetadata...)
require.Equal(t, test.ok, ok)
_, _, ok = se.ForStream(test.labels).ProcessString(test.ts, test.line, test.nonIndexedLabels...)
_, _, ok = se.ForStream(test.labels).ProcessString(test.ts, test.line, test.structuredMetadata...)
require.Equal(t, test.ok, ok)
})
}

@ -23,8 +23,8 @@ type StreamPipeline interface {
BaseLabels() LabelsResult
// Process processes a log line and returns the transformed line and the labels.
// The buffer returned for the log line can be reused on subsequent calls to Process and therefore must be copied.
Process(ts int64, line []byte, nonIndexedLabels ...labels.Label) (resultLine []byte, resultLabels LabelsResult, matches bool)
ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (resultLine string, resultLabels LabelsResult, matches bool)
Process(ts int64, line []byte, structuredMetadata ...labels.Label) (resultLine []byte, resultLabels LabelsResult, matches bool)
ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (resultLine string, resultLabels LabelsResult, matches bool)
}
// Stage is a single step of a Pipeline.
@ -87,14 +87,14 @@ type noopStreamPipeline struct {
builder *LabelsBuilder
}
func (n noopStreamPipeline) Process(_ int64, line []byte, nonIndexedLabels ...labels.Label) ([]byte, LabelsResult, bool) {
func (n noopStreamPipeline) Process(_ int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) {
n.builder.Reset()
n.builder.Add(nonIndexedLabels...)
n.builder.Add(structuredMetadata...)
return line, n.builder.LabelsResult(), true
}
func (n noopStreamPipeline) ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (string, LabelsResult, bool) {
_, lr, ok := n.Process(ts, unsafeGetBytes(line), nonIndexedLabels...)
func (n noopStreamPipeline) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (string, LabelsResult, bool) {
_, lr, ok := n.Process(ts, unsafeGetBytes(line), structuredMetadata...)
return line, lr, ok
}
@ -201,10 +201,10 @@ func (p *pipeline) Reset() {
}
}
func (p *streamPipeline) Process(ts int64, line []byte, nonIndexedLabels ...labels.Label) ([]byte, LabelsResult, bool) {
func (p *streamPipeline) Process(ts int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) {
var ok bool
p.builder.Reset()
p.builder.Add(nonIndexedLabels...)
p.builder.Add(structuredMetadata...)
for _, s := range p.stages {
line, ok = s.Process(ts, line, p.builder)
@ -215,9 +215,9 @@ func (p *streamPipeline) Process(ts int64, line []byte, nonIndexedLabels ...labe
return line, p.builder.LabelsResult(), true
}
func (p *streamPipeline) ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (string, LabelsResult, bool) {
func (p *streamPipeline) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (string, LabelsResult, bool) {
// Stages only read from the line.
lb, lr, ok := p.Process(ts, unsafeGetBytes(line), nonIndexedLabels...)
lb, lr, ok := p.Process(ts, unsafeGetBytes(line), structuredMetadata...)
// but the returned line needs to be copied.
return string(lb), lr, ok
}
@ -296,34 +296,34 @@ func (sp *filteringStreamPipeline) BaseLabels() LabelsResult {
return sp.pipeline.BaseLabels()
}
func (sp *filteringStreamPipeline) Process(ts int64, line []byte, nonIndexedLabels ...labels.Label) ([]byte, LabelsResult, bool) {
func (sp *filteringStreamPipeline) Process(ts int64, line []byte, structuredMetadata ...labels.Label) ([]byte, LabelsResult, bool) {
for _, filter := range sp.filters {
if ts < filter.start || ts > filter.end {
continue
}
_, _, matches := filter.pipeline.Process(ts, line, nonIndexedLabels...)
_, _, matches := filter.pipeline.Process(ts, line, structuredMetadata...)
if matches { // When the filter matches, don't run the next step
return nil, nil, false
}
}
return sp.pipeline.Process(ts, line, nonIndexedLabels...)
return sp.pipeline.Process(ts, line, structuredMetadata...)
}
func (sp *filteringStreamPipeline) ProcessString(ts int64, line string, nonIndexedLabels ...labels.Label) (string, LabelsResult, bool) {
func (sp *filteringStreamPipeline) ProcessString(ts int64, line string, structuredMetadata ...labels.Label) (string, LabelsResult, bool) {
for _, filter := range sp.filters {
if ts < filter.start || ts > filter.end {
continue
}
_, _, matches := filter.pipeline.ProcessString(ts, line, nonIndexedLabels...)
_, _, matches := filter.pipeline.ProcessString(ts, line, structuredMetadata...)
if matches { // When the filter matches, don't run the next step
return "", nil, false
}
}
return sp.pipeline.ProcessString(ts, line, nonIndexedLabels...)
return sp.pipeline.ProcessString(ts, line, structuredMetadata...)
}
// ReduceStages reduces multiple stages into one.

@ -24,27 +24,27 @@ func TestNoopPipeline(t *testing.T) {
require.Equal(t, NewLabelsResult(lbs, lbs.Hash()), lbr)
require.Equal(t, true, matches)
nonIndexedLabels := labels.Labels{
structuredMetadata := labels.Labels{
{Name: "y", Value: "1"},
{Name: "z", Value: "2"},
}
expectedLabelsResults := append(lbs, nonIndexedLabels...)
l, lbr, matches = pipeline.ForStream(lbs).Process(0, []byte(""), nonIndexedLabels...)
expectedLabelsResults := append(lbs, structuredMetadata...)
l, lbr, matches = pipeline.ForStream(lbs).Process(0, []byte(""), structuredMetadata...)
require.Equal(t, []byte(""), l)
require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr)
require.Equal(t, true, matches)
ls, lbr, matches = pipeline.ForStream(lbs).ProcessString(0, "", nonIndexedLabels...)
ls, lbr, matches = pipeline.ForStream(lbs).ProcessString(0, "", structuredMetadata...)
require.Equal(t, "", ls)
require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr)
require.Equal(t, true, matches)
// test duplicated non-indexed labels with stream labels
// test duplicated structured metadata with stream labels
expectedLabelsResults = append(lbs, labels.Label{
Name: "foo_extracted", Value: "baz",
})
expectedLabelsResults = append(expectedLabelsResults, nonIndexedLabels...)
l, lbr, matches = pipeline.ForStream(lbs).Process(0, []byte(""), append(nonIndexedLabels, labels.Label{
expectedLabelsResults = append(expectedLabelsResults, structuredMetadata...)
l, lbr, matches = pipeline.ForStream(lbs).Process(0, []byte(""), append(structuredMetadata, labels.Label{
Name: "foo", Value: "baz",
})...)
require.Equal(t, []byte(""), l)
@ -92,39 +92,39 @@ func TestPipeline(t *testing.T) {
require.Len(t, p.baseBuilder.add, 0)
}
func TestPipelineWithNonIndexedLabels(t *testing.T) {
func TestPipelineWithStructuredMetadata(t *testing.T) {
lbs := labels.FromStrings("foo", "bar")
nonIndexedLabels := labels.FromStrings("user", "bob")
expectedLabelsResults := append(lbs, nonIndexedLabels...)
structuredMetadata := labels.FromStrings("user", "bob")
expectedLabelsResults := append(lbs, structuredMetadata...)
p := NewPipeline([]Stage{
NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")),
NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, "user", "bob")),
newMustLineFormatter("lbs {{.foo}} {{.user}}"),
}).(*pipeline)
l, lbr, matches := p.ForStream(lbs).Process(0, []byte("line"), nonIndexedLabels...)
l, lbr, matches := p.ForStream(lbs).Process(0, []byte("line"), structuredMetadata...)
require.Equal(t, []byte("lbs bar bob"), l)
require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr)
require.Equal(t, true, matches)
ls, lbr, matches := p.ForStream(lbs).ProcessString(0, "line", nonIndexedLabels...)
ls, lbr, matches := p.ForStream(lbs).ProcessString(0, "line", structuredMetadata...)
require.Equal(t, "lbs bar bob", ls)
require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr)
require.Equal(t, true, matches)
// test duplicated non-indexed labels with stream labels
// test duplicated structured metadata with stream labels
expectedLabelsResults = append(lbs, labels.Label{
Name: "foo_extracted", Value: "baz",
})
expectedLabelsResults = append(expectedLabelsResults, nonIndexedLabels...)
l, lbr, matches = p.ForStream(lbs).Process(0, []byte("line"), append(nonIndexedLabels, labels.Label{
expectedLabelsResults = append(expectedLabelsResults, structuredMetadata...)
l, lbr, matches = p.ForStream(lbs).Process(0, []byte("line"), append(structuredMetadata, labels.Label{
Name: "foo", Value: "baz",
})...)
require.Equal(t, []byte("lbs bar bob"), l)
require.Equal(t, NewLabelsResult(expectedLabelsResults, expectedLabelsResults.Hash()), lbr)
require.Equal(t, true, matches)
ls, lbr, matches = p.ForStream(lbs).ProcessString(0, "line", append(nonIndexedLabels, labels.Label{
ls, lbr, matches = p.ForStream(lbs).ProcessString(0, "line", append(structuredMetadata, labels.Label{
Name: "foo", Value: "baz",
})...)
require.Equal(t, "lbs bar bob", ls)
@ -141,12 +141,12 @@ func TestPipelineWithNonIndexedLabels(t *testing.T) {
require.Equal(t, nil, lbr)
require.Equal(t, false, matches)
l, lbr, matches = p.ForStream(labels.EmptyLabels()).Process(0, []byte("line"), nonIndexedLabels...)
l, lbr, matches = p.ForStream(labels.EmptyLabels()).Process(0, []byte("line"), structuredMetadata...)
require.Equal(t, []byte(nil), l)
require.Equal(t, nil, lbr)
require.Equal(t, false, matches)
ls, lbr, matches = p.ForStream(labels.EmptyLabels()).ProcessString(0, "line", nonIndexedLabels...)
ls, lbr, matches = p.ForStream(labels.EmptyLabels()).ProcessString(0, "line", structuredMetadata...)
require.Equal(t, "", ls)
require.Equal(t, nil, lbr)
require.Equal(t, false, matches)
@ -163,12 +163,12 @@ func TestPipelineWithNonIndexedLabels(t *testing.T) {
func TestFilteringPipeline(t *testing.T) {
tt := []struct {
name string
ts int64
line string
inputStreamLabels labels.Labels
nonIndexedLabels labels.Labels
ok bool
name string
ts int64
line string
inputStreamLabels labels.Labels
structuredMetadata labels.Labels
ok bool
}{
{"it is before the timerange", 1, "line", labels.FromStrings("baz", "foo"), nil, true},
{"it is after the timerange", 6, "line", labels.FromStrings("baz", "foo"), nil, true},
@ -176,8 +176,8 @@ func TestFilteringPipeline(t *testing.T) {
{"it doesn't match all the selectors", 3, "line", labels.FromStrings("foo", "bar"), nil, true},
{"it doesn't match any selectors", 3, "line", labels.FromStrings("beep", "boop"), nil, true},
{"it matches all selectors", 3, "line", labels.FromStrings("foo", "bar", "bar", "baz"), nil, false},
{"it doesn't match all non-indexed labels", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "alice"), true},
{"it matches all non-indexed labels", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "bob"), false},
{"it doesn't match all structured metadata", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "alice"), true},
{"it matches all structured metadata", 3, "line", labels.FromStrings("foo", "baz"), labels.FromStrings("user", "bob"), false},
{"it tries all the filters", 5, "line", labels.FromStrings("baz", "foo"), nil, false},
}
@ -190,10 +190,10 @@ func TestFilteringPipeline(t *testing.T) {
}, downstream)
t.Run(test.name, func(t *testing.T) {
_, _, matches := p.ForStream(test.inputStreamLabels).Process(test.ts, []byte(test.line), test.nonIndexedLabels...)
_, _, matches := p.ForStream(test.inputStreamLabels).Process(test.ts, []byte(test.line), test.structuredMetadata...)
require.Equal(t, test.ok, matches)
_, _, matches = p.ForStream(test.inputStreamLabels).ProcessString(test.ts, test.line, test.nonIndexedLabels...)
_, _, matches = p.ForStream(test.inputStreamLabels).ProcessString(test.ts, test.line, test.structuredMetadata...)
require.Equal(t, test.ok, matches)
p.Reset()
@ -203,7 +203,7 @@ func TestFilteringPipeline(t *testing.T) {
}
//nolint:unparam
func newPipelineFilter(start, end int64, lbls, nonIndexedLbls labels.Labels, filter string) PipelineFilter {
func newPipelineFilter(start, end int64, lbls, structuredMetadata labels.Labels, filter string) PipelineFilter {
var stages []Stage
var matchers []*labels.Matcher
lbls.Range(func(l labels.Label) {
@ -211,7 +211,7 @@ func newPipelineFilter(start, end int64, lbls, nonIndexedLbls labels.Labels, fil
matchers = append(matchers, m)
})
nonIndexedLbls.Range(func(l labels.Label) {
structuredMetadata.Range(func(l labels.Label) {
s := NewStringLabelFilter(labels.MustNewMatcher(labels.MatchEqual, l.Name, l.Value))
stages = append(stages, s)
})

@ -129,7 +129,7 @@ func RecordRangeAndInstantQueryMetrics(
"returned_lines", returnedLines,
"throughput", strings.Replace(humanize.Bytes(uint64(stats.Summary.BytesProcessedPerSecond)), " ", "", 1),
"total_bytes", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalBytesProcessed)), " ", "", 1),
"total_bytes_non_indexed_labels", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalNonIndexedLabelsBytesProcessed)), " ", "", 1),
"total_bytes_structured_metadata", strings.Replace(humanize.Bytes(uint64(stats.Summary.TotalStructuredMetadataBytesProcessed)), " ", "", 1),
"lines_per_second", stats.Summary.LinesProcessedPerSecond,
"total_lines", stats.Summary.TotalLinesProcessed,
"post_filter_lines", stats.Summary.TotalPostFilterLines,

@ -152,8 +152,8 @@ func JoinIngesters(ctx context.Context, inc Ingester) {
func (r *Result) ComputeSummary(execTime time.Duration, queueTime time.Duration, totalEntriesReturned int) {
r.Summary.TotalBytesProcessed = r.Querier.Store.Chunk.DecompressedBytes + r.Querier.Store.Chunk.HeadChunkBytes +
r.Ingester.Store.Chunk.DecompressedBytes + r.Ingester.Store.Chunk.HeadChunkBytes
r.Summary.TotalNonIndexedLabelsBytesProcessed = r.Querier.Store.Chunk.DecompressedNonIndexedLabelsBytes + r.Querier.Store.Chunk.HeadChunkNonIndexedLabelsBytes +
r.Ingester.Store.Chunk.DecompressedNonIndexedLabelsBytes + r.Ingester.Store.Chunk.HeadChunkNonIndexedLabelsBytes
r.Summary.TotalStructuredMetadataBytesProcessed = r.Querier.Store.Chunk.DecompressedStructuredMetadataBytes + r.Querier.Store.Chunk.HeadChunkStructuredMetadataBytes +
r.Ingester.Store.Chunk.DecompressedStructuredMetadataBytes + r.Ingester.Store.Chunk.HeadChunkStructuredMetadataBytes
r.Summary.TotalLinesProcessed = r.Querier.Store.Chunk.DecompressedLines + r.Querier.Store.Chunk.HeadChunkLines +
r.Ingester.Store.Chunk.DecompressedLines + r.Ingester.Store.Chunk.HeadChunkLines
r.Summary.TotalPostFilterLines = r.Querier.Store.Chunk.PostFilterLines + r.Ingester.Store.Chunk.PostFilterLines
@ -177,10 +177,10 @@ func (s *Store) Merge(m Store) {
s.ChunksDownloadTime += m.ChunksDownloadTime
s.ChunkRefsFetchTime += m.ChunkRefsFetchTime
s.Chunk.HeadChunkBytes += m.Chunk.HeadChunkBytes
s.Chunk.HeadChunkNonIndexedLabelsBytes += m.Chunk.HeadChunkNonIndexedLabelsBytes
s.Chunk.HeadChunkStructuredMetadataBytes += m.Chunk.HeadChunkStructuredMetadataBytes
s.Chunk.HeadChunkLines += m.Chunk.HeadChunkLines
s.Chunk.DecompressedBytes += m.Chunk.DecompressedBytes
s.Chunk.DecompressedNonIndexedLabelsBytes += m.Chunk.DecompressedNonIndexedLabelsBytes
s.Chunk.DecompressedStructuredMetadataBytes += m.Chunk.DecompressedStructuredMetadataBytes
s.Chunk.DecompressedLines += m.Chunk.DecompressedLines
s.Chunk.CompressedBytes += m.Chunk.CompressedBytes
s.Chunk.TotalDuplicates += m.Chunk.TotalDuplicates
@ -296,8 +296,8 @@ func (c *Context) AddHeadChunkBytes(i int64) {
atomic.AddInt64(&c.store.Chunk.HeadChunkBytes, i)
}
func (c *Context) AddHeadChunkNonIndexedLabelsBytes(i int64) {
atomic.AddInt64(&c.store.Chunk.HeadChunkNonIndexedLabelsBytes, i)
func (c *Context) AddHeadChunkStructuredMetadataBytes(i int64) {
atomic.AddInt64(&c.store.Chunk.HeadChunkStructuredMetadataBytes, i)
}
func (c *Context) AddCompressedBytes(i int64) {
@ -308,8 +308,8 @@ func (c *Context) AddDecompressedBytes(i int64) {
atomic.AddInt64(&c.store.Chunk.DecompressedBytes, i)
}
func (c *Context) AddDecompressedNonIndexedLabelsBytes(i int64) {
atomic.AddInt64(&c.store.Chunk.DecompressedNonIndexedLabelsBytes, i)
func (c *Context) AddDecompressedStructuredMetadataBytes(i int64) {
atomic.AddInt64(&c.store.Chunk.DecompressedStructuredMetadataBytes, i)
}
func (c *Context) AddDecompressedLines(i int64) {

@ -175,7 +175,7 @@ type Summary struct {
BytesProcessedPerSecond int64 `protobuf:"varint,1,opt,name=bytesProcessedPerSecond,proto3" json:"bytesProcessedPerSecond"`
// Total lines processed per second.
LinesProcessedPerSecond int64 `protobuf:"varint,2,opt,name=linesProcessedPerSecond,proto3" json:"linesProcessedPerSecond"`
// Total bytes processed. Includes non-indexed labels bytes.
// Total bytes processed. Includes structured metadata bytes.
TotalBytesProcessed int64 `protobuf:"varint,3,opt,name=totalBytesProcessed,proto3" json:"totalBytesProcessed"`
// Total lines processed.
TotalLinesProcessed int64 `protobuf:"varint,4,opt,name=totalLinesProcessed,proto3" json:"totalLinesProcessed"`
@ -199,7 +199,7 @@ type Summary struct {
// Total lines post query filtering
TotalPostFilterLines int64 `protobuf:"varint,11,opt,name=totalPostFilterLines,proto3" json:"totalPostFilterLines"`
// Total bytes processed of metadata.
TotalNonIndexedLabelsBytesProcessed int64 `protobuf:"varint,12,opt,name=totalNonIndexedLabelsBytesProcessed,proto3" json:"totalNonIndexedLabelsBytesProcessed"`
TotalStructuredMetadataBytesProcessed int64 `protobuf:"varint,12,opt,name=totalStructuredMetadataBytesProcessed,proto3" json:"totalStructuredMetadataBytesProcessed"`
}
func (m *Summary) Reset() { *m = Summary{} }
@ -311,9 +311,9 @@ func (m *Summary) GetTotalPostFilterLines() int64 {
return 0
}
func (m *Summary) GetTotalNonIndexedLabelsBytesProcessed() int64 {
func (m *Summary) GetTotalStructuredMetadataBytesProcessed() int64 {
if m != nil {
return m.TotalNonIndexedLabelsBytesProcessed
return m.TotalStructuredMetadataBytesProcessed
}
return 0
}
@ -520,11 +520,11 @@ func (m *Store) GetChunkRefsFetchTime() int64 {
}
type Chunk struct {
// Total bytes processed but was already in memory (found in the headchunk). Includes non-indexed labels bytes.
// Total bytes processed but was already in memory (found in the headchunk). Includes structured metadata bytes.
HeadChunkBytes int64 `protobuf:"varint,4,opt,name=headChunkBytes,proto3" json:"headChunkBytes"`
// Total lines processed but was already in memory. (found in the headchunk)
HeadChunkLines int64 `protobuf:"varint,5,opt,name=headChunkLines,proto3" json:"headChunkLines"`
// Total bytes decompressed and processed from chunks. Includes non-indexed labels bytes.
// Total bytes decompressed and processed from chunks. Includes structured metadata bytes.
DecompressedBytes int64 `protobuf:"varint,6,opt,name=decompressedBytes,proto3" json:"decompressedBytes"`
// Total lines decompressed and processed from chunks.
DecompressedLines int64 `protobuf:"varint,7,opt,name=decompressedLines,proto3" json:"decompressedLines"`
@ -535,9 +535,9 @@ type Chunk struct {
// Total lines post filtering
PostFilterLines int64 `protobuf:"varint,10,opt,name=postFilterLines,proto3" json:"postFilterLines"`
// Total bytes processed for metadata but was already in memory. (found in the headchunk)
HeadChunkNonIndexedLabelsBytes int64 `protobuf:"varint,11,opt,name=headChunkNonIndexedLabelsBytes,proto3" json:"headChunkNonIndexedLabelsBytes"`
HeadChunkStructuredMetadataBytes int64 `protobuf:"varint,11,opt,name=headChunkStructuredMetadataBytes,proto3" json:"headChunkStructuredMetadataBytes"`
// Total bytes of entries metadata decompressed and processed from chunks.
DecompressedNonIndexedLabelsBytes int64 `protobuf:"varint,12,opt,name=decompressedNonIndexedLabelsBytes,proto3" json:"decompressedNonIndexedLabelsBytes"`
DecompressedStructuredMetadataBytes int64 `protobuf:"varint,12,opt,name=decompressedStructuredMetadataBytes,proto3" json:"decompressedStructuredMetadataBytes"`
}
func (m *Chunk) Reset() { *m = Chunk{} }
@ -621,16 +621,16 @@ func (m *Chunk) GetPostFilterLines() int64 {
return 0
}
func (m *Chunk) GetHeadChunkNonIndexedLabelsBytes() int64 {
func (m *Chunk) GetHeadChunkStructuredMetadataBytes() int64 {
if m != nil {
return m.HeadChunkNonIndexedLabelsBytes
return m.HeadChunkStructuredMetadataBytes
}
return 0
}
func (m *Chunk) GetDecompressedNonIndexedLabelsBytes() int64 {
func (m *Chunk) GetDecompressedStructuredMetadataBytes() int64 {
if m != nil {
return m.DecompressedNonIndexedLabelsBytes
return m.DecompressedStructuredMetadataBytes
}
return 0
}
@ -740,78 +740,78 @@ func init() {
func init() { proto.RegisterFile("pkg/logqlmodel/stats/stats.proto", fileDescriptor_6cdfe5d2aea33ebb) }
var fileDescriptor_6cdfe5d2aea33ebb = []byte{
// 1123 bytes of a gzipped FileDescriptorProto
// 1127 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0x4f, 0x6f, 0xe3, 0x44,
0x14, 0x8f, 0x53, 0x9c, 0xb4, 0xd3, 0xbf, 0x3b, 0xed, 0xb2, 0x01, 0x24, 0x7b, 0x09, 0x42, 0xac,
0x04, 0x6a, 0xc4, 0x1f, 0x09, 0x81, 0x58, 0x09, 0xb9, 0x4b, 0xa5, 0x4a, 0x05, 0xca, 0x2b, 0x5c,
0xb8, 0x39, 0xf6, 0x34, 0x31, 0x75, 0xec, 0xd4, 0x7f, 0x96, 0xed, 0x8d, 0x8f, 0xc0, 0xb7, 0x80,
0x0b, 0x07, 0xc4, 0x97, 0xd8, 0x63, 0x6f, 0xec, 0xc9, 0xa2, 0xe9, 0x05, 0xf9, 0xb4, 0x12, 0x77,
0xb4, 0x9a, 0x37, 0x13, 0xdb, 0xe3, 0x38, 0x6a, 0x2f, 0xf1, 0xbc, 0xdf, 0xfb, 0xfd, 0xde, 0x1b,
0xcf, 0xcc, 0x7b, 0x9e, 0x90, 0x87, 0xd3, 0xf3, 0xd1, 0xc0, 0x0f, 0x47, 0x17, 0xfe, 0x24, 0x74,
0x99, 0x3f, 0x88, 0x13, 0x3b, 0x89, 0xc5, 0xef, 0xfe, 0x34, 0x0a, 0x93, 0x90, 0xea, 0x68, 0xbc,
0xb9, 0x37, 0x0a, 0x47, 0x21, 0x22, 0x03, 0x3e, 0x12, 0xce, 0xfe, 0x7f, 0x1a, 0xe9, 0x00, 0x8b,
0x53, 0x3f, 0xa1, 0x9f, 0x91, 0x6e, 0x9c, 0x4e, 0x26, 0x76, 0x74, 0xd9, 0xd3, 0x1e, 0x6a, 0x8f,
0xd6, 0x3f, 0xda, 0xda, 0x17, 0x61, 0x4e, 0x05, 0x6a, 0x6d, 0x3f, 0xcf, 0xcc, 0x56, 0x9e, 0x99,
0x73, 0x1a, 0xcc, 0x07, 0x5c, 0x7a, 0x91, 0xb2, 0xc8, 0x63, 0x51, 0xaf, 0xad, 0x48, 0xbf, 0x13,
0x68, 0x29, 0x95, 0x34, 0x98, 0x0f, 0xe8, 0x63, 0xb2, 0xea, 0x05, 0x23, 0x16, 0x27, 0x2c, 0xea,
0xad, 0xa0, 0x76, 0x5b, 0x6a, 0x8f, 0x24, 0x6c, 0xed, 0x48, 0x71, 0x41, 0x84, 0x62, 0x44, 0x3f,
0x21, 0x1d, 0xc7, 0x76, 0xc6, 0x2c, 0xee, 0xbd, 0x86, 0xe2, 0x4d, 0x29, 0x3e, 0x40, 0xd0, 0xda,
0x94, 0x52, 0x1d, 0x49, 0x20, 0xb9, 0xfd, 0x3f, 0xdb, 0xa4, 0x23, 0x18, 0xf4, 0x43, 0xa2, 0x3b,
0xe3, 0x34, 0x38, 0x97, 0xef, 0xbc, 0x51, 0xd5, 0x57, 0xe4, 0x9c, 0x02, 0xe2, 0xc1, 0x25, 0x5e,
0xe0, 0xb2, 0x67, 0xf2, 0x5d, 0x97, 0x48, 0x90, 0x02, 0xe2, 0xc1, 0xa7, 0x19, 0xe1, 0x2a, 0xcb,
0x77, 0x54, 0x35, 0x5b, 0x52, 0x23, 0x39, 0x20, 0x9f, 0xf4, 0x80, 0xac, 0x23, 0x4d, 0x6c, 0x90,
0x7c, 0x43, 0x55, 0xba, 0x2b, 0xa5, 0x55, 0x22, 0x54, 0x0d, 0x7a, 0x48, 0x36, 0x9e, 0x86, 0x7e,
0x3a, 0x61, 0x32, 0x8a, 0xde, 0x10, 0x65, 0x4f, 0x46, 0x51, 0x98, 0xa0, 0x58, 0xfd, 0xbf, 0x3a,
0xa4, 0x2b, 0x4f, 0x02, 0xfd, 0x81, 0x3c, 0x18, 0x5e, 0x26, 0x2c, 0x3e, 0x89, 0x42, 0x87, 0xc5,
0x31, 0x73, 0x4f, 0x58, 0x74, 0xca, 0x9c, 0x30, 0x70, 0x71, 0x19, 0x57, 0xac, 0xb7, 0xf2, 0xcc,
0x5c, 0x46, 0x81, 0x65, 0x0e, 0x1e, 0xd6, 0xf7, 0x82, 0xc6, 0xb0, 0xed, 0x32, 0xec, 0x12, 0x0a,
0x2c, 0x73, 0xd0, 0x23, 0xb2, 0x9b, 0x84, 0x89, 0xed, 0x5b, 0x4a, 0x5a, 0xdc, 0x89, 0x15, 0xeb,
0x41, 0x9e, 0x99, 0x4d, 0x6e, 0x68, 0x02, 0x8b, 0x50, 0xc7, 0x4a, 0x2a, 0xdc, 0x99, 0x6a, 0x28,
0xd5, 0x0d, 0x4d, 0x20, 0x7d, 0x44, 0x56, 0xd9, 0x33, 0xe6, 0x7c, 0xef, 0x4d, 0x18, 0xee, 0x89,
0x66, 0x6d, 0xf0, 0x33, 0x3e, 0xc7, 0xa0, 0x18, 0xd1, 0xf7, 0xc9, 0xda, 0x45, 0xca, 0x52, 0x86,
0xd4, 0x0e, 0x52, 0x37, 0xf3, 0xcc, 0x2c, 0x41, 0x28, 0x87, 0x74, 0x9f, 0x90, 0x38, 0x1d, 0x8a,
0xea, 0x8a, 0x7b, 0x5d, 0x9c, 0xd8, 0x56, 0x9e, 0x99, 0x15, 0x14, 0x2a, 0x63, 0x7a, 0x4c, 0xf6,
0x70, 0x76, 0x5f, 0x05, 0x09, 0xfa, 0x58, 0x92, 0x46, 0x01, 0x73, 0x7b, 0xab, 0xa8, 0xec, 0xe5,
0x99, 0xd9, 0xe8, 0x87, 0x46, 0x94, 0xf6, 0x49, 0x27, 0x9e, 0xfa, 0x5e, 0x12, 0xf7, 0xd6, 0x50,
0x4f, 0xf8, 0xa9, 0x16, 0x08, 0xc8, 0x27, 0x72, 0xc6, 0x76, 0xe4, 0xc6, 0x3d, 0x52, 0xe1, 0x20,
0x02, 0xf2, 0x59, 0xcc, 0xea, 0x24, 0x8c, 0x93, 0x43, 0xcf, 0x4f, 0x58, 0x84, 0xab, 0xd7, 0x5b,
0xaf, 0xcd, 0xaa, 0xe6, 0x87, 0x46, 0x94, 0x5e, 0x92, 0x77, 0x10, 0xff, 0x26, 0x0c, 0x8e, 0x78,
0x39, 0x32, 0xf7, 0xd8, 0x1e, 0x32, 0x3f, 0xae, 0x1d, 0x88, 0x0d, 0x0c, 0xfe, 0x5e, 0x9e, 0x99,
0x77, 0xa1, 0xc3, 0x5d, 0x48, 0xfd, 0x2f, 0x48, 0x57, 0xf6, 0x40, 0xde, 0x36, 0xe2, 0x24, 0x8c,
0x58, 0xad, 0xd3, 0x9c, 0x72, 0xac, 0x6c, 0x1b, 0x48, 0x01, 0xf1, 0xe8, 0xff, 0xd1, 0x26, 0xab,
0x47, 0x65, 0xab, 0xdb, 0xc0, 0x8c, 0xc0, 0x78, 0xcd, 0x8a, 0x4a, 0xd3, 0xad, 0x1d, 0x5e, 0xb6,
0x55, 0x1c, 0x14, 0x8b, 0x1e, 0x12, 0x8a, 0xf6, 0x01, 0x6f, 0x5d, 0xf1, 0xd7, 0x76, 0x82, 0x5a,
0x51, 0x4e, 0xaf, 0xe7, 0x99, 0xd9, 0xe0, 0x85, 0x06, 0xac, 0xc8, 0x6e, 0xa1, 0x1d, 0xcb, 0xea,
0x29, 0xb3, 0x4b, 0x1c, 0x14, 0x8b, 0x7e, 0x4e, 0xb6, 0xca, 0xb3, 0x7f, 0xca, 0x82, 0x44, 0x96,
0x0a, 0xcd, 0x33, 0xb3, 0xe6, 0x81, 0x9a, 0x5d, 0xae, 0x97, 0x7e, 0xe7, 0xf5, 0xfa, 0xbb, 0x4d,
0x74, 0xf4, 0x17, 0x89, 0xc5, 0x4b, 0x00, 0x3b, 0x93, 0x8d, 0xa9, 0x4c, 0x5c, 0x78, 0xa0, 0x66,
0xd3, 0x6f, 0xc9, 0xfd, 0x0a, 0xf2, 0x24, 0xfc, 0x39, 0xf0, 0x43, 0xdb, 0x2d, 0x56, 0xed, 0x8d,
0x3c, 0x33, 0x9b, 0x09, 0xd0, 0x0c, 0xf3, 0x3d, 0x70, 0x14, 0x0c, 0x2b, 0x79, 0xa5, 0xdc, 0x83,
0x45, 0x2f, 0x34, 0x60, 0xe5, 0xb7, 0xaa, 0xf6, 0x25, 0xe0, 0xd8, 0x92, 0x6f, 0xd5, 0x3c, 0x35,
0xb0, 0xb3, 0xf8, 0x90, 0x25, 0xce, 0xb8, 0xe8, 0x37, 0xd5, 0xd4, 0x8a, 0x17, 0x1a, 0xb0, 0xfe,
0x6f, 0x3a, 0xd1, 0x31, 0x0f, 0x5f, 0xd9, 0x31, 0xb3, 0x5d, 0x91, 0x94, 0x1f, 0xf6, 0xea, 0x96,
0xaa, 0x1e, 0xa8, 0xd9, 0x8a, 0x56, 0x14, 0xb4, 0xde, 0xa0, 0x15, 0xa5, 0x5c, 0xb3, 0xe9, 0x01,
0xb9, 0xe7, 0x32, 0x27, 0x9c, 0x4c, 0x23, 0xac, 0x2c, 0x91, 0xba, 0x83, 0xf2, 0xfb, 0x79, 0x66,
0x2e, 0x3a, 0x61, 0x11, 0xaa, 0x07, 0x11, 0x73, 0xe8, 0x36, 0x07, 0x11, 0xd3, 0x58, 0x84, 0xe8,
0x63, 0xb2, 0x5d, 0x9f, 0x87, 0xe8, 0x96, 0xbb, 0x79, 0x66, 0xd6, 0x5d, 0x50, 0x07, 0xb8, 0x1c,
0x8f, 0xc9, 0x93, 0x74, 0xea, 0x7b, 0x8e, 0xcd, 0xe5, 0x6b, 0xa5, 0xbc, 0xe6, 0x82, 0x3a, 0xc0,
0xe5, 0xd3, 0x5a, 0x57, 0x24, 0xa5, 0xbc, 0xe6, 0x82, 0x3a, 0x40, 0x7f, 0x22, 0x46, 0xb1, 0xb0,
0x8d, 0xbd, 0x4b, 0xf6, 0xd8, 0x7e, 0x9e, 0x99, 0xb7, 0x30, 0xe1, 0x16, 0x3f, 0x8d, 0xc9, 0xdb,
0xd5, 0xd5, 0x6b, 0x4e, 0x27, 0xba, 0xee, 0xbb, 0x79, 0x66, 0xde, 0x4e, 0x86, 0xdb, 0x29, 0xfd,
0xff, 0xdb, 0x44, 0xc7, 0x5b, 0x0d, 0x6f, 0x59, 0x4c, 0x7c, 0x9f, 0x0e, 0xc3, 0x34, 0x50, 0x1a,
0x66, 0x15, 0x07, 0xc5, 0xa2, 0x5f, 0x92, 0x1d, 0x36, 0xff, 0xaa, 0x5d, 0xa4, 0xbc, 0xf5, 0x8a,
0xc2, 0xd7, 0xad, 0xbd, 0x3c, 0x33, 0x17, 0x7c, 0xb0, 0x80, 0xd0, 0x4f, 0xc9, 0xa6, 0xc4, 0xb0,
0x17, 0x89, 0x9b, 0x86, 0x6e, 0xdd, 0xcb, 0x33, 0x53, 0x75, 0x80, 0x6a, 0x72, 0x21, 0x5e, 0x8d,
0x80, 0x39, 0xcc, 0x7b, 0x5a, 0xdc, 0x2b, 0x50, 0xa8, 0x38, 0x40, 0x35, 0xf9, 0x0d, 0x01, 0x01,
0xec, 0xb0, 0xa2, 0xa4, 0xf0, 0x86, 0x50, 0x80, 0x50, 0x0e, 0xf9, 0xc5, 0x23, 0x12, 0x73, 0x15,
0xf5, 0xa3, 0x8b, 0x8b, 0xc7, 0x1c, 0x83, 0x62, 0xc4, 0x17, 0xd0, 0xad, 0x76, 0xac, 0x6e, 0xd9,
0xf3, 0xab, 0x38, 0x28, 0x96, 0x35, 0xbc, 0xba, 0x36, 0x5a, 0x2f, 0xae, 0x8d, 0xd6, 0xcb, 0x6b,
0x43, 0xfb, 0x65, 0x66, 0x68, 0xbf, 0xcf, 0x0c, 0xed, 0xf9, 0xcc, 0xd0, 0xae, 0x66, 0x86, 0xf6,
0xcf, 0xcc, 0xd0, 0xfe, 0x9d, 0x19, 0xad, 0x97, 0x33, 0x43, 0xfb, 0xf5, 0xc6, 0x68, 0x5d, 0xdd,
0x18, 0xad, 0x17, 0x37, 0x46, 0xeb, 0xc7, 0x0f, 0x46, 0x5e, 0x32, 0x4e, 0x87, 0xfb, 0x4e, 0x38,
0x19, 0x8c, 0x22, 0xfb, 0xcc, 0x0e, 0xec, 0x81, 0x1f, 0x9e, 0x7b, 0x83, 0xa6, 0xff, 0x37, 0xc3,
0x0e, 0xfe, 0x7b, 0xf9, 0xf8, 0x55, 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0xe9, 0x4e, 0xb9, 0xfe,
0x0c, 0x00, 0x00,
0x14, 0x4f, 0x52, 0x9c, 0x74, 0xa7, 0x7f, 0x77, 0xda, 0x65, 0x03, 0x48, 0x76, 0x15, 0x40, 0x2c,
0x02, 0x35, 0xe2, 0x8f, 0x84, 0x40, 0xac, 0x84, 0xdc, 0xa5, 0x52, 0xa5, 0x5d, 0x51, 0x5e, 0xe1,
0xc2, 0xcd, 0xb5, 0xa7, 0x89, 0x55, 0xc7, 0x4e, 0xed, 0xf1, 0xb2, 0x3d, 0xc1, 0x47, 0xe0, 0x63,
0x70, 0xe1, 0x80, 0x38, 0xf1, 0x0d, 0xf6, 0xd8, 0x1b, 0x7b, 0xb2, 0x68, 0x7a, 0x41, 0x3e, 0xad,
0xc4, 0x1d, 0xa1, 0x79, 0x33, 0xb1, 0x3d, 0x8e, 0xa3, 0xed, 0x25, 0x9e, 0xf7, 0x7b, 0xbf, 0xdf,
0x7b, 0xe3, 0x99, 0x79, 0xcf, 0x13, 0xb2, 0x37, 0x3d, 0x1f, 0x0d, 0x83, 0x68, 0x74, 0x11, 0x4c,
0x22, 0x8f, 0x05, 0xc3, 0x84, 0x3b, 0x3c, 0x91, 0xbf, 0xfb, 0xd3, 0x38, 0xe2, 0x11, 0x35, 0xd0,
0x78, 0x73, 0x77, 0x14, 0x8d, 0x22, 0x44, 0x86, 0x62, 0x24, 0x9d, 0x83, 0x7f, 0xdb, 0xa4, 0x0b,
0x2c, 0x49, 0x03, 0x4e, 0x3f, 0x27, 0xbd, 0x24, 0x9d, 0x4c, 0x9c, 0xf8, 0xb2, 0xdf, 0xde, 0x6b,
0x3f, 0x58, 0xfb, 0x78, 0x73, 0x5f, 0x86, 0x39, 0x91, 0xa8, 0xbd, 0xf5, 0x3c, 0xb3, 0x5a, 0x79,
0x66, 0xcd, 0x69, 0x30, 0x1f, 0x08, 0xe9, 0x45, 0xca, 0x62, 0x9f, 0xc5, 0xfd, 0x8e, 0x26, 0xfd,
0x56, 0xa2, 0xa5, 0x54, 0xd1, 0x60, 0x3e, 0xa0, 0x0f, 0xc9, 0xaa, 0x1f, 0x8e, 0x58, 0xc2, 0x59,
0xdc, 0x5f, 0x41, 0xed, 0x96, 0xd2, 0x1e, 0x29, 0xd8, 0xde, 0x56, 0xe2, 0x82, 0x08, 0xc5, 0x88,
0x7e, 0x4a, 0xba, 0xae, 0xe3, 0x8e, 0x59, 0xd2, 0x7f, 0x0d, 0xc5, 0x1b, 0x4a, 0x7c, 0x80, 0xa0,
0xbd, 0xa1, 0xa4, 0x06, 0x92, 0x40, 0x71, 0x07, 0xbf, 0x77, 0x48, 0x57, 0x32, 0xe8, 0x47, 0xc4,
0x70, 0xc7, 0x69, 0x78, 0xae, 0xde, 0x79, 0xbd, 0xaa, 0xaf, 0xc8, 0x05, 0x05, 0xe4, 0x43, 0x48,
0xfc, 0xd0, 0x63, 0xcf, 0xd4, 0xbb, 0x2e, 0x91, 0x20, 0x05, 0xe4, 0x43, 0x4c, 0x33, 0xc6, 0x55,
0x56, 0xef, 0xa8, 0x6b, 0x36, 0x95, 0x46, 0x71, 0x40, 0x3d, 0xe9, 0x01, 0x59, 0x43, 0x9a, 0xdc,
0x20, 0xf5, 0x86, 0xba, 0x74, 0x47, 0x49, 0xab, 0x44, 0xa8, 0x1a, 0xf4, 0x90, 0xac, 0x3f, 0x8d,
0x82, 0x74, 0xc2, 0x54, 0x14, 0xa3, 0x21, 0xca, 0xae, 0x8a, 0xa2, 0x31, 0x41, 0xb3, 0x06, 0x7f,
0x76, 0x49, 0x4f, 0x9d, 0x04, 0xfa, 0x3d, 0xb9, 0x7f, 0x7a, 0xc9, 0x59, 0x72, 0x1c, 0x47, 0x2e,
0x4b, 0x12, 0xe6, 0x1d, 0xb3, 0xf8, 0x84, 0xb9, 0x51, 0xe8, 0xe1, 0x32, 0xae, 0xd8, 0x6f, 0xe5,
0x99, 0xb5, 0x8c, 0x02, 0xcb, 0x1c, 0x22, 0x6c, 0xe0, 0x87, 0x8d, 0x61, 0x3b, 0x65, 0xd8, 0x25,
0x14, 0x58, 0xe6, 0xa0, 0x47, 0x64, 0x87, 0x47, 0xdc, 0x09, 0x6c, 0x2d, 0x2d, 0xee, 0xc4, 0x8a,
0x7d, 0x3f, 0xcf, 0xac, 0x26, 0x37, 0x34, 0x81, 0x45, 0xa8, 0xc7, 0x5a, 0x2a, 0xdc, 0x99, 0x6a,
0x28, 0xdd, 0x0d, 0x4d, 0x20, 0x7d, 0x40, 0x56, 0xd9, 0x33, 0xe6, 0x7e, 0xe7, 0x4f, 0x18, 0xee,
0x49, 0xdb, 0x5e, 0x17, 0x67, 0x7c, 0x8e, 0x41, 0x31, 0xa2, 0x1f, 0x90, 0x3b, 0x17, 0x29, 0x4b,
0x19, 0x52, 0xbb, 0x48, 0xdd, 0xc8, 0x33, 0xab, 0x04, 0xa1, 0x1c, 0xd2, 0x7d, 0x42, 0x92, 0xf4,
0x54, 0x56, 0x57, 0xd2, 0xef, 0xe1, 0xc4, 0x36, 0xf3, 0xcc, 0xaa, 0xa0, 0x50, 0x19, 0xd3, 0xc7,
0x64, 0x17, 0x67, 0xf7, 0x75, 0xc8, 0xd1, 0xc7, 0x78, 0x1a, 0x87, 0xcc, 0xeb, 0xaf, 0xa2, 0xb2,
0x9f, 0x67, 0x56, 0xa3, 0x1f, 0x1a, 0x51, 0x3a, 0x20, 0xdd, 0x64, 0x1a, 0xf8, 0x3c, 0xe9, 0xdf,
0x41, 0x3d, 0x11, 0xa7, 0x5a, 0x22, 0xa0, 0x9e, 0xc8, 0x19, 0x3b, 0xb1, 0x97, 0xf4, 0x49, 0x85,
0x83, 0x08, 0xa8, 0x67, 0x31, 0xab, 0xe3, 0x28, 0xe1, 0x87, 0x7e, 0xc0, 0x59, 0x8c, 0xab, 0xd7,
0x5f, 0xab, 0xcd, 0xaa, 0xe6, 0x87, 0x46, 0x94, 0xfe, 0x44, 0xde, 0x45, 0xfc, 0x84, 0xc7, 0xa9,
0xcb, 0xd3, 0x98, 0x79, 0x4f, 0x18, 0x77, 0x3c, 0x87, 0x3b, 0xb5, 0x23, 0xb1, 0x8e, 0xe1, 0xdf,
0xcf, 0x33, 0xeb, 0x76, 0x02, 0xb8, 0x1d, 0x6d, 0xf0, 0x25, 0xe9, 0xa9, 0x4e, 0x28, 0x9a, 0x47,
0xc2, 0xa3, 0x98, 0xd5, 0xfa, 0xcd, 0x89, 0xc0, 0xca, 0xe6, 0x81, 0x14, 0x90, 0x8f, 0xc1, 0x6f,
0x1d, 0xb2, 0x7a, 0x54, 0x36, 0xbc, 0x75, 0xcc, 0x09, 0x4c, 0x54, 0xae, 0xac, 0x37, 0xc3, 0xde,
0x16, 0xc5, 0x5b, 0xc5, 0x41, 0xb3, 0xe8, 0x21, 0xa1, 0x68, 0x1f, 0x88, 0x06, 0x96, 0x3c, 0x71,
0x38, 0x6a, 0x65, 0x51, 0xbd, 0x9e, 0x67, 0x56, 0x83, 0x17, 0x1a, 0xb0, 0x22, 0xbb, 0x8d, 0x76,
0xa2, 0x6a, 0xa8, 0xcc, 0xae, 0x70, 0xd0, 0x2c, 0xfa, 0x05, 0xd9, 0x2c, 0x2b, 0xe0, 0x84, 0x85,
0x5c, 0x15, 0x0c, 0xcd, 0x33, 0xab, 0xe6, 0x81, 0x9a, 0x5d, 0xae, 0x97, 0x71, 0xeb, 0xf5, 0xfa,
0xab, 0x43, 0x0c, 0xf4, 0x17, 0x89, 0xe5, 0x4b, 0x00, 0x3b, 0x53, 0xed, 0xa9, 0x4c, 0x5c, 0x78,
0xa0, 0x66, 0xd3, 0x6f, 0xc8, 0xbd, 0x0a, 0xf2, 0x28, 0xfa, 0x31, 0x0c, 0x22, 0xc7, 0x2b, 0x56,
0xed, 0x8d, 0x3c, 0xb3, 0x9a, 0x09, 0xd0, 0x0c, 0x8b, 0x3d, 0x70, 0x35, 0x0c, 0xeb, 0x79, 0xa5,
0xdc, 0x83, 0x45, 0x2f, 0x34, 0x60, 0xe5, 0x17, 0xab, 0xf6, 0x3d, 0x10, 0xd8, 0x92, 0x2f, 0xd6,
0x3c, 0x35, 0xb0, 0xb3, 0xe4, 0x90, 0x71, 0x77, 0x5c, 0x74, 0x9d, 0x6a, 0x6a, 0xcd, 0x0b, 0x0d,
0xd8, 0xe0, 0x0f, 0x83, 0x18, 0x98, 0x47, 0xac, 0xec, 0x98, 0x39, 0x9e, 0x4c, 0x2a, 0x0e, 0x7b,
0x75, 0x4b, 0x75, 0x0f, 0xd4, 0x6c, 0x4d, 0x2b, 0xcb, 0xda, 0x68, 0xd0, 0xca, 0x82, 0xae, 0xd9,
0xf4, 0x80, 0xdc, 0xf5, 0x98, 0x1b, 0x4d, 0xa6, 0x31, 0x56, 0x96, 0x4c, 0xdd, 0x45, 0xf9, 0xbd,
0x3c, 0xb3, 0x16, 0x9d, 0xb0, 0x08, 0xd5, 0x83, 0xc8, 0x39, 0xf4, 0x9a, 0x83, 0xc8, 0x69, 0x2c,
0x42, 0xf4, 0x21, 0xd9, 0xaa, 0xcf, 0x43, 0xf6, 0xcc, 0x9d, 0x3c, 0xb3, 0xea, 0x2e, 0xa8, 0x03,
0x42, 0x8e, 0xc7, 0xe4, 0x51, 0x3a, 0x0d, 0x7c, 0xd7, 0x11, 0xf2, 0x3b, 0xa5, 0xbc, 0xe6, 0x82,
0x3a, 0x20, 0xe4, 0xd3, 0x5a, 0x6f, 0x24, 0xa5, 0xbc, 0xe6, 0x82, 0x3a, 0x40, 0xa7, 0x64, 0xaf,
0x58, 0xd8, 0x25, 0xdd, 0x4b, 0xf5, 0xda, 0x77, 0xf2, 0xcc, 0x7a, 0x25, 0x17, 0x5e, 0xc9, 0xa0,
0x97, 0xe4, 0xed, 0xea, 0x1a, 0x2e, 0x4b, 0x2a, 0x3b, 0xf0, 0x7b, 0x79, 0x66, 0xdd, 0x86, 0x0e,
0xb7, 0x21, 0x0d, 0xfe, 0xeb, 0x10, 0x03, 0xef, 0x39, 0xa2, 0x7d, 0x31, 0xf9, 0xc5, 0x3a, 0x8c,
0xd2, 0x50, 0x6b, 0x9e, 0x55, 0x1c, 0x34, 0x8b, 0x7e, 0x45, 0xb6, 0xd9, 0xfc, 0x3b, 0x77, 0x91,
0x8a, 0x36, 0x2c, 0x9b, 0x80, 0x61, 0xef, 0xe6, 0x99, 0xb5, 0xe0, 0x83, 0x05, 0x84, 0x7e, 0x46,
0x36, 0x14, 0x86, 0x7d, 0x49, 0xde, 0x3d, 0x0c, 0xfb, 0x6e, 0x9e, 0x59, 0xba, 0x03, 0x74, 0x53,
0x08, 0xf1, 0xb2, 0x04, 0xcc, 0x65, 0xfe, 0xd3, 0xe2, 0xa6, 0x81, 0x42, 0xcd, 0x01, 0xba, 0x29,
0xee, 0x0c, 0x08, 0x60, 0xb7, 0x95, 0xe5, 0x85, 0x77, 0x86, 0x02, 0x84, 0x72, 0x28, 0xae, 0x22,
0xb1, 0x9c, 0xab, 0xac, 0x25, 0x43, 0x5e, 0x45, 0xe6, 0x18, 0x14, 0x23, 0xb1, 0x80, 0x5e, 0xb5,
0x7b, 0xf5, 0xca, 0xfe, 0x5f, 0xc5, 0x41, 0xb3, 0xec, 0xd3, 0xab, 0x6b, 0xb3, 0xf5, 0xe2, 0xda,
0x6c, 0xbd, 0xbc, 0x36, 0xdb, 0x3f, 0xcf, 0xcc, 0xf6, 0xaf, 0x33, 0xb3, 0xfd, 0x7c, 0x66, 0xb6,
0xaf, 0x66, 0x66, 0xfb, 0xef, 0x99, 0xd9, 0xfe, 0x67, 0x66, 0xb6, 0x5e, 0xce, 0xcc, 0xf6, 0x2f,
0x37, 0x66, 0xeb, 0xea, 0xc6, 0x6c, 0xbd, 0xb8, 0x31, 0x5b, 0x3f, 0x7c, 0x38, 0xf2, 0xf9, 0x38,
0x3d, 0xdd, 0x77, 0xa3, 0xc9, 0x70, 0x14, 0x3b, 0x67, 0x4e, 0xe8, 0x0c, 0x83, 0xe8, 0xdc, 0x1f,
0x36, 0xfd, 0xe3, 0x39, 0xed, 0xe2, 0xff, 0x99, 0x4f, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x14,
0xee, 0x83, 0x9b, 0x10, 0x0d, 0x00, 0x00,
}
func (this *Result) Equal(that interface{}) bool {
@ -935,7 +935,7 @@ func (this *Summary) Equal(that interface{}) bool {
if this.TotalPostFilterLines != that1.TotalPostFilterLines {
return false
}
if this.TotalNonIndexedLabelsBytesProcessed != that1.TotalNonIndexedLabelsBytesProcessed {
if this.TotalStructuredMetadataBytesProcessed != that1.TotalStructuredMetadataBytesProcessed {
return false
}
return true
@ -1076,10 +1076,10 @@ func (this *Chunk) Equal(that interface{}) bool {
if this.PostFilterLines != that1.PostFilterLines {
return false
}
if this.HeadChunkNonIndexedLabelsBytes != that1.HeadChunkNonIndexedLabelsBytes {
if this.HeadChunkStructuredMetadataBytes != that1.HeadChunkStructuredMetadataBytes {
return false
}
if this.DecompressedNonIndexedLabelsBytes != that1.DecompressedNonIndexedLabelsBytes {
if this.DecompressedStructuredMetadataBytes != that1.DecompressedStructuredMetadataBytes {
return false
}
return true
@ -1170,7 +1170,7 @@ func (this *Summary) GoString() string {
s = append(s, "Splits: "+fmt.Sprintf("%#v", this.Splits)+",\n")
s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n")
s = append(s, "TotalPostFilterLines: "+fmt.Sprintf("%#v", this.TotalPostFilterLines)+",\n")
s = append(s, "TotalNonIndexedLabelsBytesProcessed: "+fmt.Sprintf("%#v", this.TotalNonIndexedLabelsBytesProcessed)+",\n")
s = append(s, "TotalStructuredMetadataBytesProcessed: "+fmt.Sprintf("%#v", this.TotalStructuredMetadataBytesProcessed)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@ -1225,8 +1225,8 @@ func (this *Chunk) GoString() string {
s = append(s, "CompressedBytes: "+fmt.Sprintf("%#v", this.CompressedBytes)+",\n")
s = append(s, "TotalDuplicates: "+fmt.Sprintf("%#v", this.TotalDuplicates)+",\n")
s = append(s, "PostFilterLines: "+fmt.Sprintf("%#v", this.PostFilterLines)+",\n")
s = append(s, "HeadChunkNonIndexedLabelsBytes: "+fmt.Sprintf("%#v", this.HeadChunkNonIndexedLabelsBytes)+",\n")
s = append(s, "DecompressedNonIndexedLabelsBytes: "+fmt.Sprintf("%#v", this.DecompressedNonIndexedLabelsBytes)+",\n")
s = append(s, "HeadChunkStructuredMetadataBytes: "+fmt.Sprintf("%#v", this.HeadChunkStructuredMetadataBytes)+",\n")
s = append(s, "DecompressedStructuredMetadataBytes: "+fmt.Sprintf("%#v", this.DecompressedStructuredMetadataBytes)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@ -1410,8 +1410,8 @@ func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.TotalNonIndexedLabelsBytesProcessed != 0 {
i = encodeVarintStats(dAtA, i, uint64(m.TotalNonIndexedLabelsBytesProcessed))
if m.TotalStructuredMetadataBytesProcessed != 0 {
i = encodeVarintStats(dAtA, i, uint64(m.TotalStructuredMetadataBytesProcessed))
i--
dAtA[i] = 0x60
}
@ -1634,13 +1634,13 @@ func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.DecompressedNonIndexedLabelsBytes != 0 {
i = encodeVarintStats(dAtA, i, uint64(m.DecompressedNonIndexedLabelsBytes))
if m.DecompressedStructuredMetadataBytes != 0 {
i = encodeVarintStats(dAtA, i, uint64(m.DecompressedStructuredMetadataBytes))
i--
dAtA[i] = 0x60
}
if m.HeadChunkNonIndexedLabelsBytes != 0 {
i = encodeVarintStats(dAtA, i, uint64(m.HeadChunkNonIndexedLabelsBytes))
if m.HeadChunkStructuredMetadataBytes != 0 {
i = encodeVarintStats(dAtA, i, uint64(m.HeadChunkStructuredMetadataBytes))
i--
dAtA[i] = 0x58
}
@ -1826,8 +1826,8 @@ func (m *Summary) Size() (n int) {
if m.TotalPostFilterLines != 0 {
n += 1 + sovStats(uint64(m.TotalPostFilterLines))
}
if m.TotalNonIndexedLabelsBytesProcessed != 0 {
n += 1 + sovStats(uint64(m.TotalNonIndexedLabelsBytesProcessed))
if m.TotalStructuredMetadataBytesProcessed != 0 {
n += 1 + sovStats(uint64(m.TotalStructuredMetadataBytesProcessed))
}
return n
}
@ -1916,11 +1916,11 @@ func (m *Chunk) Size() (n int) {
if m.PostFilterLines != 0 {
n += 1 + sovStats(uint64(m.PostFilterLines))
}
if m.HeadChunkNonIndexedLabelsBytes != 0 {
n += 1 + sovStats(uint64(m.HeadChunkNonIndexedLabelsBytes))
if m.HeadChunkStructuredMetadataBytes != 0 {
n += 1 + sovStats(uint64(m.HeadChunkStructuredMetadataBytes))
}
if m.DecompressedNonIndexedLabelsBytes != 0 {
n += 1 + sovStats(uint64(m.DecompressedNonIndexedLabelsBytes))
if m.DecompressedStructuredMetadataBytes != 0 {
n += 1 + sovStats(uint64(m.DecompressedStructuredMetadataBytes))
}
return n
}
@ -2004,7 +2004,7 @@ func (this *Summary) String() string {
`Splits:` + fmt.Sprintf("%v", this.Splits) + `,`,
`Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
`TotalPostFilterLines:` + fmt.Sprintf("%v", this.TotalPostFilterLines) + `,`,
`TotalNonIndexedLabelsBytesProcessed:` + fmt.Sprintf("%v", this.TotalNonIndexedLabelsBytesProcessed) + `,`,
`TotalStructuredMetadataBytesProcessed:` + fmt.Sprintf("%v", this.TotalStructuredMetadataBytesProcessed) + `,`,
`}`,
}, "")
return s
@ -2059,8 +2059,8 @@ func (this *Chunk) String() string {
`CompressedBytes:` + fmt.Sprintf("%v", this.CompressedBytes) + `,`,
`TotalDuplicates:` + fmt.Sprintf("%v", this.TotalDuplicates) + `,`,
`PostFilterLines:` + fmt.Sprintf("%v", this.PostFilterLines) + `,`,
`HeadChunkNonIndexedLabelsBytes:` + fmt.Sprintf("%v", this.HeadChunkNonIndexedLabelsBytes) + `,`,
`DecompressedNonIndexedLabelsBytes:` + fmt.Sprintf("%v", this.DecompressedNonIndexedLabelsBytes) + `,`,
`HeadChunkStructuredMetadataBytes:` + fmt.Sprintf("%v", this.HeadChunkStructuredMetadataBytes) + `,`,
`DecompressedStructuredMetadataBytes:` + fmt.Sprintf("%v", this.DecompressedStructuredMetadataBytes) + `,`,
`}`,
}, "")
return s
@ -2716,9 +2716,9 @@ func (m *Summary) Unmarshal(dAtA []byte) error {
}
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TotalNonIndexedLabelsBytesProcessed", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field TotalStructuredMetadataBytesProcessed", wireType)
}
m.TotalNonIndexedLabelsBytesProcessed = 0
m.TotalStructuredMetadataBytesProcessed = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStats
@ -2728,7 +2728,7 @@ func (m *Summary) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
m.TotalNonIndexedLabelsBytesProcessed |= int64(b&0x7F) << shift
m.TotalStructuredMetadataBytesProcessed |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -3331,9 +3331,9 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
case 11:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field HeadChunkNonIndexedLabelsBytes", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field HeadChunkStructuredMetadataBytes", wireType)
}
m.HeadChunkNonIndexedLabelsBytes = 0
m.HeadChunkStructuredMetadataBytes = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStats
@ -3343,16 +3343,16 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
m.HeadChunkNonIndexedLabelsBytes |= int64(b&0x7F) << shift
m.HeadChunkStructuredMetadataBytes |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 12:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DecompressedNonIndexedLabelsBytes", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field DecompressedStructuredMetadataBytes", wireType)
}
m.DecompressedNonIndexedLabelsBytes = 0
m.DecompressedStructuredMetadataBytes = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowStats
@ -3362,7 +3362,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
m.DecompressedNonIndexedLabelsBytes |= int64(b&0x7F) << shift
m.DecompressedStructuredMetadataBytes |= int64(b&0x7F) << shift
if b < 0x80 {
break
}

@ -57,7 +57,7 @@ message Summary {
int64 bytesProcessedPerSecond = 1 [(gogoproto.jsontag) = "bytesProcessedPerSecond"];
// Total lines processed per second.
int64 linesProcessedPerSecond = 2 [(gogoproto.jsontag) = "linesProcessedPerSecond"];
// Total bytes processed. Includes non-indexed labels bytes.
// Total bytes processed. Includes structured metadata bytes.
int64 totalBytesProcessed = 3 [(gogoproto.jsontag) = "totalBytesProcessed"];
// Total lines processed.
int64 totalLinesProcessed = 4 [(gogoproto.jsontag) = "totalLinesProcessed"];
@ -81,7 +81,7 @@ message Summary {
// Total lines post query filtering
int64 totalPostFilterLines = 11 [(gogoproto.jsontag) = "totalPostFilterLines"];
// Total bytes processed of metadata.
int64 totalNonIndexedLabelsBytesProcessed = 12 [(gogoproto.jsontag) = "totalNonIndexedLabelsBytesProcessed"];
int64 totalStructuredMetadataBytesProcessed = 12 [(gogoproto.jsontag) = "totalStructuredMetadataBytesProcessed"];
}
message Querier {
@ -125,11 +125,11 @@ message Store {
}
message Chunk {
// Total bytes processed but was already in memory (found in the headchunk). Includes non-indexed labels bytes.
// Total bytes processed but was already in memory (found in the headchunk). Includes structured metadata bytes.
int64 headChunkBytes = 4 [(gogoproto.jsontag) = "headChunkBytes"];
// Total lines processed but was already in memory. (found in the headchunk)
int64 headChunkLines = 5 [(gogoproto.jsontag) = "headChunkLines"];
// Total bytes decompressed and processed from chunks. Includes non-indexed labels bytes.
// Total bytes decompressed and processed from chunks. Includes structured metadata bytes.
int64 decompressedBytes = 6 [(gogoproto.jsontag) = "decompressedBytes"];
// Total lines decompressed and processed from chunks.
int64 decompressedLines = 7 [(gogoproto.jsontag) = "decompressedLines"];
@ -140,9 +140,9 @@ message Chunk {
// Total lines post filtering
int64 postFilterLines = 10 [(gogoproto.jsontag) = "postFilterLines"];
// Total bytes processed for metadata but was already in memory. (found in the headchunk)
int64 headChunkNonIndexedLabelsBytes = 11 [(gogoproto.jsontag) = "headChunkNonIndexedLabelsBytes"];
int64 headChunkStructuredMetadataBytes = 11 [(gogoproto.jsontag) = "headChunkStructuredMetadataBytes"];
// Total bytes of entries metadata decompressed and processed from chunks.
int64 decompressedNonIndexedLabelsBytes = 12 [(gogoproto.jsontag) = "decompressedNonIndexedLabelsBytes"];
int64 decompressedStructuredMetadataBytes = 12 [(gogoproto.jsontag) = "decompressedStructuredMetadataBytes"];
}
message Cache {

@ -31,9 +31,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=

@ -216,9 +216,9 @@ func (m *LabelPairAdapter) GetValue() string {
}
type EntryAdapter struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
NonIndexedLabels []LabelPairAdapter `protobuf:"bytes,3,rep,name=nonIndexedLabels,proto3" json:"nonIndexedLabels,omitempty"`
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
StructuredMetadata []LabelPairAdapter `protobuf:"bytes,3,rep,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"`
}
func (m *EntryAdapter) Reset() { *m = EntryAdapter{} }
@ -267,9 +267,9 @@ func (m *EntryAdapter) GetLine() string {
return ""
}
func (m *EntryAdapter) GetNonIndexedLabels() []LabelPairAdapter {
func (m *EntryAdapter) GetStructuredMetadata() []LabelPairAdapter {
if m != nil {
return m.NonIndexedLabels
return m.StructuredMetadata
}
return nil
}
@ -285,39 +285,39 @@ func init() {
func init() { proto.RegisterFile("pkg/push/push.proto", fileDescriptor_35ec442956852c9e) }
var fileDescriptor_35ec442956852c9e = []byte{
// 498 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x53, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0xf5, 0x26, 0x6e, 0xda, 0x4e, 0x4a, 0xa9, 0x96, 0xb6, 0x04, 0x0b, 0xad, 0x23, 0x9f, 0x72,
0x00, 0x5b, 0x0a, 0x07, 0x2e, 0x5c, 0x62, 0x09, 0xa9, 0x48, 0x3d, 0x54, 0x06, 0x81, 0xc4, 0x6d,
0x43, 0xb6, 0xb6, 0xa9, 0xed, 0x35, 0xde, 0x35, 0xa2, 0x37, 0x3e, 0xa1, 0xfc, 0x05, 0x9f, 0xd2,
0x63, 0x8e, 0x15, 0x07, 0x43, 0x9c, 0x0b, 0xca, 0xa9, 0x9f, 0x80, 0xbc, 0xb6, 0x49, 0x09, 0x97,
0xf5, 0x9b, 0xb7, 0x33, 0xf3, 0x9e, 0x67, 0x6c, 0x78, 0x90, 0x5e, 0xf8, 0x4e, 0x9a, 0x8b, 0x40,
0x1d, 0x76, 0x9a, 0x71, 0xc9, 0xf1, 0x4e, 0xc4, 0x7d, 0x85, 0x8c, 0x43, 0x9f, 0xfb, 0x5c, 0x41,
0xa7, 0x42, 0xf5, 0xbd, 0x61, 0xfa, 0x9c, 0xfb, 0x11, 0x73, 0x54, 0x34, 0xcd, 0xcf, 0x1d, 0x19,
0xc6, 0x4c, 0x48, 0x1a, 0xa7, 0x75, 0x82, 0xf5, 0x0e, 0xfa, 0x67, 0xb9, 0x08, 0x3c, 0xf6, 0x29,
0x67, 0x42, 0xe2, 0x13, 0xd8, 0x16, 0x32, 0x63, 0x34, 0x16, 0x03, 0x34, 0xec, 0x8e, 0xfa, 0xe3,
0x87, 0x76, 0xab, 0x60, 0xbf, 0x56, 0x17, 0x93, 0x19, 0x4d, 0x25, 0xcb, 0xdc, 0xa3, 0x1f, 0x85,
0xd9, 0xab, 0xa9, 0x55, 0x61, 0xb6, 0x55, 0x5e, 0x0b, 0xac, 0x7d, 0xd8, 0xab, 0x1b, 0x8b, 0x94,
0x27, 0x82, 0x59, 0xdf, 0x10, 0xdc, 0xfb, 0xa7, 0x03, 0xb6, 0xa0, 0x17, 0xd1, 0x29, 0x8b, 0x2a,
0x29, 0x34, 0xda, 0x75, 0x61, 0x55, 0x98, 0x0d, 0xe3, 0x35, 0x4f, 0x3c, 0x81, 0x6d, 0x96, 0xc8,
0x2c, 0x64, 0x62, 0xd0, 0x51, 0x7e, 0x8e, 0xd7, 0x7e, 0x5e, 0x26, 0x32, 0xbb, 0x6c, 0xed, 0xdc,
0xbf, 0x2e, 0x4c, 0xad, 0x32, 0xd2, 0xa4, 0x7b, 0x2d, 0xc0, 0x8f, 0x40, 0x0f, 0xa8, 0x08, 0x06,
0xdd, 0x21, 0x1a, 0xe9, 0xee, 0xd6, 0xaa, 0x30, 0xd1, 0x53, 0x4f, 0x51, 0xd6, 0x0b, 0x38, 0x38,
0xad, 0x74, 0xce, 0x68, 0x98, 0xb5, 0xae, 0x30, 0xe8, 0x09, 0x8d, 0x59, 0xed, 0xc9, 0x53, 0x18,
0x1f, 0xc2, 0xd6, 0x67, 0x1a, 0xe5, 0x6c, 0xd0, 0x51, 0x64, 0x1d, 0x58, 0x25, 0x82, 0xbd, 0xbb,
0x1e, 0xf0, 0x09, 0xec, 0xfe, 0x1d, 0xaf, 0xaa, 0xef, 0x8f, 0x0d, 0xbb, 0x5e, 0x80, 0xdd, 0x2e,
0xc0, 0x7e, 0xd3, 0x66, 0xb8, 0xfb, 0x8d, 0xe5, 0x8e, 0x14, 0x57, 0x3f, 0x4d, 0xe4, 0xad, 0x8b,
0xf1, 0x63, 0xd0, 0xa3, 0x30, 0x69, 0xf4, 0xdc, 0x9d, 0x55, 0x61, 0xaa, 0xd8, 0x53, 0x27, 0xfe,
0x08, 0x07, 0x09, 0x4f, 0x5e, 0x25, 0x33, 0xf6, 0x85, 0xcd, 0x4e, 0xeb, 0x11, 0x76, 0xd5, 0x74,
0x8c, 0xf5, 0x74, 0x36, 0x5f, 0xcc, 0xb5, 0x1a, 0x39, 0x63, 0xb3, 0xf6, 0x09, 0x8f, 0x43, 0xc9,
0xe2, 0x54, 0x5e, 0x7a, 0xff, 0xf5, 0x1d, 0x4f, 0xa0, 0x57, 0xad, 0x91, 0x65, 0xf8, 0x39, 0xe8,
0x15, 0xc2, 0x47, 0x6b, 0x8d, 0x3b, 0x5f, 0x8e, 0x71, 0xbc, 0x49, 0x37, 0x7b, 0xd7, 0xdc, 0xb7,
0xf3, 0x05, 0xd1, 0x6e, 0x16, 0x44, 0xbb, 0x5d, 0x10, 0xf4, 0xb5, 0x24, 0xe8, 0x7b, 0x49, 0xd0,
0x75, 0x49, 0xd0, 0xbc, 0x24, 0xe8, 0x57, 0x49, 0xd0, 0xef, 0x92, 0x68, 0xb7, 0x25, 0x41, 0x57,
0x4b, 0xa2, 0xcd, 0x97, 0x44, 0xbb, 0x59, 0x12, 0xed, 0xfd, 0xd0, 0x0f, 0x65, 0x90, 0x4f, 0xed,
0x0f, 0x3c, 0x76, 0xfc, 0x8c, 0x9e, 0xd3, 0x84, 0x3a, 0x11, 0xbf, 0x08, 0x9d, 0xf6, 0x37, 0x98,
0xf6, 0x94, 0xda, 0xb3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x6f, 0x19, 0xc8, 0x19, 0x03,
0x00, 0x00,
// 503 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x31, 0x6f, 0xd3, 0x40,
0x14, 0xf6, 0x25, 0x69, 0xda, 0x5e, 0x4a, 0x41, 0x47, 0x5b, 0x8c, 0x55, 0x9d, 0x23, 0x8b, 0x21,
0x03, 0xd8, 0x52, 0x18, 0x58, 0x58, 0x62, 0x09, 0xa9, 0x03, 0x48, 0x95, 0x41, 0x20, 0xb1, 0x5d,
0x9a, 0xab, 0x6d, 0xd5, 0xf6, 0x99, 0xbb, 0x33, 0x52, 0x37, 0x7e, 0x42, 0xf9, 0x17, 0xfc, 0x94,
0x8e, 0x19, 0x2b, 0x06, 0x43, 0x9c, 0xa5, 0xca, 0xd4, 0x9f, 0x80, 0x7c, 0xf6, 0x91, 0x52, 0xba,
0x9c, 0xbf, 0xf7, 0xdd, 0x7b, 0xef, 0xfb, 0xfc, 0x9e, 0x0d, 0x1f, 0xe7, 0x67, 0xa1, 0x97, 0x17,
0x22, 0x52, 0x87, 0x9b, 0x73, 0x26, 0x19, 0xda, 0x4a, 0x58, 0xa8, 0x90, 0xb5, 0x17, 0xb2, 0x90,
0x29, 0xe8, 0xd5, 0xa8, 0xb9, 0xb7, 0xec, 0x90, 0xb1, 0x30, 0xa1, 0x9e, 0x8a, 0xa6, 0xc5, 0xa9,
0x27, 0xe3, 0x94, 0x0a, 0x49, 0xd2, 0xbc, 0x49, 0x70, 0x3e, 0xc1, 0xc1, 0x71, 0x21, 0xa2, 0x80,
0x7e, 0x29, 0xa8, 0x90, 0xe8, 0x08, 0x6e, 0x0a, 0xc9, 0x29, 0x49, 0x85, 0x09, 0x86, 0xdd, 0xd1,
0x60, 0xfc, 0xc4, 0xd5, 0x0a, 0xee, 0x7b, 0x75, 0x31, 0x99, 0x91, 0x5c, 0x52, 0xee, 0xef, 0xff,
0x2c, 0xed, 0x7e, 0x43, 0xad, 0x4a, 0x5b, 0x57, 0x05, 0x1a, 0x38, 0xbb, 0x70, 0xa7, 0x69, 0x2c,
0x72, 0x96, 0x09, 0xea, 0x7c, 0x07, 0xf0, 0xc1, 0x3f, 0x1d, 0x90, 0x03, 0xfb, 0x09, 0x99, 0xd2,
0xa4, 0x96, 0x02, 0xa3, 0x6d, 0x1f, 0xae, 0x4a, 0xbb, 0x65, 0x82, 0xf6, 0x89, 0x26, 0x70, 0x93,
0x66, 0x92, 0xc7, 0x54, 0x98, 0x1d, 0xe5, 0xe7, 0x60, 0xed, 0xe7, 0x4d, 0x26, 0xf9, 0xb9, 0xb6,
0xf3, 0xf0, 0xb2, 0xb4, 0x8d, 0xda, 0x48, 0x9b, 0x1e, 0x68, 0x80, 0x9e, 0xc2, 0x5e, 0x44, 0x44,
0x64, 0x76, 0x87, 0x60, 0xd4, 0xf3, 0x37, 0x56, 0xa5, 0x0d, 0x5e, 0x04, 0x8a, 0x72, 0x5e, 0xc3,
0x47, 0x6f, 0x6b, 0x9d, 0x63, 0x12, 0x73, 0xed, 0x0a, 0xc1, 0x5e, 0x46, 0x52, 0xda, 0x78, 0x0a,
0x14, 0x46, 0x7b, 0x70, 0xe3, 0x2b, 0x49, 0x0a, 0x6a, 0x76, 0x14, 0xd9, 0x04, 0xce, 0x35, 0x80,
0x3b, 0xb7, 0x3d, 0xa0, 0x23, 0xb8, 0xfd, 0x77, 0xbc, 0xaa, 0x7e, 0x30, 0xb6, 0xdc, 0x66, 0x01,
0xae, 0x5e, 0x80, 0xfb, 0x41, 0x67, 0xf8, 0xbb, 0xad, 0xe5, 0x8e, 0x14, 0x17, 0xbf, 0x6c, 0x10,
0xac, 0x8b, 0xd1, 0x21, 0xec, 0x25, 0x71, 0xd6, 0xea, 0xf9, 0x5b, 0xab, 0xd2, 0x56, 0x71, 0xa0,
0x4e, 0x94, 0x43, 0x24, 0x24, 0x2f, 0x4e, 0x64, 0xc1, 0xe9, 0xec, 0x1d, 0x95, 0x64, 0x46, 0x24,
0x31, 0xbb, 0x6a, 0x3e, 0xd6, 0x7a, 0x3e, 0x77, 0x5f, 0xcd, 0x7f, 0xd6, 0x0a, 0x1e, 0xfe, 0x5f,
0xfd, 0x9c, 0xa5, 0xb1, 0xa4, 0x69, 0x2e, 0xcf, 0x83, 0x7b, 0x7a, 0x8f, 0x27, 0xb0, 0x5f, 0x2f,
0x93, 0x72, 0xf4, 0x0a, 0xf6, 0x6a, 0x84, 0xf6, 0xd7, 0x3a, 0xb7, 0xbe, 0x1f, 0xeb, 0xe0, 0x2e,
0xdd, 0x6e, 0xdf, 0xf0, 0x3f, 0xce, 0x17, 0xd8, 0xb8, 0x5a, 0x60, 0xe3, 0x66, 0x81, 0xc1, 0xb7,
0x0a, 0x83, 0x1f, 0x15, 0x06, 0x97, 0x15, 0x06, 0xf3, 0x0a, 0x83, 0xdf, 0x15, 0x06, 0xd7, 0x15,
0x36, 0x6e, 0x2a, 0x0c, 0x2e, 0x96, 0xd8, 0x98, 0x2f, 0xb1, 0x71, 0xb5, 0xc4, 0xc6, 0xe7, 0x61,
0x18, 0xcb, 0xa8, 0x98, 0xba, 0x27, 0x2c, 0xf5, 0x42, 0x4e, 0x4e, 0x49, 0x46, 0xbc, 0x84, 0x9d,
0xc5, 0x9e, 0xfe, 0x19, 0xa6, 0x7d, 0xa5, 0xf6, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a,
0x46, 0x64, 0x71, 0x1f, 0x03, 0x00, 0x00,
}
func (this *PushRequest) Equal(that interface{}) bool {
@ -457,11 +457,11 @@ func (this *EntryAdapter) Equal(that interface{}) bool {
if this.Line != that1.Line {
return false
}
if len(this.NonIndexedLabels) != len(that1.NonIndexedLabels) {
if len(this.StructuredMetadata) != len(that1.StructuredMetadata) {
return false
}
for i := range this.NonIndexedLabels {
if !this.NonIndexedLabels[i].Equal(&that1.NonIndexedLabels[i]) {
for i := range this.StructuredMetadata {
if !this.StructuredMetadata[i].Equal(&that1.StructuredMetadata[i]) {
return false
}
}
@ -523,12 +523,12 @@ func (this *EntryAdapter) GoString() string {
s = append(s, "&push.EntryAdapter{")
s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
s = append(s, "Line: "+fmt.Sprintf("%#v", this.Line)+",\n")
if this.NonIndexedLabels != nil {
vs := make([]*LabelPairAdapter, len(this.NonIndexedLabels))
if this.StructuredMetadata != nil {
vs := make([]*LabelPairAdapter, len(this.StructuredMetadata))
for i := range vs {
vs[i] = &this.NonIndexedLabels[i]
vs[i] = &this.StructuredMetadata[i]
}
s = append(s, "NonIndexedLabels: "+fmt.Sprintf("%#v", vs)+",\n")
s = append(s, "StructuredMetadata: "+fmt.Sprintf("%#v", vs)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
@ -788,10 +788,10 @@ func (m *EntryAdapter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.NonIndexedLabels) > 0 {
for iNdEx := len(m.NonIndexedLabels) - 1; iNdEx >= 0; iNdEx-- {
if len(m.StructuredMetadata) > 0 {
for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.NonIndexedLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
size, err := m.StructuredMetadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@ -906,8 +906,8 @@ func (m *EntryAdapter) Size() (n int) {
if l > 0 {
n += 1 + l + sovPush(uint64(l))
}
if len(m.NonIndexedLabels) > 0 {
for _, e := range m.NonIndexedLabels {
if len(m.StructuredMetadata) > 0 {
for _, e := range m.StructuredMetadata {
l = e.Size()
n += 1 + l + sovPush(uint64(l))
}
@ -972,15 +972,15 @@ func (this *EntryAdapter) String() string {
if this == nil {
return "nil"
}
repeatedStringForNonIndexedLabels := "[]LabelPairAdapter{"
for _, f := range this.NonIndexedLabels {
repeatedStringForNonIndexedLabels += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + ","
repeatedStringForStructuredMetadata := "[]LabelPairAdapter{"
for _, f := range this.StructuredMetadata {
repeatedStringForStructuredMetadata += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + ","
}
repeatedStringForNonIndexedLabels += "}"
repeatedStringForStructuredMetadata += "}"
s := strings.Join([]string{`&EntryAdapter{`,
`Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Line:` + fmt.Sprintf("%v", this.Line) + `,`,
`NonIndexedLabels:` + repeatedStringForNonIndexedLabels + `,`,
`StructuredMetadata:` + repeatedStringForStructuredMetadata + `,`,
`}`,
}, "")
return s
@ -1484,7 +1484,7 @@ func (m *EntryAdapter) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NonIndexedLabels", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field StructuredMetadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@ -1511,8 +1511,8 @@ func (m *EntryAdapter) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.NonIndexedLabels = append(m.NonIndexedLabels, LabelPairAdapter{})
if err := m.NonIndexedLabels[len(m.NonIndexedLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
m.StructuredMetadata = append(m.StructuredMetadata, LabelPairAdapter{})
if err := m.StructuredMetadata[len(m.StructuredMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex

@ -42,8 +42,8 @@ message EntryAdapter {
(gogoproto.jsontag) = "ts"
];
string line = 2 [(gogoproto.jsontag) = "line"];
repeated LabelPairAdapter nonIndexedLabels = 3 [
repeated LabelPairAdapter structuredMetadata = 3 [
(gogoproto.nullable) = false,
(gogoproto.jsontag) = "nonIndexedLabels,omitempty"
(gogoproto.jsontag) = "structuredMetadata,omitempty"
];
}

@ -22,9 +22,9 @@ type Stream struct {
// Entry is a log entry with a timestamp.
type Entry struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
NonIndexedLabels LabelsAdapter `protobuf:"bytes,3,opt,name=nonIndexedLabels,proto3" json:"nonIndexedLabels,omitempty"`
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
StructuredMetadata LabelsAdapter `protobuf:"bytes,3,opt,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"`
}
// LabelAdapter should be a copy of the Prometheus labels.Label type.
@ -172,10 +172,10 @@ func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.NonIndexedLabels) > 0 {
for iNdEx := len(m.NonIndexedLabels) - 1; iNdEx >= 0; iNdEx-- {
if len(m.StructuredMetadata) > 0 {
for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := (*LabelAdapter)(&m.NonIndexedLabels[iNdEx]).MarshalToSizedBuffer(dAtA[:i])
size, err := (*LabelAdapter)(&m.StructuredMetadata[iNdEx]).MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@ -235,7 +235,7 @@ func (m *Stream) Unmarshal(dAtA []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NonIndexedLabels", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field StructuredMetadata", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -439,7 +439,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NonIndexedLabels", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field StructuredMetadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@ -466,8 +466,8 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.NonIndexedLabels = append(m.NonIndexedLabels, LabelAdapter{})
if err := m.NonIndexedLabels[len(m.NonIndexedLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
m.StructuredMetadata = append(m.StructuredMetadata, LabelAdapter{})
if err := m.StructuredMetadata[len(m.StructuredMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@ -655,8 +655,8 @@ func (m *Entry) Size() (n int) {
if l > 0 {
n += 1 + l + sovPush(uint64(l))
}
if len(m.NonIndexedLabels) > 0 {
for _, e := range m.NonIndexedLabels {
if len(m.StructuredMetadata) > 0 {
for _, e := range m.StructuredMetadata {
l = e.Size()
n += 1 + l + sovPush(uint64(l))
}
@ -739,8 +739,8 @@ func (m *Entry) Equal(that interface{}) bool {
if m.Line != that1.Line {
return false
}
for i := range m.NonIndexedLabels {
if !m.NonIndexedLabels[i].Equal(that1.NonIndexedLabels[i]) {
for i := range m.StructuredMetadata {
if !m.StructuredMetadata[i].Equal(that1.StructuredMetadata[i]) {
return false
}
}

@ -14,20 +14,20 @@ var (
Labels: `{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}`,
Hash: 1234*10 ^ 9,
Entries: []Entry{
{now, line, ""},
{now.Add(1 * time.Second), line, `{traceID="1234"}`},
{now.Add(2 * time.Second), line, ""},
{now.Add(3 * time.Second), line, `{user="abc"}`},
{now, line, nil},
{now.Add(1 * time.Second), line, LabelsAdapter{{Name: "traceID", Value: "1234"}}},
{now.Add(2 * time.Second), line, nil},
{now.Add(3 * time.Second), line, LabelsAdapter{{Name: "user", Value: "abc"}}},
},
}
streamAdapter = StreamAdapter{
Labels: `{job="foobar", cluster="foo-central1", namespace="bar", container_name="buzz"}`,
Hash: 1234*10 ^ 9,
Entries: []EntryAdapter{
{now, line, ""},
{now.Add(1 * time.Second), line, `{traceID="1234"}`},
{now.Add(2 * time.Second), line, ""},
{now.Add(3 * time.Second), line, `{user="abc"}`},
{now, line, nil},
{now.Add(1 * time.Second), line, []LabelPairAdapter{{Name: "traceID", Value: "1234"}}},
{now.Add(2 * time.Second), line, nil},
{now.Add(3 * time.Second), line, []LabelPairAdapter{{Name: "user", Value: "abc"}}},
},
}
)

@ -32,10 +32,10 @@ var (
"compressedBytes": 1,
"decompressedBytes": 2,
"decompressedLines": 3,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 4,
"headChunkLines": 5,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 8
},
@ -55,10 +55,10 @@ var (
"compressedBytes": 11,
"decompressedBytes": 12,
"decompressedLines": 13,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 14,
"headChunkLines": 15,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 19
},
@ -126,7 +126,7 @@ var (
"totalBytesProcessed": 24,
"totalEntriesReturned": 10,
"totalLinesProcessed": 25,
"totalNonIndexedLabelsBytesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
"totalPostFilterLines": 0
}
}`

@ -1340,10 +1340,10 @@ var (
"compressedBytes": 1,
"decompressedBytes": 2,
"decompressedLines": 3,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 4,
"headChunkLines": 5,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 8
},
@ -1363,10 +1363,10 @@ var (
"compressedBytes": 11,
"decompressedBytes": 12,
"decompressedLines": 13,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 14,
"headChunkLines": 15,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 19
},
@ -1434,7 +1434,7 @@ var (
"totalBytesProcessed": 24,
"totalEntriesReturned": 10,
"totalLinesProcessed": 25,
"totalNonIndexedLabelsBytesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
"totalPostFilterLines": 0
}
},`

@ -23,10 +23,10 @@ var emptyStats = `"stats": {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -46,10 +46,10 @@ var emptyStats = `"stats": {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -113,7 +113,7 @@ var emptyStats = `"stats": {
"totalBytesProcessed":0,
"totalEntriesReturned":0,
"totalLinesProcessed":0,
"totalNonIndexedLabelsBytesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
"totalPostFilterLines": 0
}
}`

@ -34,7 +34,7 @@ func fillCache(t *testing.T, scfg config.SchemaConfig, cache cache.Cache) ([]str
for i := 0; i < 111; i++ {
ts := model.TimeFromUnix(int64(i * chunkLen))
cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, 256*1024, 0)
cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
err := cs.Append(&logproto.Entry{
Timestamp: ts.Time(),

@ -80,7 +80,7 @@ func TestGrpcStore(t *testing.T) {
newChunkData := func() chunk.Data {
return chunkenc.NewFacade(
chunkenc.NewMemChunk(
chunkenc.ChunkFormatV3, chunkenc.EncNone, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, 256*1024, 0,
chunkenc.ChunkFormatV3, chunkenc.EncNone, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0,
), 0, 0)
}

@ -85,7 +85,7 @@ func CreateChunks(scfg config.SchemaConfig, startIndex, batchSize int, from mode
}
func DummyChunkFor(from, through model.Time, metric labels.Labels) chunk.Chunk {
cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, 256*1024, 0)
cs := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncGZIP, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
for ts := from; ts <= through; ts = ts.Add(15 * time.Second) {
err := cs.Append(&logproto.Entry{Timestamp: ts.Time(), Line: fmt.Sprintf("line ts=%d", ts)})

@ -322,7 +322,7 @@ func makeChunks(now time.Time, tpls ...c) []chunk.Chunk {
from := int(chk.from) / int(time.Hour)
// This is only here because it's helpful for debugging.
// This isn't even the write format for Loki but we dont' care for the sake of these tests.
memChk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncNone, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, 256*1024, 0)
memChk := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncNone, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 256*1024, 0)
// To make sure the fetcher doesn't swap keys and buffers each chunk is built with different, but deterministic data
for i := 0; i < from; i++ {
_ = memChk.Append(&logproto.Entry{

@ -125,7 +125,7 @@ func fillStore(cm storage.ClientMetrics) error {
if flushCount >= maxChunks {
return
}
chunkEnc = chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncLZ4_64k, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, 262144, 1572864)
chunkEnc = chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncLZ4_64k, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, 262144, 1572864)
}
}
}(i)

@ -80,12 +80,12 @@ func (d *DeleteRequest) FilterFunction(lbls labels.Labels) (filter.Func, error)
}
f := p.ForStream(lbls).ProcessString
return func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
return func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
if ts.Before(d.timeInterval.start) || ts.After(d.timeInterval.end) {
return false
}
result, _, skip := f(0, s, nonIndexedLabels...)
result, _, skip := f(0, s, structuredMetadata...)
if len(result) != 0 || skip {
d.Metrics.deletedLinesTotal.WithLabelValues(d.UserID).Inc()
d.DeletedLines++

@ -30,8 +30,8 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
lbl := `{foo="bar", fizz="buzz"}`
lblWithLineFilter := `{foo="bar", fizz="buzz"} |= "filter"`
lblWithNonIndexedLabelsFilter := `{foo="bar", fizz="buzz"} | ping="pong"`
lblWithLineAndNonIndexedLabelsFilter := `{foo="bar", fizz="buzz"} | ping="pong" |= "filter"`
lblWithStructuredMetadataFilter := `{foo="bar", fizz="buzz"} | ping="pong"`
lblWithLineAndStructuredMetadataFilter := `{foo="bar", fizz="buzz"} | ping="pong" |= "filter"`
chunkEntry := retention.ChunkEntry{
ChunkRef: retention.ChunkRef{
@ -84,18 +84,18 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
},
},
{
name: "whole chunk deleted with non-indexed labels filter present",
name: "whole chunk deleted with structured metadata filter present",
deleteRequest: DeleteRequest{
UserID: user1,
StartTime: now.Add(-3 * time.Hour),
EndTime: now.Add(-time.Hour),
Query: lblWithNonIndexedLabelsFilter,
Query: lblWithStructuredMetadataFilter,
},
expectedResp: resp{
isDeleted: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
tsUnixNano := ts.UnixNano()
if labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong && now.Add(-3*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-time.Hour).UnixNano() {
if labels.Labels(structuredMetadata).Get(lblPing) == lblPong && now.Add(-3*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-time.Hour).UnixNano() {
return true
}
return false
@ -103,18 +103,18 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
},
},
{
name: "whole chunk deleted with line and non-indexed labels filter present",
name: "whole chunk deleted with line and structured metadata filter present",
deleteRequest: DeleteRequest{
UserID: user1,
StartTime: now.Add(-3 * time.Hour),
EndTime: now.Add(-time.Hour),
Query: lblWithLineAndNonIndexedLabelsFilter,
Query: lblWithLineAndStructuredMetadataFilter,
},
expectedResp: resp{
isDeleted: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
tsUnixNano := ts.UnixNano()
if strings.Contains(s, "filter") && labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong && now.Add(-3*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-time.Hour).UnixNano() {
if strings.Contains(s, "filter") && labels.Labels(structuredMetadata).Get(lblPing) == lblPong && now.Add(-3*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-time.Hour).UnixNano() {
return true
}
return false
@ -179,18 +179,18 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
},
},
{
name: "chunk deleted from end with non-indexed labels filter present",
name: "chunk deleted from end with structured metadata filter present",
deleteRequest: DeleteRequest{
UserID: user1,
StartTime: now.Add(-2 * time.Hour),
EndTime: now,
Query: lblWithNonIndexedLabelsFilter,
Query: lblWithStructuredMetadataFilter,
},
expectedResp: resp{
isDeleted: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
tsUnixNano := ts.UnixNano()
if labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong && now.Add(-2*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.UnixNano() {
if labels.Labels(structuredMetadata).Get(lblPing) == lblPong && now.Add(-2*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.UnixNano() {
return true
}
return false
@ -198,18 +198,18 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
},
},
{
name: "chunk deleted from end with line and non-indexed labels filter present",
name: "chunk deleted from end with line and structured metadata filter present",
deleteRequest: DeleteRequest{
UserID: user1,
StartTime: now.Add(-2 * time.Hour),
EndTime: now,
Query: lblWithLineAndNonIndexedLabelsFilter,
Query: lblWithLineAndStructuredMetadataFilter,
},
expectedResp: resp{
isDeleted: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
tsUnixNano := ts.UnixNano()
if strings.Contains(s, "filter") && labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong && now.Add(-2*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.UnixNano() {
if strings.Contains(s, "filter") && labels.Labels(structuredMetadata).Get(lblPing) == lblPong && now.Add(-2*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.UnixNano() {
return true
}
return false
@ -289,14 +289,14 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
line = "filter bar"
}
// mix of empty, ding=dong and ping=pong as non-indexed labels
var nonIndexedLabels []labels.Label
// mix of empty, ding=dong and ping=pong as structured metadata
var structuredMetadata []labels.Label
if start.Time().Minute()%3 == 0 {
nonIndexedLabels = []labels.Label{{Name: lblPing, Value: lblPong}}
structuredMetadata = []labels.Label{{Name: lblPing, Value: lblPong}}
} else if start.Time().Minute()%2 == 0 {
nonIndexedLabels = []labels.Label{{Name: "ting", Value: "tong"}}
structuredMetadata = []labels.Label{{Name: "ting", Value: "tong"}}
}
require.Equal(t, tc.expectedResp.expectedFilter(start.Time(), line, nonIndexedLabels...), filterFunc(start.Time(), line, nonIndexedLabels...), "line", line, "time", start.Time(), "now", now.Time())
require.Equal(t, tc.expectedResp.expectedFilter(start.Time(), line, structuredMetadata...), filterFunc(start.Time(), line, structuredMetadata...), "line", line, "time", start.Time(), "now", now.Time())
}
})
}
@ -335,7 +335,7 @@ func TestDeleteRequest_FilterFunction(t *testing.T) {
require.Equal(t, float64(1), testutil.ToFloat64(dr.Metrics.deletedLinesTotal))
})
t.Run("one line matching with non-indexed labels filter", func(t *testing.T) {
t.Run("one line matching with structured metadata filter", func(t *testing.T) {
dr := DeleteRequest{
Query: `{foo="bar"} | ping="pong"`,
DeletedLines: 0,
@ -358,7 +358,7 @@ func TestDeleteRequest_FilterFunction(t *testing.T) {
require.Equal(t, float64(1), testutil.ToFloat64(dr.Metrics.deletedLinesTotal))
})
t.Run("one line matching with line and non-indexed labels filter", func(t *testing.T) {
t.Run("one line matching with line and structured metadata filter", func(t *testing.T) {
dr := DeleteRequest{
Query: `{foo="bar"} | ping="pong" |= "some"`,
DeletedLines: 0,

@ -269,9 +269,9 @@ func (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time)
}
d.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(ref.UserID)).Inc()
return true, func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
return true, func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
for _, ff := range filterFuncs {
if ff(ts, s, nonIndexedLabels...) {
if ff(ts, s, structuredMetadata...) {
return true
}
}

@ -28,8 +28,8 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
lblFoo, err := syntax.ParseLabels(`{foo="bar"}`)
require.NoError(t, err)
streamSelectorWithLineFilters := lblFoo.String() + `|="fizz"`
streamSelectorWithNonIndexedLabelsFilters := lblFoo.String() + `| ping="pong"`
streamSelectorWithLineAndNonIndexedLabelsFilters := lblFoo.String() + `| ping="pong" |= "fizz"`
streamSelectorWithStructuredMetadataFilters := lblFoo.String() + `| ping="pong"`
streamSelectorWithLineAndStructuredMetadataFilters := lblFoo.String() + `| ping="pong" |= "fizz"`
chunkEntry := retention.ChunkEntry{
ChunkRef: retention.ChunkRef{
@ -170,21 +170,21 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
},
},
{
name: "whole chunk deleted by single request with non-indexed labels filters",
name: "whole chunk deleted by single request with structured metadata filters",
deletionMode: deletionmode.FilterAndDelete,
batchSize: 70,
deleteRequestsFromStore: []DeleteRequest{
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-24 * time.Hour),
EndTime: now,
},
},
expectedResp: resp{
isExpired: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
},
},
expectedDeletionRangeByUser: map[string]model.Interval{
@ -195,21 +195,21 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
},
},
{
name: "whole chunk deleted by single request with line and non-indexed labels filters",
name: "whole chunk deleted by single request with line and structured metadata filters",
deletionMode: deletionmode.FilterAndDelete,
batchSize: 70,
deleteRequestsFromStore: []DeleteRequest{
{
UserID: testUserID,
Query: streamSelectorWithLineAndNonIndexedLabelsFilters,
Query: streamSelectorWithLineAndStructuredMetadataFilters,
StartTime: now.Add(-24 * time.Hour),
EndTime: now,
},
},
expectedResp: resp{
isExpired: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong && strings.Contains(s, "fizz")
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong && strings.Contains(s, "fizz")
},
},
expectedDeletionRangeByUser: map[string]model.Interval{
@ -333,27 +333,27 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
},
},
{
name: "multiple delete requests with non-indexed labels filters and one deleting the whole chunk",
name: "multiple delete requests with structured metadata filters and one deleting the whole chunk",
deletionMode: deletionmode.FilterAndDelete,
batchSize: 70,
deleteRequestsFromStore: []DeleteRequest{
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-48 * time.Hour),
EndTime: now.Add(-24 * time.Hour),
},
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-12 * time.Hour),
EndTime: now,
},
},
expectedResp: resp{
isExpired: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
},
},
expectedDeletionRangeByUser: map[string]model.Interval{
@ -476,27 +476,27 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
},
},
{
name: "multiple overlapping requests with non-indexed labels filters deleting the whole chunk",
name: "multiple overlapping requests with structured metadata filters deleting the whole chunk",
deletionMode: deletionmode.FilterAndDelete,
batchSize: 70,
deleteRequestsFromStore: []DeleteRequest{
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-13 * time.Hour),
EndTime: now.Add(-6 * time.Hour),
},
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-8 * time.Hour),
EndTime: now,
},
},
expectedResp: resp{
isExpired: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
},
},
expectedDeletionRangeByUser: map[string]model.Interval{
@ -581,33 +581,33 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
},
},
{
name: "multiple non-overlapping requests with non-indexed labels filter deleting the whole chunk",
name: "multiple non-overlapping requests with structured metadata filter deleting the whole chunk",
deletionMode: deletionmode.FilterAndDelete,
batchSize: 70,
deleteRequestsFromStore: []DeleteRequest{
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-12 * time.Hour),
EndTime: now.Add(-6*time.Hour) - 1,
},
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-6 * time.Hour),
EndTime: now.Add(-4*time.Hour) - 1,
},
{
UserID: testUserID,
Query: streamSelectorWithNonIndexedLabelsFilters,
Query: streamSelectorWithStructuredMetadataFilters,
StartTime: now.Add(-4 * time.Hour),
EndTime: now,
},
},
expectedResp: resp{
isExpired: true,
expectedFilter: func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
return labels.Labels(nonIndexedLabels).Get(lblPing) == lblPong
expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
},
},
expectedDeletionRangeByUser: map[string]model.Interval{
@ -758,14 +758,14 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
if start.Time().Minute()%2 == 1 {
line = "fizz buzz"
}
// mix of empty, ding=dong and ping=pong as non-indexed labels
var nonIndexedLabels []labels.Label
// mix of empty, ding=dong and ping=pong as structured metadata
var structuredMetadata []labels.Label
if start.Time().Minute()%3 == 0 {
nonIndexedLabels = []labels.Label{{Name: lblPing, Value: lblPong}}
structuredMetadata = []labels.Label{{Name: lblPing, Value: lblPong}}
} else if start.Time().Minute()%2 == 0 {
nonIndexedLabels = []labels.Label{{Name: "ting", Value: "tong"}}
structuredMetadata = []labels.Label{{Name: "ting", Value: "tong"}}
}
require.Equal(t, tc.expectedResp.expectedFilter(start.Time(), line, nonIndexedLabels...), filterFunc(start.Time(), line, nonIndexedLabels...), "line", line, "time", start.Time(), "now", now.Time())
require.Equal(t, tc.expectedResp.expectedFilter(start.Time(), line, structuredMetadata...), filterFunc(start.Time(), line, structuredMetadata...), "line", line, "time", start.Time(), "now", now.Time())
}
require.Equal(t, len(tc.expectedDeletionRangeByUser), len(mgr.deleteRequestsToProcess))

@ -366,8 +366,8 @@ func (c *chunkRewriter) rewriteChunk(ctx context.Context, ce ChunkEntry, tableIn
return false, false, fmt.Errorf("expected 1 entry for chunk %s but found %d in storage", chunkID, len(chks))
}
newChunkData, err := chks[0].Data.Rebound(ce.From, ce.Through, func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool {
if filterFunc(ts, s, nonIndexedLabels...) {
newChunkData, err := chks[0].Data.Rebound(ce.From, ce.Through, func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
if filterFunc(ts, s, structuredMetadata...) {
linesDeleted = true
return true
}

@ -216,13 +216,13 @@ func createChunk(t testing.TB, userID string, lbs labels.Labels, from model.Time
labelsBuilder.Set(labels.MetricName, "logs")
metric := labelsBuilder.Labels()
fp := ingesterclient.Fingerprint(lbs)
chunkEnc := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, blockSize, targetSize)
chunkEnc := chunkenc.NewMemChunk(chunkenc.ChunkFormatV4, chunkenc.EncSnappy, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, blockSize, targetSize)
for ts := from; !ts.After(through); ts = ts.Add(1 * time.Minute) {
require.NoError(t, chunkEnc.Append(&logproto.Entry{
Timestamp: ts.Time(),
Line: ts.String(),
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", ts.String())),
Timestamp: ts.Time(),
Line: ts.String(),
StructuredMetadata: logproto.FromLabelsToLabelAdapters(labels.FromStrings("foo", ts.String())),
}))
}
@ -339,11 +339,11 @@ func TestChunkRewriter(t *testing.T) {
},
},
{
name: "rewrite first half using non-indexed labels",
name: "rewrite first half using structured metadata",
chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, todaysTableInterval.Start, todaysTableInterval.Start.Add(2*time.Hour)),
filterFunc: func(ts time.Time, _ string, nonIndexedLabels ...labels.Label) bool {
filterFunc: func(ts time.Time, _ string, structuredMetadata ...labels.Label) bool {
tsUnixNano := ts.UnixNano()
if labels.Labels(nonIndexedLabels).Get("foo") == model.TimeFromUnixNano(ts.UnixNano()).String() &&
if labels.Labels(structuredMetadata).Get("foo") == model.TimeFromUnixNano(ts.UnixNano()).String() &&
todaysTableInterval.Start.UnixNano() <= tsUnixNano &&
tsUnixNano <= todaysTableInterval.Start.Add(time.Hour).UnixNano() {
return true
@ -544,20 +544,20 @@ func TestChunkRewriter(t *testing.T) {
require.Equal(t, expectedChunks[i][len(expectedChunks[i])-1].End, chunks[i].Through)
lokiChunk := chunks[i].Data.(*chunkenc.Facade).LokiChunk()
newChunkItr, err := lokiChunk.Iterator(context.Background(), chunks[i].From.Time(), chunks[i].Through.Add(time.Minute).Time(), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}), iter.WithKeepNonIndexedLabels())
newChunkItr, err := lokiChunk.Iterator(context.Background(), chunks[i].From.Time(), chunks[i].Through.Add(time.Minute).Time(), logproto.FORWARD, log.NewNoopPipeline().ForStream(labels.Labels{}), iter.WithKeepStructuredMetadata())
require.NoError(t, err)
for _, interval := range expectedChunks[i] {
for curr := interval.Start; curr <= interval.End; curr = curr.Add(time.Minute) {
expectedNonIndexedLabels := labels.FromStrings("foo", curr.String())
expectedStructuredMetadata := labels.FromStrings("foo", curr.String())
require.True(t, newChunkItr.Next())
require.Equal(t, logproto.Entry{
Timestamp: curr.Time(),
Line: curr.String(),
NonIndexedLabels: logproto.FromLabelsToLabelAdapters(expectedNonIndexedLabels),
Timestamp: curr.Time(),
Line: curr.String(),
StructuredMetadata: logproto.FromLabelsToLabelAdapters(expectedStructuredMetadata),
}, newChunkItr.Entry())
require.Equal(t, expectedNonIndexedLabels.String(), newChunkItr.Labels())
require.Equal(t, expectedStructuredMetadata.String(), newChunkItr.Labels())
}
}

@ -383,4 +383,4 @@ var streamsFixture = []*logproto.Stream{
},
},
}
var storeFixture = newMockChunkStore(chunkenc.ChunkFormatV3, chunkenc.UnorderedWithNonIndexedLabelsHeadBlockFmt, streamsFixture)
var storeFixture = newMockChunkStore(chunkenc.ChunkFormatV3, chunkenc.UnorderedWithStructuredMetadataHeadBlockFmt, streamsFixture)

@ -6,4 +6,4 @@ import (
"github.com/prometheus/prometheus/model/labels"
)
type Func func(ts time.Time, s string, nonIndexedLabels ...labels.Label) bool
type Func func(ts time.Time, s string, structuredMetadata ...labels.Label) bool

@ -30,7 +30,7 @@ var queryTests = []struct {
{
Timestamp: mustParse(time.RFC3339Nano, "2019-09-13T18:32:23.380001319Z"),
Line: "super line with labels",
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
},
@ -51,7 +51,7 @@ var queryTests = []struct {
{
"ts": "2019-09-13T18:32:23.380001319Z",
"line": "super line with labels",
"nonIndexedLabels": {
"structuredMetadata": {
"foo": "a",
"bar": "b"
}
@ -70,10 +70,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -93,10 +93,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -160,7 +160,7 @@ var queryTests = []struct {
"totalBytesProcessed": 0,
"totalEntriesReturned": 0,
"totalLinesProcessed": 0,
"totalNonIndexedLabelsBytesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
"totalPostFilterLines": 0
}
}
@ -202,7 +202,7 @@ var tailTests = []struct {
{
Timestamp: mustParse(time.RFC3339Nano, "2019-09-13T18:32:23.380001319Z"),
Line: "super line with labels",
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
},
@ -230,7 +230,7 @@ var tailTests = []struct {
{
"ts": "2019-09-13T18:32:23.380001319Z",
"line": "super line with labels",
"nonIndexedLabels": {
"structuredMetadata": {
"foo": "a",
"bar": "b"
}

@ -37,7 +37,7 @@ var queryTests = []struct {
{
Timestamp: time.Unix(0, 123456789012346),
Line: "super line with labels",
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
},
@ -72,10 +72,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -95,10 +95,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -162,7 +162,7 @@ var queryTests = []struct {
"totalBytesProcessed": 0,
"totalEntriesReturned": 0,
"totalLinesProcessed": 0,
"totalNonIndexedLabelsBytesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
"totalPostFilterLines": 0
}
}
@ -237,10 +237,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -260,10 +260,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -327,7 +327,7 @@ var queryTests = []struct {
"totalBytesProcessed": 0,
"totalEntriesReturned": 0,
"totalLinesProcessed": 0,
"totalNonIndexedLabelsBytesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
"totalPostFilterLines": 0
}
}
@ -423,10 +423,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -446,10 +446,10 @@ var queryTests = []struct {
"compressedBytes": 0,
"decompressedBytes": 0,
"decompressedLines": 0,
"decompressedNonIndexedLabelsBytes": 0,
"decompressedStructuredMetadataBytes": 0,
"headChunkBytes": 0,
"headChunkLines": 0,
"headChunkNonIndexedLabelsBytes": 0,
"headChunkStructuredMetadataBytes": 0,
"postFilterLines": 0,
"totalDuplicates": 0
}
@ -513,7 +513,7 @@ var queryTests = []struct {
"totalBytesProcessed": 0,
"totalEntriesReturned": 0,
"totalLinesProcessed": 0,
"totalNonIndexedLabelsBytesProcessed": 0,
"totalStructuredMetadataBytesProcessed": 0,
"totalPostFilterLines": 0
}
}
@ -557,7 +557,7 @@ var tailTests = []struct {
{
Timestamp: time.Unix(0, 123456789012346),
Line: "super line with labels",
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "foo", Value: "a"},
{Name: "bar", Value: "b"},
},

@ -310,10 +310,10 @@ func encodeStream(stream logproto.Stream, s *jsoniter.Stream) error {
s.WriteRaw(`"`)
s.WriteMore()
s.WriteStringWithHTMLEscaped(e.Line)
if len(e.NonIndexedLabels) > 0 {
if len(e.StructuredMetadata) > 0 {
s.WriteMore()
s.WriteObjectStart()
for i, lbl := range e.NonIndexedLabels {
for i, lbl := range e.StructuredMetadata {
if i > 0 {
s.WriteMore()
}

@ -28,7 +28,7 @@ var pushTests = []struct {
{
Timestamp: mustParse(time.RFC3339Nano, "2019-09-13T18:32:23.380001319Z"),
Line: "super line with labels",
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "a", Value: "1"},
{Name: "b", Value: "2"},
},
@ -49,7 +49,7 @@ var pushTests = []struct {
{
"ts": "2019-09-13T18:32:23.380001319Z",
"line": "super line with labels",
"nonIndexedLabels": {
"structuredMetadata": {
"a": "1",
"b": "2"
}

@ -52,7 +52,7 @@ var pushTests = []struct {
{
Timestamp: time.Unix(0, 123456789012345),
Line: "super line",
NonIndexedLabels: []logproto.LabelAdapter{
StructuredMetadata: []logproto.LabelAdapter{
{Name: "a", Value: "1"},
{Name: "b", Value: "2"},
},

@ -216,9 +216,9 @@ func (m *LabelPairAdapter) GetValue() string {
}
type EntryAdapter struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
NonIndexedLabels []LabelPairAdapter `protobuf:"bytes,3,rep,name=nonIndexedLabels,proto3" json:"nonIndexedLabels,omitempty"`
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
StructuredMetadata []LabelPairAdapter `protobuf:"bytes,3,rep,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"`
}
func (m *EntryAdapter) Reset() { *m = EntryAdapter{} }
@ -267,9 +267,9 @@ func (m *EntryAdapter) GetLine() string {
return ""
}
func (m *EntryAdapter) GetNonIndexedLabels() []LabelPairAdapter {
func (m *EntryAdapter) GetStructuredMetadata() []LabelPairAdapter {
if m != nil {
return m.NonIndexedLabels
return m.StructuredMetadata
}
return nil
}
@ -285,39 +285,39 @@ func init() {
func init() { proto.RegisterFile("pkg/push/push.proto", fileDescriptor_35ec442956852c9e) }
var fileDescriptor_35ec442956852c9e = []byte{
// 498 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x53, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0xf5, 0x26, 0x6e, 0xda, 0x4e, 0x4a, 0xa9, 0x96, 0xb6, 0x04, 0x0b, 0xad, 0x23, 0x9f, 0x72,
0x00, 0x5b, 0x0a, 0x07, 0x2e, 0x5c, 0x62, 0x09, 0xa9, 0x48, 0x3d, 0x54, 0x06, 0x81, 0xc4, 0x6d,
0x43, 0xb6, 0xb6, 0xa9, 0xed, 0x35, 0xde, 0x35, 0xa2, 0x37, 0x3e, 0xa1, 0xfc, 0x05, 0x9f, 0xd2,
0x63, 0x8e, 0x15, 0x07, 0x43, 0x9c, 0x0b, 0xca, 0xa9, 0x9f, 0x80, 0xbc, 0xb6, 0x49, 0x09, 0x97,
0xf5, 0x9b, 0xb7, 0x33, 0xf3, 0x9e, 0x67, 0x6c, 0x78, 0x90, 0x5e, 0xf8, 0x4e, 0x9a, 0x8b, 0x40,
0x1d, 0x76, 0x9a, 0x71, 0xc9, 0xf1, 0x4e, 0xc4, 0x7d, 0x85, 0x8c, 0x43, 0x9f, 0xfb, 0x5c, 0x41,
0xa7, 0x42, 0xf5, 0xbd, 0x61, 0xfa, 0x9c, 0xfb, 0x11, 0x73, 0x54, 0x34, 0xcd, 0xcf, 0x1d, 0x19,
0xc6, 0x4c, 0x48, 0x1a, 0xa7, 0x75, 0x82, 0xf5, 0x0e, 0xfa, 0x67, 0xb9, 0x08, 0x3c, 0xf6, 0x29,
0x67, 0x42, 0xe2, 0x13, 0xd8, 0x16, 0x32, 0x63, 0x34, 0x16, 0x03, 0x34, 0xec, 0x8e, 0xfa, 0xe3,
0x87, 0x76, 0xab, 0x60, 0xbf, 0x56, 0x17, 0x93, 0x19, 0x4d, 0x25, 0xcb, 0xdc, 0xa3, 0x1f, 0x85,
0xd9, 0xab, 0xa9, 0x55, 0x61, 0xb6, 0x55, 0x5e, 0x0b, 0xac, 0x7d, 0xd8, 0xab, 0x1b, 0x8b, 0x94,
0x27, 0x82, 0x59, 0xdf, 0x10, 0xdc, 0xfb, 0xa7, 0x03, 0xb6, 0xa0, 0x17, 0xd1, 0x29, 0x8b, 0x2a,
0x29, 0x34, 0xda, 0x75, 0x61, 0x55, 0x98, 0x0d, 0xe3, 0x35, 0x4f, 0x3c, 0x81, 0x6d, 0x96, 0xc8,
0x2c, 0x64, 0x62, 0xd0, 0x51, 0x7e, 0x8e, 0xd7, 0x7e, 0x5e, 0x26, 0x32, 0xbb, 0x6c, 0xed, 0xdc,
0xbf, 0x2e, 0x4c, 0xad, 0x32, 0xd2, 0xa4, 0x7b, 0x2d, 0xc0, 0x8f, 0x40, 0x0f, 0xa8, 0x08, 0x06,
0xdd, 0x21, 0x1a, 0xe9, 0xee, 0xd6, 0xaa, 0x30, 0xd1, 0x53, 0x4f, 0x51, 0xd6, 0x0b, 0x38, 0x38,
0xad, 0x74, 0xce, 0x68, 0x98, 0xb5, 0xae, 0x30, 0xe8, 0x09, 0x8d, 0x59, 0xed, 0xc9, 0x53, 0x18,
0x1f, 0xc2, 0xd6, 0x67, 0x1a, 0xe5, 0x6c, 0xd0, 0x51, 0x64, 0x1d, 0x58, 0x25, 0x82, 0xbd, 0xbb,
0x1e, 0xf0, 0x09, 0xec, 0xfe, 0x1d, 0xaf, 0xaa, 0xef, 0x8f, 0x0d, 0xbb, 0x5e, 0x80, 0xdd, 0x2e,
0xc0, 0x7e, 0xd3, 0x66, 0xb8, 0xfb, 0x8d, 0xe5, 0x8e, 0x14, 0x57, 0x3f, 0x4d, 0xe4, 0xad, 0x8b,
0xf1, 0x63, 0xd0, 0xa3, 0x30, 0x69, 0xf4, 0xdc, 0x9d, 0x55, 0x61, 0xaa, 0xd8, 0x53, 0x27, 0xfe,
0x08, 0x07, 0x09, 0x4f, 0x5e, 0x25, 0x33, 0xf6, 0x85, 0xcd, 0x4e, 0xeb, 0x11, 0x76, 0xd5, 0x74,
0x8c, 0xf5, 0x74, 0x36, 0x5f, 0xcc, 0xb5, 0x1a, 0x39, 0x63, 0xb3, 0xf6, 0x09, 0x8f, 0x43, 0xc9,
0xe2, 0x54, 0x5e, 0x7a, 0xff, 0xf5, 0x1d, 0x4f, 0xa0, 0x57, 0xad, 0x91, 0x65, 0xf8, 0x39, 0xe8,
0x15, 0xc2, 0x47, 0x6b, 0x8d, 0x3b, 0x5f, 0x8e, 0x71, 0xbc, 0x49, 0x37, 0x7b, 0xd7, 0xdc, 0xb7,
0xf3, 0x05, 0xd1, 0x6e, 0x16, 0x44, 0xbb, 0x5d, 0x10, 0xf4, 0xb5, 0x24, 0xe8, 0x7b, 0x49, 0xd0,
0x75, 0x49, 0xd0, 0xbc, 0x24, 0xe8, 0x57, 0x49, 0xd0, 0xef, 0x92, 0x68, 0xb7, 0x25, 0x41, 0x57,
0x4b, 0xa2, 0xcd, 0x97, 0x44, 0xbb, 0x59, 0x12, 0xed, 0xfd, 0xd0, 0x0f, 0x65, 0x90, 0x4f, 0xed,
0x0f, 0x3c, 0x76, 0xfc, 0x8c, 0x9e, 0xd3, 0x84, 0x3a, 0x11, 0xbf, 0x08, 0x9d, 0xf6, 0x37, 0x98,
0xf6, 0x94, 0xda, 0xb3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x6f, 0x19, 0xc8, 0x19, 0x03,
0x00, 0x00,
// 503 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x31, 0x6f, 0xd3, 0x40,
0x14, 0xf6, 0x25, 0x69, 0xda, 0x5e, 0x4a, 0x41, 0x47, 0x5b, 0x8c, 0x55, 0x9d, 0x23, 0x8b, 0x21,
0x03, 0xd8, 0x52, 0x18, 0x58, 0x58, 0x62, 0x09, 0xa9, 0x03, 0x48, 0x95, 0x41, 0x20, 0xb1, 0x5d,
0x9a, 0xab, 0x6d, 0xd5, 0xf6, 0x99, 0xbb, 0x33, 0x52, 0x37, 0x7e, 0x42, 0xf9, 0x17, 0xfc, 0x94,
0x8e, 0x19, 0x2b, 0x06, 0x43, 0x9c, 0xa5, 0xca, 0xd4, 0x9f, 0x80, 0x7c, 0xf6, 0x91, 0x52, 0xba,
0x9c, 0xbf, 0xf7, 0xdd, 0x7b, 0xef, 0xfb, 0xfc, 0x9e, 0x0d, 0x1f, 0xe7, 0x67, 0xa1, 0x97, 0x17,
0x22, 0x52, 0x87, 0x9b, 0x73, 0x26, 0x19, 0xda, 0x4a, 0x58, 0xa8, 0x90, 0xb5, 0x17, 0xb2, 0x90,
0x29, 0xe8, 0xd5, 0xa8, 0xb9, 0xb7, 0xec, 0x90, 0xb1, 0x30, 0xa1, 0x9e, 0x8a, 0xa6, 0xc5, 0xa9,
0x27, 0xe3, 0x94, 0x0a, 0x49, 0xd2, 0xbc, 0x49, 0x70, 0x3e, 0xc1, 0xc1, 0x71, 0x21, 0xa2, 0x80,
0x7e, 0x29, 0xa8, 0x90, 0xe8, 0x08, 0x6e, 0x0a, 0xc9, 0x29, 0x49, 0x85, 0x09, 0x86, 0xdd, 0xd1,
0x60, 0xfc, 0xc4, 0xd5, 0x0a, 0xee, 0x7b, 0x75, 0x31, 0x99, 0x91, 0x5c, 0x52, 0xee, 0xef, 0xff,
0x2c, 0xed, 0x7e, 0x43, 0xad, 0x4a, 0x5b, 0x57, 0x05, 0x1a, 0x38, 0xbb, 0x70, 0xa7, 0x69, 0x2c,
0x72, 0x96, 0x09, 0xea, 0x7c, 0x07, 0xf0, 0xc1, 0x3f, 0x1d, 0x90, 0x03, 0xfb, 0x09, 0x99, 0xd2,
0xa4, 0x96, 0x02, 0xa3, 0x6d, 0x1f, 0xae, 0x4a, 0xbb, 0x65, 0x82, 0xf6, 0x89, 0x26, 0x70, 0x93,
0x66, 0x92, 0xc7, 0x54, 0x98, 0x1d, 0xe5, 0xe7, 0x60, 0xed, 0xe7, 0x4d, 0x26, 0xf9, 0xb9, 0xb6,
0xf3, 0xf0, 0xb2, 0xb4, 0x8d, 0xda, 0x48, 0x9b, 0x1e, 0x68, 0x80, 0x9e, 0xc2, 0x5e, 0x44, 0x44,
0x64, 0x76, 0x87, 0x60, 0xd4, 0xf3, 0x37, 0x56, 0xa5, 0x0d, 0x5e, 0x04, 0x8a, 0x72, 0x5e, 0xc3,
0x47, 0x6f, 0x6b, 0x9d, 0x63, 0x12, 0x73, 0xed, 0x0a, 0xc1, 0x5e, 0x46, 0x52, 0xda, 0x78, 0x0a,
0x14, 0x46, 0x7b, 0x70, 0xe3, 0x2b, 0x49, 0x0a, 0x6a, 0x76, 0x14, 0xd9, 0x04, 0xce, 0x35, 0x80,
0x3b, 0xb7, 0x3d, 0xa0, 0x23, 0xb8, 0xfd, 0x77, 0xbc, 0xaa, 0x7e, 0x30, 0xb6, 0xdc, 0x66, 0x01,
0xae, 0x5e, 0x80, 0xfb, 0x41, 0x67, 0xf8, 0xbb, 0xad, 0xe5, 0x8e, 0x14, 0x17, 0xbf, 0x6c, 0x10,
0xac, 0x8b, 0xd1, 0x21, 0xec, 0x25, 0x71, 0xd6, 0xea, 0xf9, 0x5b, 0xab, 0xd2, 0x56, 0x71, 0xa0,
0x4e, 0x94, 0x43, 0x24, 0x24, 0x2f, 0x4e, 0x64, 0xc1, 0xe9, 0xec, 0x1d, 0x95, 0x64, 0x46, 0x24,
0x31, 0xbb, 0x6a, 0x3e, 0xd6, 0x7a, 0x3e, 0x77, 0x5f, 0xcd, 0x7f, 0xd6, 0x0a, 0x1e, 0xfe, 0x5f,
0xfd, 0x9c, 0xa5, 0xb1, 0xa4, 0x69, 0x2e, 0xcf, 0x83, 0x7b, 0x7a, 0x8f, 0x27, 0xb0, 0x5f, 0x2f,
0x93, 0x72, 0xf4, 0x0a, 0xf6, 0x6a, 0x84, 0xf6, 0xd7, 0x3a, 0xb7, 0xbe, 0x1f, 0xeb, 0xe0, 0x2e,
0xdd, 0x6e, 0xdf, 0xf0, 0x3f, 0xce, 0x17, 0xd8, 0xb8, 0x5a, 0x60, 0xe3, 0x66, 0x81, 0xc1, 0xb7,
0x0a, 0x83, 0x1f, 0x15, 0x06, 0x97, 0x15, 0x06, 0xf3, 0x0a, 0x83, 0xdf, 0x15, 0x06, 0xd7, 0x15,
0x36, 0x6e, 0x2a, 0x0c, 0x2e, 0x96, 0xd8, 0x98, 0x2f, 0xb1, 0x71, 0xb5, 0xc4, 0xc6, 0xe7, 0x61,
0x18, 0xcb, 0xa8, 0x98, 0xba, 0x27, 0x2c, 0xf5, 0x42, 0x4e, 0x4e, 0x49, 0x46, 0xbc, 0x84, 0x9d,
0xc5, 0x9e, 0xfe, 0x19, 0xa6, 0x7d, 0xa5, 0xf6, 0xf2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3a,
0x46, 0x64, 0x71, 0x1f, 0x03, 0x00, 0x00,
}
func (this *PushRequest) Equal(that interface{}) bool {
@ -457,11 +457,11 @@ func (this *EntryAdapter) Equal(that interface{}) bool {
if this.Line != that1.Line {
return false
}
if len(this.NonIndexedLabels) != len(that1.NonIndexedLabels) {
if len(this.StructuredMetadata) != len(that1.StructuredMetadata) {
return false
}
for i := range this.NonIndexedLabels {
if !this.NonIndexedLabels[i].Equal(&that1.NonIndexedLabels[i]) {
for i := range this.StructuredMetadata {
if !this.StructuredMetadata[i].Equal(&that1.StructuredMetadata[i]) {
return false
}
}
@ -523,12 +523,12 @@ func (this *EntryAdapter) GoString() string {
s = append(s, "&push.EntryAdapter{")
s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
s = append(s, "Line: "+fmt.Sprintf("%#v", this.Line)+",\n")
if this.NonIndexedLabels != nil {
vs := make([]*LabelPairAdapter, len(this.NonIndexedLabels))
if this.StructuredMetadata != nil {
vs := make([]*LabelPairAdapter, len(this.StructuredMetadata))
for i := range vs {
vs[i] = &this.NonIndexedLabels[i]
vs[i] = &this.StructuredMetadata[i]
}
s = append(s, "NonIndexedLabels: "+fmt.Sprintf("%#v", vs)+",\n")
s = append(s, "StructuredMetadata: "+fmt.Sprintf("%#v", vs)+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
@ -788,10 +788,10 @@ func (m *EntryAdapter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.NonIndexedLabels) > 0 {
for iNdEx := len(m.NonIndexedLabels) - 1; iNdEx >= 0; iNdEx-- {
if len(m.StructuredMetadata) > 0 {
for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.NonIndexedLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
size, err := m.StructuredMetadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@ -906,8 +906,8 @@ func (m *EntryAdapter) Size() (n int) {
if l > 0 {
n += 1 + l + sovPush(uint64(l))
}
if len(m.NonIndexedLabels) > 0 {
for _, e := range m.NonIndexedLabels {
if len(m.StructuredMetadata) > 0 {
for _, e := range m.StructuredMetadata {
l = e.Size()
n += 1 + l + sovPush(uint64(l))
}
@ -972,15 +972,15 @@ func (this *EntryAdapter) String() string {
if this == nil {
return "nil"
}
repeatedStringForNonIndexedLabels := "[]LabelPairAdapter{"
for _, f := range this.NonIndexedLabels {
repeatedStringForNonIndexedLabels += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + ","
repeatedStringForStructuredMetadata := "[]LabelPairAdapter{"
for _, f := range this.StructuredMetadata {
repeatedStringForStructuredMetadata += strings.Replace(strings.Replace(f.String(), "LabelPairAdapter", "LabelPairAdapter", 1), `&`, ``, 1) + ","
}
repeatedStringForNonIndexedLabels += "}"
repeatedStringForStructuredMetadata += "}"
s := strings.Join([]string{`&EntryAdapter{`,
`Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`Line:` + fmt.Sprintf("%v", this.Line) + `,`,
`NonIndexedLabels:` + repeatedStringForNonIndexedLabels + `,`,
`StructuredMetadata:` + repeatedStringForStructuredMetadata + `,`,
`}`,
}, "")
return s
@ -1484,7 +1484,7 @@ func (m *EntryAdapter) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NonIndexedLabels", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field StructuredMetadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@ -1511,8 +1511,8 @@ func (m *EntryAdapter) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.NonIndexedLabels = append(m.NonIndexedLabels, LabelPairAdapter{})
if err := m.NonIndexedLabels[len(m.NonIndexedLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
m.StructuredMetadata = append(m.StructuredMetadata, LabelPairAdapter{})
if err := m.StructuredMetadata[len(m.StructuredMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex

@ -42,8 +42,8 @@ message EntryAdapter {
(gogoproto.jsontag) = "ts"
];
string line = 2 [(gogoproto.jsontag) = "line"];
repeated LabelPairAdapter nonIndexedLabels = 3 [
repeated LabelPairAdapter structuredMetadata = 3 [
(gogoproto.nullable) = false,
(gogoproto.jsontag) = "nonIndexedLabels,omitempty"
(gogoproto.jsontag) = "structuredMetadata,omitempty"
];
}

@ -22,9 +22,9 @@ type Stream struct {
// Entry is a log entry with a timestamp.
type Entry struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
NonIndexedLabels LabelsAdapter `protobuf:"bytes,3,opt,name=nonIndexedLabels,proto3" json:"nonIndexedLabels,omitempty"`
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
StructuredMetadata LabelsAdapter `protobuf:"bytes,3,opt,name=structuredMetadata,proto3" json:"structuredMetadata,omitempty"`
}
// LabelAdapter should be a copy of the Prometheus labels.Label type.
@ -172,10 +172,10 @@ func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.NonIndexedLabels) > 0 {
for iNdEx := len(m.NonIndexedLabels) - 1; iNdEx >= 0; iNdEx-- {
if len(m.StructuredMetadata) > 0 {
for iNdEx := len(m.StructuredMetadata) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := (*LabelAdapter)(&m.NonIndexedLabels[iNdEx]).MarshalToSizedBuffer(dAtA[:i])
size, err := (*LabelAdapter)(&m.StructuredMetadata[iNdEx]).MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@ -235,7 +235,7 @@ func (m *Stream) Unmarshal(dAtA []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NonIndexedLabels", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field StructuredMetadata", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -439,7 +439,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NonIndexedLabels", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field StructuredMetadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@ -466,8 +466,8 @@ func (m *Entry) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.NonIndexedLabels = append(m.NonIndexedLabels, LabelAdapter{})
if err := m.NonIndexedLabels[len(m.NonIndexedLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
m.StructuredMetadata = append(m.StructuredMetadata, LabelAdapter{})
if err := m.StructuredMetadata[len(m.StructuredMetadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@ -655,8 +655,8 @@ func (m *Entry) Size() (n int) {
if l > 0 {
n += 1 + l + sovPush(uint64(l))
}
if len(m.NonIndexedLabels) > 0 {
for _, e := range m.NonIndexedLabels {
if len(m.StructuredMetadata) > 0 {
for _, e := range m.StructuredMetadata {
l = e.Size()
n += 1 + l + sovPush(uint64(l))
}
@ -739,8 +739,8 @@ func (m *Entry) Equal(that interface{}) bool {
if m.Line != that1.Line {
return false
}
for i := range m.NonIndexedLabels {
if !m.NonIndexedLabels[i].Equal(that1.NonIndexedLabels[i]) {
for i := range m.StructuredMetadata {
if !m.StructuredMetadata[i].Equal(that1.StructuredMetadata[i]) {
return false
}
}

Loading…
Cancel
Save