Remove `cortexpb` dependency from Loki packages. Use only `logproto` now. (#5264)

* Getting rid of `cortexpb`

Signed-off-by: Kaviraj <kavirajkanagaraj@gmail.com>

* Get rid of `cortex/pkg/tenant`

Signed-off-by: Kaviraj <kavirajkanagaraj@gmail.com>

* Fix linter errors

Signed-off-by: Kaviraj <kavirajkanagaraj@gmail.com>

* go mod vendor

Signed-off-by: Kaviraj <kavirajkanagaraj@gmail.com>

* Rename some import aliases

Signed-off-by: Kaviraj <kavirajkanagaraj@gmail.com>
pull/5268/head
Authored by Kaviraj Kanagaraj, committed via GitHub
parent 09a5a2cee3
commit f083aab09d
Changed files (26):
1. pkg/ingester/client/compat.go (19 changed lines)
2. pkg/ingester/client/fnv.go (10 changed lines)
3. pkg/ingester/instance.go (4 changed lines)
4. pkg/querier/querier.go (4 changed lines)
5. pkg/querier/queryrange/roundtrip.go (3 changed lines)
6. pkg/storage/stores/shipper/downloads/index_set.go (2 changed lines)
7. pkg/storage/stores/shipper/downloads/table.go (2 changed lines)
8. pkg/storage/stores/shipper/uploads/table.go (2 changed lines)
9. pkg/util/extract/extract.go (5 changed lines)
10. pkg/util/http_test.go (23 changed lines)
11. pkg/util/limiter/query_limiter.go (7 changed lines)
12. pkg/util/validation/errors.go (67 changed lines)
13. pkg/util/validation/validate.go (59 changed lines)
14. vendor/github.com/cortexproject/cortex/pkg/cortexpb/compat.go (267 changed lines)
15. vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go (2647 changed lines)
16. vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto (70 changed lines)
17. vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go (309 changed lines)
18. vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go (67 changed lines)
19. vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go (262 changed lines)
20. vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go (41 changed lines)
21. vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go (29 changed lines)
22. vendor/github.com/cortexproject/cortex/pkg/ingester/client/dep.go (6 changed lines)
23. vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go (99 changed lines)
24. vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go (7584 changed lines)
25. vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto (159 changed lines)
26. vendor/modules.txt (2 changed lines)
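
Before the per-file diffs, a minimal sketch of the shape of the change: call sites that previously took `cortexpb.LabelAdapter` now take `logproto.LabelAdapter`, with the equivalent helpers coming from `logproto`. The `describeSeries` helper below is hypothetical; only the types and `FromLabelAdaptersToMetric` are taken from the diffs that follow.

```go
package example

import (
	"fmt"

	// was: "github.com/cortexproject/cortex/pkg/cortexpb"
	"github.com/grafana/loki/pkg/logproto"
)

// describeSeries is a hypothetical caller showing the re-typed API surface:
// the label-adapter slice and the metric formatter both come from logproto now.
func describeSeries(series []logproto.LabelAdapter) string {
	return fmt.Sprintf("%d labels: %s",
		len(series), logproto.FromLabelAdaptersToMetric(series).String())
}
```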

@ -3,6 +3,8 @@ package client
import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/pkg/logproto"
)
const (
@ -27,6 +29,23 @@ func LabelsToKeyString(l labels.Labels) string {
return string(l.Bytes(b))
}
// FastFingerprint runs the same algorithm as Prometheus labelSetToFastFingerprint()
func FastFingerprint(ls []logproto.LabelAdapter) model.Fingerprint {
if len(ls) == 0 {
return model.Metric(nil).FastFingerprint()
}
var result uint64
for _, l := range ls {
sum := hashNew()
sum = hashAdd(sum, l.Name)
sum = hashAddByte(sum, model.SeparatorByte)
sum = hashAdd(sum, l.Value)
result ^= sum
}
return model.Fingerprint(result)
}
// Fingerprint runs the same algorithm as Prometheus labelSetToFingerprint()
func Fingerprint(labels labels.Labels) model.Fingerprint {
sum := hashNew()
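
A hedged usage sketch for the `FastFingerprint` helper added above; the call site is illustrative and not part of this change.

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/ingester/client"
	"github.com/grafana/loki/pkg/logproto"
)

func main() {
	series := []logproto.LabelAdapter{
		{Name: "__name__", Value: "http_requests_total"},
		{Name: "job", Value: "loki"},
	}
	// XOR of per-label FNV-1a sums, mirroring Prometheus' labelSetToFastFingerprint.
	fmt.Println(client.FastFingerprint(series))
}
```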

@ -15,3 +15,13 @@ func hashAddByte(h uint64, b byte) uint64 {
h *= prime64
return h
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
// Note this is the same algorithm as Go stdlib `sum64a.Write()`
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
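
Since the comment claims parity with the standard library, here is a small test sketch of that claim (hypothetical; `hashNew`/`hashAdd` are unexported, so it would have to live inside `pkg/ingester/client`).

```go
package client

import (
	"hash/fnv"
	"testing"
)

// TestHashAddMatchesFNV64a checks the claim above: hashAdd over hashNew
// produces the same value as hash/fnv's 64-bit FNV-1a for the same input.
func TestHashAddMatchesFNV64a(t *testing.T) {
	got := hashAdd(hashNew(), "loki")

	std := fnv.New64a()
	_, _ = std.Write([]byte("loki"))

	if got != std.Sum64() {
		t.Fatalf("fnv-1a mismatch: got %d, want %d", got, std.Sum64())
	}
}
```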

@ -26,7 +26,7 @@ import (
"github.com/grafana/loki/pkg/querier/astmapper"
"github.com/grafana/loki/pkg/runtime"
"github.com/grafana/loki/pkg/storage"
cutil "github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
"github.com/grafana/loki/pkg/util/math"
"github.com/grafana/loki/pkg/validation"
@ -538,7 +538,7 @@ func (i *instance) forMatchingStreams(
shards *astmapper.ShardAnnotation,
fn func(*stream) error,
) error {
filters, matchers := cutil.SplitFiltersAndMatchers(matchers)
filters, matchers := util.SplitFiltersAndMatchers(matchers)
ids, err := i.index.Lookup(matchers, shards)
if err != nil {
return err

@ -21,7 +21,7 @@ import (
listutil "github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
"github.com/grafana/loki/pkg/util/spanlogger"
cortex_validation "github.com/grafana/loki/pkg/util/validation"
util_validation "github.com/grafana/loki/pkg/util/validation"
"github.com/grafana/loki/pkg/validation"
)
@ -537,7 +537,7 @@ func validateQueryTimeRangeLimits(ctx context.Context, userID string, limits tim
}
if maxQueryLength := limits.MaxQueryLength(userID); maxQueryLength > 0 && (through).Sub(from) > maxQueryLength {
return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, cortex_validation.ErrQueryTooLong, (through).Sub(from), maxQueryLength)
return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, util_validation.ErrQueryTooLong, (through).Sub(from), maxQueryLength)
}
if through.Before(from) {
return time.Time{}, time.Time{}, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, through < from (%s < %s)", through, from)

@ -6,8 +6,6 @@ import (
"strings"
"time"
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/model/labels"
@ -18,6 +16,7 @@ import (
"github.com/grafana/loki/pkg/querier/queryrange/queryrangebase"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/chunk/cache"
"github.com/grafana/loki/pkg/tenant"
)
// Config is the configuration for the queryrange tripperware

@ -10,7 +10,6 @@ import (
"sync"
"time"
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
@ -21,6 +20,7 @@ import (
chunk_util "github.com/grafana/loki/pkg/storage/chunk/util"
"github.com/grafana/loki/pkg/storage/stores/shipper/storage"
shipper_util "github.com/grafana/loki/pkg/storage/stores/shipper/util"
"github.com/grafana/loki/pkg/tenant"
util_log "github.com/grafana/loki/pkg/util/log"
"github.com/grafana/loki/pkg/util/spanlogger"
)

@ -9,7 +9,6 @@ import (
"sync"
"time"
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
@ -19,6 +18,7 @@ import (
"github.com/grafana/loki/pkg/storage/chunk"
chunk_util "github.com/grafana/loki/pkg/storage/chunk/util"
"github.com/grafana/loki/pkg/storage/stores/shipper/storage"
"github.com/grafana/loki/pkg/tenant"
util_log "github.com/grafana/loki/pkg/util/log"
)

@ -13,7 +13,6 @@ import (
"sync"
"time"
"github.com/cortexproject/cortex/pkg/tenant"
"github.com/go-kit/log/level"
"go.etcd.io/bbolt"
@ -22,6 +21,7 @@ import (
"github.com/grafana/loki/pkg/storage/chunk/local"
chunk_util "github.com/grafana/loki/pkg/storage/chunk/util"
shipper_util "github.com/grafana/loki/pkg/storage/stores/shipper/util"
"github.com/grafana/loki/pkg/tenant"
util_log "github.com/grafana/loki/pkg/util/log"
)
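
The four files above only swap the `tenant` import from the cortex module to the in-repo copy. A hedged sketch of the kind of call site affected, assuming `pkg/tenant` keeps the cortex `TenantID(ctx)` signature:

```go
package example

import (
	"context"
	"fmt"

	// was: "github.com/cortexproject/cortex/pkg/tenant"
	"github.com/grafana/loki/pkg/tenant"
)

// userIDFromContext is a hypothetical helper; it resolves the tenant (user)
// ID that the shipper and query-range code paths key their work on.
func userIDFromContext(ctx context.Context) (string, error) {
	userID, err := tenant.TenantID(ctx)
	if err != nil {
		return "", fmt.Errorf("resolving tenant: %w", err)
	}
	return userID, nil
}
```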

@ -3,9 +3,10 @@ package extract
import (
"fmt"
"github.com/cortexproject/cortex/pkg/cortexpb"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/grafana/loki/pkg/logproto"
)
var (
@ -37,7 +38,7 @@ func MetricNameMatcherFromMatchers(matchers []*labels.Matcher) (*labels.Matcher,
// UnsafeMetricNameFromLabelAdapters extracts the metric name from a list of LabelPairs.
// The returned metric name string is a reference to the label value (no copy).
func UnsafeMetricNameFromLabelAdapters(labels []cortexpb.LabelAdapter) (string, error) {
func UnsafeMetricNameFromLabelAdapters(labels []logproto.LabelAdapter) (string, error) {
for _, label := range labels {
if label.Name == model.MetricNameLabel {
return label.Value, nil
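
An illustrative call for the re-typed extractor (not part of the diff); note the doc comment's caveat that the returned name aliases the label value rather than copying it.

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/util/extract"
)

func main() {
	ls := []logproto.LabelAdapter{
		{Name: "__name__", Value: "request_duration_seconds"},
		{Name: "status_code", Value: "200"},
	}
	name, err := extract.UnsafeMetricNameFromLabelAdapters(ls)
	fmt.Println(name, err) // request_duration_seconds <nil>
}
```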

@ -11,11 +11,11 @@ import (
"strconv"
"testing"
"github.com/cortexproject/cortex/pkg/cortexpb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
)
@ -140,20 +140,19 @@ func TestStreamWriteYAMLResponse(t *testing.T) {
func TestParseProtoReader(t *testing.T) {
// 47 bytes compressed and 53 uncompressed
req := &cortexpb.PreallocWriteRequest{
WriteRequest: cortexpb.WriteRequest{
Timeseries: []cortexpb.PreallocTimeseries{
req := &logproto.PreallocWriteRequest{
WriteRequest: logproto.WriteRequest{
Timeseries: []logproto.PreallocTimeseries{
{
TimeSeries: &cortexpb.TimeSeries{
Labels: []cortexpb.LabelAdapter{
TimeSeries: &logproto.TimeSeries{
Labels: []logproto.LabelAdapter{
{Name: "foo", Value: "bar"},
},
Samples: []cortexpb.Sample{
{Value: 10, TimestampMs: 1},
{Value: 20, TimestampMs: 2},
{Value: 30, TimestampMs: 3},
Samples: []logproto.Sample{
{Value: 10, Timestamp: 1},
{Value: 20, Timestamp: 2},
{Value: 30, Timestamp: 3},
},
Exemplars: []cortexpb.Exemplar{},
},
},
},
@ -182,7 +181,7 @@ func TestParseProtoReader(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
w := httptest.NewRecorder()
assert.Nil(t, util.SerializeProtoResponse(w, req, tt.compression))
var fromWire cortexpb.PreallocWriteRequest
var fromWire logproto.PreallocWriteRequest
reader := w.Result().Body
if tt.useBytesBuffer {

@ -5,10 +5,11 @@ import (
"fmt"
"sync"
"github.com/cortexproject/cortex/pkg/cortexpb"
"github.com/cortexproject/cortex/pkg/ingester/client"
"github.com/prometheus/common/model"
"go.uber.org/atomic"
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/logproto"
)
type queryLimiterCtxKey struct{}
@ -61,7 +62,7 @@ func QueryLimiterFromContextWithFallback(ctx context.Context) *QueryLimiter {
}
// AddSeries adds the input series and returns an error if the limit is reached.
func (ql *QueryLimiter) AddSeries(seriesLabels []cortexpb.LabelAdapter) error {
func (ql *QueryLimiter) AddSeries(seriesLabels []logproto.LabelAdapter) error {
// If the max series is unlimited just return without managing map
if ql.maxSeriesPerQuery == 0 {
return nil
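
A hedged caller-side sketch for the limiter after the type change; only `QueryLimiterFromContextWithFallback` and `AddSeries` are taken from this diff, the wrapper itself is hypothetical.

```go
package example

import (
	"context"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/util/limiter"
)

// trackSeries records one series against the per-query limit; AddSeries is a
// no-op when maxSeriesPerQuery is 0 and errors once the limit is exceeded.
func trackSeries(ctx context.Context, series []logproto.LabelAdapter) error {
	ql := limiter.QueryLimiterFromContextWithFallback(ctx)
	return ql.AddSeries(series)
}
```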

@ -2,11 +2,11 @@ package validation
import (
"fmt"
"strconv"
"strings"
"github.com/cortexproject/cortex/pkg/cortexpb"
"github.com/prometheus/common/model"
"github.com/grafana/loki/pkg/logproto"
)
// ValidationError is an error returned by series validation.
@ -19,14 +19,14 @@ type ValidationError error
type genericValidationError struct {
message string
cause string
series []cortexpb.LabelAdapter
series []logproto.LabelAdapter
}
func (e *genericValidationError) Error() string {
return fmt.Sprintf(e.message, e.cause, formatLabelSet(e.series))
}
func newLabelNameTooLongError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
func newLabelNameTooLongError(series []logproto.LabelAdapter, labelName string) ValidationError {
return &genericValidationError{
message: "label name too long: %.200q metric %.200q",
cause: labelName,
@ -38,21 +38,21 @@ func newLabelNameTooLongError(series []cortexpb.LabelAdapter, labelName string)
// are formatted in different order in Error.
type labelValueTooLongError struct {
labelValue string
series []cortexpb.LabelAdapter
series []logproto.LabelAdapter
}
func (e *labelValueTooLongError) Error() string {
return fmt.Sprintf("label value too long for metric: %.200q label value: %.200q", formatLabelSet(e.series), e.labelValue)
}
func newLabelValueTooLongError(series []cortexpb.LabelAdapter, labelValue string) ValidationError {
func newLabelValueTooLongError(series []logproto.LabelAdapter, labelValue string) ValidationError {
return &labelValueTooLongError{
labelValue: labelValue,
series: series,
}
}
func newInvalidLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
func newInvalidLabelError(series []logproto.LabelAdapter, labelName string) ValidationError {
return &genericValidationError{
message: "sample invalid label: %.200q metric %.200q",
cause: labelName,
@ -60,7 +60,7 @@ func newInvalidLabelError(series []cortexpb.LabelAdapter, labelName string) Vali
}
}
func newDuplicatedLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
func newDuplicatedLabelError(series []logproto.LabelAdapter, labelName string) ValidationError {
return &genericValidationError{
message: "duplicate label name: %.200q metric %.200q",
cause: labelName,
@ -68,7 +68,7 @@ func newDuplicatedLabelError(series []cortexpb.LabelAdapter, labelName string) V
}
}
func newLabelsNotSortedError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
func newLabelsNotSortedError(series []logproto.LabelAdapter, labelName string) ValidationError {
return &genericValidationError{
message: "labels not sorted: %.200q metric %.200q",
cause: labelName,
@ -77,11 +77,11 @@ func newLabelsNotSortedError(series []cortexpb.LabelAdapter, labelName string) V
}
type tooManyLabelsError struct {
series []cortexpb.LabelAdapter
series []logproto.LabelAdapter
limit int
}
func newTooManyLabelsError(series []cortexpb.LabelAdapter, limit int) ValidationError {
func newTooManyLabelsError(series []logproto.LabelAdapter, limit int) ValidationError {
return &tooManyLabelsError{
series: series,
limit: limit,
@ -91,7 +91,7 @@ func newTooManyLabelsError(series []cortexpb.LabelAdapter, limit int) Validation
func (e *tooManyLabelsError) Error() string {
return fmt.Sprintf(
"series has too many labels (actual: %d, limit: %d) series: '%s'",
len(e.series), e.limit, cortexpb.FromLabelAdaptersToMetric(e.series).String())
len(e.series), e.limit, logproto.FromLabelAdaptersToMetric(e.series).String())
}
type noMetricNameError struct{}
@ -145,51 +145,10 @@ func newSampleTimestampTooNewError(metricName string, timestamp int64) Validatio
}
}
// exemplarValidationError is a ValidationError implementation suitable for exemplar validation errors.
type exemplarValidationError struct {
message string
seriesLabels []cortexpb.LabelAdapter
exemplarLabels []cortexpb.LabelAdapter
timestamp int64
}
func (e *exemplarValidationError) Error() string {
return fmt.Sprintf(e.message, e.timestamp, cortexpb.FromLabelAdaptersToLabels(e.seriesLabels).String(), cortexpb.FromLabelAdaptersToLabels(e.exemplarLabels).String())
}
func newExemplarEmtpyLabelsError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError {
return &exemplarValidationError{
message: "exemplar missing labels, timestamp: %d series: %s labels: %s",
seriesLabels: seriesLabels,
exemplarLabels: exemplarLabels,
timestamp: timestamp,
}
}
func newExemplarMissingTimestampError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError {
return &exemplarValidationError{
message: "exemplar missing timestamp, timestamp: %d series: %s labels: %s",
seriesLabels: seriesLabels,
exemplarLabels: exemplarLabels,
timestamp: timestamp,
}
}
var labelLenMsg = "exemplar combined labelset exceeds " + strconv.Itoa(ExemplarMaxLabelSetLength) + " characters, timestamp: %d series: %s labels: %s"
func newExemplarLabelLengthError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError {
return &exemplarValidationError{
message: labelLenMsg,
seriesLabels: seriesLabels,
exemplarLabels: exemplarLabels,
timestamp: timestamp,
}
}
// formatLabelSet formats label adapters as a metric name with labels, while preserving
// label order, and keeping duplicates. If there are multiple "__name__" labels, only
// first one is used as metric name, other ones will be included as regular labels.
func formatLabelSet(ls []cortexpb.LabelAdapter) string {
func formatLabelSet(ls []logproto.LabelAdapter) string {
metricName, hasMetricName := "", false
labelStrings := make([]string, 0, len(ls))
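
The error formatting above now leans on `logproto.FromLabelAdaptersToMetric`; an illustrative call follows (fine for error paths, not for hot paths).

```go
package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logproto"
)

func main() {
	series := []logproto.LabelAdapter{
		{Name: "__name__", Value: "up"},
		{Name: "instance", Value: "localhost:9090"},
	}
	// Prints something like: up{instance="localhost:9090"}
	fmt.Println(logproto.FromLabelAdaptersToMetric(series).String())
}
```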

@ -4,15 +4,14 @@ import (
"net/http"
"strings"
"time"
"unicode/utf8"
"github.com/cortexproject/cortex/pkg/cortexpb"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/weaveworks/common/httpgrpc"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/util"
"github.com/grafana/loki/pkg/util/extract"
)
@ -45,11 +44,6 @@ const (
labelsNotSorted = "labels_not_sorted"
labelValueTooLong = "label_value_too_long"
// Exemplar-specific validation reasons
exemplarLabelsMissing = "exemplar_labels_missing"
exemplarLabelsTooLong = "exemplar_labels_too_long"
exemplarTimestampInvalid = "exemplar_timestamp_invalid"
// RateLimited is one of the values for the reason to discard samples.
// Declared here to avoid duplication in ingester and distributor.
RateLimited = "rate_limited"
@ -109,54 +103,17 @@ type SampleValidationConfig interface {
// ValidateSample returns an err if the sample is invalid.
// The returned error may retain the provided series labels.
func ValidateSample(cfg SampleValidationConfig, userID string, ls []cortexpb.LabelAdapter, s cortexpb.Sample) ValidationError {
func ValidateSample(cfg SampleValidationConfig, userID string, ls []logproto.LabelAdapter, s logproto.Sample) ValidationError {
unsafeMetricName, _ := extract.UnsafeMetricNameFromLabelAdapters(ls)
if cfg.RejectOldSamples(userID) && model.Time(s.TimestampMs) < model.Now().Add(-cfg.RejectOldSamplesMaxAge(userID)) {
if cfg.RejectOldSamples(userID) && model.Time(s.Timestamp) < model.Now().Add(-cfg.RejectOldSamplesMaxAge(userID)) {
DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
return newSampleTimestampTooOldError(unsafeMetricName, s.TimestampMs)
return newSampleTimestampTooOldError(unsafeMetricName, s.Timestamp)
}
if model.Time(s.TimestampMs) > model.Now().Add(cfg.CreationGracePeriod(userID)) {
if model.Time(s.Timestamp) > model.Now().Add(cfg.CreationGracePeriod(userID)) {
DiscardedSamples.WithLabelValues(tooFarInFuture, userID).Inc()
return newSampleTimestampTooNewError(unsafeMetricName, s.TimestampMs)
}
return nil
}
// ValidateExemplar returns an error if the exemplar is invalid.
// The returned error may retain the provided series labels.
func ValidateExemplar(userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exemplar) ValidationError {
if len(e.Labels) <= 0 {
DiscardedExemplars.WithLabelValues(exemplarLabelsMissing, userID).Inc()
return newExemplarEmtpyLabelsError(ls, []cortexpb.LabelAdapter{}, e.TimestampMs)
}
if e.TimestampMs == 0 {
DiscardedExemplars.WithLabelValues(exemplarTimestampInvalid, userID).Inc()
return newExemplarMissingTimestampError(
ls,
e.Labels,
e.TimestampMs,
)
}
// Exemplar label length does not include chars involved in text
// rendering such as quotes, commas, etc. See spec and const definition.
labelSetLen := 0
for _, l := range e.Labels {
labelSetLen += utf8.RuneCountInString(l.Name)
labelSetLen += utf8.RuneCountInString(l.Value)
}
if labelSetLen > ExemplarMaxLabelSetLength {
DiscardedExemplars.WithLabelValues(exemplarLabelsTooLong, userID).Inc()
return newExemplarLabelLengthError(
ls,
e.Labels,
e.TimestampMs,
)
return newSampleTimestampTooNewError(unsafeMetricName, s.Timestamp)
}
return nil
@ -172,7 +129,7 @@ type LabelValidationConfig interface {
// ValidateLabels returns an err if the labels are invalid.
// The returned error may retain the provided series labels.
func ValidateLabels(cfg LabelValidationConfig, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool) ValidationError {
func ValidateLabels(cfg LabelValidationConfig, userID string, ls []logproto.LabelAdapter, skipLabelNameValidation bool) ValidationError {
if cfg.EnforceMetricName(userID) {
unsafeMetricName, err := extract.UnsafeMetricNameFromLabelAdapters(ls)
if err != nil {
@ -227,7 +184,7 @@ type MetadataValidationConfig interface {
}
// ValidateMetadata returns an err if a metric metadata is invalid.
func ValidateMetadata(cfg MetadataValidationConfig, userID string, metadata *cortexpb.MetricMetadata) error {
func ValidateMetadata(cfg MetadataValidationConfig, userID string, metadata *logproto.MetricMetadata) error {
if cfg.EnforceMetadataMetricName(userID) && metadata.GetMetricFamilyName() == "" {
DiscardedMetadata.WithLabelValues(missingMetricName, userID).Inc()
return httpgrpc.Errorf(http.StatusBadRequest, errMetadataMissingMetricName)
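
A hedged sketch of a caller after the signature change, assuming `SampleValidationConfig` consists of exactly the three methods used above; the stub limits type is hypothetical. Note the field rename on the sample: `Timestamp` instead of `TimestampMs`.

```go
package example

import (
	"time"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/util/validation"
)

// staticLimits is a hypothetical stand-in for the per-tenant limits,
// implementing only the methods this diff shows being called.
type staticLimits struct{}

func (staticLimits) RejectOldSamples(string) bool                { return true }
func (staticLimits) RejectOldSamplesMaxAge(string) time.Duration { return 14 * 24 * time.Hour }
func (staticLimits) CreationGracePeriod(string) time.Duration    { return 10 * time.Minute }

// checkSample validates a single sample for a tenant using the re-typed API.
func checkSample(userID string, ls []logproto.LabelAdapter, s logproto.Sample) error {
	if err := validation.ValidateSample(staticLimits{}, userID, ls, s); err != nil {
		return err
	}
	return nil
}
```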

@ -1,267 +0,0 @@
package cortexpb
import (
stdjson "encoding/json"
"fmt"
"math"
"sort"
"strconv"
"strings"
"time"
"unsafe"
jsoniter "github.com/json-iterator/go"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse"
"github.com/cortexproject/cortex/pkg/util"
)
// ToWriteRequest converts matched slices of Labels, Samples and Metadata into a WriteRequest proto.
// It gets timeseries from the pool, so ReuseSlice() should be called when done.
func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, source WriteRequest_SourceEnum) *WriteRequest {
req := &WriteRequest{
Timeseries: PreallocTimeseriesSliceFromPool(),
Metadata: metadata,
Source: source,
}
for i, s := range samples {
ts := TimeseriesFromPool()
ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...)
ts.Samples = append(ts.Samples, s)
req.Timeseries = append(req.Timeseries, PreallocTimeseries{TimeSeries: ts})
}
return req
}
// FromLabelAdaptersToLabels casts []LabelAdapter to labels.Labels.
// It uses unsafe, but as LabelAdapter == labels.Label this should be safe.
// This allows us to use labels.Labels directly in protos.
//
// Note: while resulting labels.Labels is supposedly sorted, this function
// doesn't enforce that. If input is not sorted, output will be wrong.
func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels {
return *(*labels.Labels)(unsafe.Pointer(&ls))
}
// FromLabelAdaptersToLabelsWithCopy converts []LabelAdapter to labels.Labels.
// Do NOT use unsafe to convert between data types because this function may
// get in input labels whose data structure is reused.
func FromLabelAdaptersToLabelsWithCopy(input []LabelAdapter) labels.Labels {
return CopyLabels(FromLabelAdaptersToLabels(input))
}
// Efficiently copies labels input slice. To be used in cases where input slice
// can be reused, but long-term copy is needed.
func CopyLabels(input []labels.Label) labels.Labels {
result := make(labels.Labels, len(input))
size := 0
for _, l := range input {
size += len(l.Name)
size += len(l.Value)
}
// Copy all strings into the buffer, and use 'yoloString' to convert buffer
// slices to strings.
buf := make([]byte, size)
for i, l := range input {
result[i].Name, buf = copyStringToBuffer(l.Name, buf)
result[i].Value, buf = copyStringToBuffer(l.Value, buf)
}
return result
}
// Copies string to buffer (which must be big enough), and converts buffer slice containing
// the string copy into new string.
func copyStringToBuffer(in string, buf []byte) (string, []byte) {
l := len(in)
c := copy(buf, in)
if c != l {
panic("not copied full string")
}
return yoloString(buf[0:l]), buf[l:]
}
// FromLabelsToLabelAdapters casts labels.Labels to []LabelAdapter.
// It uses unsafe, but as LabelAdapter == labels.Label this should be safe.
// This allows us to use labels.Labels directly in protos.
func FromLabelsToLabelAdapters(ls labels.Labels) []LabelAdapter {
return *(*[]LabelAdapter)(unsafe.Pointer(&ls))
}
// FromLabelAdaptersToMetric converts []LabelAdapter to a model.Metric.
// Don't do this on any performance sensitive paths.
func FromLabelAdaptersToMetric(ls []LabelAdapter) model.Metric {
return util.LabelsToMetric(FromLabelAdaptersToLabels(ls))
}
// FromMetricsToLabelAdapters converts model.Metric to []LabelAdapter.
// Don't do this on any performance sensitive paths.
// The result is sorted.
func FromMetricsToLabelAdapters(metric model.Metric) []LabelAdapter {
result := make([]LabelAdapter, 0, len(metric))
for k, v := range metric {
result = append(result, LabelAdapter{
Name: string(k),
Value: string(v),
})
}
sort.Sort(byLabel(result)) // The labels should be sorted upon initialisation.
return result
}
func FromExemplarsToExemplarProtos(es []exemplar.Exemplar) []Exemplar {
result := make([]Exemplar, 0, len(es))
for _, e := range es {
result = append(result, Exemplar{
Labels: FromLabelsToLabelAdapters(e.Labels),
Value: e.Value,
TimestampMs: e.Ts,
})
}
return result
}
func FromExemplarProtosToExemplars(es []Exemplar) []exemplar.Exemplar {
result := make([]exemplar.Exemplar, 0, len(es))
for _, e := range es {
result = append(result, exemplar.Exemplar{
Labels: FromLabelAdaptersToLabels(e.Labels),
Value: e.Value,
Ts: e.TimestampMs,
})
}
return result
}
type byLabel []LabelAdapter
func (s byLabel) Len() int { return len(s) }
func (s byLabel) Less(i, j int) bool { return strings.Compare(s[i].Name, s[j].Name) < 0 }
func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// MetricMetadataMetricTypeToMetricType converts a metric type from our internal client
// to a Prometheus one.
func MetricMetadataMetricTypeToMetricType(mt MetricMetadata_MetricType) textparse.MetricType {
switch mt {
case UNKNOWN:
return textparse.MetricTypeUnknown
case COUNTER:
return textparse.MetricTypeCounter
case GAUGE:
return textparse.MetricTypeGauge
case HISTOGRAM:
return textparse.MetricTypeHistogram
case GAUGEHISTOGRAM:
return textparse.MetricTypeGaugeHistogram
case SUMMARY:
return textparse.MetricTypeSummary
case INFO:
return textparse.MetricTypeInfo
case STATESET:
return textparse.MetricTypeStateset
default:
return textparse.MetricTypeUnknown
}
}
// isTesting is only set from tests to get special behaviour to verify that custom sample encode and decode is used,
// both when using jsonitor or standard json package.
var isTesting = false
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
if isTesting && math.IsNaN(s.Value) {
return nil, fmt.Errorf("test sample")
}
t, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(model.Time(s.TimestampMs))
if err != nil {
return nil, err
}
v, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(model.SampleValue(s.Value))
if err != nil {
return nil, err
}
return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
var t model.Time
var v model.SampleValue
vs := [...]stdjson.Unmarshaler{&t, &v}
if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(b, &vs); err != nil {
return err
}
s.TimestampMs = int64(t)
s.Value = float64(v)
if isTesting && math.IsNaN(float64(v)) {
return fmt.Errorf("test sample")
}
return nil
}
func SampleJsoniterEncode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
sample := (*Sample)(ptr)
if isTesting && math.IsNaN(sample.Value) {
stream.Error = fmt.Errorf("test sample")
return
}
stream.WriteArrayStart()
stream.WriteFloat64(float64(sample.TimestampMs) / float64(time.Second/time.Millisecond))
stream.WriteMore()
stream.WriteString(model.SampleValue(sample.Value).String())
stream.WriteArrayEnd()
}
func SampleJsoniterDecode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
if !iter.ReadArray() {
iter.ReportError("cortexpb.Sample", "expected [")
return
}
t := model.Time(iter.ReadFloat64() * float64(time.Second/time.Millisecond))
if !iter.ReadArray() {
iter.ReportError("cortexpb.Sample", "expected ,")
return
}
bs := iter.ReadStringAsSlice()
ss := *(*string)(unsafe.Pointer(&bs))
v, err := strconv.ParseFloat(ss, 64)
if err != nil {
iter.ReportError("cortexpb.Sample", err.Error())
return
}
if isTesting && math.IsNaN(v) {
iter.Error = fmt.Errorf("test sample")
return
}
if iter.ReadArray() {
iter.ReportError("cortexpb.Sample", "expected ]")
}
*(*Sample)(ptr) = Sample{
TimestampMs: int64(t),
Value: v,
}
}
func init() {
jsoniter.RegisterTypeEncoderFunc("cortexpb.Sample", SampleJsoniterEncode, func(unsafe.Pointer) bool { return false })
jsoniter.RegisterTypeDecoderFunc("cortexpb.Sample", SampleJsoniterDecode)
}

File diff suppressed because it is too large (vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go, deleted).

@ -1,70 +0,0 @@
syntax = "proto3";
package cortexpb;
option go_package = "cortexpb";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
message WriteRequest {
repeated TimeSeries timeseries = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseries"];
enum SourceEnum {
API = 0;
RULE = 1;
}
SourceEnum Source = 2;
repeated MetricMetadata metadata = 3 [(gogoproto.nullable) = true];
bool skip_label_name_validation = 1000; //set intentionally high to keep WriteRequest compatible with upstream Prometheus
}
message WriteResponse {}
message TimeSeries {
repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
// Sorted by time, oldest sample first.
repeated Sample samples = 2 [(gogoproto.nullable) = false];
repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
}
message LabelPair {
bytes name = 1;
bytes value = 2;
}
message Sample {
double value = 1;
int64 timestamp_ms = 2;
}
message MetricMetadata {
enum MetricType {
UNKNOWN = 0;
COUNTER = 1;
GAUGE = 2;
HISTOGRAM = 3;
GAUGEHISTOGRAM = 4;
SUMMARY = 5;
INFO = 6;
STATESET = 7;
}
MetricType type = 1;
string metric_family_name = 2;
string help = 4;
string unit = 5;
}
message Metric {
repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
}
message Exemplar {
// Exemplar labels, different than series labels
repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
double value = 2;
int64 timestamp_ms = 3;
}

@ -1,309 +0,0 @@
package cortexpb
import (
"flag"
"fmt"
"io"
"strings"
"sync"
"unsafe"
"github.com/prometheus/prometheus/model/labels"
)
var (
expectedTimeseries = 100
expectedLabels = 20
expectedSamplesPerSeries = 10
expectedExemplarsPerSeries = 1
/*
We cannot pool these as pointer-to-slice because the place we use them is in WriteRequest which is generated from Protobuf
and we don't have an option to make it a pointer. There is overhead here 24 bytes of garbage every time a PreallocTimeseries
is re-used. But since the slices are far far larger, we come out ahead.
*/
slicePool = sync.Pool{
New: func() interface{} {
return make([]PreallocTimeseries, 0, expectedTimeseries)
},
}
timeSeriesPool = sync.Pool{
New: func() interface{} {
return &TimeSeries{
Labels: make([]LabelAdapter, 0, expectedLabels),
Samples: make([]Sample, 0, expectedSamplesPerSeries),
Exemplars: make([]Exemplar, 0, expectedExemplarsPerSeries),
}
},
}
)
// PreallocConfig configures how structures will be preallocated to optimise
// proto unmarshalling.
type PreallocConfig struct{}
// RegisterFlags registers configuration settings.
func (PreallocConfig) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&expectedTimeseries, "ingester-client.expected-timeseries", expectedTimeseries, "Expected number of timeseries per request, used for preallocations.")
f.IntVar(&expectedLabels, "ingester-client.expected-labels", expectedLabels, "Expected number of labels per timeseries, used for preallocations.")
f.IntVar(&expectedSamplesPerSeries, "ingester-client.expected-samples-per-series", expectedSamplesPerSeries, "Expected number of samples per timeseries, used for preallocations.")
}
// PreallocWriteRequest is a WriteRequest which preallocs slices on Unmarshal.
type PreallocWriteRequest struct {
WriteRequest
}
// Unmarshal implements proto.Message.
func (p *PreallocWriteRequest) Unmarshal(dAtA []byte) error {
p.Timeseries = PreallocTimeseriesSliceFromPool()
return p.WriteRequest.Unmarshal(dAtA)
}
// PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshal.
type PreallocTimeseries struct {
*TimeSeries
}
// Unmarshal implements proto.Message.
func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error {
p.TimeSeries = TimeseriesFromPool()
return p.TimeSeries.Unmarshal(dAtA)
}
// LabelAdapter is a labels.Label that can be marshalled to/from protos.
type LabelAdapter labels.Label
// Marshal implements proto.Marshaller.
func (bs *LabelAdapter) Marshal() ([]byte, error) {
size := bs.Size()
buf := make([]byte, size)
n, err := bs.MarshalToSizedBuffer(buf[:size])
if err != nil {
return nil, err
}
return buf[:n], err
}
func (bs *LabelAdapter) MarshalTo(dAtA []byte) (int, error) {
size := bs.Size()
return bs.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalTo implements proto.Marshaller.
func (bs *LabelAdapter) MarshalToSizedBuffer(buf []byte) (n int, err error) {
ls := (*labels.Label)(bs)
i := len(buf)
if len(ls.Value) > 0 {
i -= len(ls.Value)
copy(buf[i:], ls.Value)
i = encodeVarintCortex(buf, i, uint64(len(ls.Value)))
i--
buf[i] = 0x12
}
if len(ls.Name) > 0 {
i -= len(ls.Name)
copy(buf[i:], ls.Name)
i = encodeVarintCortex(buf, i, uint64(len(ls.Name)))
i--
buf[i] = 0xa
}
return len(buf) - i, nil
}
// Unmarshal a LabelAdapter, implements proto.Unmarshaller.
// NB this is a copy of the autogenerated code to unmarshal a LabelPair,
// with the byte copying replaced with a yoloString.
func (bs *LabelAdapter) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCortex
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: LabelPair: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCortex
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthCortex
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
bs.Name = yoloString(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCortex
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthCortex
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthCortex
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
bs.Value = yoloString(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCortex(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthCortex
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthCortex
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func yoloString(buf []byte) string {
return *((*string)(unsafe.Pointer(&buf)))
}
// Size implements proto.Sizer.
func (bs *LabelAdapter) Size() (n int) {
ls := (*labels.Label)(bs)
if bs == nil {
return 0
}
var l int
_ = l
l = len(ls.Name)
if l > 0 {
n += 1 + l + sovCortex(uint64(l))
}
l = len(ls.Value)
if l > 0 {
n += 1 + l + sovCortex(uint64(l))
}
return n
}
// Equal implements proto.Equaler.
func (bs *LabelAdapter) Equal(other LabelAdapter) bool {
return bs.Name == other.Name && bs.Value == other.Value
}
// Compare implements proto.Comparer.
func (bs *LabelAdapter) Compare(other LabelAdapter) int {
if c := strings.Compare(bs.Name, other.Name); c != 0 {
return c
}
return strings.Compare(bs.Value, other.Value)
}
// PreallocTimeseriesSliceFromPool retrieves a slice of PreallocTimeseries from a sync.Pool.
// ReuseSlice should be called once done.
func PreallocTimeseriesSliceFromPool() []PreallocTimeseries {
return slicePool.Get().([]PreallocTimeseries)
}
// ReuseSlice puts the slice back into a sync.Pool for reuse.
func ReuseSlice(ts []PreallocTimeseries) {
for i := range ts {
ReuseTimeseries(ts[i].TimeSeries)
}
slicePool.Put(ts[:0]) //nolint:staticcheck //see comment on slicePool for more details
}
// TimeseriesFromPool retrieves a pointer to a TimeSeries from a sync.Pool.
// ReuseTimeseries should be called once done, unless ReuseSlice was called on the slice that contains this TimeSeries.
func TimeseriesFromPool() *TimeSeries {
return timeSeriesPool.Get().(*TimeSeries)
}
// ReuseTimeseries puts the timeseries back into a sync.Pool for reuse.
func ReuseTimeseries(ts *TimeSeries) {
// Name and Value may point into a large gRPC buffer, so clear the reference to allow GC
for i := 0; i < len(ts.Labels); i++ {
ts.Labels[i].Name = ""
ts.Labels[i].Value = ""
}
ts.Labels = ts.Labels[:0]
ts.Samples = ts.Samples[:0]
// Name and Value may point into a large gRPC buffer, so clear the reference in each exemplar to allow GC
for i := range ts.Exemplars {
for j := range ts.Exemplars[i].Labels {
ts.Exemplars[i].Labels[j].Name = ""
ts.Exemplars[i].Labels[j].Value = ""
}
}
ts.Exemplars = ts.Exemplars[:0]
timeSeriesPool.Put(ts)
}

@ -1,67 +0,0 @@
package client
import (
"flag"
"github.com/go-kit/log"
"github.com/grafana/dskit/grpcclient"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
)
var ingesterClientRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "ingester_client_request_duration_seconds",
Help: "Time spent doing Ingester requests.",
Buckets: prometheus.ExponentialBuckets(0.001, 4, 6),
}, []string{"operation", "status_code"})
// HealthAndIngesterClient is the union of IngesterClient and grpc_health_v1.HealthClient.
type HealthAndIngesterClient interface {
IngesterClient
grpc_health_v1.HealthClient
Close() error
}
type closableHealthAndIngesterClient struct {
IngesterClient
grpc_health_v1.HealthClient
conn *grpc.ClientConn
}
// MakeIngesterClient makes a new IngesterClient
func MakeIngesterClient(addr string, cfg Config) (HealthAndIngesterClient, error) {
dialOpts, err := cfg.GRPCClientConfig.DialOption(grpcclient.Instrument(ingesterClientRequestDuration))
if err != nil {
return nil, err
}
conn, err := grpc.Dial(addr, dialOpts...)
if err != nil {
return nil, err
}
return &closableHealthAndIngesterClient{
IngesterClient: NewIngesterClient(conn),
HealthClient: grpc_health_v1.NewHealthClient(conn),
conn: conn,
}, nil
}
func (c *closableHealthAndIngesterClient) Close() error {
return c.conn.Close()
}
// Config is the configuration struct for the ingester client
type Config struct {
GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"`
}
// RegisterFlags registers configuration settings used by the ingester client config.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.GRPCClientConfig.RegisterFlagsWithPrefix("ingester.client", f)
}
func (cfg *Config) Validate(log log.Logger) error {
return cfg.GRPCClientConfig.Validate(log)
}

@ -1,262 +0,0 @@
package client
import (
"fmt"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/cortexproject/cortex/pkg/cortexpb"
)
// ToQueryRequest builds a QueryRequest proto.
func ToQueryRequest(from, to model.Time, matchers []*labels.Matcher) (*QueryRequest, error) {
ms, err := toLabelMatchers(matchers)
if err != nil {
return nil, err
}
return &QueryRequest{
StartTimestampMs: int64(from),
EndTimestampMs: int64(to),
Matchers: ms,
}, nil
}
// FromQueryRequest unpacks a QueryRequest proto.
func FromQueryRequest(req *QueryRequest) (model.Time, model.Time, []*labels.Matcher, error) {
matchers, err := FromLabelMatchers(req.Matchers)
if err != nil {
return 0, 0, nil, err
}
from := model.Time(req.StartTimestampMs)
to := model.Time(req.EndTimestampMs)
return from, to, matchers, nil
}
// ToExemplarQueryRequest builds an ExemplarQueryRequest proto.
func ToExemplarQueryRequest(from, to model.Time, matchers ...[]*labels.Matcher) (*ExemplarQueryRequest, error) {
var reqMatchers []*LabelMatchers
for _, m := range matchers {
ms, err := toLabelMatchers(m)
if err != nil {
return nil, err
}
reqMatchers = append(reqMatchers, &LabelMatchers{ms})
}
return &ExemplarQueryRequest{
StartTimestampMs: int64(from),
EndTimestampMs: int64(to),
Matchers: reqMatchers,
}, nil
}
// FromExemplarQueryRequest unpacks a ExemplarQueryRequest proto.
func FromExemplarQueryRequest(req *ExemplarQueryRequest) (int64, int64, [][]*labels.Matcher, error) {
var result [][]*labels.Matcher
for _, m := range req.Matchers {
matchers, err := FromLabelMatchers(m.Matchers)
if err != nil {
return 0, 0, nil, err
}
result = append(result, matchers)
}
return req.StartTimestampMs, req.EndTimestampMs, result, nil
}
// ToQueryResponse builds a QueryResponse proto.
func ToQueryResponse(matrix model.Matrix) *QueryResponse {
resp := &QueryResponse{}
for _, ss := range matrix {
ts := cortexpb.TimeSeries{
Labels: cortexpb.FromMetricsToLabelAdapters(ss.Metric),
Samples: make([]cortexpb.Sample, 0, len(ss.Values)),
}
for _, s := range ss.Values {
ts.Samples = append(ts.Samples, cortexpb.Sample{
Value: float64(s.Value),
TimestampMs: int64(s.Timestamp),
})
}
resp.Timeseries = append(resp.Timeseries, ts)
}
return resp
}
// FromQueryResponse unpacks a QueryResponse proto.
func FromQueryResponse(resp *QueryResponse) model.Matrix {
m := make(model.Matrix, 0, len(resp.Timeseries))
for _, ts := range resp.Timeseries {
var ss model.SampleStream
ss.Metric = cortexpb.FromLabelAdaptersToMetric(ts.Labels)
ss.Values = make([]model.SamplePair, 0, len(ts.Samples))
for _, s := range ts.Samples {
ss.Values = append(ss.Values, model.SamplePair{
Value: model.SampleValue(s.Value),
Timestamp: model.Time(s.TimestampMs),
})
}
m = append(m, &ss)
}
return m
}
// ToMetricsForLabelMatchersRequest builds a MetricsForLabelMatchersRequest proto
func ToMetricsForLabelMatchersRequest(from, to model.Time, matchers []*labels.Matcher) (*MetricsForLabelMatchersRequest, error) {
ms, err := toLabelMatchers(matchers)
if err != nil {
return nil, err
}
return &MetricsForLabelMatchersRequest{
StartTimestampMs: int64(from),
EndTimestampMs: int64(to),
MatchersSet: []*LabelMatchers{{Matchers: ms}},
}, nil
}
// FromMetricsForLabelMatchersRequest unpacks a MetricsForLabelMatchersRequest proto
func FromMetricsForLabelMatchersRequest(req *MetricsForLabelMatchersRequest) (model.Time, model.Time, [][]*labels.Matcher, error) {
matchersSet := make([][]*labels.Matcher, 0, len(req.MatchersSet))
for _, matchers := range req.MatchersSet {
matchers, err := FromLabelMatchers(matchers.Matchers)
if err != nil {
return 0, 0, nil, err
}
matchersSet = append(matchersSet, matchers)
}
from := model.Time(req.StartTimestampMs)
to := model.Time(req.EndTimestampMs)
return from, to, matchersSet, nil
}
// FromMetricsForLabelMatchersResponse unpacks a MetricsForLabelMatchersResponse proto
func FromMetricsForLabelMatchersResponse(resp *MetricsForLabelMatchersResponse) []model.Metric {
metrics := []model.Metric{}
for _, m := range resp.Metric {
metrics = append(metrics, cortexpb.FromLabelAdaptersToMetric(m.Labels))
}
return metrics
}
// ToLabelValuesRequest builds a LabelValuesRequest proto
func ToLabelValuesRequest(labelName model.LabelName, from, to model.Time, matchers []*labels.Matcher) (*LabelValuesRequest, error) {
ms, err := toLabelMatchers(matchers)
if err != nil {
return nil, err
}
return &LabelValuesRequest{
LabelName: string(labelName),
StartTimestampMs: int64(from),
EndTimestampMs: int64(to),
Matchers: &LabelMatchers{Matchers: ms},
}, nil
}
// FromLabelValuesRequest unpacks a LabelValuesRequest proto
func FromLabelValuesRequest(req *LabelValuesRequest) (string, int64, int64, []*labels.Matcher, error) {
var err error
var matchers []*labels.Matcher
if req.Matchers != nil {
matchers, err = FromLabelMatchers(req.Matchers.Matchers)
if err != nil {
return "", 0, 0, nil, err
}
}
return req.LabelName, req.StartTimestampMs, req.EndTimestampMs, matchers, nil
}
func toLabelMatchers(matchers []*labels.Matcher) ([]*LabelMatcher, error) {
result := make([]*LabelMatcher, 0, len(matchers))
for _, matcher := range matchers {
var mType MatchType
switch matcher.Type {
case labels.MatchEqual:
mType = EQUAL
case labels.MatchNotEqual:
mType = NOT_EQUAL
case labels.MatchRegexp:
mType = REGEX_MATCH
case labels.MatchNotRegexp:
mType = REGEX_NO_MATCH
default:
return nil, fmt.Errorf("invalid matcher type")
}
result = append(result, &LabelMatcher{
Type: mType,
Name: matcher.Name,
Value: matcher.Value,
})
}
return result, nil
}
func FromLabelMatchers(matchers []*LabelMatcher) ([]*labels.Matcher, error) {
result := make([]*labels.Matcher, 0, len(matchers))
for _, matcher := range matchers {
var mtype labels.MatchType
switch matcher.Type {
case EQUAL:
mtype = labels.MatchEqual
case NOT_EQUAL:
mtype = labels.MatchNotEqual
case REGEX_MATCH:
mtype = labels.MatchRegexp
case REGEX_NO_MATCH:
mtype = labels.MatchNotRegexp
default:
return nil, fmt.Errorf("invalid matcher type")
}
matcher, err := labels.NewMatcher(mtype, matcher.Name, matcher.Value)
if err != nil {
return nil, err
}
result = append(result, matcher)
}
return result, nil
}
// FastFingerprint runs the same algorithm as Prometheus labelSetToFastFingerprint()
func FastFingerprint(ls []cortexpb.LabelAdapter) model.Fingerprint {
if len(ls) == 0 {
return model.Metric(nil).FastFingerprint()
}
var result uint64
for _, l := range ls {
sum := hashNew()
sum = hashAdd(sum, l.Name)
sum = hashAddByte(sum, model.SeparatorByte)
sum = hashAdd(sum, l.Value)
result ^= sum
}
return model.Fingerprint(result)
}
// Fingerprint runs the same algorithm as Prometheus labelSetToFingerprint()
func Fingerprint(labels labels.Labels) model.Fingerprint {
sum := hashNew()
for _, label := range labels {
sum = hashAddString(sum, label.Name)
sum = hashAddByte(sum, model.SeparatorByte)
sum = hashAddString(sum, label.Value)
sum = hashAddByte(sum, model.SeparatorByte)
}
return model.Fingerprint(sum)
}
// LabelsToKeyString is used to form a string to be used as
// the hashKey. Don't print, use l.String() for printing.
func LabelsToKeyString(l labels.Labels) string {
// We are allocating 1024, even though most series are less than 600b long.
// But this is not an issue as this function is being inlined when called in a loop
// and buffer allocated is a static buffer and not a dynamic buffer on the heap.
b := make([]byte, 0, 1024)
return string(l.Bytes(b))
}

@ -1,41 +0,0 @@
package client
import (
context "context"
)
// SendQueryStream wraps the stream's Send() checking if the context is done
// before calling Send().
func SendQueryStream(s Ingester_QueryStreamServer, m *QueryStreamResponse) error {
return sendWithContextErrChecking(s.Context(), func() error {
return s.Send(m)
})
}
// SendTimeSeriesChunk wraps the stream's Send() checking if the context is done
// before calling Send().
func SendTimeSeriesChunk(s Ingester_TransferChunksClient, m *TimeSeriesChunk) error {
return sendWithContextErrChecking(s.Context(), func() error {
return s.Send(m)
})
}
func sendWithContextErrChecking(ctx context.Context, send func() error) error {
// If the context has been canceled or its deadline exceeded, we should return it
// instead of the cryptic error the Send() will return.
if ctxErr := ctx.Err(); ctxErr != nil {
return ctxErr
}
if err := send(); err != nil {
// Experimentally, we've seen the context switching to done after the Send()
// has been called, so here we do recheck the context in case of error.
if ctxErr := ctx.Err(); ctxErr != nil {
return ctxErr
}
return err
}
return nil
}

@ -1,29 +0,0 @@
package client
// ChunksCount returns the number of chunks in response.
func (m *QueryStreamResponse) ChunksCount() int {
if len(m.Chunkseries) == 0 {
return 0
}
count := 0
for _, entry := range m.Chunkseries {
count += len(entry.Chunks)
}
return count
}
// ChunksSize returns the size of all chunks in the response.
func (m *QueryStreamResponse) ChunksSize() int {
if len(m.Chunkseries) == 0 {
return 0
}
size := 0
for _, entry := range m.Chunkseries {
for _, chunk := range entry.Chunks {
size += chunk.Size()
}
}
return size
}

@ -1,6 +0,0 @@
package client
import (
// This import exists to trick dep into vendoring the required protos
_ "github.com/gogo/protobuf/gogoproto"
)

@ -1,99 +0,0 @@
// Modified from github.com/prometheus/common/model/fnv.go
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
offset32 = 2166136261
prime32 = 16777619
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
// Note this is the same algorithm as Go stdlib `sum64a.Write()`
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAddString(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
// HashNew32 initializies a new fnv32 hash value.
func HashNew32() uint32 {
return offset32
}
// HashAdd32 adds a string to a fnv32 hash value, returning the updated hash.
// Note this is the same algorithm as Go stdlib `sum32.Write()`
func HashAdd32(h uint32, s string) uint32 {
for i := 0; i < len(s); i++ {
h *= prime32
h ^= uint32(s[i])
}
return h
}
// HashAddByte32 adds a byte to a fnv32 hash value, returning the updated hash.
func HashAddByte32(h uint32, b byte) uint32 {
h *= prime32
h ^= uint32(b)
return h
}
// HashNew32a initializies a new fnv32a hash value.
func HashNew32a() uint32 {
return offset32
}
// HashAdd32a adds a string to a fnv32a hash value, returning the updated hash.
// Note this is the same algorithm as Go stdlib `sum32.Write()`
func HashAdd32a(h uint32, s string) uint32 {
for i := 0; i < len(s); i++ {
h ^= uint32(s[i])
h *= prime32
}
return h
}
// HashAddByte32a adds a byte to a fnv32a hash value, returning the updated hash.
func HashAddByte32a(h uint32, b byte) uint32 {
h ^= uint32(b)
h *= prime32
return h
}

File diff suppressed because it is too large (vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go, deleted).

@ -1,159 +0,0 @@
syntax = "proto3";
// TODO: Rename to ingesterpb
package cortex;
option go_package = "client";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service Ingester {
rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {};
rpc Query(QueryRequest) returns (QueryResponse) {};
rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {};
rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {};
rpc LabelValues(LabelValuesRequest) returns (LabelValuesResponse) {};
rpc LabelNames(LabelNamesRequest) returns (LabelNamesResponse) {};
rpc UserStats(UserStatsRequest) returns (UserStatsResponse) {};
rpc AllUserStats(UserStatsRequest) returns (UsersStatsResponse) {};
rpc MetricsForLabelMatchers(MetricsForLabelMatchersRequest) returns (MetricsForLabelMatchersResponse) {};
rpc MetricsMetadata(MetricsMetadataRequest) returns (MetricsMetadataResponse) {};
// TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server).
rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {};
}
message ReadRequest {
repeated QueryRequest queries = 1;
}
message ReadResponse {
repeated QueryResponse results = 1;
}
message QueryRequest {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
repeated LabelMatcher matchers = 3;
}
message ExemplarQueryRequest {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
repeated LabelMatchers matchers = 3;
}
message QueryResponse {
repeated cortexpb.TimeSeries timeseries = 1 [(gogoproto.nullable) = false];
}
// QueryStreamResponse contains a batch of timeseries chunks or timeseries. Only one of these series will be populated.
message QueryStreamResponse {
repeated TimeSeriesChunk chunkseries = 1 [(gogoproto.nullable) = false];
repeated cortexpb.TimeSeries timeseries = 2 [(gogoproto.nullable) = false];
}
message ExemplarQueryResponse {
repeated cortexpb.TimeSeries timeseries = 1 [(gogoproto.nullable) = false];
}
message LabelValuesRequest {
string label_name = 1;
int64 start_timestamp_ms = 2;
int64 end_timestamp_ms = 3;
LabelMatchers matchers = 4;
}
message LabelValuesResponse {
repeated string label_values = 1;
}
message LabelNamesRequest {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
}
message LabelNamesResponse {
repeated string label_names = 1;
}
message UserStatsRequest {}
message UserStatsResponse {
double ingestion_rate = 1;
uint64 num_series = 2;
double api_ingestion_rate = 3;
double rule_ingestion_rate = 4;
}
message UserIDStatsResponse {
string user_id = 1;
UserStatsResponse data = 2;
}
message UsersStatsResponse {
repeated UserIDStatsResponse stats = 1;
}
message MetricsForLabelMatchersRequest {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
repeated LabelMatchers matchers_set = 3;
}
message MetricsForLabelMatchersResponse {
repeated cortexpb.Metric metric = 1;
}
message MetricsMetadataRequest {
}
message MetricsMetadataResponse {
repeated cortexpb.MetricMetadata metadata = 1;
}
message TimeSeriesChunk {
string from_ingester_id = 1;
string user_id = 2;
repeated cortexpb.LabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/cortexpb.LabelAdapter"];
repeated Chunk chunks = 4 [(gogoproto.nullable) = false];
}
message Chunk {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
int32 encoding = 3;
bytes data = 4;
}
message TransferChunksResponse {
}
message LabelMatchers {
repeated LabelMatcher matchers = 1;
}
enum MatchType {
EQUAL = 0;
NOT_EQUAL = 1;
REGEX_MATCH = 2;
REGEX_NO_MATCH = 3;
}
message LabelMatcher {
MatchType type = 1;
string name = 2;
string value = 3;
}
message TimeSeriesFile {
string from_ingester_id = 1;
string user_id = 2;
string filename = 3;
bytes data = 4;
}

@ -248,8 +248,6 @@ github.com/coreos/go-systemd/v22/journal
github.com/coreos/pkg/capnslog
# github.com/cortexproject/cortex v1.10.1-0.20220110092510-e0807c4eb487
## explicit; go 1.16
github.com/cortexproject/cortex/pkg/cortexpb
github.com/cortexproject/cortex/pkg/ingester/client
github.com/cortexproject/cortex/pkg/prom1/storage/metric
github.com/cortexproject/cortex/pkg/storage/bucket
github.com/cortexproject/cortex/pkg/storage/bucket/azure
