From e3e4a2d9ae8fd1fda1eacb0d56448cee47ee52ec Mon Sep 17 00:00:00 2001 From: Tom Wilkie Date: Thu, 17 Jan 2019 11:06:49 +0000 Subject: [PATCH] Update vendored Cortex. (#213) Signed-off-by: Tom Wilkie --- Gopkg.lock | 19 +- Gopkg.toml | 3 +- .../cortex/pkg/chunk/aws/aws_autoscaling.go | 18 +- .../pkg/chunk/aws/dynamodb_storage_client.go | 55 +- .../pkg/chunk/aws/dynamodb_table_client.go | 34 +- .../cortex/pkg/chunk/aws/fixtures.go | 4 +- .../cortex/pkg/chunk/aws/mock.go | 2 +- .../cortex/pkg/chunk/aws/s3_storage_client.go | 10 +- .../cortex/pkg/chunk/cache/background.go | 4 +- .../cortex/pkg/chunk/cache/cache.go | 18 +- .../cortex/pkg/chunk/cache/diskcache.go | 4 +- .../cortex/pkg/chunk/cache/fifo_cache.go | 4 +- .../cortex/pkg/chunk/cache/instrumented.go | 10 +- .../cortex/pkg/chunk/cache/memcached.go | 40 +- .../pkg/chunk/cache/memcached_client.go | 10 +- .../cortex/pkg/chunk/cassandra/fixtures.go | 6 +- .../pkg/chunk/cassandra/table_client.go | 6 + .../cortex/pkg/chunk/chunk_store.go | 7 +- .../cortex/pkg/chunk/encoding/factory.go | 4 +- .../pkg/chunk/gcp/bigtable_index_client.go | 16 +- ...nk_client.go => bigtable_object_client.go} | 18 +- .../cortex/pkg/chunk/gcp/fixtures.go | 14 +- ...s_chunk_client.go => gcs_object_client.go} | 20 +- .../cortex/pkg/chunk/gcp/table_client.go | 8 + .../pkg/chunk/inmemory_storage_client.go | 14 + .../pkg/chunk/local/boltdb_index_client.go | 24 +- .../pkg/chunk/local/boltdb_table_client.go | 4 + .../cortex/pkg/chunk/local/fixtures.go | 4 +- .../pkg/chunk/local/fs_object_client.go | 12 - .../cortexproject/cortex/pkg/chunk/schema.go | 3 + .../cortex/pkg/chunk/schema_caching.go | 113 ++++ .../cortex/pkg/chunk/schema_config.go | 7 +- .../cortex/pkg/chunk/series_store.go | 7 + .../pkg/chunk/storage/caching_fixtures.go | 4 +- .../pkg/chunk/storage/caching_index_client.go | 12 +- .../cortex/pkg/chunk/storage/factory.go | 17 +- .../cortex/pkg/chunk/table_client.go | 1 + .../cortex/pkg/chunk/table_manager.go | 68 +- 
.../cortex/pkg/chunk/testutils/testutils.go | 4 +- .../pkg/chunk/util/parallel_chunk_fetch.go | 2 +- .../cortex/pkg/ingester/client/client.go | 24 +- .../cortex/pkg/ingester/client/compat.go | 60 +- .../cortex/pkg/ingester/client/fnv.go | 4 +- .../cortex/pkg/ingester/index/index.go | 93 ++- .../cortex/pkg/ring/consul_client.go | 2 +- .../cortex/pkg/ring/consul_metrics.go | 14 +- .../cortex/pkg/ring/lifecycler.go | 22 +- .../cortex/pkg/util/grpcclient/grpcclient.go | 37 ++ .../cortexproject/cortex/pkg/util/hash_fp.go | 14 + .../cortexproject/cortex/pkg/util/http.go | 2 +- .../cortexproject/cortex/pkg/util/net.go | 6 +- .../cortex/pkg/util/validation/validate.go | 4 + vendor/github.com/etcd-io/bbolt/.travis.yml | 17 + vendor/github.com/etcd-io/bbolt/Makefile | 22 +- vendor/github.com/etcd-io/bbolt/README.md | 245 ++++--- vendor/github.com/etcd-io/bbolt/appveyor.yml | 18 - vendor/github.com/etcd-io/bbolt/bolt_386.go | 5 +- vendor/github.com/etcd-io/bbolt/bolt_amd64.go | 5 +- vendor/github.com/etcd-io/bbolt/bolt_arm.go | 23 +- vendor/github.com/etcd-io/bbolt/bolt_arm64.go | 5 +- vendor/github.com/etcd-io/bbolt/bolt_linux.go | 2 +- .../github.com/etcd-io/bbolt/bolt_mips64x.go | 12 + vendor/github.com/etcd-io/bbolt/bolt_mipsx.go | 12 + .../github.com/etcd-io/bbolt/bolt_openbsd.go | 2 +- vendor/github.com/etcd-io/bbolt/bolt_ppc.go | 5 +- vendor/github.com/etcd-io/bbolt/bolt_ppc64.go | 5 +- .../github.com/etcd-io/bbolt/bolt_ppc64le.go | 5 +- vendor/github.com/etcd-io/bbolt/bolt_s390x.go | 5 +- vendor/github.com/etcd-io/bbolt/bolt_unix.go | 42 +- .../etcd-io/bbolt/bolt_unix_solaris.go | 44 +- .../github.com/etcd-io/bbolt/bolt_windows.go | 57 +- .../github.com/etcd-io/bbolt/boltsync_unix.go | 2 +- vendor/github.com/etcd-io/bbolt/bucket.go | 51 +- vendor/github.com/etcd-io/bbolt/cursor.go | 10 +- vendor/github.com/etcd-io/bbolt/db.go | 228 +++++-- vendor/github.com/etcd-io/bbolt/doc.go | 4 +- vendor/github.com/etcd-io/bbolt/errors.go | 2 +- 
vendor/github.com/etcd-io/bbolt/freelist.go | 169 +++-- vendor/github.com/etcd-io/bbolt/node.go | 4 +- vendor/github.com/etcd-io/bbolt/page.go | 33 +- vendor/github.com/etcd-io/bbolt/tx.go | 89 ++- .../prometheus/prometheus/config/config.go | 608 ------------------ 82 files changed, 1412 insertions(+), 1231 deletions(-) rename vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/{bigtable_chunk_client.go => bigtable_object_client.go} (83%) rename vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/{gcs_chunk_client.go => gcs_object_client.go} (67%) create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go create mode 100644 vendor/github.com/etcd-io/bbolt/.travis.yml delete mode 100644 vendor/github.com/etcd-io/bbolt/appveyor.yml create mode 100644 vendor/github.com/etcd-io/bbolt/bolt_mips64x.go create mode 100644 vendor/github.com/etcd-io/bbolt/bolt_mipsx.go delete mode 100644 vendor/github.com/prometheus/prometheus/config/config.go diff --git a/Gopkg.lock b/Gopkg.lock index 142c29baa4..50fbe3b36e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -163,8 +163,8 @@ revision = "3a0bb77429bd3a61596f5e8a3172445844342120" [[projects]] - branch = "dev" - digest = "1:263542950680f505b60f5d66e5cf828bf0206ad485a64e956d3f9be87c475788" + branch = "master" + digest = "1:e7525991325b400ecf4f6c6383e965b12801b87e2f687afda23f57f091f9fcc0" name = "github.com/cortexproject/cortex" packages = [ "pkg/chunk", @@ -184,14 +184,14 @@ "pkg/util", "pkg/util/extract", "pkg/util/flagext", + "pkg/util/grpcclient", "pkg/util/middleware", "pkg/util/spanlogger", "pkg/util/validation", "pkg/util/wire", ] pruneopts = "UT" - revision = "26c93125feb2a50a2b9f5f496e731eae875a20e5" - source = "github.com/grafana/cortex" + revision = "4c9e2025ab6733e0f42187b64d2522d558f9f31c" [[projects]] digest = 
"1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" @@ -210,12 +210,12 @@ version = "v3.2.0" [[projects]] - digest = "1:c28625428387b63dd7154eb857f51e700465cfbf7c06f619e71f2da33cefe47e" + digest = "1:3762d59edaa6e5c71d5e594c020c8391f274ff283e9c30fb43c518ec59a3f9b3" name = "github.com/etcd-io/bbolt" packages = ["."] pruneopts = "UT" - revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9" - version = "v1.3.0" + revision = "7ee3ded59d4835e10f3e7d0f7603c42aa5e83820" + version = "v1.3.1-etcd.8" [[projects]] digest = "1:865079840386857c809b72ce300be7580cb50d3d3129ce11bf9aa6ca2bc1934a" @@ -721,10 +721,9 @@ [[projects]] branch = "master" - digest = "1:55e2b00b96200066b7feaf44eb05b1e539e76d217d5be54978a4e0a24b2b3f66" + digest = "1:efd4f8cbe8bca553023cd1ac1db42875f1606f410ba585ff2486f9b04ab0af3f" name = "github.com/prometheus/prometheus" packages = [ - "config", "discovery", "discovery/azure", "discovery/config", @@ -1337,11 +1336,11 @@ "github.com/prometheus/client_golang/prometheus/promauto", "github.com/prometheus/common/model", "github.com/prometheus/common/version", - "github.com/prometheus/prometheus/config", "github.com/prometheus/prometheus/discovery", "github.com/prometheus/prometheus/discovery/config", "github.com/prometheus/prometheus/discovery/targetgroup", "github.com/prometheus/prometheus/pkg/labels", + "github.com/prometheus/prometheus/pkg/relabel", "github.com/prometheus/prometheus/relabel", "github.com/stretchr/testify/assert", "github.com/stretchr/testify/require", diff --git a/Gopkg.toml b/Gopkg.toml index 23bf0b22ec..b46c93b448 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -26,8 +26,7 @@ [[constraint]] name = "github.com/cortexproject/cortex" - source = "github.com/grafana/cortex" - branch = "dev" + branch = "master" [[constraint]] name = "github.com/weaveworks/common" diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go index 
d32b9a8370..4643ed7897 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go @@ -19,7 +19,7 @@ const ( autoScalingPolicyNamePrefix = "DynamoScalingPolicy_cortex_" ) -var applicationAutoScalingRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ +var applicationAutoScalingRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "application_autoscaling_request_duration_seconds", Help: "Time spent doing ApplicationAutoScaling requests.", @@ -27,10 +27,10 @@ var applicationAutoScalingRequestDuration = prometheus.NewHistogramVec(prometheu // AWS latency seems to range from a few ms to a few sec. So use 8 buckets // from 128us to 2s. TODO: Confirm that this is the case for ApplicationAutoScaling. Buckets: prometheus.ExponentialBuckets(0.000128, 4, 8), -}, []string{"operation", "status_code"}) +}, []string{"operation", "status_code"})) func init() { - prometheus.MustRegister(applicationAutoScalingRequestDuration) + applicationAutoScalingRequestDuration.Register() } type awsAutoscale struct { @@ -58,7 +58,7 @@ func (a *awsAutoscale) PostCreateTable(ctx context.Context, desc chunk.TableDesc func (a *awsAutoscale) DescribeTable(ctx context.Context, desc *chunk.TableDesc) error { err := a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "ApplicationAutoScaling.DescribeScalableTargetsWithContext", applicationAutoScalingRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.DescribeScalableTargetsWithContext", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := a.ApplicationAutoScaling.DescribeScalableTargetsWithContext(ctx, &applicationautoscaling.DescribeScalableTargetsInput{ ResourceIds: 
[]*string{aws.String("table/" + desc.Name)}, ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), @@ -94,7 +94,7 @@ func (a *awsAutoscale) DescribeTable(ctx context.Context, desc *chunk.TableDesc) } err = a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "ApplicationAutoScaling.DescribeScalingPoliciesWithContext", applicationAutoScalingRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.DescribeScalingPoliciesWithContext", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := a.ApplicationAutoScaling.DescribeScalingPoliciesWithContext(ctx, &applicationautoscaling.DescribeScalingPoliciesInput{ PolicyNames: []*string{aws.String(autoScalingPolicyNamePrefix + desc.Name)}, ResourceId: aws.String("table/" + desc.Name), @@ -151,7 +151,7 @@ func (a *awsAutoscale) UpdateTable(ctx context.Context, current chunk.TableDesc, func (a *awsAutoscale) enableAutoScaling(ctx context.Context, desc chunk.TableDesc) error { // Registers or updates a scalable target if err := a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "ApplicationAutoScaling.RegisterScalableTarget", applicationAutoScalingRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.RegisterScalableTarget", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { input := &applicationautoscaling.RegisterScalableTargetInput{ MinCapacity: aws.Int64(desc.WriteScale.MinCapacity), MaxCapacity: aws.Int64(desc.WriteScale.MaxCapacity), @@ -172,7 +172,7 @@ func (a *awsAutoscale) enableAutoScaling(ctx context.Context, desc chunk.TableDe // Puts or updates a scaling policy return a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, 
"ApplicationAutoScaling.PutScalingPolicy", applicationAutoScalingRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.PutScalingPolicy", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { input := &applicationautoscaling.PutScalingPolicyInput{ PolicyName: aws.String(autoScalingPolicyNamePrefix + desc.Name), PolicyType: aws.String("TargetTrackingScaling"), @@ -197,7 +197,7 @@ func (a *awsAutoscale) enableAutoScaling(ctx context.Context, desc chunk.TableDe func (a *awsAutoscale) disableAutoScaling(ctx context.Context, desc chunk.TableDesc) error { // Deregister scalable target if err := a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "ApplicationAutoScaling.DeregisterScalableTarget", applicationAutoScalingRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.DeregisterScalableTarget", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { input := &applicationautoscaling.DeregisterScalableTargetInput{ ResourceId: aws.String("table/" + desc.Name), ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), @@ -212,7 +212,7 @@ func (a *awsAutoscale) disableAutoScaling(ctx context.Context, desc chunk.TableD // Delete scaling policy return a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "ApplicationAutoScaling.DeleteScalingPolicy", applicationAutoScalingRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.DeleteScalingPolicy", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { input := &applicationautoscaling.DeleteScalingPolicyInput{ PolicyName: aws.String(autoScalingPolicyNamePrefix + desc.Name), ResourceId: aws.String("table/" + 
desc.Name), diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index d27978fb73..05485e549d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -47,7 +47,7 @@ const ( ) var ( - dynamoRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + dynamoRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "dynamo_request_duration_seconds", Help: "Time spent doing DynamoDB requests.", @@ -55,7 +55,7 @@ var ( // DynamoDB latency seems to range from a few ms to a few sec and is // important. So use 8 buckets from 128us to 2s. Buckets: prometheus.ExponentialBuckets(0.000128, 4, 8), - }, []string{"operation", "status_code"}) + }, []string{"operation", "status_code"})) dynamoConsumedCapacity = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", Name: "dynamo_consumed_capacity_total", @@ -88,7 +88,7 @@ var ( ) func init() { - prometheus.MustRegister(dynamoRequestDuration) + dynamoRequestDuration.Register() prometheus.MustRegister(dynamoConsumedCapacity) prometheus.MustRegister(dynamoFailures) prometheus.MustRegister(dynamoQueryPagesCount) @@ -135,8 +135,7 @@ func (cfg *StorageConfig) RegisterFlags(f *flag.FlagSet) { "If only region is specified as a host, proper endpoint will be deduced. Use inmemory:/// to use a mock in-memory implementation.") } -// DynamoDBStorageClient implements both chunk.IndexClient and chunk.ObjectClient for DynamoDB. 
-type DynamoDBStorageClient struct { +type dynamoDBStorageClient struct { cfg DynamoDBConfig schemaCfg chunk.SchemaConfig @@ -149,14 +148,24 @@ type DynamoDBStorageClient struct { batchWriteItemRequestFn func(ctx context.Context, input *dynamodb.BatchWriteItemInput) dynamoDBRequest } -// NewDynamoDBStorageClient makes a new DynamoDB-backed IndexClient and ObjectClient. -func NewDynamoDBStorageClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) (*DynamoDBStorageClient, error) { +// NewDynamoDBIndexClient makes a new DynamoDB-backed IndexClient. +func NewDynamoDBIndexClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) (chunk.IndexClient, error) { + return newDynamoDBStorageClient(cfg, schemaCfg) +} + +// NewDynamoDBObjectClient makes a new DynamoDB-backed ObjectClient. +func NewDynamoDBObjectClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) (chunk.ObjectClient, error) { + return newDynamoDBStorageClient(cfg, schemaCfg) +} + +// newDynamoDBStorageClient makes a new DynamoDB-backed IndexClient and ObjectClient. +func newDynamoDBStorageClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) (*dynamoDBStorageClient, error) { dynamoDB, err := dynamoClientFromURL(cfg.DynamoDB.URL) if err != nil { return nil, err } - client := &DynamoDBStorageClient{ + client := &dynamoDBStorageClient{ cfg: cfg, schemaCfg: schemaCfg, DynamoDB: dynamoDB, @@ -168,11 +177,11 @@ func NewDynamoDBStorageClient(cfg DynamoDBConfig, schemaCfg chunk.SchemaConfig) } // Stop implements chunk.IndexClient. -func (a DynamoDBStorageClient) Stop() { +func (a dynamoDBStorageClient) Stop() { } // NewWriteBatch implements chunk.IndexClient. 
-func (a DynamoDBStorageClient) NewWriteBatch() chunk.WriteBatch { +func (a dynamoDBStorageClient) NewWriteBatch() chunk.WriteBatch { return dynamoDBWriteBatch(map[string][]*dynamodb.WriteRequest{}) } @@ -198,7 +207,7 @@ func logRetry(ctx context.Context, unprocessed dynamoDBWriteBatch) { // BatchWrite writes requests to the underlying storage, handling retries and backoff. // Structure is identical to getDynamoDBChunks(), but operating on different datatypes // so cannot share implementation. If you fix a bug here fix it there too. -func (a DynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.WriteBatch) error { +func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.WriteBatch) error { outstanding := input.(dynamoDBWriteBatch) unprocessed := dynamoDBWriteBatch{} @@ -217,7 +226,7 @@ func (a DynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), }) - err := instrument.TimeRequestHistogram(ctx, "DynamoDB.BatchWriteItem", dynamoRequestDuration, func(ctx context.Context) error { + err := instrument.CollectedRequest(ctx, "DynamoDB.BatchWriteItem", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return request.Send() }) resp := request.Data().(*dynamodb.BatchWriteItemOutput) @@ -272,11 +281,11 @@ func (a DynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write } // QueryPages implements chunk.IndexClient. 
-func (a DynamoDBStorageClient) QueryPages(ctx context.Context, queries []chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) bool) error { +func (a dynamoDBStorageClient) QueryPages(ctx context.Context, queries []chunk.IndexQuery, callback func(chunk.IndexQuery, chunk.ReadBatch) bool) error { return chunk_util.DoParallelQueries(ctx, a.query, queries, callback) } -func (a DynamoDBStorageClient) query(ctx context.Context, query chunk.IndexQuery, callback func(result chunk.ReadBatch) (shouldContinue bool)) error { +func (a dynamoDBStorageClient) query(ctx context.Context, query chunk.IndexQuery, callback func(result chunk.ReadBatch) (shouldContinue bool)) error { sp, ctx := ot.StartSpanFromContext(ctx, "QueryPages", ot.Tag{Key: "tableName", Value: query.TableName}, ot.Tag{Key: "hashValue", Value: query.HashValue}) defer sp.Finish() @@ -346,7 +355,7 @@ func (a DynamoDBStorageClient) query(ctx context.Context, query chunk.IndexQuery return nil } -func (a DynamoDBStorageClient) queryPage(ctx context.Context, input *dynamodb.QueryInput, page dynamoDBRequest) (*dynamoDBReadResponse, error) { +func (a dynamoDBStorageClient) queryPage(ctx context.Context, input *dynamodb.QueryInput, page dynamoDBRequest) (*dynamoDBReadResponse, error) { backoff := util.NewBackoff(ctx, a.cfg.backoffConfig) defer func() { dynamoQueryRetryCount.WithLabelValues("queryPage").Observe(float64(backoff.NumRetries())) @@ -354,7 +363,7 @@ func (a DynamoDBStorageClient) queryPage(ctx context.Context, input *dynamodb.Qu var err error for backoff.Ongoing() { - err = instrument.TimeRequestHistogram(ctx, "DynamoDB.QueryPages", dynamoRequestDuration, func(_ context.Context) error { + err = instrument.CollectedRequest(ctx, "DynamoDB.QueryPages", dynamoRequestDuration, instrument.ErrorCode, func(_ context.Context) error { return page.Send() }) @@ -392,19 +401,19 @@ type dynamoDBRequest interface { Retryable() bool } -func (a DynamoDBStorageClient) queryRequest(ctx context.Context, input 
*dynamodb.QueryInput) dynamoDBRequest { +func (a dynamoDBStorageClient) queryRequest(ctx context.Context, input *dynamodb.QueryInput) dynamoDBRequest { req, _ := a.DynamoDB.QueryRequest(input) req.SetContext(ctx) return dynamoDBRequestAdapter{req} } -func (a DynamoDBStorageClient) batchGetItemRequest(ctx context.Context, input *dynamodb.BatchGetItemInput) dynamoDBRequest { +func (a dynamoDBStorageClient) batchGetItemRequest(ctx context.Context, input *dynamodb.BatchGetItemInput) dynamoDBRequest { req, _ := a.DynamoDB.BatchGetItemRequest(input) req.SetContext(ctx) return dynamoDBRequestAdapter{req} } -func (a DynamoDBStorageClient) batchWriteItemRequest(ctx context.Context, input *dynamodb.BatchWriteItemInput) dynamoDBRequest { +func (a dynamoDBStorageClient) batchWriteItemRequest(ctx context.Context, input *dynamodb.BatchWriteItemInput) dynamoDBRequest { req, _ := a.DynamoDB.BatchWriteItemRequest(input) req.SetContext(ctx) return dynamoDBRequestAdapter{req} @@ -451,7 +460,7 @@ type chunksPlusError struct { } // GetChunks implements chunk.ObjectClient. -func (a DynamoDBStorageClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) { +func (a dynamoDBStorageClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) { sp, ctx := ot.StartSpanFromContext(ctx, "GetChunks.DynamoDB") defer sp.Finish() sp.LogFields(otlog.Int("chunks requested", len(chunks))) @@ -503,7 +512,7 @@ var placeholder = []byte{'c'} // Fetch a set of chunks from DynamoDB, handling retries and backoff. // Structure is identical to BatchWrite(), but operating on different datatypes // so cannot share implementation. If you fix a bug here fix it there too. 
-func (a DynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) { +func (a dynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []chunk.Chunk) ([]chunk.Chunk, error) { sp, ctx := ot.StartSpanFromContext(ctx, "getDynamoDBChunks", ot.Tag{Key: "numChunks", Value: len(chunks)}) defer sp.Finish() outstanding := dynamoDBReadRequest{} @@ -532,7 +541,7 @@ func (a DynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []c ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal), }) - err := instrument.TimeRequestHistogram(ctx, "DynamoDB.BatchGetItemPages", dynamoRequestDuration, func(ctx context.Context) error { + err := instrument.CollectedRequest(ctx, "DynamoDB.BatchGetItemPages", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return request.Send() }) response := request.Data().(*dynamodb.BatchGetItemOutput) @@ -622,7 +631,7 @@ func processChunkResponse(response *dynamodb.BatchGetItemOutput, chunksByKey map } // PutChunks implements chunk.ObjectClient. 
-func (a DynamoDBStorageClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { +func (a dynamoDBStorageClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { var ( dynamoDBWrites = dynamoDBWriteBatch{} ) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go index 549ab5c6c3..bbb2d42b2e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go @@ -100,7 +100,7 @@ func (d callManager) backoffAndRetry(ctx context.Context, fn func(context.Contex func (d dynamoTableClient) ListTables(ctx context.Context) ([]string, error) { table := []string{} err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.ListTablesPages", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.ListTablesPages", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return d.DynamoDB.ListTablesPagesWithContext(ctx, &dynamodb.ListTablesInput{}, func(resp *dynamodb.ListTablesOutput, _ bool) bool { for _, s := range resp.TableNames { table = append(table, *s) @@ -126,7 +126,7 @@ func chunkTagsToDynamoDB(ts chunk.Tags) []*dynamodb.Tag { func (d dynamoTableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) error { var tableARN *string if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.CreateTable", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.CreateTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { input := &dynamodb.CreateTableInput{ TableName: aws.String(desc.Name), AttributeDefinitions: []*dynamodb.AttributeDefinition{ 
@@ -177,7 +177,7 @@ func (d dynamoTableClient) CreateTable(ctx context.Context, desc chunk.TableDesc tags := chunkTagsToDynamoDB(desc.Tags) if len(tags) > 0 { return d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.TagResource", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.TagResource", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := d.DynamoDB.TagResourceWithContext(ctx, &dynamodb.TagResourceInput{ ResourceArn: tableARN, Tags: tags, @@ -189,10 +189,28 @@ func (d dynamoTableClient) CreateTable(ctx context.Context, desc chunk.TableDesc return nil } +func (d dynamoTableClient) DeleteTable(ctx context.Context, name string) error { + if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.DeleteTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + input := &dynamodb.DeleteTableInput{TableName: aws.String(name)} + _, err := d.DynamoDB.DeleteTableWithContext(ctx, input) + if err != nil { + return err + } + + return nil + }) + }); err != nil { + return err + } + + return nil +} + func (d dynamoTableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { var tableARN *string err = d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.DescribeTable", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.DescribeTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := d.DynamoDB.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{ TableName: aws.String(name), }) @@ -222,7 +240,7 @@ func (d dynamoTableClient) DescribeTable(ctx context.Context, name string) (desc } err = d.backoffAndRetry(ctx, func(ctx 
context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.ListTagsOfResource", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.ListTagsOfResource", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := d.DynamoDB.ListTagsOfResourceWithContext(ctx, &dynamodb.ListTagsOfResourceInput{ ResourceArn: tableARN, }) @@ -254,7 +272,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch if current.ProvisionedRead != expected.ProvisionedRead || current.ProvisionedWrite != expected.ProvisionedWrite { level.Info(util.Logger).Log("msg", "updating provisioned throughput on table", "table", expected.Name, "old_read", current.ProvisionedRead, "old_write", current.ProvisionedWrite, "new_read", expected.ProvisionedRead, "new_write", expected.ProvisionedWrite) if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.UpdateTable", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.UpdateTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := d.DynamoDB.UpdateTableWithContext(ctx, &dynamodb.UpdateTableInput{ TableName: aws.String(expected.Name), ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ @@ -277,7 +295,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch if !current.Tags.Equals(expected.Tags) { var tableARN *string if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.DescribeTable", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.DescribeTable", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { out, err := d.DynamoDB.DescribeTableWithContext(ctx, 
&dynamodb.DescribeTableInput{ TableName: aws.String(expected.Name), }) @@ -294,7 +312,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch } return d.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.TimeRequestHistogram(ctx, "DynamoDB.TagResource", dynamoRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "DynamoDB.TagResource", dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := d.DynamoDB.TagResourceWithContext(ctx, &dynamodb.TagResourceInput{ ResourceArn: tableARN, Tags: chunkTagsToDynamoDB(expected.Tags), diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go index 8128b620e6..e35027adc9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go @@ -37,7 +37,7 @@ var Fixtures = []testutils.Fixture{ table := &dynamoTableClient{ DynamoDB: dynamoDB, } - index := &DynamoDBStorageClient{ + index := &dynamoDBStorageClient{ DynamoDB: dynamoDB, queryRequestFn: dynamoDB.queryRequest, batchGetItemRequestFn: dynamoDB.batchGetItemRequest, @@ -74,7 +74,7 @@ func dynamoDBFixture(provisionedErr, gangsize, maxParallelism int) testutils.Fix table := &dynamoTableClient{ DynamoDB: dynamoDB, } - storage := &DynamoDBStorageClient{ + storage := &dynamoDBStorageClient{ cfg: DynamoDBConfig{ ChunkGangSize: gangsize, ChunkGetMaxParallelism: maxParallelism, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go index df9c63112b..149cad5223 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go @@ -48,7 +48,7 @@ func newMockDynamoDB(unprocessed int, provisionedErr int) *mockDynamoDBClient { } } -func (a 
DynamoDBStorageClient) setErrorParameters(provisionedErr, errAfter int) { +func (a dynamoDBStorageClient) setErrorParameters(provisionedErr, errAfter int) { if m, ok := a.DynamoDB.(*mockDynamoDBClient); ok { m.provisionedErr = provisionedErr m.errAfter = errAfter diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index 1fe6c782bb..439970c42e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -20,16 +20,16 @@ import ( ) var ( - s3RequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + s3RequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "s3_request_duration_seconds", Help: "Time spent doing S3 requests.", Buckets: []float64{.025, .05, .1, .25, .5, 1, 2}, - }, []string{"operation", "status_code"}) + }, []string{"operation", "status_code"})) ) func init() { - prometheus.MustRegister(s3RequestDuration) + s3RequestDuration.Register() } type s3ObjectClient struct { @@ -66,7 +66,7 @@ func (a s3ObjectClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([] func (a s3ObjectClient) getChunk(ctx context.Context, decodeContext *chunk.DecodeContext, c chunk.Chunk) (chunk.Chunk, error) { var resp *s3.GetObjectOutput - err := instrument.TimeRequestHistogram(ctx, "S3.GetObject", s3RequestDuration, func(ctx context.Context) error { + err := instrument.CollectedRequest(ctx, "S3.GetObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { var err error resp, err = a.S3.GetObjectWithContext(ctx, &s3.GetObjectInput{ Bucket: aws.String(a.bucketName), @@ -124,7 +124,7 @@ func (a s3ObjectClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) err } func (a s3ObjectClient) putS3Chunk(ctx context.Context, key string, buf []byte) 
error { - return instrument.TimeRequestHistogram(ctx, "S3.PutObject", s3RequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "S3.PutObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := a.S3.PutObjectWithContext(ctx, &s3.PutObjectInput{ Body: bytes.NewReader(buf), Bucket: aws.String(a.bucketName), diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go index 3e39d1f437..6b552047c0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go @@ -26,8 +26,8 @@ var ( // BackgroundConfig is config for a Background Cache. type BackgroundConfig struct { - WriteBackGoroutines int `yaml:"writeback_goroutines"` - WriteBackBuffer int `yaml:"writeback_buffer"` + WriteBackGoroutines int + WriteBackBuffer int } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go index f78e29811e..62ef40958d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go @@ -15,16 +15,16 @@ type Cache interface { // Config for building Caches. 
type Config struct { - EnableDiskcache bool `yaml:"enable_disk_cache"` - EnableFifoCache bool `yaml:"enable_fifo_cache"` + EnableDiskcache bool + EnableFifoCache bool - DefaultValidity time.Duration `yaml:"default_validity"` + DefaultValidity time.Duration - background BackgroundConfig `yaml:"background"` - memcache MemcachedConfig `yaml:"memcache"` - memcacheClient MemcachedClientConfig `yaml:"memcache_client"` - diskcache DiskcacheConfig `yaml:"diskcache"` - fifocache FifoCacheConfig `yaml:"fifocache"` + background BackgroundConfig + memcache MemcachedConfig + memcacheClient MemcachedClientConfig + diskcache DiskcacheConfig + fifocache FifoCacheConfig // This is to name the cache metrics properly. prefix string @@ -81,7 +81,7 @@ func New(cfg Config) (Cache, error) { } client := NewMemcachedClient(cfg.memcacheClient) - cache := NewMemcached(cfg.memcache, client) + cache := NewMemcached(cfg.memcache, client, cfg.prefix) cacheName := cfg.prefix + "memcache" caches = append(caches, NewBackground(cacheName, cfg.background, Instrument(cacheName, cache))) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/diskcache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/diskcache.go index bb32132c6c..6463827f73 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/diskcache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/diskcache.go @@ -54,8 +54,8 @@ const ( // DiskcacheConfig for the Disk cache. 
type DiskcacheConfig struct { - Path string `yaml:"path"` - Size int `yaml:"size"` + Path string + Size int } // RegisterFlags adds the flags required to config this to the given FlagSet diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go index 3de533b93c..59c7d35b71 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go @@ -56,8 +56,8 @@ var ( // FifoCacheConfig holds config for the FifoCache. type FifoCacheConfig struct { - Size int `yaml:"size"` - Validity time.Duration `yaml:"validity"` + Size int + Validity time.Duration } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go index 1d194d74ad..28b0c6fa93 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go @@ -10,13 +10,13 @@ import ( ) var ( - requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + requestDuration = instr.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "cache_request_duration_seconds", Help: "Total time spent in seconds doing cache requests.", // Cache requests are very quick: smallest bucket is 16us, biggest is 1s. 
Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8), - }, []string{"method", "status_code"}) + }, []string{"method", "status_code"})) fetchedKeys = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "cortex", @@ -32,7 +32,7 @@ var ( ) func init() { - prometheus.MustRegister(requestDuration) + requestDuration.Register() prometheus.MustRegister(fetchedKeys) prometheus.MustRegister(hits) } @@ -55,7 +55,7 @@ type instrumentedCache struct { func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]byte) { method := i.name + ".store" - instr.TimeRequestHistogram(ctx, method, requestDuration, func(ctx context.Context) error { + instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { sp := ot.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys", len(keys))) i.Cache.Store(ctx, keys, bufs) @@ -71,7 +71,7 @@ func (i *instrumentedCache) Fetch(ctx context.Context, keys []string) ([]string, method = i.name + ".fetch" ) - instr.TimeRequestHistogram(ctx, method, requestDuration, func(ctx context.Context) error { + instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { sp := ot.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys requested", len(keys))) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go index a8476c256f..37836c77df 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go @@ -14,29 +14,36 @@ import ( opentracing "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" instr "github.com/weaveworks/common/instrument" ) var ( - memcacheRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + 
memcacheRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "memcache_request_duration_seconds", Help: "Total time spent in seconds doing memcache requests.", // Memecache requests are very quick: smallest bucket is 16us, biggest is 1s Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8), - }, []string{"method", "status_code"}) + }, []string{"method", "status_code", "name"}) ) -func init() { - prometheus.MustRegister(memcacheRequestDuration) +type observableVecCollector struct { + v prometheus.ObserverVec +} + +func (observableVecCollector) Register() {} +func (observableVecCollector) Before(method string, start time.Time) {} +func (o observableVecCollector) After(method, statusCode string, start time.Time) { + o.v.WithLabelValues(method, statusCode).Observe(time.Now().Sub(start).Seconds()) } // MemcachedConfig is config to make a Memcached type MemcachedConfig struct { - Expiration time.Duration `yaml:"expiration"` + Expiration time.Duration - BatchSize int `yaml:"batch_size"` - Parallelism int `yaml:"parallelism"` + BatchSize int + Parallelism int } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet @@ -50,16 +57,25 @@ func (cfg *MemcachedConfig) RegisterFlagsWithPrefix(prefix, description string, type Memcached struct { cfg MemcachedConfig memcache MemcachedClient + name string + + requestDuration observableVecCollector wg sync.WaitGroup inputCh chan *work } // NewMemcached makes a new Memcache -func NewMemcached(cfg MemcachedConfig, client MemcachedClient) *Memcached { +func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string) *Memcached { c := &Memcached{ cfg: cfg, memcache: client, + name: name, + requestDuration: observableVecCollector{ + v: memcacheRequestDuration.MustCurryWith(prometheus.Labels{ + "name": name, + }), + }, } if cfg.BatchSize == 0 || cfg.Parallelism == 0 { @@ -116,7 +132,7 @@ func memcacheStatusCode(err error) string { // Fetch gets keys from 
the cache. The keys that are found must be in the order of the keys requested. func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { - instr.TimeRequestHistogramStatus(ctx, "Memcache.Get", memcacheRequestDuration, memcacheStatusCode, func(ctx context.Context) error { + instr.CollectedRequest(ctx, "Memcache.Get", c.requestDuration, memcacheStatusCode, func(ctx context.Context) error { if c.cfg.BatchSize == 0 { found, bufs, missed = c.fetch(ctx, keys) return nil @@ -130,7 +146,7 @@ func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, b func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { var items map[string]*memcache.Item - instr.TimeRequestHistogramStatus(ctx, "Memcache.GetMulti", memcacheRequestDuration, memcacheStatusCode, func(_ context.Context) error { + instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(_ context.Context) error { sp := opentracing.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys requested", len(keys))) @@ -202,7 +218,7 @@ func (c *Memcached) fetchKeysBatched(ctx context.Context, keys []string) (found // Store stores the key in the cache. 
func (c *Memcached) Store(ctx context.Context, keys []string, bufs [][]byte) { for i := range keys { - err := instr.TimeRequestHistogramStatus(ctx, "Memcache.Put", memcacheRequestDuration, memcacheStatusCode, func(_ context.Context) error { + err := instr.CollectedRequest(ctx, "Memcache.Put", c.requestDuration, memcacheStatusCode, func(_ context.Context) error { item := memcache.Item{ Key: keys[i], Value: bufs[i], @@ -211,7 +227,7 @@ func (c *Memcached) Store(ctx context.Context, keys []string, bufs [][]byte) { return c.memcache.Set(&item) }) if err != nil { - level.Error(util.Logger).Log("msg", "failed to put to memcached", "err", err) + level.Error(util.Logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err) } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go index b932fe1bb0..d43fa93713 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go @@ -33,11 +33,11 @@ type memcachedClient struct { // MemcachedClientConfig defines how a MemcachedClient should be constructed. 
type MemcachedClientConfig struct { - Host string `yaml:"host"` - Service string `yaml:"service"` - Timeout time.Duration `yaml:"timeout"` - MaxIdleConns int `yaml:"max_idle_conns"` - UpdateInterval time.Duration `yaml:"update_interval"` + Host string + Service string + Timeout time.Duration + MaxIdleConns int + UpdateInterval time.Duration } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go index 54b5ebae01..20ab38ca9b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/fixtures.go @@ -17,7 +17,7 @@ import ( type fixture struct { name string indexClient chunk.IndexClient - chunkClient chunk.ObjectClient + objectClient chunk.ObjectClient tableClient chunk.TableClient schemaConfig chunk.SchemaConfig } @@ -27,7 +27,7 @@ func (f fixture) Name() string { } func (f fixture) Clients() (chunk.IndexClient, chunk.ObjectClient, chunk.TableClient, chunk.SchemaConfig, error) { - return f.indexClient, f.chunkClient, f.tableClient, f.schemaConfig, nil + return f.indexClient, f.objectClient, f.tableClient, f.schemaConfig, nil } func (f fixture) Teardown() error { @@ -65,7 +65,7 @@ func Fixtures() ([]testutils.Fixture, error) { fixture{ name: "Cassandra", indexClient: storageClient, - chunkClient: storageClient, + objectClient: storageClient, tableClient: tableClient, schemaConfig: schemaConfig, }, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go index 1e683fc40c..e4335632ae 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/table_client.go @@ -50,6 +50,12 @@ func (c *tableClient) 
CreateTable(ctx context.Context, desc chunk.TableDesc) err return errors.WithStack(err) } +func (c *tableClient) DeleteTable(ctx context.Context, name string) error { + err := c.session.Query(fmt.Sprintf(` + DROP TABLE IF EXISTS %s;`, name)).WithContext(ctx).Exec() + return errors.WithStack(err) +} + func (c *tableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { return chunk.TableDesc{ Name: name, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 030f207362..d33a509a17 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -54,13 +54,15 @@ func init() { // StoreConfig specifies config for a ChunkStore type StoreConfig struct { - ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"` - WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config"` + ChunkCacheConfig cache.Config + WriteDedupeCacheConfig cache.Config MinChunkAge time.Duration CardinalityCacheSize int CardinalityCacheValidity time.Duration CardinalityLimit int + + CacheLookupsOlderThan time.Duration } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -73,6 +75,7 @@ func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.CardinalityCacheSize, "store.cardinality-cache-size", 0, "Size of in-memory cardinality cache, 0 to disable.") f.DurationVar(&cfg.CardinalityCacheValidity, "store.cardinality-cache-validity", 1*time.Hour, "Period for which entries in the cardinality cache are valid.") f.IntVar(&cfg.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries.") + f.DurationVar(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", 0, "Cache index entries older than this period. 
0 to disable.") } // store implements Store diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go index 92897acf87..f9062d3adb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go @@ -97,7 +97,7 @@ func NewForEncoding(encoding Encoding) (Chunk, error) { // MustRegisterEncoding add a new chunk encoding. There is no locking, so this // must be called in init(). -func MustRegisterEncoding(enc Encoding, name string, new func() Chunk) { +func MustRegisterEncoding(enc Encoding, name string, f func() Chunk) { _, ok := encodings[enc] if ok { panic("double register encoding") @@ -105,6 +105,6 @@ func MustRegisterEncoding(enc Encoding, name string, new func() Chunk) { encodings[enc] = encoding{ Name: name, - New: new, + New: f, } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go index 570ce99d1a..15975b7594 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go @@ -10,10 +10,12 @@ import ( "cloud.google.com/go/bigtable" ot "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" + "google.golang.org/api/option" "github.com/cortexproject/cortex/pkg/chunk" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/pkg/errors" ) @@ -30,12 +32,21 @@ const ( type Config struct { Project string `yaml:"project"` Instance string `yaml:"instance"` + + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` + + ColumnKey bool } // RegisterFlags adds the flags required to config this to the given 
FlagSet func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.Project, "bigtable.project", "", "Bigtable project ID.") f.StringVar(&cfg.Instance, "bigtable.instance", "", "Bigtable instance ID.") + + cfg.GRPCClientConfig.RegisterFlags("bigtable", f) + + // Deprecated. + f.Int("bigtable.max-recv-msg-size", 100<<20, "DEPRECATED. Bigtable grpc max receive message size.") } // storageClientColumnKey implements chunk.storageClient for GCP. @@ -53,7 +64,10 @@ type storageClientV1 struct { // NewStorageClientV1 returns a new v1 StorageClient. func NewStorageClientV1(ctx context.Context, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.IndexClient, error) { - client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, instrumentation()...) + opts := instrumentation() + opts = append(opts, option.WithGRPCDialOption(cfg.GRPCClientConfig.DialOption())) + + client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, opts...) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_chunk_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go similarity index 83% rename from vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_chunk_client.go rename to vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go index 67eab81c7d..7235ccc7a7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_chunk_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go @@ -13,35 +13,35 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) -type bigtableChunkClient struct { +type bigtableObjectClient struct { cfg Config schemaCfg chunk.SchemaConfig client *bigtable.Client } -// NewBigtableChunkClient makes a new chunk.ChunkClient that stores chunks in +// NewBigtableObjectClient makes a new chunk.ObjectClient that stores chunks in // Bigtable. 
-func NewBigtableChunkClient(ctx context.Context, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.ObjectClient, error) { +func NewBigtableObjectClient(ctx context.Context, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.ObjectClient, error) { client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, instrumentation()...) if err != nil { return nil, err } - return newBigtableChunkClient(cfg, schemaCfg, client), nil + return newBigtableObjectClient(cfg, schemaCfg, client), nil } -func newBigtableChunkClient(cfg Config, schemaCfg chunk.SchemaConfig, client *bigtable.Client) chunk.ObjectClient { - return &bigtableChunkClient{ +func newBigtableObjectClient(cfg Config, schemaCfg chunk.SchemaConfig, client *bigtable.Client) chunk.ObjectClient { + return &bigtableObjectClient{ cfg: cfg, schemaCfg: schemaCfg, client: client, } } -func (s *bigtableChunkClient) Stop() { +func (s *bigtableObjectClient) Stop() { s.client.Close() } -func (s *bigtableChunkClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { +func (s *bigtableObjectClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { keys := map[string][]string{} muts := map[string][]*bigtable.Mutation{} @@ -75,7 +75,7 @@ func (s *bigtableChunkClient) PutChunks(ctx context.Context, chunks []chunk.Chun return nil } -func (s *bigtableChunkClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) { +func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) { sp, ctx := ot.StartSpanFromContext(ctx, "GetChunks") defer sp.Finish() sp.LogFields(otlog.Int("chunks requested", len(input))) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go index 1dc336205f..0ebc1b0bcb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go @@ -25,7 +25,7 @@ 
type fixture struct { name string - gcsChunkClient bool + gcsObjectClient bool columnKeyClient bool } @@ -81,12 +81,12 @@ func (f *fixture) Clients() ( iClient = newStorageClientV1(Config{}, schemaConfig, client) } - if f.gcsChunkClient { - cClient = newGCSChunkClient(GCSConfig{ + if f.gcsObjectClient { + cClient = newGCSObjectClient(GCSConfig{ BucketName: "chunks", }, schemaConfig, f.gcssrv.Client()) } else { - cClient = newBigtableChunkClient(Config{}, schemaConfig, client) + cClient = newBigtableObjectClient(Config{}, schemaConfig, client) } return @@ -108,12 +108,12 @@ var Fixtures = []testutils.Fixture{ columnKeyClient: true, }, &fixture{ - name: "bigtable-gcs", - gcsChunkClient: true, + name: "bigtable-gcs", + gcsObjectClient: true, }, &fixture{ name: "bigtable-columnkey-gcs", - gcsChunkClient: true, + gcsObjectClient: true, columnKeyClient: true, }, } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_chunk_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go similarity index 67% rename from vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_chunk_client.go rename to vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go index 94e57d82c4..ae43806556 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_chunk_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go @@ -12,7 +12,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/util" ) -type gcsChunkClient struct { +type gcsObjectClient struct { cfg GCSConfig schemaCfg chunk.SchemaConfig client *storage.Client @@ -29,18 +29,18 @@ func (cfg *GCSConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.BucketName, "gcs.bucketname", "", "Name of GCS bucket to put chunks in.") } -// NewGCSChunkClient makes a new chunk.ChunkClient that writes chunks to GCS. 
-func NewGCSChunkClient(ctx context.Context, cfg GCSConfig, schemaCfg chunk.SchemaConfig) (chunk.ObjectClient, error) { +// NewGCSObjectClient makes a new chunk.ObjectClient that writes chunks to GCS. +func NewGCSObjectClient(ctx context.Context, cfg GCSConfig, schemaCfg chunk.SchemaConfig) (chunk.ObjectClient, error) { client, err := storage.NewClient(ctx, instrumentation()...) if err != nil { return nil, err } - return newGCSChunkClient(cfg, schemaCfg, client), nil + return newGCSObjectClient(cfg, schemaCfg, client), nil } -func newGCSChunkClient(cfg GCSConfig, schemaCfg chunk.SchemaConfig, client *storage.Client) chunk.ObjectClient { +func newGCSObjectClient(cfg GCSConfig, schemaCfg chunk.SchemaConfig, client *storage.Client) chunk.ObjectClient { bucket := client.Bucket(cfg.BucketName) - return &gcsChunkClient{ + return &gcsObjectClient{ cfg: cfg, schemaCfg: schemaCfg, client: client, @@ -48,11 +48,11 @@ func newGCSChunkClient(cfg GCSConfig, schemaCfg chunk.SchemaConfig, client *stor } } -func (s *gcsChunkClient) Stop() { +func (s *gcsObjectClient) Stop() { s.client.Close() } -func (s *gcsChunkClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { +func (s *gcsObjectClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) error { for _, chunk := range chunks { buf, err := chunk.Encode() if err != nil { @@ -69,11 +69,11 @@ func (s *gcsChunkClient) PutChunks(ctx context.Context, chunks []chunk.Chunk) er return nil } -func (s *gcsChunkClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) { +func (s *gcsObjectClient) GetChunks(ctx context.Context, input []chunk.Chunk) ([]chunk.Chunk, error) { return util.GetParallelChunks(ctx, input, s.getChunk) } -func (s *gcsChunkClient) getChunk(ctx context.Context, decodeContext *chunk.DecodeContext, input chunk.Chunk) (chunk.Chunk, error) { +func (s *gcsObjectClient) getChunk(ctx context.Context, decodeContext *chunk.DecodeContext, input chunk.Chunk) (chunk.Chunk, error) { reader, 
err := s.bucket.Object(input.ExternalKey()).NewReader(ctx) if err != nil { return chunk.Chunk{}, errors.WithStack(err) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go index 7e48b6a136..2a62c1b042 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go @@ -74,6 +74,14 @@ func alreadyExistsError(err error) bool { return ok && strings.Contains(serr.Message(), "already exists") } +func (c *tableClient) DeleteTable(ctx context.Context, name string) error { + if err := c.client.DeleteTable(ctx, name); err != nil { + return err + } + + return nil +} + func (c *tableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { return chunk.TableDesc{ Name: name, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go index 000c6c072f..a397b74db8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go @@ -74,6 +74,20 @@ func (m *MockStorage) CreateTable(_ context.Context, desc TableDesc) error { return nil } +// DeleteTable implements StorageClient. +func (m *MockStorage) DeleteTable(_ context.Context, name string) error { + m.mtx.Lock() + defer m.mtx.Unlock() + + if _, ok := m.tables[name]; !ok { + return fmt.Errorf("table does not exist") + } + + delete(m.tables, name) + + return nil +} + // DescribeTable implements StorageClient. 
func (m *MockStorage) DescribeTable(_ context.Context, name string) (desc TableDesc, isActive bool, err error) { m.mtx.RLock() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go index a3bd75f71a..69ad4e6e12 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go @@ -4,6 +4,8 @@ import ( "bytes" "context" "flag" + "fmt" + "os" "path" "sync" @@ -34,7 +36,7 @@ type boltIndexClient struct { cfg BoltDBConfig dbsMtx sync.RWMutex - dbs map[string]*bolt.DB + dbs map[string]*bbolt.DB } // NewBoltDBIndexClient creates a new IndexClient that used BoltDB. @@ -45,7 +47,7 @@ func NewBoltDBIndexClient(cfg BoltDBConfig) (chunk.IndexClient, error) { return &boltIndexClient{ cfg: cfg, - dbs: map[string]*bolt.DB{}, + dbs: map[string]*bbolt.DB{}, }, nil } @@ -63,7 +65,7 @@ func (b *boltIndexClient) NewWriteBatch() chunk.WriteBatch { } } -func (b *boltIndexClient) getDB(name string) (*bolt.DB, error) { +func (b *boltIndexClient) getDB(name string) (*bbolt.DB, error) { b.dbsMtx.RLock() db, ok := b.dbs[name] b.dbsMtx.RUnlock() @@ -79,7 +81,7 @@ func (b *boltIndexClient) getDB(name string) (*bolt.DB, error) { } // Open the database. 
- db, err := bolt.Open(path.Join(b.cfg.Directory, name), 0666, nil) + db, err := bbolt.Open(path.Join(b.cfg.Directory, name), 0666, nil) if err != nil { return nil, err } @@ -95,7 +97,7 @@ func (b *boltIndexClient) BatchWrite(ctx context.Context, batch chunk.WriteBatch return err } - if err := db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bbolt.Tx) error { b, err := tx.CreateBucketIfNotExists(bucketName) if err != nil { return err @@ -136,7 +138,7 @@ func (b *boltIndexClient) query(ctx context.Context, query chunk.IndexQuery, cal rowPrefix := []byte(query.HashValue + separator) - return db.View(func(tx *bolt.Tx) error { + return db.View(func(tx *bbolt.Tx) error { b := tx.Bucket(bucketName) if b == nil { return nil @@ -214,3 +216,13 @@ func (b *boltReadBatchIterator) RangeValue() []byte { func (b *boltReadBatchIterator) Value() []byte { return b.value } + +func ensureDirectory(dir string) error { + info, err := os.Stat(dir) + if os.IsNotExist(err) { + return os.MkdirAll(dir, 0777) + } else if err == nil && !info.IsDir() { + return fmt.Errorf("not a directory: %s", dir) + } + return err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go index 5abbba8a2d..72c568772f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_table_client.go @@ -21,6 +21,10 @@ func (c *tableClient) CreateTable(ctx context.Context, desc chunk.TableDesc) err return nil } +func (c *tableClient) DeleteTable(ctx context.Context, name string) error { + return nil +} + func (c *tableClient) DescribeTable(ctx context.Context, name string) (desc chunk.TableDesc, isActive bool, err error) { return chunk.TableDesc{ Name: name, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go index 5bf81427ed..baf338d70c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fixtures.go @@ -21,7 +21,7 @@ func (f *fixture) Name() string { } func (f *fixture) Clients() ( - indexClient chunk.IndexClient, chunkClient chunk.ObjectClient, tableClient chunk.TableClient, + indexClient chunk.IndexClient, objectClient chunk.ObjectClient, tableClient chunk.TableClient, schemaConfig chunk.SchemaConfig, err error, ) { f.dirname, err = ioutil.TempDir(os.TempDir(), "boltdb") @@ -36,7 +36,7 @@ func (f *fixture) Clients() ( return } - chunkClient, err = NewFSObjectClient(FSConfig{ + objectClient, err = NewFSObjectClient(FSConfig{ Directory: f.dirname, }) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go index 4fad8700b6..e21b23022a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go @@ -4,9 +4,7 @@ import ( "context" "encoding/base64" "flag" - "fmt" "io/ioutil" - "os" "path" "github.com/cortexproject/cortex/pkg/chunk" @@ -73,13 +71,3 @@ func (f *fsObjectClient) getChunk(_ context.Context, decodeContext *chunk.Decode return c, nil } - -func ensureDirectory(dir string) error { - info, err := os.Stat(dir) - if os.IsNotExist(err) { - return os.MkdirAll(dir, 0777) - } else if err == nil && !info.IsDir() { - return fmt.Errorf("not a directory: %s", dir) - } - return err -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go index c326137b23..37033156fd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go @@ -58,6 +58,9 
@@ type IndexQuery struct { // Filters for querying ValueEqual []byte + + // If the result of this lookup is immutable or not (for caching). + Immutable bool } // IndexEntry describes an entry in the chunk index diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go new file mode 100644 index 0000000000..2444b0c458 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go @@ -0,0 +1,113 @@ +package chunk + +import ( + "time" + + "github.com/prometheus/common/model" + "github.com/weaveworks/common/mtime" +) + +type schemaCaching struct { + Schema + + cacheOlderThan time.Duration +} + +func (s *schemaCaching) GetReadQueriesForMetric(from, through model.Time, userID string, metricName model.LabelValue) ([]IndexQuery, error) { + cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) + + cacheableQueries, err := s.Schema.GetReadQueriesForMetric(cFrom, cThrough, userID, metricName) + if err != nil { + return nil, err + } + + activeQueries, err := s.Schema.GetReadQueriesForMetric(from, through, userID, metricName) + if err != nil { + return nil, err + } + + return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil +} + +func (s *schemaCaching) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName) ([]IndexQuery, error) { + cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) + + cacheableQueries, err := s.Schema.GetReadQueriesForMetricLabel(cFrom, cThrough, userID, metricName, labelName) + if err != nil { + return nil, err + } + + activeQueries, err := s.Schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) + if err != nil { + return nil, err + } + + return 
mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil +} + +func (s *schemaCaching) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName model.LabelValue, labelName model.LabelName, labelValue model.LabelValue) ([]IndexQuery, error) { + cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) + + cacheableQueries, err := s.Schema.GetReadQueriesForMetricLabelValue(cFrom, cThrough, userID, metricName, labelName, labelValue) + if err != nil { + return nil, err + } + + activeQueries, err := s.Schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, labelName, labelValue) + if err != nil { + return nil, err + } + + return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil +} + +// If the query resulted in series IDs, use this method to find chunks. +func (s *schemaCaching) GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { + cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) + + cacheableQueries, err := s.Schema.GetChunksForSeries(cFrom, cThrough, userID, seriesID) + if err != nil { + return nil, err + } + + activeQueries, err := s.Schema.GetChunksForSeries(from, through, userID, seriesID) + if err != nil { + return nil, err + } + + return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil +} + +func splitTimesByCacheability(from, through model.Time, cacheBefore model.Time) (model.Time, model.Time, model.Time, model.Time) { + if from.After(cacheBefore) { + return 0, 0, from, through + } + + if through.Before(cacheBefore) { + return from, through, 0, 0 + } + + return from, cacheBefore, cacheBefore, through +} + +func mergeCacheableAndActiveQueries(cacheableQueries []IndexQuery, activeQueries []IndexQuery) []IndexQuery { + finalQueries := make([]IndexQuery, 
0, len(cacheableQueries)+len(activeQueries)) + +Outer: + for _, cq := range cacheableQueries { + for _, aq := range activeQueries { + // When deduping, the bucket values only influence TableName and HashValue + // and just checking those is enough. + if cq.TableName == aq.TableName && cq.HashValue == aq.HashValue { + continue Outer + } + } + + cq.Immutable = true + finalQueries = append(finalQueries, cq) + } + + finalQueries = append(finalQueries, activeQueries...) + + return finalQueries +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 453ae83c70..f787ab2b97 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -335,13 +335,14 @@ func (cfg *AutoScalingConfig) RegisterFlags(argPrefix string, f *flag.FlagSet) { f.Float64Var(&cfg.TargetValue, argPrefix+".target-value", 80, "DynamoDB target ratio of consumed capacity to provisioned capacity.") } -func (cfg *PeriodicTableConfig) periodicTables(from, through model.Time, pCfg ProvisionConfig, beginGrace, endGrace time.Duration) []TableDesc { +func (cfg *PeriodicTableConfig) periodicTables(from, through model.Time, pCfg ProvisionConfig, beginGrace, endGrace time.Duration, retention time.Duration) []TableDesc { var ( periodSecs = int64(cfg.Period / time.Second) beginGraceSecs = int64(beginGrace / time.Second) endGraceSecs = int64(endGrace / time.Second) firstTable = from.Unix() / periodSecs lastTable = through.Unix() / periodSecs + tablesToKeep = int64(int64(retention/time.Second) / periodSecs) now = mtime.Now().Unix() result = []TableDesc{} ) @@ -349,6 +350,10 @@ func (cfg *PeriodicTableConfig) periodicTables(from, through model.Time, pCfg Pr if through.Unix()%secondsInDay == 0 { lastTable-- } + // Don't make tables further back than the configured retention + if retention > 0 && lastTable > tablesToKeep && 
lastTable-firstTable >= tablesToKeep { + firstTable = lastTable - tablesToKeep + } for i := firstTable; i <= lastTable; i++ { table := TableDesc{ // Name construction needs to be consistent with chunk_store.bigBuckets diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index e7e809ea3d..9ae1b2dcb7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -73,6 +73,13 @@ func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Ob return nil, err } + if cfg.CacheLookupsOlderThan != 0 { + schema = &schemaCaching{ + Schema: schema, + cacheOlderThan: cfg.CacheLookupsOlderThan, + } + } + return &seriesStore{ store: store{ cfg: cfg, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go index 99736fba46..cc36d5cb27 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go @@ -16,12 +16,12 @@ type fixture struct { func (f fixture) Name() string { return "caching-store" } func (f fixture) Clients() (chunk.IndexClient, chunk.ObjectClient, chunk.TableClient, chunk.SchemaConfig, error) { - indexClient, chunkClient, tableClient, schemaConfig, err := f.fixture.Clients() + indexClient, objectClient, tableClient, schemaConfig, err := f.fixture.Clients() indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{ Size: 500, Validity: 5 * time.Minute, }), 5*time.Minute) - return indexClient, chunkClient, tableClient, schemaConfig, err + return indexClient, objectClient, tableClient, schemaConfig, err } func (f fixture) Teardown() error { return f.fixture.Teardown() } diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go index 605e601dc7..10ab426823 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go @@ -100,10 +100,18 @@ func (s *cachingIndexClient) QueryPages(ctx context.Context, queries []chunk.Ind TableName: queries[0].TableName, HashValue: queries[0].HashValue, }) - results[key] = ReadBatch{ + + rb := ReadBatch{ Key: key, Expiry: expiryTime.UnixNano(), } + + // If the query is cacheable forever, nil the expiry. + if queries[0].Immutable { + rb.Expiry = 0 + } + + results[key] = rb } err := s.IndexClient.QueryPages(ctx, cacheableMissed, func(cacheableQuery chunk.IndexQuery, r chunk.ReadBatch) bool { @@ -232,7 +240,7 @@ func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (bat // Make sure the hash(key) is not a collision in the cache by looking at the // key in the value. - if key != readBatch.Key || time.Now().After(time.Unix(0, readBatch.Expiry)) { + if key != readBatch.Key || (readBatch.Expiry != 0 && time.Now().After(time.Unix(0, readBatch.Expiry))) { cacheCorruptErrs.Inc() continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 5278137324..114b369064 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -46,10 +46,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Deprecated flags!! 
f.IntVar(&cfg.IndexCacheSize, "store.index-cache-size", 0, "Deprecated: Use -store.index-cache-read.*; Size of in-memory index cache, 0 to disable.") - f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Deprecated: Use -store.index-cache-read.*; Period for which entries in the index cache are valid. Should be no higher than -ingester.max-chunk-idle.") cfg.memcacheClient.RegisterFlagsWithPrefix("index.", "Deprecated: Use -store.index-cache-read.*;", f) cfg.indexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "Cache config for index entry reading. ", f) + f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.") } // NewStore makes the storage clients based on the configuration. @@ -59,14 +59,14 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf // Building up from deprecated flags. 
var caches []cache.Cache if cfg.IndexCacheSize > 0 { - fifocache := cache.Instrument("fifo-index", cache.NewFifoCache("index", cache.FifoCacheConfig{Size: cfg.IndexCacheSize, Validity: cfg.IndexCacheValidity})) + fifocache := cache.Instrument("fifo-index", cache.NewFifoCache("index", cache.FifoCacheConfig{Size: cfg.IndexCacheSize})) caches = append(caches, fifocache) } if cfg.memcacheClient.Host != "" { client := cache.NewMemcachedClient(cfg.memcacheClient) memcache := cache.Instrument("memcache-index", cache.NewMemcached(cache.MemcachedConfig{ Expiration: cfg.IndexCacheValidity, - }, client)) + }, client, "memcache-index")) caches = append(caches, cache.NewBackground("memcache-index", cache.BackgroundConfig{ WriteBackGoroutines: 10, WriteBackBuffer: 100, @@ -76,7 +76,6 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf var tieredCache cache.Cache if len(caches) > 0 { tieredCache = cache.NewTiered(caches) - cfg.indexQueriesCacheConfig.DefaultValidity = cfg.IndexCacheValidity } else { tieredCache, err = cache.New(cfg.indexQueriesCacheConfig) if err != nil { @@ -133,7 +132,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun if len(path) > 0 { level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } - return aws.NewDynamoDBStorageClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) + return aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) case "gcp": return gcp.NewStorageClientV1(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcp-columnkey", "bigtable": @@ -163,13 +162,13 @@ func NewObjectClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chu if len(path) > 0 { level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } - return aws.NewDynamoDBStorageClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) + return aws.NewDynamoDBObjectClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) case "gcp": - 
return gcp.NewBigtableChunkClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) + return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcp-columnkey", "bigtable": - return gcp.NewBigtableChunkClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) + return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcs": - return gcp.NewGCSChunkClient(context.Background(), cfg.GCSConfig, schemaCfg) + return gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig, schemaCfg) case "cassandra": return cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg) case "filesystem": diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_client.go index 24d175229b..c7a447992b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_client.go @@ -6,6 +6,7 @@ import "context" type TableClient interface { ListTables(ctx context.Context) ([]string, error) CreateTable(ctx context.Context, desc TableDesc) error + DeleteTable(ctx context.Context, name string) error DescribeTable(ctx context.Context, name string) (desc TableDesc, isActive bool, err error) UpdateTable(ctx context.Context, current, expected TableDesc) error } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go index c837584da9..07b3c0ed82 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go @@ -24,12 +24,12 @@ const ( ) var ( - syncTableDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + syncTableDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "dynamo_sync_tables_seconds", Help: "Time 
spent doing SyncTables.", Buckets: prometheus.DefBuckets, - }, []string{"operation", "status_code"}) + }, []string{"operation", "status_code"})) tableCapacity = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cortex", Name: "dynamo_table_capacity_units", @@ -39,7 +39,7 @@ var ( func init() { prometheus.MustRegister(tableCapacity) - prometheus.MustRegister(syncTableDuration) + syncTableDuration.Register() } // TableManagerConfig holds config for a TableManager @@ -47,6 +47,12 @@ type TableManagerConfig struct { // Master 'off-switch' for table capacity updates, e.g. when troubleshooting ThroughputUpdatesDisabled bool + // Master 'on-switch' for table retention deletions + RetentionDeletesEnabled bool + + // How far back tables will be kept before they are deleted + RetentionPeriod time.Duration + // Period with which the table manager will poll for tables. DynamoDBPollInterval time.Duration @@ -72,6 +78,8 @@ type ProvisionConfig struct { // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *TableManagerConfig) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ThroughputUpdatesDisabled, "table-manager.throughput-updates-disabled", false, "If true, disable all changes to DB capacity") + f.BoolVar(&cfg.RetentionDeletesEnabled, "table-manager.retention-deletes-enabled", false, "If true, enables retention deletes of DB tables") + f.DurationVar(&cfg.RetentionPeriod, "table-manager.retention-period", 0, "Tables older than this retention period are deleted. 
Note: This setting is destructive to data!(default: 0, which disables deletion)") f.DurationVar(&cfg.DynamoDBPollInterval, "dynamodb.poll-interval", 2*time.Minute, "How frequently to poll DynamoDB to learn our capacity.") f.DurationVar(&cfg.CreationGracePeriod, "dynamodb.periodic-table.grace-period", 10*time.Minute, "DynamoDB periodic tables grace period (duration which table will be created/deleted before/after it's needed).") @@ -172,7 +180,7 @@ func (m *TableManager) loop() { ticker := time.NewTicker(m.cfg.DynamoDBPollInterval) defer ticker.Stop() - if err := instrument.TimeRequestHistogram(context.Background(), "TableManager.SyncTables", syncTableDuration, func(ctx context.Context) error { + if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", syncTableDuration, instrument.ErrorCode, func(ctx context.Context) error { return m.SyncTables(ctx) }); err != nil { level.Error(util.Logger).Log("msg", "error syncing tables", "err", err) @@ -181,7 +189,7 @@ func (m *TableManager) loop() { for { select { case <-ticker.C: - if err := instrument.TimeRequestHistogram(context.Background(), "TableManager.SyncTables", syncTableDuration, func(ctx context.Context) error { + if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", syncTableDuration, instrument.ErrorCode, func(ctx context.Context) error { return m.SyncTables(ctx) }); err != nil { level.Error(util.Logger).Log("msg", "error syncing tables", "err", err) @@ -198,11 +206,15 @@ func (m *TableManager) SyncTables(ctx context.Context) error { expected := m.calculateExpectedTables() level.Info(util.Logger).Log("msg", "synching tables", "num_expected_tables", len(expected), "expected_tables", len(expected)) - toCreate, toCheckThroughput, err := m.partitionTables(ctx, expected) + toCreate, toCheckThroughput, toDelete, err := m.partitionTables(ctx, expected) if err != nil { return err } + if err := m.deleteTables(ctx, toDelete); err != nil { + return err + } + if 
err := m.createTables(ctx, toCreate); err != nil { return err } @@ -259,11 +271,11 @@ func (m *TableManager) calculateExpectedTables() []TableDesc { } endModelTime := model.TimeFromUnix(endTime.Unix()) result = append(result, config.IndexTables.periodicTables( - config.From, endModelTime, m.cfg.IndexTables, m.cfg.CreationGracePeriod, m.maxChunkAge, + config.From, endModelTime, m.cfg.IndexTables, m.cfg.CreationGracePeriod, m.maxChunkAge, m.cfg.RetentionPeriod, )...) if config.ChunkTables.Prefix != "" { result = append(result, config.ChunkTables.periodicTables( - config.From, endModelTime, m.cfg.ChunkTables, m.cfg.CreationGracePeriod, m.maxChunkAge, + config.From, endModelTime, m.cfg.ChunkTables, m.cfg.CreationGracePeriod, m.maxChunkAge, m.cfg.RetentionPeriod, )...) } } @@ -274,14 +286,20 @@ func (m *TableManager) calculateExpectedTables() []TableDesc { } // partitionTables works out tables that need to be created vs tables that need to be updated -func (m *TableManager) partitionTables(ctx context.Context, descriptions []TableDesc) ([]TableDesc, []TableDesc, error) { +func (m *TableManager) partitionTables(ctx context.Context, descriptions []TableDesc) ([]TableDesc, []TableDesc, []TableDesc, error) { existingTables, err := m.client.ListTables(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, err } sort.Strings(existingTables) - toCreate, toCheck := []TableDesc{}, []TableDesc{} + tablePrefixes := map[string]struct{}{} + for _, cfg := range m.schemaCfg.Configs { + tablePrefixes[cfg.IndexTables.Prefix] = struct{}{} + tablePrefixes[cfg.ChunkTables.Prefix] = struct{}{} + } + + toCreate, toCheck, toDelete := []TableDesc{}, []TableDesc{}, []TableDesc{} i, j := 0, 0 for i < len(descriptions) && j < len(existingTables) { if descriptions[i].Name < existingTables[j] { @@ -289,7 +307,15 @@ func (m *TableManager) partitionTables(ctx context.Context, descriptions []Table toCreate = append(toCreate, descriptions[i]) i++ } else if descriptions[i].Name > 
existingTables[j] { - // existingTables[j].name isn't in descriptions, can ignore + // existingTables[j].name isn't in descriptions, and can be removed + if m.cfg.RetentionPeriod > 0 { + for tblPrefix := range tablePrefixes { + if strings.HasPrefix(existingTables[j], tblPrefix) { + toDelete = append(toDelete, TableDesc{Name: existingTables[j]}) + break + } + } + } j++ } else { // Table exists, need to check it has correct throughput @@ -302,7 +328,7 @@ func (m *TableManager) partitionTables(ctx context.Context, descriptions []Table toCreate = append(toCreate, descriptions[i]) } - return toCreate, toCheck, nil + return toCreate, toCheck, toDelete, nil } func (m *TableManager) createTables(ctx context.Context, descriptions []TableDesc) error { @@ -316,6 +342,22 @@ func (m *TableManager) createTables(ctx context.Context, descriptions []TableDes return nil } +func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDesc) error { + for _, desc := range descriptions { + level.Info(util.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name) + if !m.cfg.RetentionDeletesEnabled { + continue + } + + level.Info(util.Logger).Log("msg", "deleting table", "table", desc.Name) + err := m.client.DeleteTable(ctx, desc.Name) + if err != nil { + return err + } + } + return nil +} + func (m *TableManager) updateTables(ctx context.Context, descriptions []TableDesc) error { for _, expected := range descriptions { level.Debug(util.Logger).Log("msg", "checking provisioned throughput on table", "table", expected.Name) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go index 768b180789..b3095884f6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go @@ -27,7 +27,7 @@ type Fixture interface { func Setup(fixture Fixture, tableName 
string) (chunk.IndexClient, chunk.ObjectClient, error) { var tbmConfig chunk.TableManagerConfig flagext.DefaultValues(&tbmConfig) - indexClient, chunkClient, tableClient, schemaConfig, err := fixture.Clients() + indexClient, objectClient, tableClient, schemaConfig, err := fixture.Clients() if err != nil { return nil, nil, err } @@ -45,7 +45,7 @@ func Setup(fixture Fixture, tableName string) (chunk.IndexClient, chunk.ObjectCl err = tableClient.CreateTable(context.Background(), chunk.TableDesc{ Name: tableName, }) - return indexClient, chunkClient, err + return indexClient, objectClient, err } // CreateChunks creates some chunks for testing diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/util/parallel_chunk_fetch.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/util/parallel_chunk_fetch.go index cf75317423..5b18578385 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/util/parallel_chunk_fetch.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/util/parallel_chunk_fetch.go @@ -13,7 +13,7 @@ const maxParallel = 1000 // GetParallelChunks fetches chunks in parallel (up to maxParallel). 
func GetParallelChunks(ctx context.Context, chunks []chunk.Chunk, f func(context.Context, *chunk.DecodeContext, chunk.Chunk) (chunk.Chunk, error)) ([]chunk.Chunk, error) { - sp, ctx := ot.StartSpanFromContext(ctx, "GetChunks") + sp, ctx := ot.StartSpanFromContext(ctx, "GetParallelChunks") defer sp.Finish() sp.LogFields(otlog.Int("chunks requested", len(chunks))) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go index 509e0fd534..e14e7ceac8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go @@ -12,6 +12,7 @@ import ( _ "google.golang.org/grpc/encoding/gzip" // get gzip compressor registered "google.golang.org/grpc/health/grpc_health_v1" + "github.com/cortexproject/cortex/pkg/util/grpcclient" cortex_middleware "github.com/cortexproject/cortex/pkg/util/middleware" "github.com/weaveworks/common/middleware" ) @@ -49,13 +50,7 @@ func MakeIngesterClient(addr string, cfg Config) (HealthAndIngesterClient, error middleware.StreamClientUserHeaderInterceptor, cortex_middleware.PrometheusGRPCStreamInstrumentation(ingesterClientRequestDuration), )), - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(cfg.MaxRecvMsgSize)), - } - if cfg.legacyCompressToIngester { - cfg.CompressToIngester = true - } - if cfg.CompressToIngester { - opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip"))) + cfg.GRPCClientConfig.DialOption(), } conn, err := grpc.Dial(addr, opts...) 
if err != nil { @@ -74,16 +69,15 @@ func (c *closableHealthAndIngesterClient) Close() error { // Config is the configuration struct for the ingester client type Config struct { - MaxRecvMsgSize int - CompressToIngester bool - legacyCompressToIngester bool + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` } // RegisterFlags registers configuration settings used by the ingester client config. func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - // We have seen 20MB returns from queries - add a bit of headroom - f.IntVar(&cfg.MaxRecvMsgSize, "ingester.client.max-recv-message-size", 64*1024*1024, "Maximum message size, in bytes, this client will receive.") - f.BoolVar(&cfg.CompressToIngester, "ingester.client.compress-to-ingester", false, "Compress data in calls to ingesters.") - // moved from distributor pkg, but flag prefix left as back compat fallback for existing users. - f.BoolVar(&cfg.legacyCompressToIngester, "distributor.compress-to-ingester", false, "Compress data in calls to ingesters. (DEPRECATED: use ingester.client.compress-to-ingester instead") + cfg.GRPCClientConfig.RegisterFlags("ingester.client", f) + + // Deprecated. + f.Int("ingester.client.max-recv-message-size", 64*1024*1024, "DEPRECATED. Maximum message size, in bytes, this client will receive.") + f.Bool("ingester.client.compress-to-ingester", false, "DEPRECATED. Compress data in calls to ingesters.") + f.Bool("distributor.compress-to-ingester", false, "DEPRECATED. Compress data in calls to ingesters. 
(DEPRECATED: use ingester.client.compress-to-ingester instead") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go index 717ca0efe0..c4764ca1d2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go @@ -2,14 +2,20 @@ package client import ( "bytes" - "encoding/json" + stdjson "encoding/json" "fmt" "sort" + "strconv" + "time" + "unsafe" + jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" ) +var json = jsoniter.ConfigCompatibleWithStandardLibrary + // FromWriteRequest converts a WriteRequest proto into an array of samples. func FromWriteRequest(req *WriteRequest) []model.Sample { // Just guess that there is one sample per timeseries @@ -251,9 +257,9 @@ func FastFingerprint(labelPairs []LabelPair) model.Fingerprint { var result uint64 for _, pair := range labelPairs { sum := hashNew() - sum = hashAdd(sum, string(pair.Name)) + sum = hashAdd(sum, pair.Name) sum = hashAddByte(sum, model.SeparatorByte) - sum = hashAdd(sum, string(pair.Value)) + sum = hashAdd(sum, pair.Value) result ^= sum } return model.Fingerprint(result) @@ -276,7 +282,7 @@ func (s Sample) MarshalJSON() ([]byte, error) { func (s *Sample) UnmarshalJSON(b []byte) error { var t model.Time var v model.SampleValue - vs := [...]json.Unmarshaler{&t, &v} + vs := [...]stdjson.Unmarshaler{&t, &v} if err := json.Unmarshal(b, &vs); err != nil { return err } @@ -284,3 +290,49 @@ func (s *Sample) UnmarshalJSON(b []byte) error { s.Value = float64(v) return nil } + +func init() { + + jsoniter.RegisterTypeEncoderFunc("client.Sample", func(ptr unsafe.Pointer, stream *jsoniter.Stream) { + sample := (*Sample)(ptr) + + stream.WriteArrayStart() + stream.WriteFloat64(float64(sample.TimestampMs) / float64(time.Second/time.Millisecond)) + 
stream.WriteMore() + stream.WriteString(model.SampleValue(sample.Value).String()) + stream.WriteArrayEnd() + }, func(unsafe.Pointer) bool { + return false + }) + + jsoniter.RegisterTypeDecoderFunc("client.Sample", func(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if !iter.ReadArray() { + iter.ReportError("client.Sample", "expected [") + return + } + + t := model.Time(iter.ReadFloat64() * float64(time.Second/time.Millisecond)) + + if !iter.ReadArray() { + iter.ReportError("client.Sample", "expected ,") + return + } + + bs := iter.ReadStringAsSlice() + ss := *(*string)(unsafe.Pointer(&bs)) + v, err := strconv.ParseFloat(ss, 64) + if err != nil { + iter.ReportError("client.Sample", err.Error()) + return + } + + if iter.ReadArray() { + iter.ReportError("client.Sample", "expected ]") + } + + *(*Sample)(ptr) = Sample{ + TimestampMs: int64(t), + Value: v, + } + }) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go index b3d3a87e09..41453b10aa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/fnv.go @@ -1,4 +1,4 @@ -// Copied from github.com/prometheus/common/model/fnv.go +// Modified from github.com/prometheus/common/model/fnv.go // Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -27,7 +27,7 @@ func hashNew() uint64 { } // hashAdd adds a string to a fnv64a hash value, returning the updated hash. 
-func hashAdd(h uint64, s string) uint64 { +func hashAdd(h uint64, s []byte) uint64 { for i := 0; i < len(s); i++ { h ^= uint64(s[i]) h *= prime64 diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go index e9c641adcf..51cc924803 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/index/index.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util" ) const indexShards = 32 @@ -19,7 +20,7 @@ type InvertedIndex struct { shards []indexShard } -// New returns an new InvertedIndex. +// New returns a new InvertedIndex. func New() *InvertedIndex { shards := make([]indexShard, indexShards) for i := 0; i < indexShards; i++ { @@ -32,7 +33,7 @@ func New() *InvertedIndex { // Add a fingerprint under the specified labels. func (ii *InvertedIndex) Add(labels []client.LabelPair, fp model.Fingerprint) { - shard := &ii.shards[hashFP(fp)%indexShards] + shard := &ii.shards[util.HashFP(fp)%indexShards] shard.add(labels, fp) } @@ -52,12 +53,6 @@ func (ii *InvertedIndex) Lookup(matchers []*labels.Matcher) []model.Fingerprint return result } -// Delete a fingerprint with the given label pairs. -func (ii *InvertedIndex) Delete(labels []client.LabelPair, fp model.Fingerprint) { - shard := &ii.shards[hashFP(fp)%indexShards] - shard.delete(labels, fp) -} - // LabelNames returns all label names. func (ii *InvertedIndex) LabelNames() model.LabelNames { results := make([]model.LabelNames, 0, indexShards) @@ -82,11 +77,18 @@ func (ii *InvertedIndex) LabelValues(name model.LabelName) model.LabelValues { return mergeLabelValueLists(results) } -const cacheLineSize = 64 +// Delete a fingerprint with the given label pairs. 
+func (ii *InvertedIndex) Delete(labels []client.LabelPair, fp model.Fingerprint) { + shard := &ii.shards[util.HashFP(fp)%indexShards] + shard.delete(labels, fp) +} // NB slice entries are sorted in fp order. type unlockIndex map[model.LabelName]map[model.LabelValue][]model.Fingerprint +// This is the prevalent value for Intel and AMD CPUs as-at 2018. +const cacheLineSize = 64 + type indexShard struct { mtx sync.RWMutex idx unlockIndex @@ -154,6 +156,37 @@ func (shard *indexShard) lookup(matchers []*labels.Matcher) []model.Fingerprint return result } +func (shard *indexShard) labelNames() model.LabelNames { + shard.mtx.RLock() + defer shard.mtx.RUnlock() + + results := make(model.LabelNames, 0, len(shard.idx)) + for name := range shard.idx { + results = append(results, name) + } + + sort.Sort(labelNames(results)) + return results +} + +func (shard *indexShard) labelValues(name model.LabelName) model.LabelValues { + shard.mtx.RLock() + defer shard.mtx.RUnlock() + + values, ok := shard.idx[name] + if !ok { + return nil + } + + results := make(model.LabelValues, 0, len(values)) + for val := range values { + results = append(results, val) + } + + sort.Sort(labelValues(results)) + return results +} + func (shard *indexShard) delete(labels []client.LabelPair, fp model.Fingerprint) { shard.mtx.Lock() defer shard.mtx.Unlock() @@ -188,37 +221,6 @@ func (shard *indexShard) delete(labels []client.LabelPair, fp model.Fingerprint) } } -func (shard *indexShard) labelNames() model.LabelNames { - shard.mtx.RLock() - defer shard.mtx.RUnlock() - - results := make(model.LabelNames, 0, len(shard.idx)) - for name := range shard.idx { - results = append(results, name) - } - - sort.Sort(labelNames(results)) - return results -} - -func (shard *indexShard) labelValues(name model.LabelName) model.LabelValues { - shard.mtx.RLock() - defer shard.mtx.RUnlock() - - values, ok := shard.idx[name] - if !ok { - return nil - } - - results := make(model.LabelValues, 0, len(values)) - for val := range 
values { - results = append(results, val) - } - - sort.Sort(labelValues(results)) - return results -} - // intersect two sorted lists of fingerprints. Assumes there are no duplicate // fingerprints within the input lists. func intersect(a, b []model.Fingerprint) []model.Fingerprint { @@ -239,17 +241,6 @@ func intersect(a, b []model.Fingerprint) []model.Fingerprint { return result } -// hashFP simply moves entropy from the most significant 48 bits of the -// fingerprint into the least significant 16 bits (by XORing) so that a simple -// MOD on the result can be used to pick a mutex while still making use of -// changes in more significant bits of the fingerprint. (The fast fingerprinting -// function we use is prone to only change a few bits for similar metrics. We -// really want to make use of every change in the fingerprint to vary mutex -// selection.) -func hashFP(fp model.Fingerprint) uint { - return uint(fp ^ (fp >> 32) ^ (fp >> 16)) -} - type labelValues model.LabelValues func (a labelValues) Len() int { return len(a) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go index 98374e59c4..3c89eb53d6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_client.go @@ -86,7 +86,7 @@ var ( // CAS atomically modifies a value in a callback. // If value doesn't exist you'll get nil as an argument to your callback. 
func (c *consulClient) CAS(ctx context.Context, key string, f CASCallback) error { - return instrument.TimeRequestHistogram(ctx, "CAS loop", consulRequestDuration, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "CAS loop", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { return c.cas(ctx, key, f) }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_metrics.go index 7720a3bbb2..efd9e1bd96 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/consul_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/consul_metrics.go @@ -8,15 +8,15 @@ import ( "github.com/weaveworks/common/instrument" ) -var consulRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ +var consulRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "consul_request_duration_seconds", Help: "Time spent on consul requests.", Buckets: prometheus.DefBuckets, -}, []string{"operation", "status_code"}) +}, []string{"operation", "status_code"})) func init() { - prometheus.MustRegister(consulRequestDuration) + consulRequestDuration.Register() } type consulMetrics struct { @@ -26,7 +26,7 @@ type consulMetrics struct { func (c consulMetrics) CAS(p *consul.KVPair, options *consul.WriteOptions) (bool, *consul.WriteMeta, error) { var ok bool var result *consul.WriteMeta - err := instrument.TimeRequestHistogram(options.Context(), "CAS", consulRequestDuration, func(ctx context.Context) error { + err := instrument.CollectedRequest(options.Context(), "CAS", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { options = options.WithContext(ctx) var err error ok, result, err = c.kv.CAS(p, options) @@ -38,7 +38,7 @@ func (c consulMetrics) CAS(p *consul.KVPair, options *consul.WriteOptions) (bool func (c consulMetrics) Get(key string, 
options *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error) { var kvp *consul.KVPair var meta *consul.QueryMeta - err := instrument.TimeRequestHistogram(options.Context(), "Get", consulRequestDuration, func(ctx context.Context) error { + err := instrument.CollectedRequest(options.Context(), "Get", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { options = options.WithContext(ctx) var err error kvp, meta, err = c.kv.Get(key, options) @@ -50,7 +50,7 @@ func (c consulMetrics) Get(key string, options *consul.QueryOptions) (*consul.KV func (c consulMetrics) List(path string, options *consul.QueryOptions) (consul.KVPairs, *consul.QueryMeta, error) { var kvps consul.KVPairs var meta *consul.QueryMeta - err := instrument.TimeRequestHistogram(options.Context(), "List", consulRequestDuration, func(ctx context.Context) error { + err := instrument.CollectedRequest(options.Context(), "List", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { options = options.WithContext(ctx) var err error kvps, meta, err = c.kv.List(path, options) @@ -61,7 +61,7 @@ func (c consulMetrics) List(path string, options *consul.QueryOptions) (consul.K func (c consulMetrics) Put(p *consul.KVPair, options *consul.WriteOptions) (*consul.WriteMeta, error) { var result *consul.WriteMeta - err := instrument.TimeRequestHistogram(options.Context(), "Put", consulRequestDuration, func(ctx context.Context) error { + err := instrument.CollectedRequest(options.Context(), "Put", consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { options = options.WithContext(ctx) var err error result, err = c.kv.Put(p, options) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index 7abd5d53fb..70e11ec066 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go 
@@ -17,10 +17,6 @@ import ( "github.com/cortexproject/cortex/pkg/util/flagext" ) -const ( - minReadyDuration = 1 * time.Minute -) - var ( consulHeartbeats = promauto.NewCounter(prometheus.CounterOpts{ Name: "cortex_ingester_consul_heartbeats_total", @@ -41,13 +37,14 @@ type LifecyclerConfig struct { RingConfig Config `yaml:"ring,omitempty"` // Config for the ingester lifecycle control - ListenPort *int - NumTokens int `yaml:"num_tokens,omitempty"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period,omitempty"` - JoinAfter time.Duration `yaml:"join_after,omitempty"` - ClaimOnRollout bool `yaml:"claim_on_rollout,omitempty"` - NormaliseTokens bool `yaml:"normalise_tokens,omitempty"` - InfNames []string `yaml:"interface_names"` + ListenPort *int + NumTokens int `yaml:"num_tokens,omitempty"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period,omitempty"` + JoinAfter time.Duration `yaml:"join_after,omitempty"` + MinReadyDuration time.Duration `yaml:"min_ready_duration,omitempty"` + ClaimOnRollout bool `yaml:"claim_on_rollout,omitempty"` + NormaliseTokens bool `yaml:"normalise_tokens,omitempty"` + InfNames []string `yaml:"interface_names"` // For testing, you can override the address and ID of this ingester Addr string `yaml:"address"` @@ -63,6 +60,7 @@ func (cfg *LifecyclerConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.NumTokens, "ingester.num-tokens", 128, "Number of tokens for each ingester.") f.DurationVar(&cfg.HeartbeatPeriod, "ingester.heartbeat-period", 5*time.Second, "Period at which to heartbeat to consul.") f.DurationVar(&cfg.JoinAfter, "ingester.join-after", 0*time.Second, "Period to wait for a claim from another ingester; will join automatically after this.") + f.DurationVar(&cfg.MinReadyDuration, "ingester.min-ready-duration", 1*time.Minute, "Minimum duration to wait before becoming ready. 
This is to work around race conditions with ingesters exiting and updating the ring.") f.BoolVar(&cfg.ClaimOnRollout, "ingester.claim-on-rollout", false, "Send chunks to PENDING ingesters on exit.") f.BoolVar(&cfg.NormaliseTokens, "ingester.normalise-tokens", false, "Store tokens in a normalised fashion to reduce allocations.") @@ -167,7 +165,7 @@ func (i *Lifecycler) IsReady(ctx context.Context) bool { // Ingester always take at least minReadyDuration to become ready to work // around race conditions with ingesters exiting and updating the ring - if time.Now().Sub(i.startTime) < minReadyDuration { + if time.Now().Sub(i.startTime) < i.cfg.MinReadyDuration { return false } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go new file mode 100644 index 0000000000..01a1200af5 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go @@ -0,0 +1,37 @@ +package grpcclient + +import ( + "flag" + + "google.golang.org/grpc" +) + +// Config for a gRPC client. +type Config struct { + MaxRecvMsgSize int `yaml:"max_recv_msg_size"` + MaxSendMsgSize int `yaml:"max_send_msg_size"` + UseGzipCompression bool `yaml:"use_gzip_compression"` +} + +// RegisterFlags registers flags. +func (cfg *Config) RegisterFlags(prefix string, f *flag.FlagSet) { + f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).") + f.IntVar(&cfg.MaxSendMsgSize, prefix+".grpc-max-send-msg-size", 16<<20, "gRPC client max send message size (bytes).") + f.BoolVar(&cfg.UseGzipCompression, prefix+".grpc-use-gzip-compression", false, "Use compression when sending messages.") +} + +// CallOptions returns the config in terms of CallOptions. 
+func (cfg *Config) CallOptions() []grpc.CallOption { + var opts []grpc.CallOption + opts = append(opts, grpc.MaxCallRecvMsgSize(cfg.MaxRecvMsgSize)) + opts = append(opts, grpc.MaxCallSendMsgSize(cfg.MaxSendMsgSize)) + if cfg.UseGzipCompression { + opts = append(opts, grpc.UseCompressor("gzip")) + } + return opts +} + +// DialOption returns the config as a grpc.DialOptions. +func (cfg *Config) DialOption() grpc.DialOption { + return grpc.WithDefaultCallOptions(cfg.CallOptions()...) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go new file mode 100644 index 0000000000..ba0a03801e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/hash_fp.go @@ -0,0 +1,14 @@ +package util + +import "github.com/prometheus/common/model" + +// HashFP simply moves entropy from the most significant 48 bits of the +// fingerprint into the least significant 16 bits (by XORing) so that a simple +// MOD on the result can be used to pick a mutex while still making use of +// changes in more significant bits of the fingerprint. (The fast fingerprinting +// function we use is prone to only change a few bits for similar metrics. We +// really want to make use of every change in the fingerprint to vary mutex +// selection.) 
+func HashFP(fp model.Fingerprint) uint { + return uint(fp ^ (fp >> 32) ^ (fp >> 16)) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/http.go b/vendor/github.com/cortexproject/cortex/pkg/util/http.go index 8857f7cfb3..dc97e8917d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/http.go @@ -74,7 +74,7 @@ func ParseProtoReader(ctx context.Context, reader io.Reader, req proto.Message, return nil, err } - if err := instrument.TimeRequestHistogram(ctx, "util.ParseProtoRequest[unmarshal]", nil, func(_ context.Context) error { + if err := instrument.CollectedRequest(ctx, "util.ParseProtoRequest[unmarshal]", &instrument.HistogramCollector{}, instrument.ErrorCode, func(_ context.Context) error { return proto.Unmarshal(body, req) }); err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/net.go b/vendor/github.com/cortexproject/cortex/pkg/util/net.go index 37e086deff..e0fa12e6ff 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/net.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/net.go @@ -12,17 +12,17 @@ func GetFirstAddressOf(names []string) (string, error) { for _, name := range names { inf, err := net.InterfaceByName(name) if err != nil { - level.Error(Logger).Log("msg", "error getting interface", "inf", name, "err", err) + level.Warn(Logger).Log("msg", "error getting interface", "inf", name, "err", err) continue } addrs, err := inf.Addrs() if err != nil { - level.Error(Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) + level.Warn(Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) continue } if len(addrs) <= 0 { - level.Error(Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) + level.Warn(Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) continue } diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go index d7cafc3b17..88a45998a9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go @@ -28,6 +28,10 @@ const ( invalidLabel = "label_invalid" labelNameTooLong = "label_name_too_long" labelValueTooLong = "label_value_too_long" + + // RateLimited is one of the values for the reason to discard samples. + // Declared here to avoid duplication in ingester and distributor. + RateLimited = "rate_limited" ) // DiscardedSamples is a metric of the number of discarded samples, by reason. diff --git a/vendor/github.com/etcd-io/bbolt/.travis.yml b/vendor/github.com/etcd-io/bbolt/.travis.yml new file mode 100644 index 0000000000..a60300c558 --- /dev/null +++ b/vendor/github.com/etcd-io/bbolt/.travis.yml @@ -0,0 +1,17 @@ +language: go +go_import_path: go.etcd.io/bbolt + +sudo: false + +go: +- 1.11 + +before_install: +- go get -v honnef.co/go/tools/... +- go get -v github.com/kisielk/errcheck + +script: +- make fmt +- make test +- make race +# - make errcheck diff --git a/vendor/github.com/etcd-io/bbolt/Makefile b/vendor/github.com/etcd-io/bbolt/Makefile index e035e63adc..61ab8f03b8 100644 --- a/vendor/github.com/etcd-io/bbolt/Makefile +++ b/vendor/github.com/etcd-io/bbolt/Makefile @@ -7,12 +7,24 @@ default: build race: @go test -v -race -test.run="TestSimulate_(100op|1000op)" +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... + # go get github.com/kisielk/errcheck errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt + @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt -test: - @go test -v -cover . 
- @go test -v ./cmd/bolt +test: + go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + go test -v ./cmd/bbolt -.PHONY: fmt test +.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/github.com/etcd-io/bbolt/README.md b/vendor/github.com/etcd-io/bbolt/README.md index 8523e33773..6546fc01ac 100644 --- a/vendor/github.com/etcd-io/bbolt/README.md +++ b/vendor/github.com/etcd-io/bbolt/README.md @@ -1,5 +1,18 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) -==== +bbolt +===== + +[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) +[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) +[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) +[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. 
bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] [LMDB project][lmdb]. The goal of the project is to provide a simple, @@ -10,47 +23,55 @@ Since Bolt is meant to be used as such a low-level piece of functionality, simplicity is key. The API will be small and only focus on getting values and setting values. That's it. +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ ## Project Status -Bolt is stable and the API is fixed. Full unit test coverage and randomized -black box testing are used to ensure database consistency and thread safety. -Bolt is currently in high-load production environments serving databases as -large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed -services every day. +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Project versioning + +bbolt uses [semantic versioning](http://semver.org). +API should not change between patch and minor releases. +New minor versions may add additional features to the API. 
## Table of Contents -- [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) -- [Resources](#resources) -- [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) -- [Caveats & Limitations](#caveats--limitations) -- [Reading the Source](#reading-the-source) -- [Other Projects Using Bolt](#other-projects-using-bolt) + - [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) 
+ - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) + - [Resources](#resources) + - [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) + - [Caveats & Limitations](#caveats--limitations) + - [Reading the Source](#reading-the-source) + - [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started @@ -59,13 +80,28 @@ services every day. To start using Bolt, install Go and run `go get`: ```sh -$ go get github.com/boltdb/bolt/... +$ go get go.etcd.io/bbolt/... ``` This will retrieve the library and install the `bolt` command line utility into your `$GOBIN` path. +### Importing bbolt + +To use bbolt as an embedded key-value store, import as: + +```go +import bolt "go.etcd.io/bbolt" + +db, err := bolt.Open(path, 0666, nil) +if err != nil { + return err +} +defer db.Close() +``` + + ### Opening a database The top-level object in Bolt is a `DB`. It is represented as a single file on @@ -79,7 +115,7 @@ package main import ( "log" - "github.com/boltdb/bolt" + bolt "go.etcd.io/bbolt" ) func main() { @@ -209,7 +245,7 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but **please** be sure to close +You can use the `DB.Begin()` function directly but **please** be sure to close the transaction. 
```go @@ -395,7 +431,7 @@ db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { fmt.Printf("key=%s, value=%s\n", k, v) } @@ -448,6 +484,10 @@ db.View(func(tx *bolt.Tx) error { }) ``` +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. ### Nested buckets @@ -460,6 +500,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) func (*Bucket) DeleteBucket(key []byte) error ``` +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. 
+ if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + ### Database backups @@ -469,7 +558,7 @@ this from a read-only transaction, it will perform a hot backup and not block your other database reads and writes. By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx) documentation for information about optimizing for larger-than-RAM datasets. One common use case is to backup over HTTP so you can use tools like `cURL` to @@ -715,6 +804,9 @@ Here are a few things to note when evaluating and using Bolt: can be reused by a new page or can be unmapped from virtual memory and you'll see an `unexpected fault address` panic when accessing it. +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for buckets that have random inserts will cause your database to have very poor page utilization. @@ -755,7 +847,7 @@ Here are a few things to note when evaluating and using Bolt: ## Reading the Source -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, transactional key/value database so it can be a good starting point for people interested in how databases work. @@ -807,46 +899,55 @@ them via pull request. Below is a list of public, open source projects that use Bolt: -* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. 
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. * [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. +* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support. +* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. 
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. +* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. 
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. 
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. * [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb. -* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. 
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. -* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. -* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. -* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. * [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. -* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. * [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. -* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. -* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. 
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/etcd-io/bbolt/appveyor.yml b/vendor/github.com/etcd-io/bbolt/appveyor.yml deleted file mode 100644 index 6e26e941d6..0000000000 --- a/vendor/github.com/etcd-io/bbolt/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\boltdb\bolt - -environment: - GOPATH: c:\gopath - -install: - - echo %PATH% - - echo %GOPATH% - - go version - - go env - - go get -v -t ./... - -build_script: - - go test -v ./... diff --git a/vendor/github.com/etcd-io/bbolt/bolt_386.go b/vendor/github.com/etcd-io/bbolt/bolt_386.go index e659bfb91f..4d35ee7cf3 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_386.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_386.go @@ -1,7 +1,10 @@ -package bolt +package bbolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_amd64.go b/vendor/github.com/etcd-io/bbolt/bolt_amd64.go index cca6b7eb70..60a52dad56 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_amd64.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_amd64.go @@ -1,7 +1,10 @@ -package bolt +package bbolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_arm.go b/vendor/github.com/etcd-io/bbolt/bolt_arm.go index e659bfb91f..105d27ddb7 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_arm.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_arm.go @@ -1,7 +1,28 @@ -package bolt +package bbolt + +import "unsafe" // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/etcd-io/bbolt/bolt_arm64.go b/vendor/github.com/etcd-io/bbolt/bolt_arm64.go index 6d2309352e..f5aa2a5ee2 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_arm64.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_arm64.go @@ -1,9 +1,12 @@ // +build arm64 -package bolt +package bbolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_linux.go b/vendor/github.com/etcd-io/bbolt/bolt_linux.go index 2b67666140..7707bcacf0 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_linux.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_linux.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "syscall" diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go b/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go new file mode 100644 index 0000000000..baeb289fd9 --- /dev/null +++ b/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go @@ -0,0 +1,12 @@ +// +build mips64 mips64le + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go b/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go new file mode 100644 index 0000000000..2d9b1a91f3 --- /dev/null +++ b/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go @@ -0,0 +1,12 @@ +// +build mips mipsle + +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x40000000 // 1GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? 
+var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go b/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go index 7058c3d734..d7f50358ef 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "syscall" diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc.go b/vendor/github.com/etcd-io/bbolt/bolt_ppc.go index 645ddc3edc..69804714aa 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_ppc.go @@ -1,9 +1,12 @@ // +build ppc -package bolt +package bbolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go b/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go index 2dc6be02e3..3565908576 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go @@ -1,9 +1,12 @@ // +build ppc64 -package bolt +package bbolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go b/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go index 8351e129f6..422c7c69d6 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go @@ -1,9 +1,12 @@ // +build ppc64le -package bolt +package bbolt // maxMapSize represents the largest mmap size supported by Bolt. 
const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_s390x.go b/vendor/github.com/etcd-io/bbolt/bolt_s390x.go index f4dd26bbba..6d3fcb825d 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_s390x.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_s390x.go @@ -1,9 +1,12 @@ // +build s390x -package bolt +package bbolt // maxMapSize represents the largest mmap size supported by Bolt. const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_unix.go b/vendor/github.com/etcd-io/bbolt/bolt_unix.go index cad62dda1e..5f2bb51451 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_unix.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_unix.go @@ -1,41 +1,43 @@ // +build !windows,!plan9,!solaris -package bolt +package bbolt import ( "fmt" - "os" "syscall" "time" "unsafe" ) // flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { +func flock(db *DB, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. 
- err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + // Attempt to obtain an exclusive lock. + err := syscall.Flock(int(fd), flag) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } @@ -53,7 +55,9 @@ func mmap(db *DB, sz int) error { } // Advise the kernel that the mmap is accessed randomly. - if err := madvise(b, syscall.MADV_RANDOM); err != nil { + err = madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. return fmt.Errorf("madvise: %s", err) } diff --git a/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go b/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go index 307bf2b3ee..babad65786 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go @@ -1,8 +1,7 @@ -package bolt +package bbolt import ( "fmt" - "os" "syscall" "time" "unsafe" @@ -11,36 +10,35 @@ import ( ) // flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { +func flock(db *DB, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. 
- if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) if err == nil { return nil } else if err != syscall.EAGAIN { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } diff --git a/vendor/github.com/etcd-io/bbolt/bolt_windows.go b/vendor/github.com/etcd-io/bbolt/bolt_windows.go index d538e6afd7..fca178bd29 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_windows.go +++ b/vendor/github.com/etcd-io/bbolt/bolt_windows.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "fmt" @@ -16,8 +16,6 @@ var ( ) const ( - lockExt = ".lock" - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx flagLockExclusive = 2 flagLockFailImmediately = 1 @@ -48,48 +46,47 @@ func fdatasync(db *DB) error { } // flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - // Create a separate lock file on windows because a process - // cannot share an exclusive lock on the same file. This is - // needed during Tx.WriteTo(). 
- f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) - if err != nil { - return err - } - db.lockfile = f - +func flock(db *DB, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } + // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range + // -1..0 as the lock on the database file. + var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 + err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{ + Offset: m1, + OffsetHigh: m1, + }) - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) if err == nil { return nil } else if err != errLockViolation { return err } + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } // funlock releases an advisory lock on a file descriptor. 
func funlock(db *DB) error { - err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) - db.lockfile.Close() - os.Remove(db.path+lockExt) + var m1 uint32 = (1 << 32) - 1 // -1 in a uint32 + err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{ + Offset: m1, + OffsetHigh: m1, + }) return err } diff --git a/vendor/github.com/etcd-io/bbolt/boltsync_unix.go b/vendor/github.com/etcd-io/bbolt/boltsync_unix.go index f50442523c..9587afefee 100644 --- a/vendor/github.com/etcd-io/bbolt/boltsync_unix.go +++ b/vendor/github.com/etcd-io/bbolt/boltsync_unix.go @@ -1,6 +1,6 @@ // +build !windows,!plan9,!linux,!openbsd -package bolt +package bbolt // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { diff --git a/vendor/github.com/etcd-io/bbolt/bucket.go b/vendor/github.com/etcd-io/bbolt/bucket.go index d2f8c524e4..84bfd4d6a2 100644 --- a/vendor/github.com/etcd-io/bbolt/bucket.go +++ b/vendor/github.com/etcd-io/bbolt/bucket.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "bytes" @@ -14,13 +14,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) const ( @@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. 
- if b.tx.writable { + if b.tx.writable && !unaligned { child.bucket = &bucket{} *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) } else { @@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if bytes.Equal(key, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists - } else { - return nil, ErrIncompatibleValue } + return nil, ErrIncompatibleValue } // Create empty, inline bucket. @@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error { // Move cursor to correct position. c := b.Cursor() - _, _, flags := c.seek(key) + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } // Return an error if there is already existing bucket value. if (flags & bucketLeafFlag) != 0 { @@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error { return nil } +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + // NextSequence returns an autoincrementing integer for the bucket. 
func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { diff --git a/vendor/github.com/etcd-io/bbolt/cursor.go b/vendor/github.com/etcd-io/bbolt/cursor.go index 1be9f35e3e..3000aced6c 100644 --- a/vendor/github.com/etcd-io/bbolt/cursor.go +++ b/vendor/github.com/etcd-io/bbolt/cursor.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "bytes" @@ -157,12 +157,6 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } // If this is a bucket then return a nil value. return c.keyValue() @@ -339,6 +333,8 @@ func (c *Cursor) nsearch(key []byte) { // keyValue returns the key and value of the current leaf element. func (c *Cursor) keyValue() ([]byte, []byte, uint32) { ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. if ref.count() == 0 || ref.index >= ref.count() { return nil, nil, 0 } diff --git a/vendor/github.com/etcd-io/bbolt/db.go b/vendor/github.com/etcd-io/bbolt/db.go index 1223493ca7..d91dcf813a 100644 --- a/vendor/github.com/etcd-io/bbolt/db.go +++ b/vendor/github.com/etcd-io/bbolt/db.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "errors" @@ -7,8 +7,7 @@ import ( "log" "os" "runtime" - "runtime/debug" - "strings" + "sort" "sync" "time" "unsafe" @@ -23,6 +22,8 @@ const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED +const pgidNoFreelist pgid = 0xffffffffffffffff + // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. 
This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes @@ -39,6 +40,9 @@ const ( // default page size for db is set to the OS page size. var defaultPageSize = os.Getpagesize() +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. @@ -61,6 +65,11 @@ type DB struct { // THIS IS UNSAFE. PLEASE USE WITH CAUTION. NoSync bool + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + // When true, skips the truncate call when growing the database. // Setting this to true is only safe on non-ext3/ext4 systems. // Skipping truncation avoids preallocation of hard drive space and @@ -96,8 +105,7 @@ type DB struct { path string file *os.File - lockfile *os.File // windows only - dataref []byte // mmap'ed readonly, write throws SEGV + dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int filesz int // current on disk file size @@ -107,9 +115,11 @@ type DB struct { opened bool rwtx *Tx txs []*Tx - freelist *freelist stats Stats + freelist *freelist + freelistLoad sync.Once + pagePool sync.Pool batchMu sync.Mutex @@ -148,14 +158,17 @@ func (db *DB) String() string { // If the file does not exist then it will be created automatically. // Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - + db := &DB{ + opened: true, + } // Set default options if no options are provided. 
if options == nil { options = DefaultOptions } + db.NoSync = options.NoSync db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -183,7 +196,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + if err := flock(db, !db.readOnly, options.Timeout); err != nil { _ = db.close() return nil, err } @@ -191,31 +204,41 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Default values for test hooks db.ops.writeAt = db.file.WriteAt + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { + _ = db.close() return nil, err } else if info.Size() == 0 { // Initialize new files with meta pages. if err := db.init(); err != nil { + // clean up file descriptor on initialization fail + _ = db.close() return nil, err } } else { // Read the first meta page to determine the page size. var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. 
- db.pageSize = os.Getpagesize() - } else { + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { db.pageSize = int(m.pageSize) } + } else { + _ = db.close() + return nil, ErrInvalid } } @@ -232,14 +255,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } // Mark the database as opened and return. return db, nil } +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist() + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. 
+ db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = len(db.freelist.ids) + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { @@ -341,9 +400,6 @@ func (db *DB) mmapSize(size int) (int, error) { // init creates a new database file and initializes its meta pages. func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { @@ -387,7 +443,8 @@ func (db *DB) init() error { } // Close releases all database resources. -// All transactions must be closed before closing the database. +// It will block waiting for any open transactions to finish +// before closing the database and returning. func (db *DB) Close() error { db.rwlock.Lock() defer db.rwlock.Unlock() @@ -395,8 +452,8 @@ func (db *DB) Close() error { db.metalock.Lock() defer db.metalock.Unlock() - db.mmaplock.RLock() - defer db.mmaplock.RUnlock() + db.mmaplock.Lock() + defer db.mmaplock.Unlock() return db.close() } @@ -526,21 +583,36 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t + db.freePages() + return t, nil +} - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. 
+ sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid } if minid > 0 { db.freelist.release(minid - 1) } - - return t, nil + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. } +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -552,7 +624,10 @@ func (db *DB) removeTx(tx *Tx) { // Remove the transaction. for i, t := range db.txs { if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] break } } @@ -630,11 +705,7 @@ func (db *DB) View(fn func(*Tx) error) error { return err } - if err := t.Rollback(); err != nil { - return err - } - - return nil + return t.Rollback() } // Batch calls fn as part of a batch. It behaves similar to Update, @@ -734,9 +805,7 @@ retry: // pass success, or bolt internal errors, to all callers for _, c := range b.calls { - if c.err != nil { - c.err <- err - } + c.err <- err } break retry } @@ -823,7 +892,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { +func (db *DB) allocate(txid txid, count int) (*page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -835,7 +904,7 @@ func (db *DB) allocate(count int) (*page, error) { p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. 
- if p.id = db.freelist.allocate(count); p.id != 0 { + if p.id = db.freelist.allocate(txid, count); p.id != 0 { return p, nil } @@ -890,6 +959,38 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, ech) + close(ech) + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = append(fids, i) + } + } + return fids +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. @@ -900,6 +1001,10 @@ type Options struct { // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool @@ -916,6 +1021,14 @@ type Options struct { // If initialMmapSize is smaller than the previous database size, // it takes no effect. InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. 
+ NoSync bool } // DefaultOptions represent the options used if nil options are passed into Open(). @@ -952,15 +1065,11 @@ func (s *Stats) Sub(other *Stats) Stats { diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse - diff.TxN = other.TxN - s.TxN + diff.TxN = s.TxN - other.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - type Info struct { Data uintptr PageSize int @@ -999,7 +1108,8 @@ func (m *meta) copy(dest *meta) { func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } @@ -1026,11 +1136,3 @@ func _assert(condition bool, msg string, v ...interface{}) { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/github.com/etcd-io/bbolt/doc.go b/vendor/github.com/etcd-io/bbolt/doc.go index cc937845db..95f25f01c6 100644 --- a/vendor/github.com/etcd-io/bbolt/doc.go +++ b/vendor/github.com/etcd-io/bbolt/doc.go @@ -1,5 +1,5 @@ /* -Package bolt implements a low-level key/value store in pure Go. It supports +package bbolt implements a low-level key/value store in pure Go. It supports fully serializable transactions, ACID semantics, and lock-free MVCC with multiple readers and a single writer. 
Bolt can be used for projects that want a simple data store without the need to add large dependencies such as @@ -41,4 +41,4 @@ point to different data or can point to invalid memory which will cause a panic. */ -package bolt +package bbolt diff --git a/vendor/github.com/etcd-io/bbolt/errors.go b/vendor/github.com/etcd-io/bbolt/errors.go index a3620a3ebb..48758ca577 100644 --- a/vendor/github.com/etcd-io/bbolt/errors.go +++ b/vendor/github.com/etcd-io/bbolt/errors.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import "errors" diff --git a/vendor/github.com/etcd-io/bbolt/freelist.go b/vendor/github.com/etcd-io/bbolt/freelist.go index 1b7ba91b2a..e4bcb2dcf9 100644 --- a/vendor/github.com/etcd-io/bbolt/freelist.go +++ b/vendor/github.com/etcd-io/bbolt/freelist.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "fmt" @@ -6,25 +6,40 @@ import ( "unsafe" ) +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. } // newFreelist returns an empty, initialized freelist. 
func newFreelist() *freelist { return &freelist{ - pending: make(map[txid][]pgid), + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), cache: make(map[pgid]bool), } } // size returns the size of the page after serialization. func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) } // count returns count of pages on the freelist @@ -40,27 +55,26 @@ func (f *freelist) free_count() int { // pending_count returns count of pending pages func (f *freelist) pending_count() int { var count int - for _, list := range f.pending { - count += len(list) + for _, txp := range f.pending { + count += len(txp.ids) } return count } -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) +// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) } - sort.Sort(m) - return pgids(f.ids).merge(m) + mergepgids(dst, f.ids, m) } // allocate returns the starting page id of a contiguous list of pages of a given size. // If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { +func (f *freelist) allocate(txid txid, n int) pgid { if len(f.ids) == 0 { return 0 } @@ -93,7 +107,7 @@ func (f *freelist) allocate(n int) pgid { for i := pgid(0); i < pgid(n); i++ { delete(f.cache, initial+i) } - + f.allocs[initial] = txid return initial } @@ -110,28 +124,73 @@ func (f *freelist) free(txid txid, p *page) { } // Free page and all its overflow pages. 
- var ids = f.pending[txid] + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + for id := p.id; id <= p.id+pgid(p.overflow); id++ { // Verify that page is not already free. if f.cache[id] { panic(fmt.Sprintf("page %d already freed", id)) } - // Add to the freelist and cache. - ids = append(ids, id) + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) f.cache[id] = true } - f.pending[txid] = ids } // release moves all page ids for a transaction id (or older) to the freelist. func (f *freelist) release(txid txid) { m := make(pgids, 0) - for tid, ids := range f.pending { + for tid, txp := range f.pending { if tid <= txid { // Move transaction's pending pages to the available freelist. // Don't remove from the cache since the page is still free. - m = append(m, ids...) + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. 
+ if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { delete(f.pending, tid) } } @@ -142,12 +201,29 @@ func (f *freelist) release(txid txid) { // rollback removes the pages from a given pending tx. func (f *freelist) rollback(txid txid) { // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) + txp := f.pending[txid] + if txp == nil { + return } - - // Remove pages from pending list. + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. delete(f.pending, txid) + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) } // freed returns whether a given page is in the free list. @@ -157,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool { // read initializes the freelist from a freelist page. func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. 
idx, count := 0, int(p.count) @@ -169,7 +248,7 @@ func (f *freelist) read(p *page) { if count == 0 { f.ids = nil } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] f.ids = make([]pgid, len(ids)) copy(f.ids, ids) @@ -181,27 +260,33 @@ func (f *freelist) read(p *page) { f.reindex() } +// read initializes the freelist from a given list of ids. +func (f *freelist) readIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. func (f *freelist) write(p *page) error { // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() // Update the header flag. p.flags |= freelistPageFlag // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. - if len(ids) == 0 { - p.count = uint16(len(ids)) - } else if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) } else { p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) } return nil @@ -213,8 +298,8 @@ func (f *freelist) reload(p *page) { // Build a cache of only pending pages. 
pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { pcache[pendingID] = true } } @@ -236,12 +321,12 @@ func (f *freelist) reload(p *page) { // reindex rebuilds the free cache based on available and pending free lists. func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) + f.cache = make(map[pgid]bool, len(f.ids)) for _, id := range f.ids { f.cache[id] = true } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { + for _, txp := range f.pending { + for _, pendingID := range txp.ids { f.cache[pendingID] = true } } diff --git a/vendor/github.com/etcd-io/bbolt/node.go b/vendor/github.com/etcd-io/bbolt/node.go index 159318b229..6c3fa553ea 100644 --- a/vendor/github.com/etcd-io/bbolt/node.go +++ b/vendor/github.com/etcd-io/bbolt/node.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "bytes" @@ -365,7 +365,7 @@ func (n *node) spill() error { } // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) if err != nil { return err } diff --git a/vendor/github.com/etcd-io/bbolt/page.go b/vendor/github.com/etcd-io/bbolt/page.go index 7651a6bf7d..bca9615f0f 100644 --- a/vendor/github.com/etcd-io/bbolt/page.go +++ b/vendor/github.com/etcd-io/bbolt/page.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "fmt" @@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids { // Return the opposite slice if one is nil. if len(a) == 0 { return b - } else if len(b) == 0 { + } + if len(b) == 0 { return a } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. 
+func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } - // Create a list to hold all elements from both lists. - merged := make(pgids, 0, len(a)+len(b)) + // Merged will hold all elements from both lists. + merged := dst[:0] // Assign lead to the slice with a lower starting value, follow to the higher value. lead, follow := a, b @@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids { } // Append what's left in follow. - merged = append(merged, follow...) - - return merged + _ = append(merged, follow...) } diff --git a/vendor/github.com/etcd-io/bbolt/tx.go b/vendor/github.com/etcd-io/bbolt/tx.go index 1cfb4cde85..f508641427 100644 --- a/vendor/github.com/etcd-io/bbolt/tx.go +++ b/vendor/github.com/etcd-io/bbolt/tx.go @@ -1,4 +1,4 @@ -package bolt +package bbolt import ( "fmt" @@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error { // the error is returned to the caller. func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil + return fn(k, tx.root.Bucket(k)) }) } @@ -169,28 +166,18 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). 
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) } - tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { return err } + } else { + tx.meta.freelist = pgidNoFreelist } // Write dirty pages to disk. @@ -235,6 +222,31 @@ func (tx *Tx) Commit() error { return nil } +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { @@ -291,7 +303,9 @@ func (tx *Tx) close() { } // Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() instead. +// This function exists for backwards compatibility. 
+// +// Deprecated; Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -305,7 +319,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { if err != nil { return 0, err } - defer func() { _ = f.Close() }() + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -333,7 +351,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { return n, fmt.Errorf("seek: %s", err) } @@ -344,7 +362,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, err } - return n, f.Close() + return n, nil } // CopyFile copies the entire database to file at the given path. @@ -379,9 +397,14 @@ func (tx *Tx) Check() <-chan error { } func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + // Check if any pages are double freed. freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) } @@ -392,8 +415,10 @@ func (tx *Tx) check(ch chan error) { reachable := make(map[pgid]*page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } } // Recursively check buckets. 
@@ -451,7 +476,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo // allocate returns a contiguous block of memory starting at a given page. func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) + p, err := tx.db.allocate(tx.meta.txid, count) if err != nil { return nil, err } @@ -460,7 +485,7 @@ func (tx *Tx) allocate(count int) (*page, error) { tx.pages[p.id] = p // Update statistics. - tx.stats.PageCount++ + tx.stats.PageCount += count tx.stats.PageAlloc += count * tx.db.pageSize return p, nil diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go deleted file mode 100644 index d12c8d651d..0000000000 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ /dev/null @@ -1,608 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package config - -import ( - "fmt" - "io/ioutil" - "net/url" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/prometheus/prometheus/pkg/relabel" - - config_util "github.com/prometheus/common/config" - "github.com/prometheus/common/model" - sd_config "github.com/prometheus/prometheus/discovery/config" - "gopkg.in/yaml.v2" -) - -var ( - patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`) -) - -// Load parses the YAML input s into a Config. 
-func Load(s string) (*Config, error) { - cfg := &Config{} - // If the entire config body is empty the UnmarshalYAML method is - // never called. We thus have to set the DefaultConfig at the entry - // point as well. - *cfg = DefaultConfig - - err := yaml.UnmarshalStrict([]byte(s), cfg) - if err != nil { - return nil, err - } - cfg.original = s - return cfg, nil -} - -// LoadFile parses the given YAML file into a Config. -func LoadFile(filename string) (*Config, error) { - content, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - cfg, err := Load(string(content)) - if err != nil { - return nil, fmt.Errorf("parsing YAML file %s: %v", filename, err) - } - resolveFilepaths(filepath.Dir(filename), cfg) - return cfg, nil -} - -// The defaults applied before parsing the respective config sections. -var ( - // DefaultConfig is the default top-level configuration. - DefaultConfig = Config{ - GlobalConfig: DefaultGlobalConfig, - } - - // DefaultGlobalConfig is the default global configuration. - DefaultGlobalConfig = GlobalConfig{ - ScrapeInterval: model.Duration(1 * time.Minute), - ScrapeTimeout: model.Duration(10 * time.Second), - EvaluationInterval: model.Duration(1 * time.Minute), - } - - // DefaultScrapeConfig is the default scrape configuration. - DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout and ScrapeInterval default to the - // configured globals. - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - } - - // DefaultAlertmanagerConfig is the default alertmanager configuration. - DefaultAlertmanagerConfig = AlertmanagerConfig{ - Scheme: "http", - Timeout: model.Duration(10 * time.Second), - } - - // DefaultRemoteWriteConfig is the default remote write configuration. - DefaultRemoteWriteConfig = RemoteWriteConfig{ - RemoteTimeout: model.Duration(30 * time.Second), - QueueConfig: DefaultQueueConfig, - } - - // DefaultQueueConfig is the default remote queue configuration. 
- DefaultQueueConfig = QueueConfig{ - // With a maximum of 1000 shards, assuming an average of 100ms remote write - // time and 100 samples per batch, we will be able to push 1M samples/s. - MaxShards: 1000, - MinShards: 1, - MaxSamplesPerSend: 100, - - // By default, buffer 100 batches, which at 100ms per batch is 10s. At - // 1000 shards, this will buffer 10M samples total. - Capacity: 100 * 100, - BatchSendDeadline: model.Duration(5 * time.Second), - - // Max number of times to retry a batch on recoverable errors. - MaxRetries: 3, - MinBackoff: model.Duration(30 * time.Millisecond), - MaxBackoff: model.Duration(100 * time.Millisecond), - } - - // DefaultRemoteReadConfig is the default remote read configuration. - DefaultRemoteReadConfig = RemoteReadConfig{ - RemoteTimeout: model.Duration(1 * time.Minute), - } -) - -// Config is the top-level configuration for Prometheus's config files. -type Config struct { - GlobalConfig GlobalConfig `yaml:"global"` - AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` - RuleFiles []string `yaml:"rule_files,omitempty"` - ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` - - RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` - RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` - - // original is the input from which the config was parsed. - original string -} - -// resolveFilepaths joins all relative paths in a configuration -// with a given base directory. 
-func resolveFilepaths(baseDir string, cfg *Config) { - join := func(fp string) string { - if len(fp) > 0 && !filepath.IsAbs(fp) { - fp = filepath.Join(baseDir, fp) - } - return fp - } - - for i, rf := range cfg.RuleFiles { - cfg.RuleFiles[i] = join(rf) - } - - clientPaths := func(scfg *config_util.HTTPClientConfig) { - scfg.BearerTokenFile = join(scfg.BearerTokenFile) - scfg.TLSConfig.CAFile = join(scfg.TLSConfig.CAFile) - scfg.TLSConfig.CertFile = join(scfg.TLSConfig.CertFile) - scfg.TLSConfig.KeyFile = join(scfg.TLSConfig.KeyFile) - } - sdPaths := func(cfg *sd_config.ServiceDiscoveryConfig) { - for _, kcfg := range cfg.KubernetesSDConfigs { - kcfg.BearerTokenFile = join(kcfg.BearerTokenFile) - kcfg.TLSConfig.CAFile = join(kcfg.TLSConfig.CAFile) - kcfg.TLSConfig.CertFile = join(kcfg.TLSConfig.CertFile) - kcfg.TLSConfig.KeyFile = join(kcfg.TLSConfig.KeyFile) - } - for _, mcfg := range cfg.MarathonSDConfigs { - mcfg.AuthTokenFile = join(mcfg.AuthTokenFile) - mcfg.HTTPClientConfig.BearerTokenFile = join(mcfg.HTTPClientConfig.BearerTokenFile) - mcfg.HTTPClientConfig.TLSConfig.CAFile = join(mcfg.HTTPClientConfig.TLSConfig.CAFile) - mcfg.HTTPClientConfig.TLSConfig.CertFile = join(mcfg.HTTPClientConfig.TLSConfig.CertFile) - mcfg.HTTPClientConfig.TLSConfig.KeyFile = join(mcfg.HTTPClientConfig.TLSConfig.KeyFile) - } - for _, consulcfg := range cfg.ConsulSDConfigs { - consulcfg.TLSConfig.CAFile = join(consulcfg.TLSConfig.CAFile) - consulcfg.TLSConfig.CertFile = join(consulcfg.TLSConfig.CertFile) - consulcfg.TLSConfig.KeyFile = join(consulcfg.TLSConfig.KeyFile) - } - for _, filecfg := range cfg.FileSDConfigs { - for i, fn := range filecfg.Files { - filecfg.Files[i] = join(fn) - } - } - } - - for _, cfg := range cfg.ScrapeConfigs { - clientPaths(&cfg.HTTPClientConfig) - sdPaths(&cfg.ServiceDiscoveryConfig) - } - for _, cfg := range cfg.AlertingConfig.AlertmanagerConfigs { - clientPaths(&cfg.HTTPClientConfig) - sdPaths(&cfg.ServiceDiscoveryConfig) - } -} - -func (c Config) 
String() string { - b, err := yaml.Marshal(c) - if err != nil { - return fmt.Sprintf("", err) - } - return string(b) -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultConfig - // We want to set c to the defaults and then overwrite it with the input. - // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML - // again, we have to hide it using a type indirection. - type plain Config - if err := unmarshal((*plain)(c)); err != nil { - return err - } - - // If a global block was open but empty the default global config is overwritten. - // We have to restore it here. - if c.GlobalConfig.isZero() { - c.GlobalConfig = DefaultGlobalConfig - } - - for _, rf := range c.RuleFiles { - if !patRulePath.MatchString(rf) { - return fmt.Errorf("invalid rule file path %q", rf) - } - } - // Do global overrides and validate unique names. - jobNames := map[string]struct{}{} - for _, scfg := range c.ScrapeConfigs { - if scfg == nil { - return fmt.Errorf("empty or null scrape config section") - } - // First set the correct scrape interval, then check that the timeout - // (inferred or explicit) is not greater than that. 
- if scfg.ScrapeInterval == 0 { - scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval - } - if scfg.ScrapeTimeout > scfg.ScrapeInterval { - return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) - } - if scfg.ScrapeTimeout == 0 { - if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval { - scfg.ScrapeTimeout = scfg.ScrapeInterval - } else { - scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout - } - } - - if _, ok := jobNames[scfg.JobName]; ok { - return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) - } - jobNames[scfg.JobName] = struct{}{} - } - for _, rwcfg := range c.RemoteWriteConfigs { - if rwcfg == nil { - return fmt.Errorf("empty or null remote write config section") - } - } - for _, rrcfg := range c.RemoteReadConfigs { - if rrcfg == nil { - return fmt.Errorf("empty or null remote read config section") - } - } - return nil -} - -// GlobalConfig configures values that are used across other configuration -// objects. -type GlobalConfig struct { - // How frequently to scrape targets by default. - ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` - // The default timeout when scraping targets. - ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` - // How frequently to evaluate rules by default. - EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` - // The labels to add to any timeseries that this Prometheus instance scrapes. - ExternalLabels model.LabelSet `yaml:"external_labels,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Create a clean global config as the previous one was already populated - // by the default due to the YAML parser behavior for empty blocks. 
- gc := &GlobalConfig{} - type plain GlobalConfig - if err := unmarshal((*plain)(gc)); err != nil { - return err - } - - // First set the correct scrape interval, then check that the timeout - // (inferred or explicit) is not greater than that. - if gc.ScrapeInterval == 0 { - gc.ScrapeInterval = DefaultGlobalConfig.ScrapeInterval - } - if gc.ScrapeTimeout > gc.ScrapeInterval { - return fmt.Errorf("global scrape timeout greater than scrape interval") - } - if gc.ScrapeTimeout == 0 { - if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval { - gc.ScrapeTimeout = gc.ScrapeInterval - } else { - gc.ScrapeTimeout = DefaultGlobalConfig.ScrapeTimeout - } - } - if gc.EvaluationInterval == 0 { - gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval - } - *c = *gc - return nil -} - -// isZero returns true iff the global config is the zero value. -func (c *GlobalConfig) isZero() bool { - return c.ExternalLabels == nil && - c.ScrapeInterval == 0 && - c.ScrapeTimeout == 0 && - c.EvaluationInterval == 0 -} - -// ScrapeConfig configures a scraping unit for Prometheus. -type ScrapeConfig struct { - // The job name to which the job label is set by default. - JobName string `yaml:"job_name"` - // Indicator whether the scraped metrics should remain unmodified. - HonorLabels bool `yaml:"honor_labels,omitempty"` - // A set of query parameters with which the target is scraped. - Params url.Values `yaml:"params,omitempty"` - // How frequently to scrape the targets of this scrape config. - ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` - // The timeout for scraping targets of this config. - ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` - // The HTTP resource path on which to fetch metrics from targets. - MetricsPath string `yaml:"metrics_path,omitempty"` - // The URL scheme with which to fetch metrics from targets. - Scheme string `yaml:"scheme,omitempty"` - // More than this many samples post metric-relabelling will cause the scrape to fail. 
- SampleLimit uint `yaml:"sample_limit,omitempty"` - - // We cannot do proper Go type embedding below as the parser will then parse - // values arbitrarily into the overflow maps of further-down types. - - ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"` - HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` - - // List of target relabel configurations. - RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` - // List of metric relabel configurations. - MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultScrapeConfig - type plain ScrapeConfig - err := unmarshal((*plain)(c)) - if err != nil { - return err - } - if len(c.JobName) == 0 { - return fmt.Errorf("job_name is empty") - } - - // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. - // We cannot make it a pointer as the parser panics for inlined pointer structs. - // Thus we just do its validation here. - if err := c.HTTPClientConfig.Validate(); err != nil { - return err - } - - // The UnmarshalYAML method of ServiceDiscoveryConfig is not being called because it's not a pointer. - // We cannot make it a pointer as the parser panics for inlined pointer structs. - // Thus we just do its validation here. - if err := c.ServiceDiscoveryConfig.Validate(); err != nil { - return err - } - - // Check for users putting URLs in target groups. 
- if len(c.RelabelConfigs) == 0 { - for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs { - for _, t := range tg.Targets { - if err := CheckTargetAddress(t[model.AddressLabel]); err != nil { - return err - } - } - } - } - - for _, rlcfg := range c.RelabelConfigs { - if rlcfg == nil { - return fmt.Errorf("empty or null target relabeling rule in scrape config") - } - } - for _, rlcfg := range c.MetricRelabelConfigs { - if rlcfg == nil { - return fmt.Errorf("empty or null metric relabeling rule in scrape config") - } - } - - // Add index to the static config target groups for unique identification - // within scrape pool. - for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs { - tg.Source = fmt.Sprintf("%d", i) - } - - return nil -} - -// AlertingConfig configures alerting and alertmanager related configs. -type AlertingConfig struct { - AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"` - AlertmanagerConfigs []*AlertmanagerConfig `yaml:"alertmanagers,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - // Create a clean global config as the previous one was already populated - // by the default due to the YAML parser behavior for empty blocks. - *c = AlertingConfig{} - type plain AlertingConfig - if err := unmarshal((*plain)(c)); err != nil { - return err - } - - for _, rlcfg := range c.AlertRelabelConfigs { - if rlcfg == nil { - return fmt.Errorf("empty or null alert relabeling rule") - } - } - return nil -} - -// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. -type AlertmanagerConfig struct { - // We cannot do proper Go type embedding below as the parser will then parse - // values arbitrarily into the overflow maps of further-down types. 
- - ServiceDiscoveryConfig sd_config.ServiceDiscoveryConfig `yaml:",inline"` - HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` - - // The URL scheme to use when talking to Alertmanagers. - Scheme string `yaml:"scheme,omitempty"` - // Path prefix to add in front of the push endpoint path. - PathPrefix string `yaml:"path_prefix,omitempty"` - // The timeout used when sending alerts. - Timeout model.Duration `yaml:"timeout,omitempty"` - - // List of Alertmanager relabel configurations. - RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultAlertmanagerConfig - type plain AlertmanagerConfig - if err := unmarshal((*plain)(c)); err != nil { - return err - } - - // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. - // We cannot make it a pointer as the parser panics for inlined pointer structs. - // Thus we just do its validation here. - if err := c.HTTPClientConfig.Validate(); err != nil { - return err - } - - // The UnmarshalYAML method of ServiceDiscoveryConfig is not being called because it's not a pointer. - // We cannot make it a pointer as the parser panics for inlined pointer structs. - // Thus we just do its validation here. - if err := c.ServiceDiscoveryConfig.Validate(); err != nil { - return err - } - - // Check for users putting URLs in target groups. - if len(c.RelabelConfigs) == 0 { - for _, tg := range c.ServiceDiscoveryConfig.StaticConfigs { - for _, t := range tg.Targets { - if err := CheckTargetAddress(t[model.AddressLabel]); err != nil { - return err - } - } - } - } - - for _, rlcfg := range c.RelabelConfigs { - if rlcfg == nil { - return fmt.Errorf("empty or null Alertmanager target relabeling rule") - } - } - - // Add index to the static config target groups for unique identification - // within scrape pool. 
- for i, tg := range c.ServiceDiscoveryConfig.StaticConfigs { - tg.Source = fmt.Sprintf("%d", i) - } - - return nil -} - -// CheckTargetAddress checks if target address is valid. -func CheckTargetAddress(address model.LabelValue) error { - // For now check for a URL, we may want to expand this later. - if strings.Contains(string(address), "/") { - return fmt.Errorf("%q is not a valid hostname", address) - } - return nil -} - -// ClientCert contains client cert credentials. -type ClientCert struct { - Cert string `yaml:"cert"` - Key config_util.Secret `yaml:"key"` -} - -// FileSDConfig is the configuration for file based discovery. -type FileSDConfig struct { - Files []string `yaml:"files"` - RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` -} - -// RemoteWriteConfig is the configuration for writing to remote storage. -type RemoteWriteConfig struct { - URL *config_util.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` - - // We cannot do proper Go type embedding below as the parser will then parse - // values arbitrarily into the overflow maps of further-down types. - HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` - QueueConfig QueueConfig `yaml:"queue_config,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultRemoteWriteConfig - type plain RemoteWriteConfig - if err := unmarshal((*plain)(c)); err != nil { - return err - } - if c.URL == nil { - return fmt.Errorf("url for remote_write is empty") - } - for _, rlcfg := range c.WriteRelabelConfigs { - if rlcfg == nil { - return fmt.Errorf("empty or null relabeling rule in remote write config") - } - } - - // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. 
- // We cannot make it a pointer as the parser panics for inlined pointer structs. - // Thus we just do its validation here. - return c.HTTPClientConfig.Validate() -} - -// QueueConfig is the configuration for the queue used to write to remote -// storage. -type QueueConfig struct { - // Number of samples to buffer per shard before we start dropping them. - Capacity int `yaml:"capacity,omitempty"` - - // Max number of shards, i.e. amount of concurrency. - MaxShards int `yaml:"max_shards,omitempty"` - - // Min number of shards, i.e. amount of concurrency. - MinShards int `yaml:"min_shards,omitempty"` - - // Maximum number of samples per send. - MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"` - - // Maximum time sample will wait in buffer. - BatchSendDeadline model.Duration `yaml:"batch_send_deadline,omitempty"` - - // Max number of times to retry a batch on recoverable errors. - MaxRetries int `yaml:"max_retries,omitempty"` - - // On recoverable errors, backoff exponentially. - MinBackoff model.Duration `yaml:"min_backoff,omitempty"` - MaxBackoff model.Duration `yaml:"max_backoff,omitempty"` -} - -// RemoteReadConfig is the configuration for reading from remote storage. -type RemoteReadConfig struct { - URL *config_util.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - ReadRecent bool `yaml:"read_recent,omitempty"` - // We cannot do proper Go type embedding below as the parser will then parse - // values arbitrarily into the overflow maps of further-down types. - HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` - - // RequiredMatchers is an optional list of equality matchers which have to - // be present in a selector to query the remote read endpoint. - RequiredMatchers model.LabelSet `yaml:"required_matchers,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultRemoteReadConfig - type plain RemoteReadConfig - if err := unmarshal((*plain)(c)); err != nil { - return err - } - if c.URL == nil { - return fmt.Errorf("url for remote_read is empty") - } - // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. - // We cannot make it a pointer as the parser panics for inlined pointer structs. - // Thus we just do its validation here. - return c.HTTPClientConfig.Validate() -}