// loki/pkg/querier/querier_test.go


package querier
import (
"context"
"errors"
"io"
"net/http"
"testing"
"time"
"github.com/go-kit/log"
"github.com/grafana/dskit/flagext"
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/ring"
ring_client "github.com/grafana/dskit/ring/client"
"github.com/grafana/dskit/user"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/grafana/loki/v3/pkg/compactor/deletion"
"github.com/grafana/loki/v3/pkg/ingester/client"
"github.com/grafana/loki/v3/pkg/logproto"
"github.com/grafana/loki/v3/pkg/logql"
"github.com/grafana/loki/v3/pkg/logql/syntax"
"github.com/grafana/loki/v3/pkg/querier/plan"
"github.com/grafana/loki/v3/pkg/storage"
"github.com/grafana/loki/v3/pkg/util/constants"
"github.com/grafana/loki/v3/pkg/validation"
)
const (
// Custom query timeout used in tests
queryTimeout = 12 * time.Second
)
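// TestQuerier_Label_QueryTimeoutConfigFlag verifies that the configured per-tenant
// query timeout is propagated as a context deadline to both the ingester Label call
// and the store's LabelValuesForMetricName call.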
func TestQuerier_Label_QueryTimeoutConfigFlag(t *testing.T) {
startTime := time.Now().Add(-1 * time.Minute)
endTime := time.Now()
request := logproto.LabelRequest{
Name: "test",
Values: true,
Start: &startTime,
End: &endTime,
}
ingesterClient := newQuerierClientMock()
ingesterClient.On("Label", mock.Anything, &request, mock.Anything).Return(mockLabelResponse([]string{}), nil)
store := newStoreMock()
store.On("LabelValuesForMetricName", mock.Anything, "test", model.TimeFromUnixNano(startTime.UnixNano()), model.TimeFromUnixNano(endTime.UnixNano()), "logs", "test").Return([]string{"foo", "bar"}, nil)
limitsCfg := defaultLimitsTestConfig()
limitsCfg.QueryTimeout = model.Duration(queryTimeout)
limits, err := validation.NewOverrides(limitsCfg, nil)
require.NoError(t, err)
q, err := newQuerier(
mockQuerierConfig(),
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
_, err = q.Label(ctx, &request)
require.NoError(t, err)
calls := ingesterClient.GetMockedCallsByMethod("Label")
assert.Equal(t, 1, len(calls))
deadline, ok := calls[0].Arguments.Get(0).(context.Context).Deadline()
assert.True(t, ok)
assert.WithinDuration(t, deadline, time.Now().Add(queryTimeout), 1*time.Second)
calls = store.GetMockedCallsByMethod("LabelValuesForMetricName")
assert.Equal(t, 1, len(calls))
deadline, ok = calls[0].Arguments.Get(0).(context.Context).Deadline()
assert.True(t, ok)
assert.WithinDuration(t, deadline, time.Now().Add(queryTimeout), 1*time.Second)
store.AssertExpectations(t)
}
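// TestQuerier_Tail_QueryTimeoutConfigFlag verifies that the query timeout is applied
// to the initial Query and SelectLogs calls made when starting a tail, while the
// long-lived Tail stream context carries no deadline.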
func TestQuerier_Tail_QueryTimeoutConfigFlag(t *testing.T) {
request := logproto.TailRequest{
Query: `{type="test"}`,
DelayFor: 0,
Limit: 10,
Start: time.Now(),
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(`{type="test"}`),
},
}
store := newStoreMock()
store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil)
queryClient := newQueryClientMock()
queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil)
tailClient := newTailClientMock()
tailClient.On("Recv").Return(mockTailResponse(mockStream(1, 2)), nil)
ingesterClient := newQuerierClientMock()
ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil)
ingesterClient.On("Tail", mock.Anything, &request, mock.Anything).Return(tailClient, nil)
ingesterClient.On("TailersCount", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.TailersCountResponse{}, nil)
limitsCfg := defaultLimitsTestConfig()
limitsCfg.QueryTimeout = model.Duration(queryTimeout)
limits, err := validation.NewOverrides(limitsCfg, nil)
require.NoError(t, err)
q, err := newQuerier(
mockQuerierConfig(),
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
_, err = q.Tail(ctx, &request, false)
require.NoError(t, err)
calls := ingesterClient.GetMockedCallsByMethod("Query")
assert.Equal(t, 1, len(calls))
deadline, ok := calls[0].Arguments.Get(0).(context.Context).Deadline()
assert.True(t, ok)
assert.WithinDuration(t, deadline, time.Now().Add(queryTimeout), 1*time.Second)
calls = ingesterClient.GetMockedCallsByMethod("Tail")
assert.Equal(t, 1, len(calls))
_, ok = calls[0].Arguments.Get(0).(context.Context).Deadline()
assert.False(t, ok)
calls = store.GetMockedCallsByMethod("SelectLogs")
assert.Equal(t, 1, len(calls))
deadline, ok = calls[0].Arguments.Get(0).(context.Context).Deadline()
assert.True(t, ok)
assert.WithinDuration(t, deadline, time.Now().Add(queryTimeout), 1*time.Second)
store.AssertExpectations(t)
}
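// mockQuerierConfig returns a minimal querier Config for these tests.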
func mockQuerierConfig() Config {
return Config{
TailMaxDuration: 1 * time.Minute,
}
}
func mockQueryResponse(streams []logproto.Stream) *logproto.QueryResponse {
return &logproto.QueryResponse{
Streams: streams,
}
}
func mockLabelResponse(values []string) *logproto.LabelResponse {
return &logproto.LabelResponse{
Values: values,
}
}
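// defaultLimitsTestConfig returns a validation.Limits populated with the flag defaults.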
func defaultLimitsTestConfig() validation.Limits {
limits := validation.Limits{}
flagext.DefaultValues(&limits)
return limits
}
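// TestQuerier_validateQueryRequest verifies that SelectLogs rejects requests that exceed
// the per-tenant limits on stream matchers and query time range, and accepts requests
// that stay within them.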
func TestQuerier_validateQueryRequest(t *testing.T) {
request := logproto.QueryRequest{
Selector: `{type="test", fail="yes"} |= "foo"`,
Limit: 10,
Start: time.Now().Add(-1 * time.Minute),
End: time.Now(),
Direction: logproto.FORWARD,
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(`{type="test", fail="yes"} |= "foo"`),
},
}
store := newStoreMock()
store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil)
queryClient := newQueryClientMock()
queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil)
ingesterClient := newQuerierClientMock()
ingesterClient.On("Query", mock.Anything, &request, mock.Anything).Return(queryClient, nil)
defaultLimits := defaultLimitsTestConfig()
defaultLimits.MaxStreamsMatchersPerQuery = 1
defaultLimits.MaxQueryLength = model.Duration(2 * time.Minute)
limits, err := validation.NewOverrides(defaultLimits, nil)
require.NoError(t, err)
q, err := newQuerier(
mockQuerierConfig(),
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "max streams matchers per query exceeded, matchers-count > limit (2 > 1)"), err)
request.Selector = `{type="test"}`
request.Plan = &plan.QueryPlan{
AST: syntax.MustParseExpr(`{type="test"}`),
}
_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
require.NoError(t, err)
request.Start = request.End.Add(-3*time.Minute - 2*time.Second)
_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "the query time range exceeds the limit (query length: 3m2s, limit: 2m)"), err)
}
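// TestQuerier_SeriesAPI exercises the Series endpoint against combinations of ingester
// and store responses: errors from either side, empty results, merging of results, and
// de-duplication of series returned by both.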
func TestQuerier_SeriesAPI(t *testing.T) {
mkReq := func(groups []string) *logproto.SeriesRequest {
return &logproto.SeriesRequest{
Start: time.Unix(0, 0),
End: time.Unix(10, 0),
Groups: groups,
}
}
mockSeriesResponse := func(series []map[string]string) *logproto.SeriesResponse {
resp := &logproto.SeriesResponse{}
for _, s := range series {
resp.Series = append(resp.Series, logproto.SeriesIdentifierFromMap(s))
}
return resp
}
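// Each case wires up the store and ingester mocks in setup, then runs its assertions
// against a freshly constructed querier.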
for _, tc := range []struct {
desc string
req *logproto.SeriesRequest
setup func(*storeMock, *queryClientMock, *querierClientMock, validation.Limits, *logproto.SeriesRequest)
run func(*testing.T, *SingleTenantQuerier, *logproto.SeriesRequest)
}{
{
"ingester error",
mkReq([]string{`{a="1"}`}),
func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) {
ingester.On("Series", mock.Anything, req, mock.Anything).Return(nil, errors.New("tst-err"))
store.On("SelectSeries", mock.Anything, mock.Anything).Return(nil, nil)
},
func(t *testing.T, q *SingleTenantQuerier, req *logproto.SeriesRequest) {
ctx := user.InjectOrgID(context.Background(), "test")
_, err := q.Series(ctx, req)
require.Error(t, err)
},
},
{
"store error",
mkReq([]string{`{a="1"}`}),
func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) {
ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse([]map[string]string{
{"a": "1"},
}), nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return(nil, context.DeadlineExceeded)
},
func(t *testing.T, q *SingleTenantQuerier, req *logproto.SeriesRequest) {
ctx := user.InjectOrgID(context.Background(), "test")
_, err := q.Series(ctx, req)
require.Error(t, err)
},
},
{
"no matches",
mkReq([]string{`{a="1"}`}),
func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) {
ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse(nil), nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return(nil, nil)
},
func(t *testing.T, q *SingleTenantQuerier, req *logproto.SeriesRequest) {
ctx := user.InjectOrgID(context.Background(), "test")
resp, err := q.Series(ctx, req)
require.Nil(t, err)
require.Equal(t, &logproto.SeriesResponse{Series: make([]logproto.SeriesIdentifier, 0)}, resp)
},
},
{
"returns series",
mkReq([]string{`{a="1"}`}),
func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) {
ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse([]map[string]string{
{"a": "1", "b": "2"},
{"a": "1", "b": "3"},
}), nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return([]logproto.SeriesIdentifier{
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "4"},
}},
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "5"},
}},
}, nil)
},
func(t *testing.T, q *SingleTenantQuerier, req *logproto.SeriesRequest) {
ctx := user.InjectOrgID(context.Background(), "test")
resp, err := q.Series(ctx, req)
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "2"},
}},
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "3"}},
},
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "4"},
}},
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "5"},
}},
}, resp.GetSeries())
},
},
{
"dedupes",
mkReq([]string{`{a="1"}`}),
func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) {
ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse([]map[string]string{
{"a": "1", "b": "2"},
}), nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return([]logproto.SeriesIdentifier{
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "2"},
}},
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "3"},
}},
}, nil)
},
func(t *testing.T, q *SingleTenantQuerier, req *logproto.SeriesRequest) {
ctx := user.InjectOrgID(context.Background(), "test")
resp, err := q.Series(ctx, req)
require.Nil(t, err)
require.ElementsMatch(t, []logproto.SeriesIdentifier{
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "2"},
}},
{Labels: []logproto.SeriesIdentifier_LabelsEntry{
{Key: "a", Value: "1"},
{Key: "b", Value: "3"},
}},
}, resp.GetSeries())
},
},
} {
t.Run(tc.desc, func(t *testing.T) {
store := newStoreMock()
queryClient := newQueryClientMock()
ingesterClient := newQuerierClientMock()
defaultLimits := defaultLimitsTestConfig()
if tc.setup != nil {
tc.setup(store, queryClient, ingesterClient, defaultLimits, tc.req)
}
limits, err := validation.NewOverrides(defaultLimits, nil)
require.NoError(t, err)
q, err := newQuerier(
mockQuerierConfig(),
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
tc.run(t, q, tc.req)
})
}
}
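// TestQuerier_IngesterMaxQueryLookback verifies that ingesters are only queried when the
// request end time falls within the configured QueryIngestersWithin window; a zero value
// always queries ingesters.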
func TestQuerier_IngesterMaxQueryLookback(t *testing.T) {
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
for _, tc := range []struct {
desc string
lookback time.Duration
end time.Time
skipIngesters bool
}{
{
desc: "0 value always queries ingesters",
lookback: 0,
end: time.Now().Add(time.Hour),
skipIngesters: false,
},
{
desc: "query ingester",
lookback: time.Hour,
end: time.Now(),
skipIngesters: false,
},
{
desc: "skip ingester",
lookback: time.Hour,
end: time.Now().Add(-2 * time.Hour),
skipIngesters: true,
},
} {
t.Run(tc.desc, func(t *testing.T) {
req := logproto.QueryRequest{
Selector: `{app="foo"}`,
Limit: 1000,
Start: tc.end.Add(-6 * time.Hour),
End: tc.end,
Direction: logproto.FORWARD,
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(`{app="foo"}`),
},
}
queryClient := newQueryClientMock()
ingesterClient := newQuerierClientMock()
if !tc.skipIngesters {
ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil)
queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 1)}), nil).Once()
queryClient.On("Recv").Return(nil, io.EOF).Once()
}
store := newStoreMock()
store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil)
conf := mockQuerierConfig()
conf.QueryIngestersWithin = tc.lookback
q, err := newQuerier(
conf,
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
res, err := q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &req})
require.Nil(t, err)
// since streams are loaded lazily, force iterators to exhaust
//nolint:revive
for res.Next() {
}
queryClient.AssertExpectations(t)
ingesterClient.AssertExpectations(t)
store.AssertExpectations(t)
})
}
}
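// TestQuerier_concurrentTailLimits verifies that Tail requests fail when no active ingester is
// available and when the max concurrent tail requests limit would be exceeded.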
func TestQuerier_concurrentTailLimits(t *testing.T) {
request := logproto.TailRequest{
Query: "{type=\"test\"}",
DelayFor: 0,
Limit: 10,
Start: time.Now(),
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr("{type=\"test\"}"),
},
}
t.Parallel()
tests := map[string]struct {
ringIngesters []ring.InstanceDesc
expectedError error
tailersCount uint32
}{
"empty ring": {
ringIngesters: []ring.InstanceDesc{},
expectedError: httpgrpc.Errorf(http.StatusInternalServerError, "no active ingester found"),
},
"ring containing one pending ingester": {
ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.PENDING)},
expectedError: httpgrpc.Errorf(http.StatusInternalServerError, "no active ingester found"),
},
"ring containing one active ingester and 0 active tailers": {
ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
},
"ring containing one active ingester and 1 active tailer": {
ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
tailersCount: 1,
},
"ring containing one pending and active ingester with 1 active tailer": {
ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.PENDING), mockInstanceDesc("2.2.2.2", ring.ACTIVE)},
tailersCount: 1,
},
"ring containing one active ingester and max active tailers": {
ringIngesters: []ring.InstanceDesc{mockInstanceDesc("1.1.1.1", ring.ACTIVE)},
expectedError: httpgrpc.Errorf(http.StatusBadRequest,
"max concurrent tail requests limit exceeded, count > limit (%d > %d)", 6, 5),
tailersCount: 5,
},
}
for testName, testData := range tests {
testData := testData
t.Run(testName, func(t *testing.T) {
// For this test's purpose, whenever a new ingester client needs to
// be created, the factory will always return the same mock instance
store := newStoreMock()
store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil)
queryClient := newQueryClientMock()
queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil)
tailClient := newTailClientMock()
tailClient.On("Recv").Return(mockTailResponse(mockStream(1, 2)), nil)
ingesterClient := newQuerierClientMock()
ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil)
ingesterClient.On("Tail", mock.Anything, &request, mock.Anything).Return(tailClient, nil)
ingesterClient.On("TailersCount", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.TailersCountResponse{Count: testData.tailersCount}, nil)
defaultLimits := defaultLimitsTestConfig()
defaultLimits.MaxConcurrentTailRequests = 5
limits, err := validation.NewOverrides(defaultLimits, nil)
require.NoError(t, err)
q, err := newQuerier(
mockQuerierConfig(),
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
newReadRingMock(testData.ringIngesters, 0),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
_, err = q.Tail(ctx, &request, false)
assert.Equal(t, testData.expectedError, err)
})
}
}
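// TestQuerier_buildQueryIntervals checks how the query time range is split between ingesters and
// the store for different combinations of IngesterQueryStoreMaxLookback and QueryIngestersWithin.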
func TestQuerier_buildQueryIntervals(t *testing.T) {
// For simplicity, the cases below assume ingesterQueryStoreMaxLookback and queryIngestersWithin are each set to at most 11 hours,
// so overlappingQuery falls within the last 11 hours while nonOverlappingQuery is entirely older than that.
// Each case is exercised with both queries.
overlappingQuery := interval{
start: time.Now().Add(-6 * time.Hour),
end: time.Now(),
}
nonOverlappingQuery := interval{
start: time.Now().Add(-24 * time.Hour),
end: time.Now().Add(-12 * time.Hour),
}
type response struct {
ingesterQueryInterval *interval
storeQueryInterval *interval
}
compareResponse := func(t *testing.T, expectedResponse, actualResponse response) {
if expectedResponse.ingesterQueryInterval == nil {
require.Nil(t, actualResponse.ingesterQueryInterval)
} else {
require.InDelta(t, expectedResponse.ingesterQueryInterval.start.Unix(), actualResponse.ingesterQueryInterval.start.Unix(), 1)
require.InDelta(t, expectedResponse.ingesterQueryInterval.end.Unix(), actualResponse.ingesterQueryInterval.end.Unix(), 1)
}
if expectedResponse.storeQueryInterval == nil {
require.Nil(t, actualResponse.storeQueryInterval)
} else {
require.InDelta(t, expectedResponse.storeQueryInterval.start.Unix(), actualResponse.storeQueryInterval.start.Unix(), 1)
require.InDelta(t, expectedResponse.storeQueryInterval.end.Unix(), actualResponse.storeQueryInterval.end.Unix(), 1)
}
}
for _, tc := range []struct {
name string
ingesterQueryStoreMaxLookback time.Duration
queryIngestersWithin time.Duration
overlappingQueryExpectedResponse response
nonOverlappingQueryExpectedResponse response
}{
{
name: "default values, query ingesters and store for whole duration",
overlappingQueryExpectedResponse: response{ // query both store and ingesters
ingesterQueryInterval: &overlappingQuery,
storeQueryInterval: &overlappingQuery,
},
nonOverlappingQueryExpectedResponse: response{ // query both store and ingesters
ingesterQueryInterval: &nonOverlappingQuery,
storeQueryInterval: &nonOverlappingQuery,
},
},
{
name: "ingesterQueryStoreMaxLookback set to 1h",
ingesterQueryStoreMaxLookback: time.Hour,
overlappingQueryExpectedResponse: response{ // query ingesters for last 1h and store until last 1h.
ingesterQueryInterval: &interval{
start: time.Now().Add(-time.Hour),
end: overlappingQuery.end,
},
storeQueryInterval: &interval{
start: overlappingQuery.start,
end: time.Now().Add(-time.Hour),
},
},
nonOverlappingQueryExpectedResponse: response{ // query just the store
storeQueryInterval: &nonOverlappingQuery,
},
},
{
name: "ingesterQueryStoreMaxLookback set to 10h",
ingesterQueryStoreMaxLookback: 10 * time.Hour,
overlappingQueryExpectedResponse: response{ // query just the ingesters.
ingesterQueryInterval: &overlappingQuery,
},
nonOverlappingQueryExpectedResponse: response{ // query just the store
storeQueryInterval: &nonOverlappingQuery,
},
},
{
name: "ingesterQueryStoreMaxLookback set to 1h and queryIngestersWithin set to 2h, ingesterQueryStoreMaxLookback takes precedence",
ingesterQueryStoreMaxLookback: time.Hour,
queryIngestersWithin: 2 * time.Hour,
overlappingQueryExpectedResponse: response{ // query ingesters for last 1h and store until last 1h.
ingesterQueryInterval: &interval{
start: time.Now().Add(-time.Hour),
end: overlappingQuery.end,
},
storeQueryInterval: &interval{
start: overlappingQuery.start,
end: time.Now().Add(-time.Hour),
},
},
nonOverlappingQueryExpectedResponse: response{ // query just the store
storeQueryInterval: &nonOverlappingQuery,
},
},
{
name: "ingesterQueryStoreMaxLookback set to 2h and queryIngestersWithin set to 1h, ingesterQueryStoreMaxLookback takes precedence",
ingesterQueryStoreMaxLookback: 2 * time.Hour,
queryIngestersWithin: time.Hour,
overlappingQueryExpectedResponse: response{ // query ingesters for last 2h and store until last 2h.
ingesterQueryInterval: &interval{
start: time.Now().Add(-2 * time.Hour),
end: overlappingQuery.end,
},
storeQueryInterval: &interval{
start: overlappingQuery.start,
end: time.Now().Add(-2 * time.Hour),
},
},
nonOverlappingQueryExpectedResponse: response{ // query just the store
storeQueryInterval: &nonOverlappingQuery,
},
},
{
name: "ingesterQueryStoreMaxLookback set to -1, query just ingesters",
ingesterQueryStoreMaxLookback: -1,
overlappingQueryExpectedResponse: response{
ingesterQueryInterval: &overlappingQuery,
},
nonOverlappingQueryExpectedResponse: response{
ingesterQueryInterval: &nonOverlappingQuery,
},
},
{
name: "queryIngestersWithin set to 1h",
queryIngestersWithin: time.Hour,
overlappingQueryExpectedResponse: response{ // query both store and ingesters since query overlaps queryIngestersWithin
ingesterQueryInterval: &overlappingQuery,
storeQueryInterval: &overlappingQuery,
},
nonOverlappingQueryExpectedResponse: response{ // query just the store since query doesn't overlap queryIngestersWithin
storeQueryInterval: &nonOverlappingQuery,
},
},
{
name: "queryIngestersWithin set to 10h",
queryIngestersWithin: 10 * time.Hour,
overlappingQueryExpectedResponse: response{ // query both store and ingesters since query overlaps queryIngestersWithin
ingesterQueryInterval: &overlappingQuery,
storeQueryInterval: &overlappingQuery,
},
nonOverlappingQueryExpectedResponse: response{ // query just the store since query doesn't overlap queryIngestersWithin
storeQueryInterval: &nonOverlappingQuery,
},
},
} {
t.Run(tc.name, func(t *testing.T) {
querier := SingleTenantQuerier{cfg: Config{
IngesterQueryStoreMaxLookback: tc.ingesterQueryStoreMaxLookback,
QueryIngestersWithin: tc.queryIngestersWithin,
}}
ingesterQueryInterval, storeQueryInterval := querier.buildQueryIntervals(overlappingQuery.start, overlappingQuery.end)
compareResponse(t, tc.overlappingQueryExpectedResponse, response{
ingesterQueryInterval: ingesterQueryInterval,
storeQueryInterval: storeQueryInterval,
})
ingesterQueryInterval, storeQueryInterval = querier.buildQueryIntervals(nonOverlappingQuery.start, nonOverlappingQuery.end)
compareResponse(t, tc.nonOverlappingQueryExpectedResponse, response{
ingesterQueryInterval: ingesterQueryInterval,
storeQueryInterval: storeQueryInterval,
})
})
}
}
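// TestQuerier_calculateIngesterMaxLookbackPeriod verifies that IngesterQueryStoreMaxLookback takes
// precedence over QueryIngestersWithin and that the lookback is unbounded (-1) when neither is set.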
func TestQuerier_calculateIngesterMaxLookbackPeriod(t *testing.T) {
for _, tc := range []struct {
name string
ingesterQueryStoreMaxLookback time.Duration
queryIngestersWithin time.Duration
expected time.Duration
}{
{
name: "defaults are set; infinite lookback period if no values are set",
expected: -1,
},
{
name: "only setting ingesterQueryStoreMaxLookback",
ingesterQueryStoreMaxLookback: time.Hour,
expected: time.Hour,
},
{
name: "setting both ingesterQueryStoreMaxLookback and queryIngestersWithin; ingesterQueryStoreMaxLookback takes precedence",
ingesterQueryStoreMaxLookback: time.Hour,
queryIngestersWithin: time.Minute,
expected: time.Hour,
},
{
name: "only setting queryIngestersWithin",
queryIngestersWithin: time.Minute,
expected: time.Minute,
},
} {
t.Run(tc.name, func(t *testing.T) {
querier := SingleTenantQuerier{cfg: Config{
IngesterQueryStoreMaxLookback: tc.ingesterQueryStoreMaxLookback,
QueryIngestersWithin: tc.queryIngestersWithin,
}}
assert.Equal(t, tc.expected, querier.calculateIngesterMaxLookbackPeriod())
})
}
}
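// TestQuerier_isWithinIngesterMaxLookbackPeriod checks whether a query's end time falls within the
// configured ingester lookback period.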
func TestQuerier_isWithinIngesterMaxLookbackPeriod(t *testing.T) {
overlappingQuery := interval{
start: time.Now().Add(-6 * time.Hour),
end: time.Now(),
}
nonOverlappingQuery := interval{
start: time.Now().Add(-24 * time.Hour),
end: time.Now().Add(-12 * time.Hour),
}
for _, tc := range []struct {
name string
ingesterQueryStoreMaxLookback time.Duration
queryIngestersWithin time.Duration
overlappingWithinRange bool
nonOverlappingWithinRange bool
}{
{
name: "default values, query ingesters and store for whole duration",
overlappingWithinRange: true,
nonOverlappingWithinRange: true,
},
{
name: "ingesterQueryStoreMaxLookback set to 1h",
ingesterQueryStoreMaxLookback: time.Hour,
overlappingWithinRange: true,
nonOverlappingWithinRange: false,
},
{
name: "ingesterQueryStoreMaxLookback set to 10h",
ingesterQueryStoreMaxLookback: 10 * time.Hour,
overlappingWithinRange: true,
nonOverlappingWithinRange: false,
},
{
name: "ingesterQueryStoreMaxLookback set to 1h and queryIngestersWithin set to 16h, ingesterQueryStoreMaxLookback takes precedence",
ingesterQueryStoreMaxLookback: time.Hour,
queryIngestersWithin: 16 * time.Hour, // if used, this would put the nonOverlapping query in range
overlappingWithinRange: true,
nonOverlappingWithinRange: false,
},
{
name: "ingesterQueryStoreMaxLookback set to -1, query just ingesters",
ingesterQueryStoreMaxLookback: -1,
overlappingWithinRange: true,
nonOverlappingWithinRange: true,
},
{
name: "queryIngestersWithin set to 1h",
queryIngestersWithin: time.Hour,
overlappingWithinRange: true,
nonOverlappingWithinRange: false,
},
{
name: "queryIngestersWithin set to 10h",
queryIngestersWithin: 10 * time.Hour,
overlappingWithinRange: true,
nonOverlappingWithinRange: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
querier := SingleTenantQuerier{cfg: Config{
IngesterQueryStoreMaxLookback: tc.ingesterQueryStoreMaxLookback,
QueryIngestersWithin: tc.queryIngestersWithin,
}}
lookbackPeriod := querier.calculateIngesterMaxLookbackPeriod()
assert.Equal(t, tc.overlappingWithinRange, querier.isWithinIngesterMaxLookbackPeriod(lookbackPeriod, overlappingQuery.end))
assert.Equal(t, tc.nonOverlappingWithinRange, querier.isWithinIngesterMaxLookbackPeriod(lookbackPeriod, nonOverlappingQuery.end))
})
}
}
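// TestQuerier_RequestingIngesters verifies, per API call, whether ingesters, the store, or both are
// queried depending on the query time range and the configured lookback.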
func TestQuerier_RequestingIngesters(t *testing.T) {
ctx := user.InjectOrgID(context.Background(), "test")
requestMapping := map[string]struct {
ingesterMethod string
storeMethod string
}{
"SelectLogs": {
ingesterMethod: "Query",
storeMethod: "SelectLogs",
},
"SelectSamples": {
ingesterMethod: "QuerySample",
storeMethod: "SelectSamples",
},
"LabelValuesForMetricName": {
ingesterMethod: "Label",
storeMethod: "LabelValuesForMetricName",
},
"LabelNamesForMetricName": {
ingesterMethod: "Label",
storeMethod: "LabelNamesForMetricName",
},
"Series": {
ingesterMethod: "Series",
storeMethod: "SelectSeries",
},
}
tests := []struct {
desc string
start, end time.Time
setIngesterQueryStoreMaxLookback bool
expectedCallsStore int
expectedCallsIngesters int
}{
{
desc: "Data in storage and ingesters",
start: time.Now().Add(-time.Hour * 2),
end: time.Now(),
expectedCallsStore: 1,
expectedCallsIngesters: 1,
},
{
desc: "Data in ingesters (IngesterQueryStoreMaxLookback not set)",
start: time.Now().Add(-time.Minute * 15),
end: time.Now(),
expectedCallsStore: 1,
expectedCallsIngesters: 1,
},
{
desc: "Data only in storage",
start: time.Now().Add(-time.Hour * 2),
end: time.Now().Add(-time.Hour * 1),
expectedCallsStore: 1,
expectedCallsIngesters: 0,
},
{
desc: "Data in ingesters (IngesterQueryStoreMaxLookback set)",
start: time.Now().Add(-time.Minute * 15),
end: time.Now(),
setIngesterQueryStoreMaxLookback: true,
expectedCallsStore: 0,
expectedCallsIngesters: 1,
},
}
requests := []struct {
name string
do func(querier *SingleTenantQuerier, start, end time.Time) error
}{
{
name: "SelectLogs",
do: func(querier *SingleTenantQuerier, start, end time.Time) error {
_, err := querier.SelectLogs(ctx, logql.SelectLogParams{
QueryRequest: &logproto.QueryRequest{
Selector: `{type="test", fail="yes"} |= "foo"`,
Limit: 10,
Start: start,
End: end,
Direction: logproto.FORWARD,
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(`{type="test", fail="yes"} |= "foo"`),
},
},
})
return err
},
},
{
name: "SelectSamples",
do: func(querier *SingleTenantQuerier, start, end time.Time) error {
_, err := querier.SelectSamples(ctx, logql.SelectSampleParams{
SampleQueryRequest: &logproto.SampleQueryRequest{
Selector: `count_over_time({foo="bar"}[5m])`,
Start: start,
End: end,
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(`count_over_time({foo="bar"}[5m])`),
},
},
})
return err
},
},
{
name: "LabelValuesForMetricName",
do: func(querier *SingleTenantQuerier, start, end time.Time) error {
_, err := querier.Label(ctx, &logproto.LabelRequest{
Name: "type",
Values: true,
Start: &start,
End: &end,
})
return err
},
},
{
name: "LabelNamesForMetricName",
do: func(querier *SingleTenantQuerier, start, end time.Time) error {
_, err := querier.Label(ctx, &logproto.LabelRequest{
Values: false,
Start: &start,
End: &end,
})
return err
},
},
{
name: "Series",
do: func(querier *SingleTenantQuerier, start, end time.Time) error {
_, err := querier.Series(ctx, &logproto.SeriesRequest{
Start: start,
End: end,
})
return err
},
},
}
for _, tc := range tests {
t.Run(tc.desc, func(t *testing.T) {
conf := mockQuerierConfig()
conf.QueryIngestersWithin = time.Minute * 30
if tc.setIngesterQueryStoreMaxLookback {
conf.IngesterQueryStoreMaxLookback = conf.QueryIngestersWithin
}
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
for _, request := range requests {
t.Run(request.name, func(t *testing.T) {
ingesterClient, store, querier, err := setupIngesterQuerierMocks(conf, limits)
require.NoError(t, err)
err = request.do(querier, tc.start, tc.end)
require.NoError(t, err)
callsIngesters := ingesterClient.GetMockedCallsByMethod(requestMapping[request.name].ingesterMethod)
assert.Equal(t, tc.expectedCallsIngesters, len(callsIngesters))
callsStore := store.GetMockedCallsByMethod(requestMapping[request.name].storeMethod)
assert.Equal(t, tc.expectedCallsStore, len(callsStore))
})
}
})
}
}
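// TestQuerier_Volumes verifies that volume responses are served from the store, the ingesters, or
// both, and that overlapping results are merged.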
func TestQuerier_Volumes(t *testing.T) {
t.Run("it returns volumes from the store", func(t *testing.T) {
ret := &logproto.VolumeResponse{Volumes: []logproto.Volume{
{Name: "foo", Volume: 38},
}}
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
ingesterClient := newQuerierClientMock()
store := newStoreMock()
store.On("Volume", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
conf := mockQuerierConfig()
conf.QueryIngestersWithin = time.Minute * 30
conf.IngesterQueryStoreMaxLookback = conf.QueryIngestersWithin
querier, err := newQuerier(
conf,
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
now := time.Now()
from := model.TimeFromUnix(now.Add(-1 * time.Hour).Unix())
through := model.TimeFromUnix(now.Add(-35 * time.Minute).Unix())
req := &logproto.VolumeRequest{From: from, Through: through, Matchers: `{}`, Limit: 10}
ctx := user.InjectOrgID(context.Background(), "test")
resp, err := querier.Volume(ctx, req)
require.NoError(t, err)
require.Equal(t, []logproto.Volume{{Name: "foo", Volume: 38}}, resp.Volumes)
})
t.Run("it returns volumes from the ingester", func(t *testing.T) {
ret := &logproto.VolumeResponse{Volumes: []logproto.Volume{
{Name: "foo", Volume: 38},
}}
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
ingesterClient := newQuerierClientMock()
ingesterClient.On("GetVolume", mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
store := newStoreMock()
conf := mockQuerierConfig()
conf.QueryIngestersWithin = time.Minute * 30
conf.IngesterQueryStoreMaxLookback = conf.QueryIngestersWithin
querier, err := newQuerier(
conf,
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
now := time.Now()
from := model.TimeFromUnix(now.Add(-15 * time.Minute).Unix())
through := model.TimeFromUnix(now.Unix())
req := &logproto.VolumeRequest{From: from, Through: through, Matchers: `{}`, Limit: 10}
ctx := user.InjectOrgID(context.Background(), "test")
resp, err := querier.Volume(ctx, req)
require.NoError(t, err)
require.Equal(t, []logproto.Volume{{Name: "foo", Volume: 38}}, resp.Volumes)
})
t.Run("it merges volumes from the store and ingester", func(t *testing.T) {
ret := &logproto.VolumeResponse{Volumes: []logproto.Volume{
{Name: "foo", Volume: 38},
}}
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
ingesterClient := newQuerierClientMock()
ingesterClient.On("GetVolume", mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
store := newStoreMock()
store.On("Volume", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(ret, nil)
conf := mockQuerierConfig()
conf.QueryIngestersWithin = time.Minute * 30
conf.IngesterQueryStoreMaxLookback = conf.QueryIngestersWithin
querier, err := newQuerier(
conf,
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
require.NoError(t, err)
now := time.Now()
from := model.TimeFromUnix(now.Add(-time.Hour).Unix())
through := model.TimeFromUnix(now.Unix())
req := &logproto.VolumeRequest{From: from, Through: through, Matchers: `{}`, Limit: 10}
ctx := user.InjectOrgID(context.Background(), "test")
resp, err := querier.Volume(ctx, req)
require.NoError(t, err)
require.Equal(t, []logproto.Volume{{Name: "foo", Volume: 76}}, resp.Volumes)
})
}
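// setupIngesterQuerierMocks wires a querier to mocked ingester and store clients so tests can
// assert which backend was called.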
func setupIngesterQuerierMocks(conf Config, limits *validation.Overrides) (*querierClientMock, *storeMock, *SingleTenantQuerier, error) {
queryClient := newQueryClientMock()
queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 1)}), nil)
querySampleClient := newQuerySampleClientMock()
querySampleClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 1)}), nil)
ingesterClient := newQuerierClientMock()
ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil)
ingesterClient.On("QuerySample", mock.Anything, mock.Anything, mock.Anything).Return(querySampleClient, nil)
ingesterClient.On("Label", mock.Anything, mock.Anything, mock.Anything).Return(mockLabelResponse([]string{"bar"}), nil)
ingesterClient.On("Series", mock.Anything, mock.Anything, mock.Anything).Return(&logproto.SeriesResponse{
Series: []logproto.SeriesIdentifier{
{
Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "bar", Value: "1"}},
},
},
}, nil)
store := newStoreMock()
store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(0, 1), nil)
store.On("SelectSamples", mock.Anything, mock.Anything).Return(mockSampleIterator(querySampleClient), nil)
store.On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"1", "2", "3"}, nil)
store.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return([]string{"foo"}, nil)
store.On("SelectSeries", mock.Anything, mock.Anything).Return([]logproto.SeriesIdentifier{
{Labels: []logproto.SeriesIdentifier_LabelsEntry{{Key: "foo", Value: "1"}}},
}, nil)
querier, err := newQuerier(
conf,
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
&mockDeleteGettter{},
store, limits)
if err != nil {
return nil, nil, nil, err
}
return ingesterClient, store, querier, nil
}
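// fakeTimeLimits is a stub TimeRangeLimits implementation returning fixed lookback and length limits.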
type fakeTimeLimits struct {
maxQueryLookback time.Duration
maxQueryLength time.Duration
}
func (f fakeTimeLimits) MaxQueryLookback(_ context.Context, _ string) time.Duration {
return f.maxQueryLookback
}
func (f fakeTimeLimits) MaxQueryLength(_ context.Context, _ string) time.Duration {
return f.maxQueryLength
}
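// Test_validateQueryTimeRangeLimits verifies that the query start is clamped to the max lookback
// and that inverted or overly long time ranges are rejected.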
func Test_validateQueryTimeRangeLimits(t *testing.T) {
now := time.Now()
nowFunc = func() time.Time { return now }
tests := []struct {
name string
limits TimeRangeLimits
from time.Time
through time.Time
wantFrom time.Time
wantThrough time.Time
wantErr bool
}{
{"no change", fakeTimeLimits{1000 * time.Hour, 1000 * time.Hour}, now, now.Add(24 * time.Hour), now, now.Add(24 * time.Hour), false},
{"clamped to 24h", fakeTimeLimits{24 * time.Hour, 1000 * time.Hour}, now.Add(-48 * time.Hour), now, now.Add(-24 * time.Hour), now, false},
{"end before start", fakeTimeLimits{}, now, now.Add(-48 * time.Hour), time.Time{}, time.Time{}, true},
{"query too long", fakeTimeLimits{maxQueryLength: 24 * time.Hour}, now.Add(-48 * time.Hour), now, time.Time{}, time.Time{}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
from, through, err := validateQueryTimeRangeLimits(context.Background(), "foo", tt.limits, tt.from, tt.through)
if tt.wantErr {
require.NotNil(t, err)
} else {
require.Nil(t, err)
}
require.Equal(t, tt.wantFrom, from, "wanted (%s) got (%s)", tt.wantFrom, from)
require.Equal(t, tt.wantThrough, through)
})
}
}
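// TestQuerier_SelectLogWithDeletes verifies that delete requests overlapping the query window are
// attached to both the store and ingester log queries.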
func TestQuerier_SelectLogWithDeletes(t *testing.T) {
store := newStoreMock()
store.On("SelectLogs", mock.Anything, mock.Anything).Return(mockStreamIterator(1, 2), nil)
queryClient := newQueryClientMock()
queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil)
ingesterClient := newQuerierClientMock()
ingesterClient.On("Query", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil)
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
delGetter := &mockDeleteGettter{
results: []deletion.DeleteRequest{
{Query: `0`, StartTime: 0, EndTime: 100},
{Query: `1`, StartTime: 200, EndTime: 400},
{Query: `2`, StartTime: 400, EndTime: 500},
{Query: `3`, StartTime: 500, EndTime: 700},
{Query: `4`, StartTime: 700, EndTime: 900},
},
}
q, err := newQuerier(
mockQuerierConfig(),
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
delGetter, store, limits)
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
request := logproto.QueryRequest{
Selector: `{type="test"} |= "foo"`,
Limit: 10,
Start: time.Unix(0, 300000000),
End: time.Unix(0, 600000000),
Direction: logproto.FORWARD,
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(`{type="test"} |= "foo"`),
},
}
_, err = q.SelectLogs(ctx, logql.SelectLogParams{QueryRequest: &request})
require.NoError(t, err)
expectedRequest := &logproto.QueryRequest{
Selector: request.Selector,
Limit: request.Limit,
Start: request.Start,
End: request.End,
Direction: request.Direction,
Deletes: []*logproto.Delete{
{Selector: "1", Start: 200000000, End: 400000000},
{Selector: "2", Start: 400000000, End: 500000000},
{Selector: "3", Start: 500000000, End: 700000000},
},
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(request.Selector),
},
}
require.Contains(t, store.Calls[0].Arguments, logql.SelectLogParams{QueryRequest: expectedRequest})
require.Contains(t, ingesterClient.Calls[0].Arguments, expectedRequest)
require.Equal(t, "test", delGetter.user)
}
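// TestQuerier_SelectSamplesWithDeletes verifies that delete requests overlapping the query window
// are attached to both the store and ingester sample queries.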
func TestQuerier_SelectSamplesWithDeletes(t *testing.T) {
queryClient := newQuerySampleClientMock()
queryClient.On("Recv").Return(mockQueryResponse([]logproto.Stream{mockStream(1, 2)}), nil)
store := newStoreMock()
store.On("SelectSamples", mock.Anything, mock.Anything).Return(mockSampleIterator(queryClient), nil)
ingesterClient := newQuerierClientMock()
ingesterClient.On("QuerySample", mock.Anything, mock.Anything, mock.Anything).Return(queryClient, nil)
limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
require.NoError(t, err)
delGetter := &mockDeleteGettter{
results: []deletion.DeleteRequest{
{Query: `0`, StartTime: 0, EndTime: 100},
{Query: `1`, StartTime: 200, EndTime: 400},
{Query: `2`, StartTime: 400, EndTime: 500},
{Query: `3`, StartTime: 500, EndTime: 700},
{Query: `4`, StartTime: 700, EndTime: 900},
},
}
q, err := newQuerier(
mockQuerierConfig(),
mockIngesterClientConfig(),
newIngesterClientMockFactory(ingesterClient),
mockReadRingWithOneActiveIngester(),
delGetter, store, limits)
require.NoError(t, err)
ctx := user.InjectOrgID(context.Background(), "test")
request := logproto.SampleQueryRequest{
Selector: `count_over_time({foo="bar"}[5m])`,
Start: time.Unix(0, 300000000),
End: time.Unix(0, 600000000),
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(`count_over_time({foo="bar"}[5m])`),
},
}
_, err = q.SelectSamples(ctx, logql.SelectSampleParams{SampleQueryRequest: &request})
require.NoError(t, err)
expectedRequest := logql.SelectSampleParams{
SampleQueryRequest: &logproto.SampleQueryRequest{
Selector: request.Selector,
Start: request.Start,
End: request.End,
Deletes: []*logproto.Delete{
{Selector: "1", Start: 200000000, End: 400000000},
{Selector: "2", Start: 400000000, End: 500000000},
{Selector: "3", Start: 500000000, End: 700000000},
},
Plan: &plan.QueryPlan{
AST: syntax.MustParseExpr(request.Selector),
},
},
}
require.Contains(t, store.Calls[0].Arguments, expectedRequest)
require.Contains(t, ingesterClient.Calls[0].Arguments, expectedRequest.SampleQueryRequest)
require.Equal(t, "test", delGetter.user)
}
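// newQuerier is a test helper that builds a SingleTenantQuerier from the given mocks and limits.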
func newQuerier(cfg Config, clientCfg client.Config, clientFactory ring_client.PoolFactory, ring ring.ReadRing, dg *mockDeleteGettter, store storage.Store, limits *validation.Overrides) (*SingleTenantQuerier, error) {
iq, err := newIngesterQuerier(clientCfg, ring, cfg.ExtraQueryDelay, clientFactory, constants.Loki)
if err != nil {
return nil, err
}
return New(cfg, store, iq, limits, dg, nil, log.NewNopLogger())
}
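// mockDeleteGettter returns canned delete requests and records the user ID it was queried for.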
type mockDeleteGettter struct {
user string
results []deletion.DeleteRequest
}
func (d *mockDeleteGettter) GetAllDeleteRequestsForUser(_ context.Context, userID string) ([]deletion.DeleteRequest, error) {
d.user = userID
return d.results, nil
}
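// TestQuerier_isLabelRelevant checks that labels whose values look machine-generated (UUIDs, GUIDs,
// integers, floats) are treated as not relevant, while human-readable values are.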
func TestQuerier_isLabelRelevant(t *testing.T) {
for _, tc := range []struct {
name string
label string
values *logproto.UniqueLabelValues
expected bool
}{
{
label: "uuidv4 values are not relevant",
values: &logproto.UniqueLabelValues{Values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}},
expected: false,
},
{
label: "guid values are not relevant",
values: &logproto.UniqueLabelValues{Values: []string{"57808f62-f117-4a22-84a0-bc3282c7f106", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}},
expected: false,
},
{
label: "integer values are not relevant",
values: &logproto.UniqueLabelValues{Values: []string{"1", "2", "3", "4"}},
expected: false,
},
{
label: "string values are relevant",
values: &logproto.UniqueLabelValues{Values: []string{"ingester", "querier", "query-frontend", "index-gateway"}},
expected: true,
},
{
label: "guid with braces are not relevant",
values: &logproto.UniqueLabelValues{Values: []string{"{E9550CF7-58D9-48B9-8845-D9800C651AAC}", "{1617921B-1749-4FF0-A058-31AFB5D98149}", "{C119D92E-A4B9-48A3-A92C-6CA8AA8A6CCC}", "{228AAF1D-2DE7-4909-A4E9-246A7FA9D988}"}},
expected: false,
},
{
label: "float values are not relevant",
values: &logproto.UniqueLabelValues{Values: []string{"1.2", "2.5", "3.3", "4.1"}},
expected: false,
},
} {
t.Run(tc.label, func(t *testing.T) {
querier := &SingleTenantQuerier{cfg: mockQuerierConfig()}
assert.Equal(t, tc.expected, querier.isLabelRelevant(tc.label, tc.values))
})
}
}