mirror of https://github.com/grafana/loki
Ruler: remote rule evaluation (#8744)
**What this PR does / why we need it**: Adds the ability to evaluate recording & alerting rules against a given `query-frontend`, allowing these queries to be executed with all the parallelisation & optimisation that regular ad-hoc queries have. This is important because with `local` evaluation all queries are single-threaded, and rules that evaluate a large range or volume of data may time out or OOM the `ruler` itself, leading to missed metrics or alerts. When `remote` evaluation mode is enabled, the `ruler` effectively becomes a gRPC client of the `query-frontend`, which dramatically improves the `ruler`'s reliability and drastically reduces its resource requirements.

**Which issue(s) this PR fixes**: This PR implements the feature discussed in https://github.com/grafana/loki/pull/8129 (**LID 0002: Remote Rule Evaluation**).
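As a minimal sketch of how the new mode is enabled (the `query-frontend` address below is a placeholder; a `dns:///` prefix enables client-side load balancing across frontends):

```yaml
# Sketch only: enable remote rule evaluation against a query-frontend.
# The address is a placeholder, not a value from this PR.
ruler:
  evaluation:
    mode: remote   # defaults to "local"
    query_frontend:
      address: dns:///query-frontend:9095
```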
parent 540380fa94
commit 33e44ed39d
@@ -0,0 +1,53 @@
package cluster

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func (c *Component) WithRulerRemoteWrite(name, url string) {
	// ensure remote-write is enabled
	c.WithExtraConfig(`
ruler:
  remote_write:
    enabled: true
`)

	c.WithExtraConfig(fmt.Sprintf(`
ruler:
  remote_write:
    clients:
      %s:
        url: %s/api/v1/write
        queue_config:
          # send immediately as soon as a sample is generated
          capacity: 1
          batch_send_deadline: 0s
`, name, url))
}

func (c *Component) WithTenantRules(tenantFilesMap map[string]map[string]string) error {
	sharedPath := c.ClusterSharedPath()
	rulesPath := filepath.Join(sharedPath, "rules")

	if err := os.Mkdir(rulesPath, 0755); err != nil {
		return fmt.Errorf("error creating rules path: %w", err)
	}

	for tenant, files := range tenantFilesMap {
		for filename, file := range files {
			path := filepath.Join(rulesPath, tenant)
			if err := os.Mkdir(path, 0755); err != nil {
				return fmt.Errorf("error creating tenant %s rules path: %w", tenant, err)
			}
			if err := os.WriteFile(filepath.Join(path, filename), []byte(strings.TrimSpace(file)), 0644); err != nil {
				return fmt.Errorf("error creating rule file at path %s: %w", path, err)
			}
		}
	}

	return nil
}
@@ -0,0 +1,175 @@
package integration

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/prometheus/prometheus/storage/remote"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/grafana/loki/integration/client"
	"github.com/grafana/loki/integration/cluster"

	"github.com/grafana/loki/pkg/ruler"
)

// TestLocalRuleEval tests that rules are evaluated locally with an embedded query engine
// and that the results are written to the backend correctly.
func TestLocalRuleEval(t *testing.T) {
	testRuleEval(t, ruler.EvalModeLocal)
}

// TestRemoteRuleEval tests that rules are evaluated remotely against a configured query-frontend
// and that the results are written to the backend correctly.
func TestRemoteRuleEval(t *testing.T) {
	testRuleEval(t, ruler.EvalModeRemote)
}

// The only way we can test rule evaluation in an integration test is to use the remote-write feature.
// In this test we stub out a remote-write receiver and check that the expected data is sent to it.
// Both the local and the remote rule evaluation modes should produce the same result.
func testRuleEval(t *testing.T, mode string) {
	clu := cluster.New()
	t.Cleanup(func() {
		assert.NoError(t, clu.Cleanup())
	})

	// initialise a write component and ingest some logs
	tWrite := clu.AddComponent(
		"write",
		"-target=write",
	)

	now := time.Now()
	tenantID := randStringRunes()

	require.NoError(t, clu.Run())

	job := "accesslog"

	cliWrite := client.New(tenantID, "", tWrite.HTTPURL())
	cliWrite.Now = now
	t.Run("ingest logs", func(t *testing.T) {
		require.NoError(t, cliWrite.PushLogLineWithTimestamp("HEAD /", now, map[string]string{"method": "HEAD", "job": job}))
		require.NoError(t, cliWrite.PushLogLineWithTimestamp("GET /", now, map[string]string{"method": "GET", "job": job}))
		require.NoError(t, cliWrite.PushLogLineWithTimestamp("GET /", now.Add(time.Second), map[string]string{"method": "GET", "job": job}))
	})

	// advance time to after the last ingested log line so queries don't return empty results
	now = now.Add(time.Second * 2)

	// start up a read component for remote rule evaluation
	tRead := clu.AddComponent(
		"read",
		"-target=read",
		// we set a fake address here because deletion is not being tested,
		// and we have a circular dependency with the backend
		"-common.compactor-address=http://fake",
		"-legacy-read-mode=false",
	)

	require.NoError(t, clu.Run())

	// start up a backend component which contains the ruler
	tBackend := clu.AddComponent(
		"backend",
		"-target=backend",
		"-legacy-read-mode=false",
	)

	rwHandler := func(called *bool, test func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
		return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path != "/api/v1/write" {
				t.Errorf("Expected to request '/api/v1/write', got: %s", r.URL.Path)
			}

			test(w, r)

			*called = true

			w.WriteHeader(http.StatusOK)
		}))
	}

	// this is the function that will be called when the remote-write receiver receives a request;
	// it tests that the expected payload is received.
	expectedResults := func(w http.ResponseWriter, r *http.Request) {
		wr, err := remote.DecodeWriteRequest(r.Body)
		require.NoError(t, err)

		// depending on the rule interval, we may get multiple timeseries before remote-write is triggered,
		// so we just check that we have at least one that matches our requirements.
		require.GreaterOrEqual(t, len(wr.Timeseries), 1)

		// we expect to see two GET lines from the aggregation in the recording rule
		require.Equal(t, wr.Timeseries[len(wr.Timeseries)-1].Samples[0].Value, float64(2))
	}

	var called bool
	server1 := rwHandler(&called, expectedResults)
	defer server1.Close()

	// configure the backend component
	tBackend.WithRulerRemoteWrite("target1", server1.URL)

	if mode == ruler.EvalModeRemote {
		tBackend.WithExtraConfig(fmt.Sprintf(`
ruler:
  evaluation:
    mode: %s
    query_frontend:
      address: %s
`, mode, tRead.GRPCURL()))
	}

	record := fmt.Sprintf(`
groups:
  - name: record
    interval: 1s
    rules:
      - record: test
        expr: sum by (method) (count_over_time({job="%s", method="GET"}[1m]))
        labels:
          foo: bar
`, job)

	require.NoError(t, tBackend.WithTenantRules(map[string]map[string]string{
		tenantID: {
			"record.yaml": record,
		},
	}))

	m, e := tBackend.MergedConfig()
	require.NoError(t, e)
	t.Logf("starting backend with config:\n%s\n", m)

	require.NoError(t, clu.Run())

	cliBackend := client.New(tenantID, "", tBackend.HTTPURL())
	cliBackend.Now = now
	t.Run(fmt.Sprintf("%s rule evaluation", mode), func(t *testing.T) {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()

		// check rules exist
		resp, err := cliBackend.GetRules(ctx)

		require.NoError(t, err)
		require.NotNil(t, resp)

		require.Equal(t, "success", resp.Status)

		require.Len(t, resp.Data.Groups, 1)
		require.Len(t, resp.Data.Groups[0].Rules, 1)

		// ensure that the remote-write receiver was called
		require.Eventually(t, func() bool {
			return assert.ObjectsAreEqualValues(true, called)
		}, 20*time.Second, 100*time.Millisecond, "remote-write was not called")
	})
}
@@ -0,0 +1,54 @@
package util

import (
	"fmt"

	"github.com/imdario/mergo"
	"gopkg.in/yaml.v2"
)

// YAMLMerger takes a set of given YAML fragments and merges them into a single YAML document.
// The order in which these fragments are supplied is maintained, so subsequent fragments will override preceding ones.
type YAMLMerger struct {
	fragments [][]byte
}

func NewYAMLMerger() *YAMLMerger {
	return &YAMLMerger{}
}

func (m *YAMLMerger) AddFragment(fragment []byte) {
	m.fragments = append(m.fragments, fragment)
}

func (m *YAMLMerger) Merge() ([]byte, error) {
	merged := make(map[interface{}]interface{})
	for _, fragment := range m.fragments {
		fragmentMap, err := yamlToMap(fragment)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal given fragment %q to map: %w", fragment, err)
		}

		if err = mergo.Merge(&merged, fragmentMap, mergo.WithOverride, mergo.WithTypeCheck); err != nil {
			return nil, fmt.Errorf("failed to merge fragment %q with base: %w", fragment, err)
		}
	}

	mergedYAML, err := yaml.Marshal(merged)
	if err != nil {
		return nil, err
	}

	return mergedYAML, nil
}

func yamlToMap(fragment []byte) (interface{}, error) {
	var fragmentMap map[interface{}]interface{}

	err := yaml.Unmarshal(fragment, &fragmentMap)
	if err != nil {
		return nil, err
	}

	return fragmentMap, nil
}
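A small usage sketch (not part of the diff) showing the override order described in the `YAMLMerger` comment above; the import path for the `util` package is an assumption:

```go
package main

import (
	"fmt"

	// assumed import path for the integration util package above
	"github.com/grafana/loki/integration/util"
)

func main() {
	m := util.NewYAMLMerger()

	// base fragment: remote-write disabled
	m.AddFragment([]byte("ruler:\n  remote_write:\n    enabled: false\n"))
	// a later fragment overrides the earlier one for the same key
	m.AddFragment([]byte("ruler:\n  remote_write:\n    enabled: true\n"))

	merged, err := m.Merge()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // remote_write.enabled ends up true
}
```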
@@ -0,0 +1,36 @@
package ruler

import (
	"context"
	"flag"
	"fmt"
	"strings"
	"time"

	"github.com/grafana/loki/pkg/logqlmodel"
)

// Evaluator is the interface that must be satisfied in order to accept rule evaluations from the Ruler.
type Evaluator interface {
	// Eval evaluates the given rule and returns the result.
	Eval(ctx context.Context, qs string, now time.Time) (*logqlmodel.Result, error)
}

type EvaluationConfig struct {
	Mode string `yaml:"mode,omitempty"`

	QueryFrontend QueryFrontendConfig `yaml:"query_frontend,omitempty"`
}

func (c *EvaluationConfig) RegisterFlags(f *flag.FlagSet) {
	f.StringVar(&c.Mode, "ruler.evaluation.mode", EvalModeLocal, "The evaluation mode for the ruler. Can be either 'local' or 'remote'. If set to 'local', the ruler will evaluate rules locally. If set to 'remote', the ruler will evaluate rules remotely. If unset, the ruler will evaluate rules locally.")
	c.QueryFrontend.RegisterFlags(f)
}

func (c *EvaluationConfig) Validate() error {
	if c.Mode != EvalModeLocal && c.Mode != EvalModeRemote {
		return fmt.Errorf("invalid evaluation mode: %s. Acceptable modes are: %s", c.Mode, strings.Join([]string{EvalModeLocal, EvalModeRemote}, ", "))
	}

	return nil
}
@@ -0,0 +1,53 @@
package ruler

import (
	"context"
	"fmt"
	"time"

	"github.com/go-kit/log"

	"github.com/grafana/loki/pkg/logproto"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logqlmodel"
)

const EvalModeLocal = "local"

type LocalEvaluator struct {
	cfg    *EvaluationConfig
	engine *logql.Engine
	logger log.Logger
}

func NewLocalEvaluator(cfg *EvaluationConfig, engine *logql.Engine, logger log.Logger) (*LocalEvaluator, error) {
	if cfg == nil {
		return nil, fmt.Errorf("given config is nil")
	}
	if engine == nil {
		return nil, fmt.Errorf("given engine is nil")
	}

	return &LocalEvaluator{cfg: cfg, engine: engine, logger: logger}, nil
}

func (l *LocalEvaluator) Eval(ctx context.Context, qs string, now time.Time) (*logqlmodel.Result, error) {
	// an instant query at the rule's evaluation time: start == end == now, no step/interval, no limit
	params := logql.NewLiteralParams(
		qs,
		now,
		now,
		0,
		0,
		logproto.FORWARD,
		0,
		nil,
	)

	q := l.engine.Query(params)
	res, err := q.Exec(ctx)
	if err != nil {
		return nil, err
	}

	return &res, nil
}
@@ -0,0 +1,259 @@
package ruler

// SPDX-License-Identifier: AGPL-3.0-only
// Provenance-includes-location: https://github.com/grafana/mimir/pull/1536/
// Provenance-includes-license: Apache-2.0
// Provenance-includes-copyright: The Cortex Authors.

import (
	"bytes"
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"net/http"
	"net/textproto"
	"net/url"
	"strconv"
	"time"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/grafana/dskit/crypto/tls"
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	otgrpc "github.com/opentracing-contrib/go-grpc"
	"github.com/opentracing/opentracing-go"
	"github.com/prometheus/prometheus/promql"
	"github.com/weaveworks/common/httpgrpc"
	"github.com/weaveworks/common/middleware"
	"github.com/weaveworks/common/user"
	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"

	"github.com/grafana/loki/pkg/loghttp"
	"github.com/grafana/loki/pkg/logql"
	"github.com/grafana/loki/pkg/logqlmodel"
	"github.com/grafana/loki/pkg/querier/series"
	"github.com/grafana/loki/pkg/util/build"
	"github.com/grafana/loki/pkg/util/spanlogger"
)

const (
	keepAlive        = time.Second * 10
	keepAliveTimeout = time.Second * 5

	serviceConfig = `{"loadBalancingPolicy": "round_robin"}`

	queryEndpointPath = "/loki/api/v1/query"

	mimeTypeFormPost = "application/x-www-form-urlencoded"
)

const EvalModeRemote = "remote"

var userAgent = fmt.Sprintf("loki-ruler/%s", build.Version)

type RemoteEvaluator struct {
	rq     *remoteQuerier
	logger log.Logger
}

func NewRemoteEvaluator(cfg *EvaluationConfig, logger log.Logger) (*RemoteEvaluator, error) {
	qfClient, err := dialQueryFrontend(cfg.QueryFrontend)
	if err != nil {
		return nil, fmt.Errorf("failed to dial query frontend for remote rule evaluation: %w", err)
	}

	return &RemoteEvaluator{
		rq:     newRemoteQuerier(qfClient, logger, WithOrgIDMiddleware),
		logger: logger,
	}, nil
}

func (r *RemoteEvaluator) Eval(ctx context.Context, qs string, now time.Time) (*logqlmodel.Result, error) {
	res, err := r.rq.Query(ctx, qs, now)
	if err != nil {
		return nil, fmt.Errorf("failed to perform remote evaluation of query %q: %w", qs, err)
	}

	return res, err
}

// dialQueryFrontend creates and initializes a new httpgrpc.HTTPClient taking a QueryFrontendConfig configuration.
func dialQueryFrontend(cfg QueryFrontendConfig) (httpgrpc.HTTPClient, error) {
	tlsDialOptions, err := cfg.TLS.GetGRPCDialOptions(cfg.TLSEnabled)
	if err != nil {
		return nil, err
	}
	dialOptions := append(
		[]grpc.DialOption{
			grpc.WithKeepaliveParams(
				keepalive.ClientParameters{
					Time:                keepAlive,
					Timeout:             keepAliveTimeout,
					PermitWithoutStream: true,
				},
			),
			grpc.WithUnaryInterceptor(
				grpc_middleware.ChainUnaryClient(
					otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
					middleware.ClientUserHeaderInterceptor,
				),
			),
			grpc.WithDefaultServiceConfig(serviceConfig),
		},
		tlsDialOptions...,
	)

	conn, err := grpc.Dial(cfg.Address, dialOptions...)
	if err != nil {
		return nil, err
	}
	return httpgrpc.NewHTTPClient(conn), nil
}

// Middleware provides a mechanism to inspect outgoing remote querier requests.
type Middleware func(ctx context.Context, req *httpgrpc.HTTPRequest) error

// remoteQuerier executes read operations against a httpgrpc.HTTPClient.
type remoteQuerier struct {
	client      httpgrpc.HTTPClient
	middlewares []Middleware
	logger      log.Logger
}

// newRemoteQuerier creates and initializes a new remoteQuerier instance.
func newRemoteQuerier(
	client httpgrpc.HTTPClient,
	logger log.Logger,
	middlewares ...Middleware,
) *remoteQuerier {
	return &remoteQuerier{
		client:      client,
		middlewares: middlewares,
		logger:      logger,
	}
}

// Query performs a query for the given time.
func (q *remoteQuerier) Query(ctx context.Context, qs string, t time.Time) (*logqlmodel.Result, error) {
	logger, ctx := spanlogger.NewWithLogger(ctx, q.logger, "ruler.remoteEvaluation.Query")
	defer logger.Span.Finish()

	return q.query(ctx, qs, t, logger)
}

func (q *remoteQuerier) query(ctx context.Context, query string, ts time.Time, logger log.Logger) (*logqlmodel.Result, error) {
	args := make(url.Values)
	args.Set("query", query)
	args.Set("direction", "forward")
	if !ts.IsZero() {
		args.Set("time", ts.Format(time.RFC3339Nano))
	}
	body := []byte(args.Encode())
	hash := logql.HashedQuery(query)

	req := httpgrpc.HTTPRequest{
		Method: http.MethodPost,
		Url:    queryEndpointPath,
		Body:   body,
		Headers: []*httpgrpc.Header{
			{Key: textproto.CanonicalMIMEHeaderKey("User-Agent"), Values: []string{userAgent}},
			{Key: textproto.CanonicalMIMEHeaderKey("Content-Type"), Values: []string{mimeTypeFormPost}},
			{Key: textproto.CanonicalMIMEHeaderKey("Content-Length"), Values: []string{strconv.Itoa(len(body))}},
		},
	}

	for _, mdw := range q.middlewares {
		if err := mdw(ctx, &req); err != nil {
			return nil, err
		}
	}

	start := time.Now()
	resp, err := q.client.Handle(ctx, &req)
	if err != nil {
		level.Warn(logger).Log("msg", "failed to remotely evaluate query expression", "err", err, "query_hash", hash, "qs", query, "ts", ts, "response_time", time.Since(start).Seconds())
		return nil, err
	}
	if resp.Code/100 != 2 {
		return nil, fmt.Errorf("unexpected response status code %d: %s", resp.Code, string(resp.Body))
	}
	level.Debug(logger).Log("msg", "query expression successfully evaluated", "query_hash", hash, "qs", query, "ts", ts, "response_time", time.Since(start).Seconds())

	return decodeResponse(resp)
}

func decodeResponse(resp *httpgrpc.HTTPResponse) (*logqlmodel.Result, error) {
	var decoded loghttp.QueryResponse
	if err := json.NewDecoder(bytes.NewReader(resp.Body)).Decode(&decoded); err != nil {
		return nil, err
	}
	if decoded.Status == "error" {
		return nil, fmt.Errorf("query response error: %s", decoded.Status)
	}

	switch decoded.Data.ResultType {
	case loghttp.ResultTypeVector:
		var res promql.Vector
		vec := decoded.Data.Result.(loghttp.Vector)

		for _, s := range vec {
			res = append(res, promql.Sample{
				Metric: series.MetricToLabels(s.Metric),
				Point:  promql.Point{V: float64(s.Value), T: int64(s.Timestamp)},
			})
		}

		return &logqlmodel.Result{
			Statistics: decoded.Data.Statistics,
			Data:       res,
		}, nil
	case loghttp.ResultTypeScalar:
		var res promql.Scalar
		scalar := decoded.Data.Result.(loghttp.Scalar)
		res.T = scalar.Timestamp.Unix()
		res.V = float64(scalar.Value)

		return &logqlmodel.Result{
			Statistics: decoded.Data.Statistics,
			Data:       res,
		}, nil
	default:
		return nil, fmt.Errorf("unsupported result type %s", decoded.Data.ResultType)
	}
}

// WithOrgIDMiddleware attaches 'X-Scope-OrgID' header value to the outgoing request by inspecting the passed context.
// In case the expression to evaluate corresponds to a federated rule, the ExtractTenantIDs function will take care
// of normalizing and concatenating source tenants by separating them with a '|' character.
func WithOrgIDMiddleware(ctx context.Context, req *httpgrpc.HTTPRequest) error {
	orgID, err := user.ExtractOrgID(ctx)
	if err != nil {
		return err
	}
	req.Headers = append(req.Headers, &httpgrpc.Header{
		Key:    textproto.CanonicalMIMEHeaderKey(user.OrgIDHeaderName),
		Values: []string{orgID},
	})
	return nil
}

// QueryFrontendConfig defines query-frontend transport configuration.
type QueryFrontendConfig struct {
	// The address of the remote querier to connect to.
	Address string `yaml:"address"`

	// TLSEnabled tells whether TLS should be used to establish remote connection.
	TLSEnabled bool `yaml:"tls_enabled"`

	// TLS is the config for client TLS.
	TLS tls.ClientConfig `yaml:",inline"`
}

func (c *QueryFrontendConfig) RegisterFlags(f *flag.FlagSet) {
	f.StringVar(&c.Address, "ruler.evaluation.query-frontend.address", "", "GRPC listen address of the query-frontend(s). Must be a DNS address (prefixed with dns:///) to enable client side load balancing.")
	f.BoolVar(&c.TLSEnabled, "ruler.evaluation.query-frontend.tls-enabled", false, "Set to true if query-frontend connection requires TLS.")

	c.TLS.RegisterFlagsWithPrefix("ruler.evaluation.query-frontend", f)
}
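For reference, the flags registered above map to a command-line form roughly like the following; the address is only a placeholder, and the `dns:///` prefix is what enables client-side load balancing across query-frontends:

```
-ruler.evaluation.mode=remote
-ruler.evaluation.query-frontend.address=dns:///query-frontend:9095
```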
@@ -0,0 +1,20 @@
groups:
  - name: Sample Rule Group
    interval: 5s
    rules:
      - record: generated_logs:rate1m
        expr: sum by (http_method) (rate({job="generated-logs"}[1m]))
        labels:
          source: "recording rule"
      - record: scalar
        expr: 10
        labels:
          source: "static"
      - alert: NoGeneratedLogs
        expr: absent_over_time({job="generated-logs"}[1m])
        labels:
          source: "alerting rule"
      - alert: AlwaysFiring
        expr: absent_over_time({job="blah"}[1m])
        labels:
          source: "alerting rule"