mirror of https://github.com/grafana/loki
Scheduler: Add query fairness control across multiple actors within a tenant (#8752)
**What this PR does / why we need it**: This PR wires up the scheduler with the hierarchical queues. It is the last PR to implement https://github.com/grafana/loki/pull/8585. When these changes are in place, the client performing query requests can control their QoS (query fairness) using the `X-Actor-Path` HTTP header. This header controls in which sub-queue of the tenant's scheduler queue the query request is enqueued. The place within the hierarchy where it is enqueued defines the probability with which the request gets dequeued. A common use-case for this QoS control is giving each Grafana user within a tenant their fair share of query execution time. Any documentation is still missing and will be provided by follow-up PRs. **Special notes for your reviewer**: ```console $ gotest -count=1 -v ./pkg/scheduler/queue/... -test.run=TestQueryFairness === RUN TestQueryFairness === RUN TestQueryFairness/use_hierarchical_queues_=_false dequeue_qos_test.go:109: duration actor a 2.007765568s dequeue_qos_test.go:109: duration actor b 2.209088331s dequeue_qos_test.go:112: total duration 2.209280772s === RUN TestQueryFairness/use_hierarchical_queues_=_true dequeue_qos_test.go:109: duration actor b 605.283144ms dequeue_qos_test.go:109: duration actor a 2.270931324s dequeue_qos_test.go:112: total duration 2.271108551s --- PASS: TestQueryFairness (4.48s) --- PASS: TestQueryFairness/use_hierarchical_queues_=_false (2.21s) --- PASS: TestQueryFairness/use_hierarchical_queues_=_true (2.27s) PASS ok github.com/grafana/loki/pkg/scheduler/queue 4.491s ``` ```console $ gotest -count=5 -v ./pkg/scheduler/queue/... 
-bench=Benchmark -test.run=^$ -benchtime=10000x -benchmem goos: linux goarch: amd64 pkg: github.com/grafana/loki/pkg/scheduler/queue cpu: 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz BenchmarkGetNextRequest BenchmarkGetNextRequest/without_sub-queues BenchmarkGetNextRequest/without_sub-queues-8 10000 29337 ns/op 1600 B/op 100 allocs/op BenchmarkGetNextRequest/without_sub-queues-8 10000 21348 ns/op 1600 B/op 100 allocs/op BenchmarkGetNextRequest/without_sub-queues-8 10000 21595 ns/op 1600 B/op 100 allocs/op BenchmarkGetNextRequest/without_sub-queues-8 10000 21189 ns/op 1600 B/op 100 allocs/op BenchmarkGetNextRequest/without_sub-queues-8 10000 21602 ns/op 1600 B/op 100 allocs/op BenchmarkGetNextRequest/with_1_level_of_sub-queues BenchmarkGetNextRequest/with_1_level_of_sub-queues-8 10000 33770 ns/op 2400 B/op 200 allocs/op BenchmarkGetNextRequest/with_1_level_of_sub-queues-8 10000 33596 ns/op 2400 B/op 200 allocs/op BenchmarkGetNextRequest/with_1_level_of_sub-queues-8 10000 34432 ns/op 2400 B/op 200 allocs/op BenchmarkGetNextRequest/with_1_level_of_sub-queues-8 10000 33760 ns/op 2400 B/op 200 allocs/op BenchmarkGetNextRequest/with_1_level_of_sub-queues-8 10000 33664 ns/op 2400 B/op 200 allocs/op BenchmarkGetNextRequest/with_2_levels_of_sub-queues BenchmarkGetNextRequest/with_2_levels_of_sub-queues-8 10000 71405 ns/op 3200 B/op 300 allocs/op BenchmarkGetNextRequest/with_2_levels_of_sub-queues-8 10000 59472 ns/op 3200 B/op 300 allocs/op BenchmarkGetNextRequest/with_2_levels_of_sub-queues-8 10000 117163 ns/op 3200 B/op 300 allocs/op BenchmarkGetNextRequest/with_2_levels_of_sub-queues-8 10000 106505 ns/op 3200 B/op 300 allocs/op BenchmarkGetNextRequest/with_2_levels_of_sub-queues-8 10000 64374 ns/op 3200 B/op 300 allocs/op BenchmarkQueueRequest BenchmarkQueueRequest-8 10000 168391 ns/op 320588 B/op 1156 allocs/op BenchmarkQueueRequest-8 10000 166203 ns/op 320587 B/op 1156 allocs/op BenchmarkQueueRequest-8 10000 149518 ns/op 320584 B/op 1156 allocs/op 
BenchmarkQueueRequest-8 10000 219776 ns/op 320583 B/op 1156 allocs/op BenchmarkQueueRequest-8 10000 185198 ns/op 320597 B/op 1156 allocs/op PASS ok github.com/grafana/loki/pkg/scheduler/queue 64.648s ``` Signed-off-by: Christian Haudum <christian.haudum@gmail.com>pull/8853/head
parent
3bed7eef55
commit
be8b4eece3
@ -0,0 +1,183 @@ |
||||
package queue |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"strings" |
||||
"sync" |
||||
|
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/stretchr/testify/require" |
||||
"go.uber.org/atomic" |
||||
) |
||||
|
||||
const (
	// numRequestsPerActor is the number of top-level requests enqueued per actor.
	numRequestsPerActor = 1
	// numQueriers is the number of concurrent querier workers simulated in the benchmark.
	numQueriers = 5
)
||||
|
||||
// req is the queue request used in the QoS tests; it carries a simulated
// execution duration plus identifying metadata.
type req struct {
	// duration the simulated querier sleeps to "execute" the request
	duration time.Duration
	// tenant the request belongs to
	tenant string
	// actor is the serialized actor path (segments joined with "|")
	actor string
	// queryID is the index of the top-level request
	queryID int
	// subQueryID is the index of the sub-request within the top-level request
	subQueryID int
}
||||
|
||||
func enqueueRequestsForActor(t testing.TB, actor []string, useActor bool, queue *RequestQueue, numSubRequests int, d time.Duration) { |
||||
tenant := "tenant" |
||||
serializedActor := strings.Join(actor, "|") |
||||
for x := 0; x < numRequestsPerActor; x++ { |
||||
for y := 0; y < numSubRequests; y++ { |
||||
r := &req{ |
||||
duration: d, |
||||
queryID: x, |
||||
subQueryID: y, |
||||
tenant: tenant, |
||||
actor: serializedActor, |
||||
} |
||||
|
||||
if !useActor { |
||||
actor = nil |
||||
} |
||||
err := queue.Enqueue("tenant", actor, r, 0, nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
// BenchmarkQueryFairness enqueues requests for two actors (plus requests with
// no actor path) and measures how long each actor waits until its last
// response, with and without hierarchical queues.
func BenchmarkQueryFairness(t *testing.B) {
	numSubRequestsActorA, numSubRequestsActorB := 123, 45
	// actor A's count appears twice because requests are enqueued once with
	// an empty actor path and once under actor "a" (see calls below).
	total := int64((numSubRequestsActorA + numSubRequestsActorA + numSubRequestsActorB) * numRequestsPerActor)

	for _, useActor := range []bool{false, true} {
		t.Run(fmt.Sprintf("use hierarchical queues = %v", useActor), func(t *testing.B) {
			requestQueue := NewRequestQueue(1024, 0,
				prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user"}),
				prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}),
			)
			enqueueRequestsForActor(t, []string{}, useActor, requestQueue, numSubRequestsActorA, 50*time.Millisecond)
			enqueueRequestsForActor(t, []string{"a"}, useActor, requestQueue, numSubRequestsActorA, 100*time.Millisecond)
			enqueueRequestsForActor(t, []string{"b"}, useActor, requestQueue, numSubRequestsActorB, 50*time.Millisecond)
			requestQueue.queues.recomputeUserQueriers()

			// set timeout to minimize impact on overall test run duration in case something goes wrong
			ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
			defer cancel()

			start := time.Now()
			// durations maps actor -> time of that actor's most recent response.
			durations := &sync.Map{}
			var wg sync.WaitGroup
			var responseCount atomic.Int64
			// Simulate querier loop: each goroutine dequeues requests and
			// "executes" them by sleeping for the request's duration.
			for q := 0; q < numQueriers; q++ {
				wg.Add(1)
				go func(id string) {
					defer wg.Done()

					requestQueue.RegisterQuerierConnection(id)
					defer requestQueue.UnregisterQuerierConnection(id)
					idx := StartIndex
					for ctx.Err() == nil {
						r, newIdx, err := requestQueue.Dequeue(ctx, idx, id)
						if err != nil {
							// context.Canceled is the expected way out once
							// all responses have been counted.
							// NOTE(review): consider errors.Is(err, context.Canceled)
							// in case the error is wrapped — confirm with Dequeue's contract.
							if err != context.Canceled {
								t.Log("Dequeue() returned error:", err)
							}
							break
						}
						if r == nil {
							t.Log("Dequeue() returned nil response")
							break
						}
						res, _ := r.(*req)
						idx = newIdx
						time.Sleep(res.duration)
						count := responseCount.Add(1)
						durations.Store(res.actor, time.Since(start))
						// The last response cancels the context to stop all queriers.
						if count == total {
							t.Log("count", count, "total", total)
							cancel()
						}
					}
				}(fmt.Sprintf("querier-%d", q))
			}

			wg.Wait()

			require.Equal(t, total, responseCount.Load())
			durations.Range(func(k, v any) bool {
				t.Log("duration actor", k, v)
				return true
			})
			t.Log("total duration", time.Since(start))
		})
	}

}
||||
|
||||
// TestQueryFairnessAcrossSameLevel asserts the exact dequeue order produced
// by the hierarchical queue: queues on the same level of the tree are served
// round-robin with equal preference.
func TestQueryFairnessAcrossSameLevel(t *testing.T) {
	/**
	`tenant1`, `tenant1|abc`, and `tenant1|xyz` have equal preference
	`tenant1|xyz|123` and `tenant1|xyz|456` have equal preference

	root:
	  tenant1: [0, 1, 2]
	    abc: [10, 11, 12]
	    xyz: [20, 21, 22]
	      123: [200]
	      456: [210]
	**/

	requestQueue := NewRequestQueue(1024, 0,
		prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{"user"}),
		prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"}),
	)
	_ = requestQueue.Enqueue("tenant1", []string{}, r(0), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{}, r(1), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{}, r(2), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(10), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(11), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"abc"}, r(12), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(20), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(21), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"xyz"}, r(22), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"xyz", "123"}, r(200), 0, nil)
	_ = requestQueue.Enqueue("tenant1", []string{"xyz", "456"}, r(210), 0, nil)
	requestQueue.queues.recomputeUserQueriers()

	items := make([]int, 0)

	// set timeout to minimize impact on overall test run duration in case something goes wrong
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	requestQueue.RegisterQuerierConnection("querier")
	defer requestQueue.UnregisterQuerierConnection("querier")

	// Start with the index that also considers the tenant's local queue.
	idx := StartIndexWithLocalQueue
	for ctx.Err() == nil {
		r, newIdx, err := requestQueue.Dequeue(ctx, idx, "querier")
		if err != nil {
			if err != context.Canceled {
				t.Log("Dequeue() returned error:", err)
			}
			break
		}
		if r == nil {
			t.Log("Dequeue() returned nil response")
			break
		}
		res, _ := r.(*dummyRequest)
		idx = newIdx
		items = append(items, res.id)
	}

	require.Equal(t, []int{0, 10, 20, 1, 11, 200, 2, 12, 210, 21, 22}, items)
}
||||
@ -0,0 +1,154 @@ |
||||
package queue |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
) |
||||
|
||||
// QueuePath is the list of queue names from the tenant queue down to a
// specific sub-queue, e.g. the segments of the X-Loki-Actor-Path header.
type QueuePath []string //nolint:revive

// LeafQueue is a hierarchical queue implementation where each sub-queue
// has the same guarantees to be chosen from.
// Each queue also has a local queue, which gets chosen with equal preference as the sub-queues.
type LeafQueue struct {
	// ch is the local queue of this node
	ch RequestChannel
	// pos is the index of where this item is located in the parent's mapping
	pos QueueIndex
	// current is the round-robin index over the local queue and the sub-queues
	current QueueIndex
	// mapping holds the sub-queues, addressable by name and by index
	mapping *Mapping[*LeafQueue]
	// name of the queue
	name string
	// size is the maximum queue size of the local queue
	size int
}
||||
|
||||
// newLeafQueue creates a new LeafQueue instance
|
||||
func newLeafQueue(size int, name string) *LeafQueue { |
||||
m := &Mapping[*LeafQueue]{} |
||||
m.Init(64) // TODO(chaudum): What is a good initial value?
|
||||
return &LeafQueue{ |
||||
ch: make(RequestChannel, size), |
||||
pos: StartIndexWithLocalQueue, |
||||
current: StartIndexWithLocalQueue, |
||||
mapping: m, |
||||
name: name, |
||||
size: size, |
||||
} |
||||
} |
||||
|
||||
// add recursively adds queues based on given path
|
||||
func (q *LeafQueue) add(path QueuePath) *LeafQueue { |
||||
if len(path) == 0 { |
||||
return q |
||||
} |
||||
curr, remaining := path[0], path[1:] |
||||
queue, created := q.getOrCreate(curr) |
||||
if created { |
||||
q.mapping.Put(queue.Name(), queue) |
||||
} |
||||
return queue.add(remaining) |
||||
} |
||||
|
||||
func (q *LeafQueue) getOrCreate(name string) (subq *LeafQueue, created bool) { |
||||
subq = q.mapping.GetByKey(name) |
||||
if subq == nil { |
||||
subq = newLeafQueue(q.size, name) |
||||
created = true |
||||
} |
||||
return subq, created |
||||
} |
||||
|
||||
// Chan implements Queue.
// It returns the channel backing the local queue of this node.
func (q *LeafQueue) Chan() RequestChannel {
	return q.ch
}
||||
|
||||
// Dequeue implements Queue.
// It returns the next request, alternating in round-robin fashion between
// the local queue and the sub-queues, or nil when the whole tree is empty.
// Fully drained sub-queues are removed from the mapping.
func (q *LeafQueue) Dequeue() Request {
	var item Request

	// shortcut if there are no sub-queues:
	// always use local queue
	if q.mapping.Len() == 0 {
		if len(q.ch) > 0 {
			return <-q.ch
		}
		return nil
	}

	// one full round over the local queue plus every sub-queue slot;
	// note this counts internal key slots, including empty ones
	maxIter := len(q.mapping.keys) + 1
	for iters := 0; iters < maxIter; iters++ {
		// the sentinel index selects the local queue before any sub-queue
		if q.current == StartIndexWithLocalQueue {
			q.current++
			if len(q.ch) > 0 {
				item = <-q.ch
				if item != nil {
					return item
				}
			}
		}

		subq, err := q.mapping.GetNext(q.current)
		if err == ErrOutOfBounds {
			// wrapped past the last sub-queue: restart at the local queue
			q.current = StartIndexWithLocalQueue
			continue
		}
		if subq != nil {
			q.current = subq.pos
			item := subq.Dequeue()
			if item != nil {
				// drop fully drained sub-queues so they no longer take part
				// in the round-robin
				if subq.Len() == 0 {
					q.mapping.Remove(subq.name)
				}
				return item
			}
		}
	}
	return nil
}
||||
|
||||
// Name implements Queue.
// It returns the name this queue was created with.
func (q *LeafQueue) Name() string {
	return q.name
}
||||
|
||||
// Len implements Queue
|
||||
// It returns the length of the local queue and all sub-queues.
|
||||
// This may be expensive depending on the size of the queue tree.
|
||||
func (q *LeafQueue) Len() int { |
||||
count := len(q.ch) |
||||
for _, subq := range q.mapping.Values() { |
||||
count += subq.Len() |
||||
} |
||||
return count |
||||
} |
||||
|
||||
// Pos implements Mapable.
// It returns this queue's position within its parent's mapping.
func (q *LeafQueue) Pos() QueueIndex {
	return q.pos
}
||||
|
||||
// SetPos implements Mapable.
// It records this queue's position within its parent's mapping.
func (q *LeafQueue) SetPos(index QueueIndex) {
	q.pos = index
}
||||
|
||||
// String makes the queue printable
|
||||
func (q *LeafQueue) String() string { |
||||
sb := &strings.Builder{} |
||||
sb.WriteString("{") |
||||
fmt.Fprintf(sb, "name=%s, len=%d/%d, leafs=[", q.Name(), q.Len(), cap(q.ch)) |
||||
subqs := q.mapping.Values() |
||||
for i, m := range subqs { |
||||
sb.WriteString(m.String()) |
||||
if i < len(subqs)-1 { |
||||
sb.WriteString(",") |
||||
} |
||||
} |
||||
sb.WriteString("]") |
||||
sb.WriteString("}") |
||||
return sb.String() |
||||
} |
||||
@ -0,0 +1,171 @@ |
||||
package queue |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
// dummyRequest is a minimal queue request used in tests, identified only by
// an integer id.
type dummyRequest struct {
	id int
}

// r is a shorthand constructor for a dummyRequest with the given id.
func r(id int) *dummyRequest {
	return &dummyRequest{id}
}
||||
|
||||
// TestLeafQueue exercises the hierarchical queue: creating sub-queues by
// path, fair (round-robin) dequeueing across levels, and removal of drained
// sub-queues.
func TestLeafQueue(t *testing.T) {

	t.Run("add sub queues recursively", func(t *testing.T) {
		// both paths share the first segment "l0", so the root only ever has
		// a single direct sub-queue
		pathA := QueuePath([]string{"l0", "l1", "l3"})
		pathB := QueuePath([]string{"l0", "l2", "l3"})

		q := newLeafQueue(1, "root")
		require.NotNil(t, q)
		require.Equal(t, "root", q.Name())
		require.Equal(t, 0, q.Len())
		require.Equal(t, 0, q.mapping.Len())

		q.add(pathA)
		require.Equal(t, 1, q.mapping.Len())

		q.add(pathB)
		require.Equal(t, 1, q.mapping.Len())
	})

	t.Run("enqueue/dequeue to/from subqueues", func(t *testing.T) {
		/**
		root: [0]
		  a: [1]
		  b: [2]
		    b0: [20]
		    b1: [21]
		  c: [3]
		    c0: [30]
		      c00: [300]
		      c01: [301]
		    c1: [31]
		      c10: [310]
		      c11: [311]
		**/
		paths := []QueuePath{
			QueuePath([]string{"a"}),
			QueuePath([]string{"b", "b0"}),
			QueuePath([]string{"b", "b1"}),
			QueuePath([]string{"c", "c0", "c00"}),
			QueuePath([]string{"c", "c0", "c01"}),
			QueuePath([]string{"c", "c1", "c10"}),
			QueuePath([]string{"c", "c1", "c11"}),
		}

		q := newLeafQueue(10, "root")
		require.NotNil(t, q)
		for _, p := range paths {
			q.add(p)
		}

		// root has three direct sub-queues: a, b, c
		require.Equal(t, 3, q.mapping.Len())

		// no items in any queues
		require.Equal(t, 0, q.Len())

		q.Chan() <- r(0)
		require.Equal(t, 1, q.Len())

		q.mapping.GetByKey("a").Chan() <- r(1)
		require.Equal(t, 2, q.Len())

		q.mapping.GetByKey("b").Chan() <- r(2)
		q.mapping.GetByKey("b").mapping.GetByKey("b0").Chan() <- r(20)
		q.mapping.GetByKey("b").mapping.GetByKey("b1").Chan() <- r(21)
		require.Equal(t, 5, q.Len())

		q.mapping.GetByKey("c").Chan() <- r(3)
		q.mapping.GetByKey("c").mapping.GetByKey("c0").Chan() <- r(30)
		q.mapping.GetByKey("c").mapping.GetByKey("c0").mapping.GetByKey("c00").Chan() <- r(300)
		q.mapping.GetByKey("c").mapping.GetByKey("c0").mapping.GetByKey("c01").Chan() <- r(301)
		q.mapping.GetByKey("c").mapping.GetByKey("c1").Chan() <- r(31)
		q.mapping.GetByKey("c").mapping.GetByKey("c1").mapping.GetByKey("c10").Chan() <- r(310)
		q.mapping.GetByKey("c").mapping.GetByKey("c1").mapping.GetByKey("c11").Chan() <- r(311)
		require.Equal(t, 12, q.Len())
		t.Log(q)

		items := make([]int, 0, q.Len())

		for q.Len() > 0 {
			r := q.Dequeue()
			if r == nil {
				continue
			}
			items = append(items, r.(*dummyRequest).id)
		}
		require.Len(t, items, 12)
		// dequeue order fans out level by level across the tree
		require.Equal(t, []int{0, 1, 2, 3, 20, 30, 21, 31, 300, 310, 301, 311}, items)
	})

	t.Run("dequeue ensure round-robin", func(t *testing.T) {
		/**
		root:
		  a: [100, 101, 102]
		  b: [200]
		  c: [300, 301]
		**/
		paths := []QueuePath{
			QueuePath([]string{"a"}),
			QueuePath([]string{"b"}),
			QueuePath([]string{"c"}),
		}

		q := newLeafQueue(10, "root")
		require.NotNil(t, q)
		for _, p := range paths {
			q.add(p)
		}

		require.Equal(t, 3, q.mapping.Len())

		// no items in any queues
		require.Equal(t, 0, q.Len())

		q.mapping.GetByKey("a").Chan() <- r(100)
		q.mapping.GetByKey("a").Chan() <- r(101)
		q.mapping.GetByKey("a").Chan() <- r(102)
		q.mapping.GetByKey("b").Chan() <- r(200)
		q.mapping.GetByKey("c").Chan() <- r(300)
		q.mapping.GetByKey("c").Chan() <- r(301)

		t.Log(q)

		items := make([]int, 0, q.Len())

		for q.Len() > 0 {
			r := q.Dequeue()
			if r == nil {
				continue
			}
			items = append(items, r.(*dummyRequest).id)
		}
		require.Len(t, items, 6)
		// each sub-queue yields one item per round until it is drained
		require.Equal(t, []int{100, 200, 300, 101, 301, 102}, items)
	})

	t.Run("empty sub-queues are removed", func(t *testing.T) {
		q := newLeafQueue(10, "root")
		q.add(QueuePath{"a"})
		q.add(QueuePath{"b"})

		q.mapping.GetByKey("a").Chan() <- r(1)
		q.mapping.GetByKey("b").Chan() <- r(2)

		t.Log(q)

		// drain queue
		r := q.Dequeue()
		for r != nil {
			r = q.Dequeue()
		}

		// drained sub-queues must have been removed from the mapping
		require.Nil(t, q.mapping.GetByKey("a"))
		require.Nil(t, q.mapping.GetByKey("b"))
	})
}
||||
@ -0,0 +1,117 @@ |
||||
package queue |
||||
|
||||
import ( |
||||
"github.com/pkg/errors" |
||||
) |
||||
|
||||
// Mapable is the constraint for values stored in a Mapping. Implementations
// expose their position within the mapping's key slice so it can be updated
// when slots are re-used.
type Mapable interface {
	*tenantQueue | *LeafQueue
	// https://github.com/golang/go/issues/48522#issuecomment-924348755
	Pos() QueueIndex
	SetPos(index QueueIndex)
}
||||
|
||||
// ErrOutOfBounds is returned by GetNext when there is no item after the
// given index.
var ErrOutOfBounds = errors.New("queue index out of bounds")

// empty is the placeholder written into the key slice when an item is
// removed; a single 0-byte string can never be a real key.
var empty = string([]byte{byte(0)})
||||
|
||||
// Mapping is a map-like data structure that allows accessing its items not
// only by key but also by index.
// When an item is removed, the internal key array is not resized, but the
// removed place is marked as empty. This allows to remove keys without
// changing the index of the remaining items after the removed key.
// Mapping is generic over Mapable values and uses keys of type string.
// The data structure is not thread-safe.
type Mapping[v Mapable] struct {
	// m maps keys to their values
	m map[string]v
	// keys holds the keys in positional order; removed slots contain the
	// `empty` placeholder
	keys []string
	// empty lists the indexes of freed slots available for re-use by Put
	empty []QueueIndex
}
||||
|
||||
// Init initializes the mapping's internal data structures with the given
// initial capacity. It must be called before any other method.
func (m *Mapping[v]) Init(size int) {
	m.m = make(map[string]v, size)
	m.keys = make([]string, 0, size)
	m.empty = make([]QueueIndex, 0, size)
}
||||
|
||||
func (m *Mapping[v]) Put(key string, value v) bool { |
||||
// do not allow empty string or 0 byte string as key
|
||||
if key == "" || key == empty { |
||||
return false |
||||
} |
||||
if len(m.empty) == 0 { |
||||
value.SetPos(QueueIndex(len(m.keys))) |
||||
m.keys = append(m.keys, key) |
||||
} else { |
||||
idx := m.empty[0] |
||||
m.empty = m.empty[1:] |
||||
m.keys[idx] = key |
||||
value.SetPos(idx) |
||||
} |
||||
m.m[key] = value |
||||
return true |
||||
} |
||||
|
||||
func (m *Mapping[v]) Get(idx QueueIndex) v { |
||||
if len(m.keys) == 0 { |
||||
return nil |
||||
} |
||||
k := m.keys[idx] |
||||
return m.GetByKey(k) |
||||
} |
||||
|
||||
func (m *Mapping[v]) GetNext(idx QueueIndex) (v, error) { |
||||
if m.Len() == 0 { |
||||
return nil, ErrOutOfBounds |
||||
} |
||||
|
||||
i := int(idx) |
||||
i++ |
||||
|
||||
for i < len(m.keys) { |
||||
k := m.keys[i] |
||||
if k != empty { |
||||
return m.GetByKey(k), nil |
||||
} |
||||
i++ |
||||
} |
||||
return nil, ErrOutOfBounds |
||||
} |
||||
|
||||
// GetByKey returns the item stored under the given key, or nil if there is
// none.
func (m *Mapping[v]) GetByKey(key string) v {
	// do not allow empty string or 0 byte string as key
	if key == "" || key == empty {
		return nil
	}
	return m.m[key]
}
||||
|
||||
func (m *Mapping[v]) Remove(key string) bool { |
||||
e := m.m[key] |
||||
if e == nil { |
||||
return false |
||||
} |
||||
delete(m.m, key) |
||||
m.keys[e.Pos()] = empty |
||||
m.empty = append(m.empty, e.Pos()) |
||||
return true |
||||
} |
||||
|
||||
// Keys returns the internal key slice.
// Note that slots of removed items still contain the `empty` placeholder.
func (m *Mapping[v]) Keys() []string {
	return m.keys
}
||||
|
||||
func (m *Mapping[v]) Values() []v { |
||||
values := make([]v, 0, len(m.keys)) |
||||
for _, k := range m.keys { |
||||
if k == empty { |
||||
continue |
||||
} |
||||
values = append(values, m.m[k]) |
||||
} |
||||
return values |
||||
} |
||||
|
||||
// Len returns the number of stored items, excluding removed (empty) slots.
func (m *Mapping[v]) Len() int {
	return len(m.keys) - len(m.empty)
}
||||
@ -0,0 +1,92 @@ |
||||
package queue |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/stretchr/testify/require" |
||||
) |
||||
|
||||
// TestQueueMapping exercises the Mapping data structure: insertion order,
// slot re-use after removal, key/value consistency, and index-based
// iteration with GetNext.
// Individual sub-tests in this test case are reflecting a scenario and need
// to be executed in sequential order.
func TestQueueMapping(t *testing.T) {
	m := &Mapping[*LeafQueue]{}
	m.Init(16)

	require.Equal(t, m.Len(), 0)

	t.Run("put item to mapping", func(t *testing.T) {
		q1 := newLeafQueue(10, "queue-1")
		m.Put(q1.Name(), q1)
		require.Equal(t, 1, m.Len())
		require.Equal(t, []string{"queue-1"}, m.Keys())
	})

	t.Run("insert order is preserved if there is no empty slot", func(t *testing.T) {
		q2 := newLeafQueue(10, "queue-2")
		m.Put(q2.Name(), q2)
		require.Equal(t, 2, m.Len())
		require.Equal(t, []string{"queue-1", "queue-2"}, m.Keys())
	})

	t.Run("insert into empty slot if item was removed previously", func(t *testing.T) {
		ok := m.Remove("queue-1")
		require.True(t, ok)
		require.Equal(t, 1, m.Len())
		q3 := newLeafQueue(10, "queue-3")
		m.Put(q3.Name(), q3)
		require.Equal(t, 2, m.Len())
		// queue-3 re-uses the slot freed by the removal of queue-1
		require.Equal(t, []string{"queue-3", "queue-2"}, m.Keys())
	})

	t.Run("insert order is preserved across keys and values", func(t *testing.T) {
		q4 := newLeafQueue(10, "queue-4")
		m.Put(q4.Name(), q4)
		require.Equal(t, 3, m.Len())
		for idx, v := range m.Values() {
			require.Equal(t, v.Name(), m.Keys()[idx])
		}
	})

	t.Run("get by key", func(t *testing.T) {
		key := "queue-2"
		item := m.GetByKey(key)
		require.Equal(t, key, item.Name())
		require.Equal(t, QueueIndex(1), item.Pos())
	})

	t.Run("get by empty key returns nil", func(t *testing.T) {
		require.Nil(t, m.GetByKey(""))
		require.Nil(t, m.GetByKey(empty))
	})

	t.Run("get next item based on index must not skip when items are removed", func(t *testing.T) {
		item, err := m.GetNext(-1)
		require.Nil(t, err)
		require.Equal(t, "queue-3", item.Name())
		item, err = m.GetNext(item.Pos())
		require.Nil(t, err)
		require.Equal(t, "queue-2", item.Name())
		// removing the item just returned must not affect the iteration order
		m.Remove(item.Name())
		item, err = m.GetNext(item.Pos())
		require.Nil(t, err)
		require.Equal(t, "queue-4", item.Name())
	})

	t.Run("get next item out of range returns ErrOutOfBounds", func(t *testing.T) {
		item, err := m.GetNext(100)
		require.Nil(t, item)
		require.ErrorIs(t, err, ErrOutOfBounds)

	})

	t.Run("get next item skips empty slots", func(t *testing.T) {
		// queue-2 was removed above, so iteration jumps from queue-3 to queue-4
		item, err := m.GetNext(-1)
		require.Nil(t, err)
		require.Equal(t, "queue-3", item.Name())
		item, err = m.GetNext(item.Pos())
		require.Nil(t, err)
		require.Equal(t, "queue-4", item.Name())
	})

}
||||
@ -0,0 +1,48 @@ |
||||
package httpreq |
||||
|
||||
import ( |
||||
"context" |
||||
"net/http" |
||||
"strings" |
||||
|
||||
"github.com/weaveworks/common/middleware" |
||||
) |
||||
|
||||
// headerContextKey is a private context key type so values stored by this
// package cannot collide with context values from other packages.
type headerContextKey string

var (
	// LokiActorPathHeader is the name of the header e.g. used to enqueue requests in hierarchical queues.
	LokiActorPathHeader = "X-Loki-Actor-Path"

	// LokiActorPathDelimiter is the delimiter used to serialise the hierarchy of the actor.
	LokiActorPathDelimiter = "|"
)
||||
|
||||
func PropagateHeadersMiddleware(headers ...string) middleware.Interface { |
||||
return middleware.Func(func(next http.Handler) http.Handler { |
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { |
||||
for _, h := range headers { |
||||
value := req.Header.Get(h) |
||||
if value != "" { |
||||
ctx := req.Context() |
||||
ctx = context.WithValue(ctx, headerContextKey(h), value) |
||||
req = req.WithContext(ctx) |
||||
} |
||||
} |
||||
next.ServeHTTP(w, req) |
||||
}) |
||||
}) |
||||
} |
||||
|
||||
// ExtractHeader returns the value of the header with the given name that was
// previously stored in the context by PropagateHeadersMiddleware, or the
// empty string if it is not present.
func ExtractHeader(ctx context.Context, name string) string {
	s, _ := ctx.Value(headerContextKey(name)).(string)
	return s
}
||||
|
||||
func ExtractActorPath(ctx context.Context) []string { |
||||
value := ExtractHeader(ctx, LokiActorPathHeader) |
||||
if value == "" { |
||||
return nil |
||||
} |
||||
return strings.Split(value, LokiActorPathDelimiter) |
||||
} |
||||
Loading…
Reference in new issue