mirror of https://github.com/grafana/loki
Loki: query scheduler should send shutdown to frontends when ReplicationSet changes (#4614)
* make sure we send a shutdown message to frontends from the scheduler when the ReplicationSet changes so that inflight queries are canceled and retried in the frontends. * use Write so we find an instance in case one is unhealthy (pull/4556/head)
parent
308af871cf
commit
fe8bc9144f
@ -0,0 +1,100 @@ |
||||
package scheduler |
||||
|
||||
import ( |
||||
"context" |
||||
"testing" |
||||
|
||||
"github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" |
||||
util_log "github.com/cortexproject/cortex/pkg/util/log" |
||||
"github.com/prometheus/client_golang/prometheus" |
||||
"github.com/prometheus/client_golang/prometheus/promauto" |
||||
|
||||
"github.com/stretchr/testify/assert" |
||||
"google.golang.org/grpc/metadata" |
||||
) |
||||
|
||||
func TestScheduler_setRunState(t *testing.T) { |
||||
|
||||
// This test is a bit crude, the method is not the most directly testable but
|
||||
// this covers us to make sure we don't accidentally change the behavior of
|
||||
// the little bit of logic which runs/stops the scheduler and makes sure we
|
||||
// send a shutdown message to disconnect frontends.
|
||||
|
||||
// To avoid a lot more complicated test setup of calling NewScheduler instead
|
||||
// we make a Scheduler with the things required to avoid nil pointers
|
||||
s := Scheduler{ |
||||
log: util_log.Logger, |
||||
schedulerRunning: promauto.With(prometheus.DefaultRegisterer).NewGauge(prometheus.GaugeOpts{ |
||||
Name: "cortex_query_scheduler_running", |
||||
Help: "Value will be 1 if the scheduler is in the ReplicationSet and actively receiving/processing requests", |
||||
}), |
||||
} |
||||
mock := &mockSchedulerForFrontendFrontendLoopServer{} |
||||
s.connectedFrontends = map[string]*connectedFrontend{ |
||||
"127.0.0.1:9095": { |
||||
connections: 0, |
||||
frontend: mock, |
||||
ctx: nil, |
||||
cancel: nil, |
||||
}, |
||||
} |
||||
|
||||
// not_running, shouldRun == false
|
||||
assert.False(t, s.shouldRun.Load()) |
||||
|
||||
// not_running -> running, shouldRun == true
|
||||
s.setRunState(true) |
||||
assert.True(t, s.shouldRun.Load()) |
||||
|
||||
// running -> running, shouldRun == true
|
||||
s.setRunState(true) |
||||
assert.True(t, s.shouldRun.Load()) |
||||
|
||||
// running -> not_running, shouldRun == false, shutdown message sent
|
||||
s.setRunState(false) |
||||
assert.False(t, s.shouldRun.Load()) |
||||
assert.Equal(t, schedulerpb.SHUTTING_DOWN, mock.msg.Status) |
||||
mock.msg = nil |
||||
|
||||
// not_running -> not_running, shouldRun == false, no shutdown message sent
|
||||
s.setRunState(false) |
||||
assert.Nil(t, mock.msg) |
||||
|
||||
} |
||||
|
||||
// mockSchedulerForFrontendFrontendLoopServer is a minimal stub of the
// scheduler->frontend gRPC stream used by setRunState: it records the last
// message passed to Send so the test can assert on it; all other stream
// methods panic if called.
type mockSchedulerForFrontendFrontendLoopServer struct {
	// msg is the most recent message passed to Send; nil if nothing was sent
	// (the test resets it to nil between assertions).
	msg *schedulerpb.SchedulerToFrontend
}
||||
|
||||
func (m *mockSchedulerForFrontendFrontendLoopServer) Send(frontend *schedulerpb.SchedulerToFrontend) error { |
||||
m.msg = frontend |
||||
return nil |
||||
} |
||||
|
||||
func (m mockSchedulerForFrontendFrontendLoopServer) Recv() (*schedulerpb.FrontendToScheduler, error) { |
||||
panic("implement me") |
||||
} |
||||
|
||||
func (m mockSchedulerForFrontendFrontendLoopServer) SetHeader(md metadata.MD) error { |
||||
panic("implement me") |
||||
} |
||||
|
||||
func (m mockSchedulerForFrontendFrontendLoopServer) SendHeader(md metadata.MD) error { |
||||
panic("implement me") |
||||
} |
||||
|
||||
func (m mockSchedulerForFrontendFrontendLoopServer) SetTrailer(md metadata.MD) { |
||||
panic("implement me") |
||||
} |
||||
|
||||
func (m mockSchedulerForFrontendFrontendLoopServer) Context() context.Context { |
||||
panic("implement me") |
||||
} |
||||
|
||||
func (m mockSchedulerForFrontendFrontendLoopServer) SendMsg(msg interface{}) error { |
||||
panic("implement me") |
||||
} |
||||
|
||||
func (m mockSchedulerForFrontendFrontendLoopServer) RecvMsg(msg interface{}) error { |
||||
panic("implement me") |
||||
} |
||||
Loading…
Reference in new issue