Improve report usage to never fail (#5364)

* Improve report usage to never fail

Signed-off-by: Cyril Tovena <cyril.tovena@gmail.com>

* wait until the context is cancelled so the service does not fail

Signed-off-by: Cyril Tovena <cyril.tovena@gmail.com>
pull/5321/head
Cyril Tovena 3 years ago committed by GitHub
parent bbaef790db
commit 21fa187fa8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 6
      pkg/loki/modules.go
  2. 19
      pkg/usagestats/reporter.go
  3. 20
      pkg/usagestats/reporter_test.go

@ -769,11 +769,13 @@ func (t *Loki) initUsageReport() (services.Service, error) {
objectClient, err := chunk_storage.NewObjectClient(period.ObjectType, t.Cfg.StorageConfig.Config, t.clientMetrics)
if err != nil {
return nil, err
level.Info(util_log.Logger).Log("msg", "failed to initialize usage report", "err", err)
return nil, nil
}
ur, err := usagestats.NewReporter(t.Cfg.UsageReport, t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore, objectClient, util_log.Logger, prometheus.DefaultRegisterer)
if err != nil {
return nil, err
level.Info(util_log.Logger).Log("msg", "failed to initialize usage report", "err", err)
return nil, nil
}
t.usageReport = ur
return ur, nil

@ -46,7 +46,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
}
type Reporter struct {
kvClient kv.Client
logger log.Logger
objectClient chunk.ObjectClient
reg prometheus.Registerer
@ -54,6 +53,7 @@ type Reporter struct {
services.Service
conf Config
kvConfig kv.Config
cluster *ClusterSeed
lastReport time.Time
}
@ -62,15 +62,11 @@ func NewReporter(config Config, kvConfig kv.Config, objectClient chunk.ObjectCli
if config.Disabled {
return nil, nil
}
kvClient, err := kv.NewClient(kvConfig, JSONCodec, kv.RegistererWithKVName(reg, "usagestats"), logger)
if err != nil {
return nil, err
}
r := &Reporter{
kvClient: kvClient,
logger: logger,
objectClient: objectClient,
conf: config,
kvConfig: kvConfig,
reg: reg,
}
r.Service = services.NewBasicService(nil, r.running, nil)
@ -78,6 +74,11 @@ func NewReporter(config Config, kvConfig kv.Config, objectClient chunk.ObjectCli
}
func (rep *Reporter) initLeader(ctx context.Context) *ClusterSeed {
kvClient, err := kv.NewClient(rep.kvConfig, JSONCodec, nil, rep.logger)
if err != nil {
level.Info(rep.logger).Log("msg", "failed to create kv client", "err", err)
return nil
}
// Try to become leader via the kv client
for backoff := backoff.New(ctx, backoff.Config{
MinBackoff: time.Second,
@ -90,7 +91,7 @@ func (rep *Reporter) initLeader(ctx context.Context) *ClusterSeed {
PrometheusVersion: build.GetVersion(),
CreatedAt: time.Now(),
}
if err := rep.kvClient.CAS(ctx, seedKey, func(in interface{}) (out interface{}, retry bool, err error) {
if err := kvClient.CAS(ctx, seedKey, func(in interface{}) (out interface{}, retry bool, err error) {
// The key is already set, so we don't need to do anything
if in != nil {
if kvSeed, ok := in.(*ClusterSeed); ok && kvSeed.UID != seed.UID {
@ -207,6 +208,10 @@ func (rep *Reporter) writeSeedFile(ctx context.Context, seed ClusterSeed) error
func (rep *Reporter) running(ctx context.Context) error {
rep.init(ctx)
if rep.cluster == nil {
<-ctx.Done()
return ctx.Err()
}
// check every minute if we should report.
ticker := time.NewTicker(reportCheckInterval)
defer ticker.Stop()

@ -145,3 +145,23 @@ func Test_NextReport(t *testing.T) {
})
}
}
// TestWrongKV verifies that a Reporter built with an invalid KV store
// configuration is still constructed successfully, and that running it
// simply blocks until the context is cancelled rather than returning
// an error of its own.
func TestWrongKV(t *testing.T) {
	// Filesystem object client backed by a throwaway temp directory.
	objectClient, err := storage.NewObjectClient(storage.StorageTypeFileSystem, storage.Config{
		FSConfig: local.FSConfig{
			Directory: t.TempDir(),
		},
	}, metrics)
	require.NoError(t, err)

	// An empty Store name is not a usable KV backend; NewReporter must
	// still succeed so usage reporting can never break startup.
	reporter, err := NewReporter(Config{Leader: true}, kv.Config{
		Store: "",
	}, objectClient, log.NewLogfmtLogger(os.Stdout), prometheus.NewPedanticRegistry())
	require.NoError(t, err)

	// Cancel the context after one second; running should wait on the
	// cancelled context and surface context.Canceled.
	ctx, cancel := context.WithCancel(context.Background())
	time.AfterFunc(1*time.Second, cancel)

	require.Equal(t, context.Canceled, reporter.running(ctx))
}

Loading…
Cancel
Save