diff --git a/cmd/loki/loki-local-config.yaml b/cmd/loki/loki-local-config.yaml
index 434e4a4a43..0b45802732 100644
--- a/cmd/loki/loki-local-config.yaml
+++ b/cmd/loki/loki-local-config.yaml
@@ -7,7 +7,6 @@ ingester:
   wal:
     enabled: true
     dir: /tmp/wal
-    recover: true
   lifecycler:
     address: 127.0.0.1
     ring:
diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md
index 02f955006a..1ac6a51f37 100644
--- a/docs/sources/configuration/_index.md
+++ b/docs/sources/configuration/_index.md
@@ -907,10 +907,6 @@ wal:
   # CLI flag: -ingester.wal-dir
   [dir: <filename> | default = "wal"]
 
-  # Recover data from existing WAL dir irrespective of WAL enabled/disabled.
-  # CLI flag: -ingester.recover-from-wal
-  [recover: <boolean> | default = false]
-
   # When WAL is enabled, should chunks be flushed to long-term storage on shutdown.
   # CLI flag: -ingester.flush-on-shutdown
   [flush_on_shutdown: <boolean> | default = false]
diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go
index 5a6f504c41..5aadc50290 100644
--- a/pkg/ingester/checkpoint_test.go
+++ b/pkg/ingester/checkpoint_test.go
@@ -49,7 +49,6 @@ func defaultIngesterTestConfigWithWAL(t *testing.T, walDir string) Config {
 	ingesterConfig.MaxTransferRetries = 0
 	ingesterConfig.WAL.Enabled = true
 	ingesterConfig.WAL.Dir = walDir
-	ingesterConfig.WAL.Recover = true
 	ingesterConfig.WAL.CheckpointDuration = time.Second
 
 	return ingesterConfig
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 25047a4549..537c041d00 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -220,7 +220,7 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid
 
 func (i *Ingester) starting(ctx context.Context) error {
-	if i.cfg.WAL.Recover {
+	if i.cfg.WAL.Enabled {
 		// Ignore retain period during wal replay.
 		old := i.cfg.RetainPeriod
 		i.cfg.RetainPeriod = 0
diff --git a/pkg/ingester/wal.go b/pkg/ingester/wal.go
index 83d69d3f5d..e8b3e63cc7 100644
--- a/pkg/ingester/wal.go
+++ b/pkg/ingester/wal.go
@@ -26,7 +26,6 @@ const defaultCeiling = 4 << 30 // 4GB
 type WALConfig struct {
 	Enabled             bool             `yaml:"enabled"`
 	Dir                 string           `yaml:"dir"`
-	Recover             bool             `yaml:"recover"`
 	CheckpointDuration  time.Duration    `yaml:"checkpoint_duration"`
 	FlushOnShutdown     bool             `yaml:"flush_on_shutdown"`
 	ReplayMemoryCeiling flagext.ByteSize `yaml:"replay_memory_ceiling"`
@@ -43,7 +42,6 @@ func (cfg *WALConfig) Validate() error {
 func (cfg *WALConfig) RegisterFlags(f *flag.FlagSet) {
 	f.StringVar(&cfg.Dir, "ingester.wal-dir", "wal", "Directory to store the WAL and/or recover from WAL.")
 	f.BoolVar(&cfg.Enabled, "ingester.wal-enabled", false, "Enable writing of ingested data into WAL.")
-	f.BoolVar(&cfg.Recover, "ingester.recover-from-wal", false, "Recover data from existing WAL irrespective of WAL enabled/disabled.")
 	f.DurationVar(&cfg.CheckpointDuration, "ingester.checkpoint-duration", 5*time.Minute, "Interval at which checkpoints should be created.")
 	f.BoolVar(&cfg.FlushOnShutdown, "ingester.flush-on-shutdown", false, "When WAL is enabled, should chunks be flushed to long-term storage on shutdown.")
diff --git a/production/ksonnet/loki/wal.libsonnet b/production/ksonnet/loki/wal.libsonnet
index d3188c6817..8f63977742 100644
--- a/production/ksonnet/loki/wal.libsonnet
+++ b/production/ksonnet/loki/wal.libsonnet
@@ -12,7 +12,6 @@
     wal+: {
      enabled: true,
      dir: '/loki/wal',
-      recover: true,
      replay_memory_ceiling: '9GB',  // between the requests & limits
    },
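
Note for reviewers and operators (commentary, not part of the diff): with the `recover` key removed, WAL replay on startup is gated solely on `enabled`, so an upgraded config only needs the remaining keys. Below is a minimal sketch of a post-upgrade ingester section; it uses only keys and defaults that appear in this diff, and the values are illustrative:

```yaml
# Hypothetical post-upgrade config sketch: `recover` is gone; replay now
# happens whenever the WAL is enabled.
ingester:
  wal:
    enabled: true               # gates both WAL writing and replay on startup
    dir: /tmp/wal               # directory to store the WAL and/or recover from
    # recover: true             # removed by this change; delete the key entirely
    flush_on_shutdown: false    # optional; flush chunks to storage on shutdown
    checkpoint_duration: 5m     # optional; default is 5m
    replay_memory_ceiling: 4GB  # optional; default 4GB (defaultCeiling = 4 << 30)
```

Configs that still carry `recover` may fail to load where strict YAML checking is in effect, so the key should be dropped during upgrade rather than left in place.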