fix: Improve docs for min and max table offsets (backport k227) (#14929)

Co-authored-by: Salva Corts <salva.corts@grafana.com>
pull/14931/head
loki-gh-app[bot] 6 months ago committed by GitHub
parent 1f6828b25c
commit 3161fdcc6d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 17
      docs/sources/shared/configuration.md
  2. 4
      pkg/bloombuild/planner/config.go
  3. 31
      pkg/bloombuild/planner/planner_test.go

@ -1263,19 +1263,20 @@ planner:
# CLI flag: -bloom-build.planner.interval
[planning_interval: <duration> | default = 8h]
# Newest day-table offset (from today, inclusive) to build blooms for.
# Increase to lower cost by not re-writing data to object storage too
# frequently since recent data changes more often at the cost of not having
# blooms available as quickly.
# Newest day-table offset (from today, inclusive) to build blooms for. 0 start
# building from today, 1 from yesterday and so on. Increase to lower cost by
# not re-writing data to object storage too frequently since recent data
# changes more often at the cost of not having blooms available as quickly.
# CLI flag: -bloom-build.planner.min-table-offset
[min_table_offset: <int> | default = 1]
[min_table_offset: <int> | default = 0]
# Oldest day-table offset (from today, inclusive) to compact. This can be used
# to lower cost by not trying to compact older data which doesn't change. This
# Oldest day-table offset (from today, inclusive) to build blooms for. 1 till
# yesterday, 2 till day before yesterday and so on. This can be used to lower
# cost by not trying to build blooms for older data which doesn't change. This
# can be optimized by aligning it with the maximum
# `reject_old_samples_max_age` setting of any tenant.
# CLI flag: -bloom-build.planner.max-table-offset
[max_table_offset: <int> | default = 2]
[max_table_offset: <int> | default = 1]
retention:
# Enable bloom retention.

@ -21,14 +21,14 @@ type Config struct {
// RegisterFlagsWithPrefix registers flags for the bloom-planner configuration.
// All flags are registered under the given prefix (e.g. "bloom-build.planner").
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	f.DurationVar(&cfg.PlanningInterval, prefix+".interval", 8*time.Hour, "Interval at which to re-run the bloom creation planning.")
	// MinTableOffset default 0: start building blooms from today's table.
	f.IntVar(&cfg.MinTableOffset, prefix+".min-table-offset", 0, "Newest day-table offset (from today, inclusive) to build blooms for. 0 start building from today, 1 from yesterday and so on. Increase to lower cost by not re-writing data to object storage too frequently since recent data changes more often at the cost of not having blooms available as quickly.")
	// TODO(owen-d): ideally we'd set this per tenant based on their `reject_old_samples_max_age` setting,
	// but due to how we need to discover tenants, we can't do that yet. Tenant+Period discovery is done by
	// iterating the table periods in object storage and looking for tenants within that period.
	// In order to have this done dynamically, we'd need to account for tenant specific overrides, which are also
	// dynamically reloaded.
	// I'm doing it the simple way for now.
	f.IntVar(&cfg.MaxTableOffset, prefix+".max-table-offset", 1, "Oldest day-table offset (from today, inclusive) to build blooms for. 1 till yesterday, 2 till day before yesterday and so on. This can be used to lower cost by not trying to build blooms for older data which doesn't change. This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant.")
	cfg.RetentionConfig.RegisterFlagsWithPrefix(prefix+".retention", f)
	cfg.Queue.RegisterFlagsWithPrefix(prefix+".queue", f)
}

@ -22,6 +22,7 @@ import (
"github.com/grafana/loki/v3/pkg/bloombuild/planner/queue"
"github.com/grafana/loki/v3/pkg/bloombuild/planner/strategies"
"github.com/grafana/loki/v3/pkg/bloombuild/protos"
iter "github.com/grafana/loki/v3/pkg/iter/v2"
"github.com/grafana/loki/v3/pkg/storage"
v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1"
"github.com/grafana/loki/v3/pkg/storage/chunk/cache"
@ -606,6 +607,36 @@ func Test_deleteOutdatedMetas(t *testing.T) {
}
}
// TestMinMaxTables verifies that the planner enumerates exactly the day tables
// in the inclusive [MinTableOffset, MaxTableOffset] window around "now".
func TestMinMaxTables(t *testing.T) {
	logger := log.NewNopLogger()
	//logger := log.NewLogfmtLogger(os.Stdout) // swap in for verbose output when debugging locally
	cfg := Config{
		PlanningInterval: 1 * time.Hour,
		Queue: queue.Config{
			MaxQueuedTasksPerTenant: 10000,
		},
		// Inclusive window: offset 0 = today, 2 = day before yesterday.
		MinTableOffset: 0,
		MaxTableOffset: 2,
	}
	planner := createPlanner(t, cfg, &fakeLimits{}, logger)
	tables := planner.tables(time.Now())
	// Offsets 0, 1 and 2 are all inclusive, so three day tables are expected.
	require.Equal(t, 3, tables.TotalDays())
	dayTables, err := iter.Collect(tables)
	require.NoError(t, err)
	todayTable := config.NewDayTable(config.NewDayTime(model.Now()), "index_")
	yesterdayTable := config.NewDayTable(config.NewDayTime(model.Now().Add(-24*time.Hour)), "index_")
	dayBeforeYesterdayTable := config.NewDayTable(config.NewDayTime(model.Now().Add(-48*time.Hour)), "index_")
	// The iterator yields tables oldest first: max offset down to min offset.
	require.Equal(t, dayBeforeYesterdayTable.Addr(), dayTables[0].Addr())
	require.Equal(t, yesterdayTable.Addr(), dayTables[1].Addr())
	require.Equal(t, todayTable.Addr(), dayTables[2].Addr())
}
type fakeBuilder struct {
mx sync.Mutex // Protects tasks and currTaskIdx.
id string

Loading…
Cancel
Save