 	alertmanagerURL = flag.String("alertmanager.url", "", "The URL of the alert manager to send notifications to.")
 	notificationQueueCapacity = flag.Int("alertmanager.notification-queue-capacity", 100, "The capacity of the queue for pending alert manager notifications.")
-	metricsStoragePath = flag.String("storage.local.path", "/tmp/metrics", "Base path for metrics storage.")
+	persistenceStoragePath = flag.String("storage.local.path", "/tmp/metrics", "Base path for metrics storage.")
 	remoteTSDBUrl = flag.String("storage.remote.url", "", "The URL of the OpenTSDB instance to send samples to.")
 	remoteTSDBTimeout = flag.Duration("storage.remote.timeout", 30*time.Second, "The timeout to use when sending samples to OpenTSDB.")
@@ -56,7 +56,8 @@ var (
 	numMemoryChunks = flag.Int("storage.local.memory-chunks", 1024*1024, "How many chunks to keep in memory. While the size of a chunk is 1kiB, the total memory usage will be significantly higher than this value * 1kiB. Furthermore, for various reasons, more chunks might have to be kept in memory temporarily.")
-	storageRetentionPeriod = flag.Duration("storage.local.retention", 15*24*time.Hour, "How long to retain samples in the local storage.")
+	persistenceRetentionPeriod = flag.Duration("storage.local.retention", 15*24*time.Hour, "How long to retain samples in the local storage.")
 	persistenceQueueCapacity = flag.Int("storage.local.persistence-queue-capacity", 128*1024, "How many chunks can be waiting for being persisted before sample ingestion will stop.")
 	checkpointInterval = flag.Duration("storage.local.checkpoint-interval", 5*time.Minute, "The period at which the in-memory index of time series is checkpointed.")
 	checkpointDirtySeriesLimit = flag.Int("storage.local.checkpoint-dirty-series-limit", 5000, "If approx. that many time series are in a state that would require a recovery operation after a crash, a checkpoint is triggered, even if the checkpoint interval hasn't passed yet. A recovery operation requires a disk seek. The default limit intends to keep the recovery time below 1min even on spinning disks. With SSD, recovery is much faster, so you might want to increase this value in that case to avoid overly frequent checkpoints.")
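
For readers unfamiliar with the pattern used above: each definition calls Go's standard library flag package, which registers a command-line flag and returns a pointer whose value is populated once flag.Parse() runs. A minimal, self-contained sketch of the same pattern follows; the variable names and flag values here are illustrative only and are not taken from this patch.

// Illustrative sketch (not part of the patch) of the flag-definition pattern above.
package main

import (
	"flag"
	"fmt"
	"time"
)

var (
	// Each call registers a flag and returns a pointer to its value.
	storagePath     = flag.String("storage.local.path", "/tmp/metrics", "Base path for metrics storage.")
	retentionPeriod = flag.Duration("storage.local.retention", 15*24*time.Hour, "How long to retain samples in the local storage.")
)

func main() {
	// Populates the pointers from the command line,
	// e.g. run with -storage.local.retention=360h
	flag.Parse()
	fmt.Printf("storing under %s, retaining samples for %s\n", *storagePath, *retentionPeriod)
}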