@@ -58,19 +58,23 @@ type Compactor interface {
     // Results returned when compactions are in progress are undefined.
     Plan(dir string) ([]string, error)
 
-    // Write persists a Block into a directory.
-    // No Block is written when resulting Block has 0 samples, and returns empty ulid.ULID{}.
-    Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error)
+    // Write persists one or more Blocks into a directory.
+    // No Block is written when the resulting Block has 0 samples; an empty slice is returned instead.
+    // Prometheus always returns one block or none. The interface allows returning more than one
+    // block for downstream users to experiment with the compactor.
+    Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error)
 
     // Compact runs compaction against the provided directories. Must
     // only be called concurrently with results of Plan().
     // Can optionally pass a list of already open blocks,
     // to avoid having to reopen them.
-    // When resulting Block has 0 samples
+    // Prometheus always returns one block or none. The interface allows returning more than one
+    // block for downstream users to experiment with the compactor.
+    // When one resulting Block has 0 samples
     //  * No block is written.
     //  * The source dirs are marked Deletable.
-    //  * Returns empty ulid.ULID{}.
-    Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error)
+    //  * Block is not included in the result.
+    Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error)
 }
 
 // LeveledCompactor implements the Compactor interface.
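
With this interface change, callers receive a slice of block IDs instead of a single (possibly zero-valued) ULID. A minimal caller-side sketch, assuming the exported tsdb package path and go-kit logging as used elsewhere in this file; the helper name and wiring below are illustrative, not part of this change:

    package main

    import (
        "fmt"

        "github.com/go-kit/log"
        "github.com/go-kit/log/level"

        "github.com/prometheus/prometheus/tsdb"
    )

    // runCompaction is a hypothetical caller showing how the slice-returning
    // Compact is consumed: an error aborts, an empty slice means no block was
    // produced, and each returned ULID identifies a written block.
    func runCompaction(logger log.Logger, compactor tsdb.Compactor, dest string, dirs []string) error {
        ulids, err := compactor.Compact(dest, dirs, nil)
        if err != nil {
            return fmt.Errorf("compact %v: %w", dirs, err)
        }
        if len(ulids) == 0 {
            // The resulting block had 0 samples: nothing was written and the
            // source dirs were marked Deletable.
            return nil
        }
        for _, uid := range ulids {
            level.Info(logger).Log("msg", "compaction produced block", "ulid", uid)
        }
        return nil
    }
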
@@ -441,11 +445,11 @@ func CompactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta {
 // Compact creates a new block in the compactor's directory from the blocks in the
 // provided directories.
-func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) {
+func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) ([]ulid.ULID, error) {
     return c.CompactWithBlockPopulator(dest, dirs, open, DefaultBlockPopulator{})
 }
 
-func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) (uid ulid.ULID, err error) {
+func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, open []*Block, blockPopulator BlockPopulator) ([]ulid.ULID, error) {
     var (
         blocks []BlockReader
         bs     []*Block
@@ -457,7 +461,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
     for _, d := range dirs {
         meta, _, err := readMetaFile(d)
         if err != nil {
-            return uid, err
+            return nil, err
         }
 
         var b *Block
@@ -475,7 +479,7 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
             var err error
             b, err = OpenBlock(c.logger, d, c.chunkPool)
             if err != nil {
-                return uid, err
+                return nil, err
             }
             defer b.Close()
         }
@@ -486,10 +490,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
         uids = append(uids, meta.ULID.String())
     }
 
-    uid = ulid.MustNew(ulid.Now(), rand.Reader)
+    uid := ulid.MustNew(ulid.Now(), rand.Reader)
 
     meta := CompactBlockMetas(uid, metas...)
-    err = c.write(dest, meta, blockPopulator, blocks...)
+    err := c.write(dest, meta, blockPopulator, blocks...)
     if err == nil {
         if meta.Stats.NumSamples == 0 {
             for _, b := range bs {
@@ -503,25 +507,25 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
                 }
                 b.numBytesMeta = n
             }
-            uid = ulid.ULID{}
             level.Info(c.logger).Log(
                 "msg", "compact blocks resulted in empty block",
                 "count", len(blocks),
                 "sources", fmt.Sprintf("%v", uids),
                 "duration", time.Since(start),
             )
-        } else {
-            level.Info(c.logger).Log(
-                "msg", "compact blocks",
-                "count", len(blocks),
-                "mint", meta.MinTime,
-                "maxt", meta.MaxTime,
-                "ulid", meta.ULID,
-                "sources", fmt.Sprintf("%v", uids),
-                "duration", time.Since(start),
-            )
+            return nil, nil
         }
-        return uid, nil
+
+        level.Info(c.logger).Log(
+            "msg", "compact blocks",
+            "count", len(blocks),
+            "mint", meta.MinTime,
+            "maxt", meta.MaxTime,
+            "ulid", meta.ULID,
+            "sources", fmt.Sprintf("%v", uids),
+            "duration", time.Since(start),
+        )
+        return []ulid.ULID{uid}, nil
     }
 
     errs := tsdb_errors.NewMulti(err)
@@ -533,10 +537,10 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string,
         }
     }
 
-    return uid, errs.Err()
+    return nil, errs.Err()
 }
 
-func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) (ulid.ULID, error) {
+func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, base *BlockMeta) ([]ulid.ULID, error) {
     start := time.Now()
 
     uid := ulid.MustNew(ulid.Now(), rand.Reader)
@@ -560,7 +564,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
     err := c.write(dest, meta, DefaultBlockPopulator{}, b)
     if err != nil {
-        return uid, err
+        return nil, err
     }
 
     if meta.Stats.NumSamples == 0 {
@@ -570,7 +574,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
             "maxt", meta.MaxTime,
             "duration", time.Since(start),
         )
-        return ulid.ULID{}, nil
+        return nil, nil
     }
 
     level.Info(c.logger).Log(
@@ -581,7 +585,7 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b
         "duration", time.Since(start),
         "ooo", meta.Compaction.FromOutOfOrder(),
     )
-    return uid, nil
+    return []ulid.ULID{uid}, nil
 }
 
 // instrumentedChunkWriter is used for level 1 compactions to record statistics
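
The interface comment notes that Prometheus itself returns at most one block, but the slice return type lets downstream users experiment with compactors that emit several. A hypothetical sketch of such a downstream wrapper (the type name is made up and not part of this change; it simply delegates and marks where extra blocks could be returned):

    package main

    import (
        "github.com/oklog/ulid"

        "github.com/prometheus/prometheus/tsdb"
    )

    // multiBlockCompactor is a hypothetical downstream Compactor. It embeds the
    // stock LeveledCompactor, inheriting Plan and Write, and overrides Compact;
    // the []ulid.ULID return is what permits handing back several block IDs.
    type multiBlockCompactor struct {
        *tsdb.LeveledCompactor
    }

    func (c *multiBlockCompactor) Compact(dest string, dirs []string, open []*tsdb.Block) ([]ulid.ULID, error) {
        ulids, err := c.LeveledCompactor.Compact(dest, dirs, open)
        if err != nil {
            return nil, err
        }
        // A real downstream implementation could produce and append additional
        // block ULIDs here; Prometheus itself always returns one block or none.
        return ulids, nil
    }
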