@ -639,45 +639,7 @@ func chunkMetaToChunkRef(userID string, chunkMeta index.ChunkMeta, lbls labels.L
}
func TestCompactedIndex ( t * testing . T ) {
now := model . Now ( )
periodConfig := config . PeriodConfig {
IndexTables : config . PeriodicTableConfig { Period : config . ObjectStorageIndexRequiredPeriod } ,
Schema : "v12" ,
}
schemaCfg := config . SchemaConfig {
Configs : [ ] config . PeriodConfig { periodConfig } ,
}
indexBuckets , err := indexBuckets ( now , now , [ ] config . TableRange { periodConfig . GetIndexTableNumberRange ( config . DayTime { Time : now } ) } )
require . NoError ( t , err )
tableName := indexBuckets [ 0 ]
tableInterval := retention . ExtractIntervalFromTableName ( tableName )
// shiftTableStart shifts tableInterval.Start by the given amount of milliseconds.
// It is used for building chunkmetas relative to start time of the table.
shiftTableStart := func ( ms int64 ) int64 {
return int64 ( tableInterval . Start ) + ms
}
lbls1 := mustParseLabels ( ` { foo="bar", a="b"} ` )
lbls2 := mustParseLabels ( ` { fizz="buzz", a="b"} ` )
userID := buildUserID ( 0 )
buildCompactedIndex := func ( ) * compactedIndex {
builder := NewBuilder ( )
stream := buildStream ( lbls1 , buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 10 ) ) , "" )
builder . AddSeries ( stream . labels , stream . fp , stream . chunks )
stream = buildStream ( lbls2 , buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) , "" )
builder . AddSeries ( stream . labels , stream . fp , stream . chunks )
builder . FinalizeChunks ( )
return newCompactedIndex ( context . Background ( ) , tableName , buildUserID ( 0 ) , t . TempDir ( ) , periodConfig , builder )
}
expectedChunkEntries := map [ string ] [ ] retention . ChunkEntry {
lbls1 . String ( ) : chunkMetasToChunkEntry ( schemaCfg , userID , lbls1 , buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 10 ) ) ) ,
lbls2 . String ( ) : chunkMetasToChunkEntry ( schemaCfg , userID , lbls2 , buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) ) ,
}
testCtx := setupCompactedIndex ( t )
for name , tc := range map [ string ] struct {
deleteChunks map [ string ] index . ChunkMetas
@ -689,101 +651,101 @@ func TestCompactedIndex(t *testing.T) {
} {
"no changes" : {
finalExpectedChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 10 ) ) ,
lbls2 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) ,
testCtx . lbls1 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 10 ) ) ,
testCtx . lbls2 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 20 ) ) ,
} ,
} ,
"delete some chunks from a stream" : {
deleteChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : append ( buildChunkMetas ( shiftTableStart ( 3 ) , shiftTableStart ( 5 ) ) , buildChunkMetas ( shiftTableStart ( 7 ) , shiftTableStart ( 8 ) ) ... ) ,
testCtx . lbls1 . String ( ) : append ( buildChunkMetas ( testCtx . shiftTableStart ( 3 ) , testCtx . shiftTableStart ( 5 ) ) , buildChunkMetas ( testCtx . shiftTableStart ( 7 ) , testCtx . shiftTableStart ( 8 ) ) ... ) ,
} ,
finalExpectedChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : append ( buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 2 ) ) , append ( buildChunkMetas ( shiftTableStart ( 6 ) , shiftTableStart ( 6 ) ) , buildChunkMetas ( shiftTableStart ( 9 ) , shiftTableStart ( 10 ) ) ... ) ... ) ,
lbls2 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) ,
testCtx . lbls1 . String ( ) : append ( buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 2 ) ) , append ( buildChunkMetas ( testCtx . shiftTableStart ( 6 ) , testCtx . shiftTableStart ( 6 ) ) , buildChunkMetas ( testCtx . shiftTableStart ( 9 ) , testCtx . shiftTableStart ( 10 ) ) ... ) ... ) ,
testCtx . lbls2 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 20 ) ) ,
} ,
} ,
"delete all chunks from a stream" : {
deleteChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 10 ) ) ,
testCtx . lbls1 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 10 ) ) ,
} ,
deleteSeries : [ ] labels . Labels { lbls1 } ,
deleteSeries : [ ] labels . Labels { testCtx . lbls1 } ,
finalExpectedChunks : map [ string ] index . ChunkMetas {
lbls2 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) ,
testCtx . lbls2 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 20 ) ) ,
} ,
} ,
"add some chunks to a stream" : {
addChunks : [ ] chunk . Chunk {
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( shiftTableStart ( 11 ) , shiftTableStart ( 11 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( testCtx . shiftTableStart ( 11 ) , testCtx . shiftTableStart ( 11 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( shiftTableStart ( 12 ) , shiftTableStart ( 12 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( testCtx . shiftTableStart ( 12 ) , testCtx . shiftTableStart ( 12 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
} ,
finalExpectedChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 12 ) ) ,
lbls2 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) ,
testCtx . lbls1 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 12 ) ) ,
testCtx . lbls2 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 20 ) ) ,
} ,
} ,
"add some chunks out of table interval to a stream" : {
addChunks : [ ] chunk . Chunk {
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( shiftTableStart ( 11 ) , shiftTableStart ( 11 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( testCtx . shiftTableStart ( 11 ) , testCtx . shiftTableStart ( 11 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( shiftTableStart ( 12 ) , shiftTableStart ( 12 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( testCtx . shiftTableStart ( 12 ) , testCtx . shiftTableStart ( 12 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
// these chunks should not be added
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( int64 ( tableInterval . End + 100 ) , int64 ( tableInterval . End + 100 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( int64 ( testCtx . tableInterval . End + 100 ) , int64 ( testCtx . tableInterval . End + 100 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( int64 ( tableInterval . End + 200 ) , int64 ( tableInterval . End + 200 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( int64 ( testCtx . tableInterval . End + 200 ) , int64 ( testCtx . tableInterval . End + 200 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
} ,
finalExpectedChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 12 ) ) ,
lbls2 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) ,
testCtx . lbls1 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 12 ) ) ,
testCtx . lbls2 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 20 ) ) ,
} ,
} ,
"add and delete some chunks in a stream" : {
addChunks : [ ] chunk . Chunk {
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( shiftTableStart ( 11 ) , shiftTableStart ( 11 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( testCtx . shiftTableStart ( 11 ) , testCtx . shiftTableStart ( 11 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
{
Metric : lbls1 ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( shiftTableStart ( 12 ) , shiftTableStart ( 12 ) ) [ 0 ] , lbls1 ) ,
Metric : testCtx . lbls1 ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( testCtx . shiftTableStart ( 12 ) , testCtx . shiftTableStart ( 12 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
} ,
deleteChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : buildChunkMetas ( shiftTableStart ( 3 ) , shiftTableStart ( 5 ) ) ,
testCtx . lbls1 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 3 ) , testCtx . shiftTableStart ( 5 ) ) ,
} ,
finalExpectedChunks : map [ string ] index . ChunkMetas {
lbls1 . String ( ) : append ( buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 2 ) ) , buildChunkMetas ( shiftTableStart ( 6 ) , shiftTableStart ( 12 ) ) ... ) ,
lbls2 . String ( ) : buildChunkMetas ( shiftTableStart ( 0 ) , shiftTableStart ( 20 ) ) ,
testCtx . lbls1 . String ( ) : append ( buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 2 ) ) , buildChunkMetas ( testCtx . shiftTableStart ( 6 ) , testCtx . shiftTableStart ( 12 ) ) ... ) ,
testCtx . lbls2 . String ( ) : buildChunkMetas ( testCtx . shiftTableStart ( 0 ) , testCtx . shiftTableStart ( 20 ) ) ,
} ,
} ,
"adding chunk to non-existing stream should error" : {
addChunks : [ ] chunk . Chunk {
{
Metric : labels . NewBuilder ( lbls1 ) . Set ( "new" , "label" ) . Labels ( ) ,
ChunkRef : chunkMetaToChunkRef ( userID , buildChunkMetas ( shiftTableStart ( 11 ) , shiftTableStart ( 11 ) ) [ 0 ] , lbls1 ) ,
Metric : labels . NewBuilder ( testCtx . lbls1 ) . Set ( "new" , "label" ) . Labels ( ) ,
ChunkRef : chunkMetaToChunkRef ( testCtx . userID , buildChunkMetas ( testCtx . shiftTableStart ( 11 ) , testCtx . shiftTableStart ( 11 ) ) [ 0 ] , testCtx . lbls1 ) ,
Data : dummyChunkData { } ,
} ,
} ,
@ -791,10 +753,10 @@ func TestCompactedIndex(t *testing.T) {
} ,
} {
t . Run ( name , func ( t * testing . T ) {
compactedIndex := buildCompactedIndex ( )
compactedIndex := testCtx . buildCompactedIndex ( )
foundChunkEntries := map [ string ] [ ] retention . ChunkEntry { }
err := compactedIndex . ForEachChunk ( func ( chunkEntry retention . ChunkEntry ) ( deleteChunk bool , err error ) {
err := compactedIndex . ForEachChunk ( context . Background ( ) , func ( chunkEntry retention . ChunkEntry ) ( deleteChunk bool , err error ) {
seriesIDStr := string ( chunkEntry . SeriesID )
foundChunkEntries [ seriesIDStr ] = append ( foundChunkEntries [ seriesIDStr ] , chunkEntry )
if chks , ok := tc . deleteChunks [ string ( chunkEntry . SeriesID ) ] ; ok {
@ -809,7 +771,7 @@ func TestCompactedIndex(t *testing.T) {
} )
require . NoError ( t , err )
require . Equal ( t , expectedChunkEntries , foundChunkEntries )
require . Equal ( t , testCtx . expectedChunkEntries , foundChunkEntries )
for _ , lbls := range tc . deleteSeries {
require . NoError ( t , compactedIndex . CleanupSeries ( nil , lbls ) )
@ -839,6 +801,79 @@ func TestCompactedIndex(t *testing.T) {
}
// TestIteratorContextCancelation verifies that ForEachChunk honors context
// cancellation: iterating with an already-canceled context must surface
// context.Canceled instead of walking the index.
func TestIteratorContextCancelation(t *testing.T) {
	testCtx := setupCompactedIndex(t)
	idx := testCtx.buildCompactedIndex()

	canceledCtx, cancel := context.WithCancel(context.Background())
	cancel() // cancel up front so iteration should abort immediately

	var seen []retention.ChunkEntry
	err := idx.ForEachChunk(canceledCtx, func(entry retention.ChunkEntry) (bool, error) {
		seen = append(seen, entry)
		return false, nil
	})
	require.ErrorIs(t, err, context.Canceled)
}
// testContext bundles the fixtures shared by the compacted-index tests.
// Field order matters: setupCompactedIndex initializes it with a positional
// composite literal.
type testContext struct {
	lbls1  labels.Labels // label set of the first test stream
	lbls2  labels.Labels // label set of the second test stream
	userID string
	tableInterval model.Interval // time range covered by the test index table
	// shiftTableStart returns tableInterval.Start shifted by ms milliseconds.
	shiftTableStart func(ms int64) int64
	// buildCompactedIndex builds a fresh compactedIndex seeded with one
	// stream per label set.
	buildCompactedIndex func() *compactedIndex
	// expectedChunkEntries maps each stream's label string to the chunk
	// entries expected when iterating the freshly built index.
	expectedChunkEntries map[string][]retention.ChunkEntry
}
// setupCompactedIndex builds the fixtures shared by the compacted-index
// tests: a single-period v12 schema, the index table covering the current
// time, two label sets with one stream each, and the chunk entries expected
// when iterating the resulting index.
func setupCompactedIndex(t *testing.T) *testContext {
	t.Helper()
	now := model.Now()
	periodConfig := config.PeriodConfig{
		IndexTables: config.PeriodicTableConfig{Period: config.ObjectStorageIndexRequiredPeriod},
		Schema:      "v12",
	}
	schemaCfg := config.SchemaConfig{
		Configs: []config.PeriodConfig{periodConfig},
	}
	indexBuckets, err := indexBuckets(now, now, []config.TableRange{periodConfig.GetIndexTableNumberRange(config.DayTime{Time: now})})
	require.NoError(t, err)
	tableName := indexBuckets[0]
	tableInterval := retention.ExtractIntervalFromTableName(tableName)
	// shiftTableStart shifts tableInterval.Start by the given amount of milliseconds.
	// It is used for building chunkmetas relative to start time of the table.
	shiftTableStart := func(ms int64) int64 {
		return int64(tableInterval.Start) + ms
	}

	lbls1 := mustParseLabels(`{foo="bar", a="b"}`)
	lbls2 := mustParseLabels(`{fizz="buzz", a="b"}`)
	userID := buildUserID(0)

	// buildCompactedIndex seeds a fresh index with one stream per label set:
	// lbls1 covers [start, start+10ms], lbls2 covers [start, start+20ms].
	buildCompactedIndex := func() *compactedIndex {
		builder := NewBuilder()
		stream := buildStream(lbls1, buildChunkMetas(shiftTableStart(0), shiftTableStart(10)), "")
		builder.AddSeries(stream.labels, stream.fp, stream.chunks)

		stream = buildStream(lbls2, buildChunkMetas(shiftTableStart(0), shiftTableStart(20)), "")
		builder.AddSeries(stream.labels, stream.fp, stream.chunks)

		builder.FinalizeChunks()

		// Reuse userID rather than calling buildUserID(0) again, so the
		// index is always built for the same tenant the expectations
		// below are keyed on.
		return newCompactedIndex(context.Background(), tableName, userID, t.TempDir(), periodConfig, builder)
	}

	expectedChunkEntries := map[string][]retention.ChunkEntry{
		lbls1.String(): chunkMetasToChunkEntry(schemaCfg, userID, lbls1, buildChunkMetas(shiftTableStart(0), shiftTableStart(10))),
		lbls2.String(): chunkMetasToChunkEntry(schemaCfg, userID, lbls2, buildChunkMetas(shiftTableStart(0), shiftTableStart(20))),
	}

	// Keyed fields instead of a positional literal: this won't silently
	// misassign values if testContext's field order ever changes.
	return &testContext{
		lbls1:                lbls1,
		lbls2:                lbls2,
		userID:               userID,
		tableInterval:        tableInterval,
		shiftTableStart:      shiftTableStart,
		buildCompactedIndex:  buildCompactedIndex,
		expectedChunkEntries: expectedChunkEntries,
	}
}
// dummyChunkData is a placeholder chunk payload used when adding chunks in
// tests. It embeds chunk.Data so it satisfies the chunk data interface
// without carrying real data; presumably none of its methods are invoked by
// the code paths under test — verify against the index writer.
type dummyChunkData struct {
	chunk.Data
}