loki/pkg/dataobj/sections/streams/iter.go

package streams

import (
	"context"
	"errors"
	"fmt"
	"io"
	"time"
	"unsafe"

	"github.com/grafana/loki/v3/pkg/dataobj"
	"github.com/grafana/loki/v3/pkg/dataobj/internal/dataset"
	"github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/datasetmd"
	"github.com/grafana/loki/v3/pkg/dataobj/internal/result"
	"github.com/grafana/loki/v3/pkg/dataobj/internal/util/symbolizer"
	"github.com/grafana/loki/v3/pkg/dataobj/sections/internal/columnar"
	"github.com/grafana/loki/v3/pkg/util/labelpool"
)

// Iter iterates over streams in the provided object. All streams sections
// are iterated over in order.
func Iter(ctx context.Context, obj *dataobj.Object) result.Seq[Stream] {
	return result.Iter(func(yield func(Stream) bool) error {
		for i, section := range obj.Sections().Filter(CheckSection) {
			streamsSection, err := Open(ctx, section)
			if err != nil {
				return fmt.Errorf("opening section %d: %w", i, err)
			}

			for result := range IterSection(ctx, streamsSection) {
				if result.Err() != nil || !yield(result.MustValue()) {
					return result.Err()
				}
			}
		}
		return nil
	})
}
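
// CollectStreams is a hypothetical convenience wrapper (not part of the
// upstream file) sketching how Iter's sequence is typically consumed: each
// yielded result carries either a Stream or an error, and iteration stops at
// the first error. It is equivalent in spirit to result.Collect, written out
// long-hand here to make the error handling explicit.
func CollectStreams(ctx context.Context, obj *dataobj.Object) ([]Stream, error) {
	var streams []Stream
	for res := range Iter(ctx, obj) {
		if err := res.Err(); err != nil {
			return nil, err
		}
		streams = append(streams, res.MustValue())
	}
	return streams, nil
}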

// IterSection iterates over all streams in the provided section.
func IterSection(ctx context.Context, section *Section) result.Seq[Stream] {
	return result.Iter(func(yield func(Stream) bool) error {
		columnarSection := section.inner

		dset, err := columnar.MakeDataset(columnarSection, columnarSection.Columns())
		if err != nil {
			return fmt.Errorf("creating columns dataset: %w", err)
		}

		columns, err := result.Collect(dset.ListColumns(ctx))
		if err != nil {
			return err
		}

		r := dataset.NewReader(dataset.ReaderOptions{
			Dataset:  dset,
			Columns:  columns,
			Prefetch: true,
		})
		defer r.Close()

		var rows [1]dataset.Row
		for {
			n, err := r.Read(ctx, rows[:])
			if err != nil && !errors.Is(err, io.EOF) {
				return err
			} else if n == 0 && errors.Is(err, io.EOF) {
				return nil
			}

			var stream Stream
			for _, row := range rows[:n] {
				if err := decodeRow(section.Columns(), row, &stream, nil); err != nil {
					return err
				}

				if !yield(stream) {
					return nil
				}
			}
		}
	})
}
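
// firstStream is a hypothetical helper (not part of the upstream file)
// showing that the sequence supports early termination: returning from the
// range body makes yield return false, which stops the reader loop above
// before the section is fully read.
func firstStream(ctx context.Context, section *Section) (Stream, bool, error) {
	for res := range IterSection(ctx, section) {
		if err := res.Err(); err != nil {
			return Stream{}, false, err
		}
		return res.MustValue(), true, nil
	}
	return Stream{}, false, nil
}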

// decodeRow decodes a stream from a [dataset.Row], using the provided columns
// to determine the column type. The list of columns must match the columns
// used to create the row.
//
// The sym argument is used for reusing label values between calls to
// decodeRow. If sym is nil, label value strings are always allocated.
func decodeRow(columns []*Column, row dataset.Row, stream *Stream, sym *symbolizer.Symbolizer) error {
	labelBuilder := labelpool.Get()
	defer labelpool.Put(labelBuilder)

	for columnIndex, columnValue := range row.Values {
		if columnValue.IsNil() || columnValue.IsZero() {
			continue
		}

		column := columns[columnIndex]
		switch column.Type {
		case ColumnTypeStreamID:
			if ty := columnValue.Type(); ty != datasetmd.PHYSICAL_TYPE_INT64 {
				return fmt.Errorf("invalid type %s for %s", ty, column.Type)
			}
			stream.ID = columnValue.Int64()
		case ColumnTypeMinTimestamp:
			if ty := columnValue.Type(); ty != datasetmd.PHYSICAL_TYPE_INT64 {
				return fmt.Errorf("invalid type %s for %s", ty, column.Type)
			}
			stream.MinTimestamp = time.Unix(0, columnValue.Int64())
		case ColumnTypeMaxTimestamp:
			if ty := columnValue.Type(); ty != datasetmd.PHYSICAL_TYPE_INT64 {
				return fmt.Errorf("invalid type %s for %s", ty, column.Type)
			}
			stream.MaxTimestamp = time.Unix(0, columnValue.Int64())
		case ColumnTypeRows:
			if ty := columnValue.Type(); ty != datasetmd.PHYSICAL_TYPE_INT64 {
				return fmt.Errorf("invalid type %s for %s", ty, column.Type)
			}
			stream.Rows = int(columnValue.Int64())
		case ColumnTypeUncompressedSize:
			if ty := columnValue.Type(); ty != datasetmd.PHYSICAL_TYPE_INT64 {
				return fmt.Errorf("invalid type %s for %s", ty, column.Type)
			}
			stream.UncompressedSize = columnValue.Int64()
		case ColumnTypeLabel:
			if ty := columnValue.Type(); ty != datasetmd.PHYSICAL_TYPE_BINARY {
				return fmt.Errorf("invalid type %s for %s", ty, column.Type)
			}
			if sym != nil {
				labelBuilder.Add(column.Name, sym.Get(unsafeString(columnValue.Binary())))
			} else {
				labelBuilder.Add(column.Name, string(columnValue.Binary()))
			}
		default:
			// TODO(rfratto): We probably don't want to return an error on unexpected
			// columns because it breaks forward compatibility. Should we log
			// something here?
		}
	}

	// Commit the final set of labels to the stream.
	labelBuilder.Sort()
	stream.Labels = labelBuilder.Labels()
	return nil
}
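
// decodeAllRows is a hypothetical helper (not part of the upstream file)
// sketching how a caller can reuse one symbolizer across decodeRow calls so
// that repeated label values share a single allocation, as described in the
// decodeRow comment above. Constructing the Symbolizer is left to the caller,
// since its constructor is not exercised in this file.
func decodeAllRows(columns []*Column, rows []dataset.Row, sym *symbolizer.Symbolizer) ([]Stream, error) {
	streams := make([]Stream, 0, len(rows))
	for _, row := range rows {
		var stream Stream
		if err := decodeRow(columns, row, &stream, sym); err != nil {
			return nil, err
		}
		streams = append(streams, stream)
	}
	return streams, nil
}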

// unsafeSlice returns a byte slice aliasing the contents of data without
// copying. If capacity <= 0, the capacity of the returned slice defaults to
// len(data). The returned slice must not be mutated, since it aliases the
// backing bytes of an immutable string.
func unsafeSlice(data string, capacity int) []byte {
	if capacity <= 0 {
		capacity = len(data)
	}
	return unsafe.Slice(unsafe.StringData(data), capacity)
}

// unsafeString returns a string aliasing the contents of data without
// copying. The returned string is only valid as long as data is not mutated.
func unsafeString(data []byte) string {
	return unsafe.String(unsafe.SliceData(data), len(data))
}
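
// A cautionary note (illustrative, not upstream commentary): because these
// conversions alias memory rather than copy it, mutating the input afterwards
// changes the result:
//
//	b := []byte("job")
//	s := unsafeString(b)
//	b[0] = 'J' // s now reads "Job"
//
// This is why decodeRow only passes the unsafe string to sym.Get, which is
// assumed to intern (copy) the value, while the nil-symbolizer path copies
// explicitly via string(columnValue.Binary()).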