UnifiedStorage: Rename Batch processing to Bulk (#101413)

pull/101447/head
Ryan McKinley 5 months ago committed by GitHub
parent 1d2f271c95
commit 806c043e45
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 4
      pkg/cmd/grafana-cli/commands/datamigrations/to_unified_storage.go
  2. 14
      pkg/plugins/backendplugin/pluginextensionv2/rendererv2.pb.go
  3. 14
      pkg/plugins/backendplugin/pluginextensionv2/sanitizer.pb.go
  4. 14
      pkg/plugins/backendplugin/secretsmanagerplugin/secretsmanager.pb.go
  5. 6
      pkg/registry/apis/dashboard/legacy/client.go
  6. 50
      pkg/registry/apis/dashboard/legacy/migrate.go
  7. 2
      pkg/registry/apis/dashboard/search_test.go
  8. 14
      pkg/services/authz/proto/v1/extention.pb.go
  9. 14
      pkg/services/ngalert/store/proto/v1/alert_rule_state.pb.go
  10. 2
      pkg/storage/unified/apistore/store_test.go
  11. 36
      pkg/storage/unified/parquet/client.go
  12. 24
      pkg/storage/unified/parquet/reader.go
  13. 24
      pkg/storage/unified/parquet/writer.go
  14. 70
      pkg/storage/unified/resource/bulk.go
  15. 12
      pkg/storage/unified/resource/client.go
  16. 2
      pkg/storage/unified/resource/keys.go
  17. 997
      pkg/storage/unified/resource/resource.pb.go
  18. 10
      pkg/storage/unified/resource/resource.proto
  19. 92
      pkg/storage/unified/resource/resource_grpc.pb.go
  20. 2
      pkg/storage/unified/resource/server.go
  21. 4
      pkg/storage/unified/sql/backend.go
  22. 83
      pkg/storage/unified/sql/bulk.go
  23. 2
      pkg/storage/unified/sql/bulk_test.go
  24. 2
      pkg/storage/unified/sql/notifier.go
  25. 15
      pkg/storage/unified/sql/notifier_sql.go
  26. 35
      pkg/storage/unified/sql/notifier_sql_test.go
  27. 2
      pkg/storage/unified/sql/service.go

@ -199,11 +199,11 @@ func newUnifiedClient(cfg *setting.Cfg, sqlStore db.DB) (resource.ResourceClient
})
}
func newParquetClient(file *os.File) (resource.BatchStoreClient, error) {
func newParquetClient(file *os.File) (resource.BulkStoreClient, error) {
writer, err := parquet.NewParquetWriter(file)
if err != nil {
return nil, err
}
client := parquet.NewBatchResourceWriterClient(writer)
client := parquet.NewBulkResourceWriterClient(writer)
return client, nil
}

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.1
// protoc-gen-go v1.36.5
// protoc (unknown)
// source: rendererv2.proto
@ -11,6 +11,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -394,7 +395,7 @@ func (x *RenderCSVResponse) GetFileName() string {
var File_rendererv2_proto protoreflect.FileDescriptor
var file_rendererv2_proto_rawDesc = []byte{
var file_rendererv2_proto_rawDesc = string([]byte{
0x0a, 0x10, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x65, 0x72, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x76, 0x32, 0x22, 0x24, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4c,
@ -474,16 +475,16 @@ var file_rendererv2_proto_rawDesc = []byte{
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x3b, 0x70,
0x6c, 0x75, 0x67, 0x69, 0x6e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x76, 0x32,
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
})
var (
file_rendererv2_proto_rawDescOnce sync.Once
file_rendererv2_proto_rawDescData = file_rendererv2_proto_rawDesc
file_rendererv2_proto_rawDescData []byte
)
func file_rendererv2_proto_rawDescGZIP() []byte {
file_rendererv2_proto_rawDescOnce.Do(func() {
file_rendererv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_rendererv2_proto_rawDescData)
file_rendererv2_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rendererv2_proto_rawDesc), len(file_rendererv2_proto_rawDesc)))
})
return file_rendererv2_proto_rawDescData
}
@ -523,7 +524,7 @@ func file_rendererv2_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_rendererv2_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_rendererv2_proto_rawDesc), len(file_rendererv2_proto_rawDesc)),
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
@ -534,7 +535,6 @@ func file_rendererv2_proto_init() {
MessageInfos: file_rendererv2_proto_msgTypes,
}.Build()
File_rendererv2_proto = out.File
file_rendererv2_proto_rawDesc = nil
file_rendererv2_proto_goTypes = nil
file_rendererv2_proto_depIdxs = nil
}

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.1
// protoc-gen-go v1.36.5
// protoc (unknown)
// source: sanitizer.proto
@ -11,6 +11,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -142,7 +143,7 @@ func (x *SanitizeResponse) GetSanitized() []byte {
var File_sanitizer_proto protoreflect.FileDescriptor
var file_sanitizer_proto_rawDesc = []byte{
var file_sanitizer_proto_rawDesc = string([]byte{
0x0a, 0x0f, 0x73, 0x61, 0x6e, 0x69, 0x74, 0x69, 0x7a, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
0x6f, 0x6e, 0x76, 0x32, 0x22, 0x7f, 0x0a, 0x0f, 0x53, 0x61, 0x6e, 0x69, 0x74, 0x69, 0x7a, 0x65,
@ -166,16 +167,16 @@ var file_sanitizer_proto_rawDesc = []byte{
0x61, 0x6e, 0x69, 0x74, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
0x15, 0x5a, 0x13, 0x2e, 0x3b, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x65, 0x78, 0x74, 0x65, 0x6e,
0x73, 0x69, 0x6f, 0x6e, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
})
var (
file_sanitizer_proto_rawDescOnce sync.Once
file_sanitizer_proto_rawDescData = file_sanitizer_proto_rawDesc
file_sanitizer_proto_rawDescData []byte
)
func file_sanitizer_proto_rawDescGZIP() []byte {
file_sanitizer_proto_rawDescOnce.Do(func() {
file_sanitizer_proto_rawDescData = protoimpl.X.CompressGZIP(file_sanitizer_proto_rawDescData)
file_sanitizer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_sanitizer_proto_rawDesc), len(file_sanitizer_proto_rawDesc)))
})
return file_sanitizer_proto_rawDescData
}
@ -204,7 +205,7 @@ func file_sanitizer_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_sanitizer_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_sanitizer_proto_rawDesc), len(file_sanitizer_proto_rawDesc)),
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
@ -215,7 +216,6 @@ func file_sanitizer_proto_init() {
MessageInfos: file_sanitizer_proto_msgTypes,
}.Build()
File_sanitizer_proto = out.File
file_sanitizer_proto_rawDesc = nil
file_sanitizer_proto_goTypes = nil
file_sanitizer_proto_depIdxs = nil
}

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.1
// protoc-gen-go v1.36.5
// protoc (unknown)
// source: secretsmanager.proto
@ -11,6 +11,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -710,7 +711,7 @@ func (x *RenameSecretResponse) GetUserFriendlyError() string {
var File_secretsmanager_proto protoreflect.FileDescriptor
var file_secretsmanager_proto_rawDesc = []byte{
var file_secretsmanager_proto_rawDesc = string([]byte{
0x0a, 0x14, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x6d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0x4d, 0x0a, 0x03,
@ -836,16 +837,16 @@ var file_secretsmanager_proto_rawDesc = []byte{
0x65, 0x42, 0x19, 0x5a, 0x17, 0x2e, 0x2f, 0x3b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x6d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
})
var (
file_secretsmanager_proto_rawDescOnce sync.Once
file_secretsmanager_proto_rawDescData = file_secretsmanager_proto_rawDesc
file_secretsmanager_proto_rawDescData []byte
)
func file_secretsmanager_proto_rawDescGZIP() []byte {
file_secretsmanager_proto_rawDescOnce.Do(func() {
file_secretsmanager_proto_rawDescData = protoimpl.X.CompressGZIP(file_secretsmanager_proto_rawDescData)
file_secretsmanager_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_secretsmanager_proto_rawDesc), len(file_secretsmanager_proto_rawDesc)))
})
return file_secretsmanager_proto_rawDescData
}
@ -904,7 +905,7 @@ func file_secretsmanager_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_secretsmanager_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_secretsmanager_proto_rawDesc), len(file_secretsmanager_proto_rawDesc)),
NumEnums: 0,
NumMessages: 14,
NumExtensions: 0,
@ -915,7 +916,6 @@ func file_secretsmanager_proto_init() {
MessageInfos: file_secretsmanager_proto_msgTypes,
}.Build()
File_secretsmanager_proto = out.File
file_secretsmanager_proto_rawDesc = nil
file_secretsmanager_proto_goTypes = nil
file_secretsmanager_proto_depIdxs = nil
}

@ -90,7 +90,7 @@ func (d *directResourceClient) Watch(ctx context.Context, in *resource.WatchRequ
return nil, fmt.Errorf("watch not supported with direct resource client")
}
// BatchProcess implements resource.ResourceClient.
func (d *directResourceClient) BatchProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BatchStore_BatchProcessClient, error) {
return nil, fmt.Errorf("BatchProcess not supported with direct resource client")
// BulkProcess implements resource.ResourceClient.
func (d *directResourceClient) BulkProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BulkStore_BulkProcessClient, error) {
return nil, fmt.Errorf("BulkProcess not supported with direct resource client")
}

@ -25,7 +25,7 @@ import (
type MigrateOptions struct {
Namespace string
Store resource.BatchStoreClient
Store resource.BulkStoreClient
LargeObjects apistore.LargeObjectSupport
BlobStore resource.BlobStoreClient
Resources []schema.GroupResource
@ -36,7 +36,7 @@ type MigrateOptions struct {
// Read from legacy and write into unified storage
type LegacyMigrator interface {
Migrate(ctx context.Context, opts MigrateOptions) (*resource.BatchResponse, error)
Migrate(ctx context.Context, opts MigrateOptions) (*resource.BulkResponse, error)
}
// This can migrate Folders, Dashboards and LibraryPanels
@ -54,9 +54,9 @@ type BlobStoreInfo struct {
}
// migrate function -- works for a single kind
type migrator = func(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BatchStore_BatchProcessClient) (*BlobStoreInfo, error)
type migrator = func(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BulkStore_BulkProcessClient) (*BlobStoreInfo, error)
func (a *dashboardSqlAccess) Migrate(ctx context.Context, opts MigrateOptions) (*resource.BatchResponse, error) {
func (a *dashboardSqlAccess) Migrate(ctx context.Context, opts MigrateOptions) (*resource.BulkResponse, error) {
info, err := authlib.ParseNamespace(opts.Namespace)
if err != nil {
return nil, err
@ -71,7 +71,7 @@ func (a *dashboardSqlAccess) Migrate(ctx context.Context, opts MigrateOptions) (
}
migrators := []migrator{}
settings := resource.BatchSettings{
settings := resource.BulkSettings{
RebuildCollection: true,
SkipValidation: true,
}
@ -111,7 +111,7 @@ func (a *dashboardSqlAccess) Migrate(ctx context.Context, opts MigrateOptions) (
}
ctx = metadata.NewOutgoingContext(ctx, settings.ToMD())
stream, err := opts.Store.BatchProcess(ctx)
stream, err := opts.Store.BulkProcess(ctx)
if err != nil {
return nil, err
}
@ -132,7 +132,7 @@ func (a *dashboardSqlAccess) Migrate(ctx context.Context, opts MigrateOptions) (
return stream.CloseAndRecv()
}
func (a *dashboardSqlAccess) countValues(ctx context.Context, opts MigrateOptions) (*resource.BatchResponse, error) {
func (a *dashboardSqlAccess) countValues(ctx context.Context, opts MigrateOptions) (*resource.BulkResponse, error) {
sql, err := a.sql(ctx)
if err != nil {
return nil, err
@ -142,12 +142,12 @@ func (a *dashboardSqlAccess) countValues(ctx context.Context, opts MigrateOption
return nil, err
}
orgId := ns.OrgID
rsp := &resource.BatchResponse{}
rsp := &resource.BulkResponse{}
err = sql.DB.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
for _, res := range opts.Resources {
switch fmt.Sprintf("%s/%s", res.Group, res.Resource) {
case "folder.grafana.app/folders":
summary := &resource.BatchResponse_Summary{}
summary := &resource.BulkResponse_Summary{}
summary.Group = folders.GROUP
summary.Resource = folders.RESOURCE
_, err = sess.SQL("SELECT COUNT(*) FROM "+sql.Table("dashboard")+
@ -155,7 +155,7 @@ func (a *dashboardSqlAccess) countValues(ctx context.Context, opts MigrateOption
rsp.Summary = append(rsp.Summary, summary)
case "dashboard.grafana.app/librarypanels":
summary := &resource.BatchResponse_Summary{}
summary := &resource.BulkResponse_Summary{}
summary.Group = dashboard.GROUP
summary.Resource = dashboard.LIBRARY_PANEL_RESOURCE
_, err = sess.SQL("SELECT COUNT(*) FROM "+sql.Table("library_element")+
@ -163,7 +163,7 @@ func (a *dashboardSqlAccess) countValues(ctx context.Context, opts MigrateOption
rsp.Summary = append(rsp.Summary, summary)
case "dashboard.grafana.app/dashboards":
summary := &resource.BatchResponse_Summary{}
summary := &resource.BulkResponse_Summary{}
summary.Group = dashboard.GROUP
summary.Resource = dashboard.DASHBOARD_RESOURCE
rsp.Summary = append(rsp.Summary, summary)
@ -190,7 +190,7 @@ func (a *dashboardSqlAccess) countValues(ctx context.Context, opts MigrateOption
return rsp, nil
}
func (a *dashboardSqlAccess) migrateDashboards(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BatchStore_BatchProcessClient) (*BlobStoreInfo, error) {
func (a *dashboardSqlAccess) migrateDashboards(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BulkStore_BulkProcessClient) (*BlobStoreInfo, error) {
query := &DashboardQuery{
OrgID: orgId,
Limit: 100000000,
@ -229,7 +229,7 @@ func (a *dashboardSqlAccess) migrateDashboards(ctx context.Context, orgId int64,
return blobs, err
}
req := &resource.BatchRequest{
req := &resource.BulkRequest{
Key: &resource.ResourceKey{
Namespace: opts.Namespace,
Group: dashboard.GROUP,
@ -238,12 +238,12 @@ func (a *dashboardSqlAccess) migrateDashboards(ctx context.Context, orgId int64,
},
Value: body,
Folder: rows.row.FolderUID,
Action: resource.BatchRequest_ADDED,
Action: resource.BulkRequest_ADDED,
}
if dash.Generation > 1 {
req.Action = resource.BatchRequest_MODIFIED
req.Action = resource.BulkRequest_MODIFIED
} else if dash.Generation < 0 {
req.Action = resource.BatchRequest_DELETED
req.Action = resource.BulkRequest_DELETED
}
// With large object support
@ -296,7 +296,7 @@ func (a *dashboardSqlAccess) migrateDashboards(ctx context.Context, orgId int64,
return blobs, err
}
func (a *dashboardSqlAccess) migrateFolders(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BatchStore_BatchProcessClient) (*BlobStoreInfo, error) {
func (a *dashboardSqlAccess) migrateFolders(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BulkStore_BulkProcessClient) (*BlobStoreInfo, error) {
query := &DashboardQuery{
OrgID: orgId,
Limit: 100000000,
@ -341,7 +341,7 @@ func (a *dashboardSqlAccess) migrateFolders(ctx context.Context, orgId int64, op
return nil, err
}
req := &resource.BatchRequest{
req := &resource.BulkRequest{
Key: &resource.ResourceKey{
Namespace: opts.Namespace,
Group: "folder.grafana.app",
@ -350,12 +350,12 @@ func (a *dashboardSqlAccess) migrateFolders(ctx context.Context, orgId int64, op
},
Value: body,
Folder: rows.row.FolderUID,
Action: resource.BatchRequest_ADDED,
Action: resource.BulkRequest_ADDED,
}
if dash.Generation > 1 {
req.Action = resource.BatchRequest_MODIFIED
req.Action = resource.BulkRequest_MODIFIED
} else if dash.Generation < 0 {
req.Action = resource.BatchRequest_DELETED
req.Action = resource.BulkRequest_DELETED
}
opts.Progress(i, fmt.Sprintf("[v:%d] %s (%d)", dash.Generation, dash.Name, len(req.Value)))
@ -377,7 +377,7 @@ func (a *dashboardSqlAccess) migrateFolders(ctx context.Context, orgId int64, op
return nil, err
}
func (a *dashboardSqlAccess) migratePanels(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BatchStore_BatchProcessClient) (*BlobStoreInfo, error) {
func (a *dashboardSqlAccess) migratePanels(ctx context.Context, orgId int64, opts MigrateOptions, stream resource.BulkStore_BulkProcessClient) (*BlobStoreInfo, error) {
opts.Progress(-1, "migrating library panels...")
panels, err := a.GetLibraryPanels(ctx, LibraryPanelQuery{
OrgID: orgId,
@ -396,7 +396,7 @@ func (a *dashboardSqlAccess) migratePanels(ctx context.Context, orgId int64, opt
return nil, err
}
req := &resource.BatchRequest{
req := &resource.BulkRequest{
Key: &resource.ResourceKey{
Namespace: opts.Namespace,
Group: dashboard.GROUP,
@ -405,10 +405,10 @@ func (a *dashboardSqlAccess) migratePanels(ctx context.Context, orgId int64, opt
},
Value: body,
Folder: meta.GetFolder(),
Action: resource.BatchRequest_ADDED,
Action: resource.BulkRequest_ADDED,
}
if panel.Generation > 1 {
req.Action = resource.BatchRequest_MODIFIED
req.Action = resource.BulkRequest_MODIFIED
}
opts.Progress(i, fmt.Sprintf("[v:%d] %s (%d)", i, meta.GetName(), len(req.Value)))

@ -710,6 +710,6 @@ func (m *MockClient) ListRepositoryObjects(ctx context.Context, in *resource.Lis
func (m *MockClient) IsHealthy(ctx context.Context, in *resource.HealthCheckRequest, opts ...grpc.CallOption) (*resource.HealthCheckResponse, error) {
return nil, nil
}
func (m *MockClient) BatchProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BatchStore_BatchProcessClient, error) {
func (m *MockClient) BulkProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BulkStore_BulkProcessClient, error) {
return nil, nil
}

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.1
// protoc-gen-go v1.36.5
// protoc (unknown)
// source: extention.proto
@ -14,6 +14,7 @@ import (
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -853,7 +854,7 @@ func (x *BatchCheckGroupResource) GetItems() map[string]bool {
var File_extention_proto protoreflect.FileDescriptor
var file_extention_proto_rawDesc = []byte{
var file_extention_proto_rawDesc = string([]byte{
0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x12, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x74, 0x69,
0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
@ -1006,16 +1007,16 @@ var file_extention_proto_rawDesc = []byte{
0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x7a,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
})
var (
file_extention_proto_rawDescOnce sync.Once
file_extention_proto_rawDescData = file_extention_proto_rawDesc
file_extention_proto_rawDescData []byte
)
func file_extention_proto_rawDescGZIP() []byte {
file_extention_proto_rawDescOnce.Do(func() {
file_extention_proto_rawDescData = protoimpl.X.CompressGZIP(file_extention_proto_rawDescData)
file_extention_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_extention_proto_rawDesc), len(file_extention_proto_rawDesc)))
})
return file_extention_proto_rawDescData
}
@ -1081,7 +1082,7 @@ func file_extention_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_extention_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_extention_proto_rawDesc), len(file_extention_proto_rawDesc)),
NumEnums: 0,
NumMessages: 17,
NumExtensions: 0,
@ -1092,7 +1093,6 @@ func file_extention_proto_init() {
MessageInfos: file_extention_proto_msgTypes,
}.Build()
File_extention_proto = out.File
file_extention_proto_rawDesc = nil
file_extention_proto_goTypes = nil
file_extention_proto_depIdxs = nil
}

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.1
// protoc-gen-go v1.36.5
// protoc (unknown)
// source: alert_rule_state.proto
@ -12,6 +12,7 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@ -183,7 +184,7 @@ func (x *AlertInstances) GetInstances() []*AlertInstance {
var File_alert_rule_state_proto protoreflect.FileDescriptor
var file_alert_rule_state_proto_rawDesc = []byte{
var file_alert_rule_state_proto_rawDesc = string([]byte{
0x0a, 0x16, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61,
0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6e, 0x67, 0x61, 0x6c, 0x65, 0x72,
0x74, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
@ -239,16 +240,16 @@ var file_alert_rule_state_proto_rawDesc = []byte{
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x6e, 0x67, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2f, 0x73,
0x74, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
})
var (
file_alert_rule_state_proto_rawDescOnce sync.Once
file_alert_rule_state_proto_rawDescData = file_alert_rule_state_proto_rawDesc
file_alert_rule_state_proto_rawDescData []byte
)
func file_alert_rule_state_proto_rawDescGZIP() []byte {
file_alert_rule_state_proto_rawDescOnce.Do(func() {
file_alert_rule_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_alert_rule_state_proto_rawDescData)
file_alert_rule_state_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_alert_rule_state_proto_rawDesc), len(file_alert_rule_state_proto_rawDesc)))
})
return file_alert_rule_state_proto_rawDescData
}
@ -284,7 +285,7 @@ func file_alert_rule_state_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_alert_rule_state_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_alert_rule_state_proto_rawDesc), len(file_alert_rule_state_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
@ -295,7 +296,6 @@ func file_alert_rule_state_proto_init() {
MessageInfos: file_alert_rule_state_proto_msgTypes,
}.Build()
File_alert_rule_state_proto = out.File
file_alert_rule_state_proto_rawDesc = nil
file_alert_rule_state_proto_goTypes = nil
file_alert_rule_state_proto_depIdxs = nil
}

@ -156,7 +156,7 @@ type resourceClientMock struct {
resource.ResourceStoreClient
resource.ResourceIndexClient
resource.RepositoryIndexClient
resource.BatchStoreClient
resource.BulkStoreClient
resource.BlobStoreClient
resource.DiagnosticsClient
}

@ -12,29 +12,29 @@ import (
)
var (
_ resource.BatchStoreClient = (*writerClient)(nil)
_ resource.BatchStore_BatchProcessClient = (*writerClient)(nil)
_ resource.BulkStoreClient = (*writerClient)(nil)
_ resource.BulkStore_BulkProcessClient = (*writerClient)(nil)
errUnimplemented = errors.New("not implemented (BatchResourceWriter as BatchStoreClient shim)")
errUnimplemented = errors.New("not implemented (BulkResourceWriter as BulkStoreClient shim)")
)
type writerClient struct {
writer resource.BatchResourceWriter
writer resource.BulkResourceWriter
ctx context.Context
}
// NewBatchResourceWriterClient wraps a BatchResourceWriter so that it can be used as a ResourceStoreClient
func NewBatchResourceWriterClient(writer resource.BatchResourceWriter) *writerClient {
// NewBulkResourceWriterClient wraps a BulkResourceWriter so that it can be used as a ResourceStoreClient
func NewBulkResourceWriterClient(writer resource.BulkResourceWriter) *writerClient {
return &writerClient{writer: writer}
}
// Send implements resource.ResourceStore_BatchProcessClient.
func (w *writerClient) Send(req *resource.BatchRequest) error {
// Send implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) Send(req *resource.BulkRequest) error {
return w.writer.Write(w.ctx, req.Key, req.Value)
}
// BatchProcess implements resource.ResourceStoreClient.
func (w *writerClient) BatchProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BatchStore_BatchProcessClient, error) {
// BulkProcess implements resource.BulkStoreClient.
func (w *writerClient) BulkProcess(ctx context.Context, opts ...grpc.CallOption) (resource.BulkStore_BulkProcessClient, error) {
if w.ctx != nil {
return nil, fmt.Errorf("only one batch request supported")
}
@ -42,37 +42,37 @@ func (w *writerClient) BatchProcess(ctx context.Context, opts ...grpc.CallOption
return w, nil
}
// CloseAndRecv implements resource.ResourceStore_BatchProcessClient.
func (w *writerClient) CloseAndRecv() (*resource.BatchResponse, error) {
// CloseAndRecv implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) CloseAndRecv() (*resource.BulkResponse, error) {
return w.writer.CloseWithResults()
}
// CloseSend implements resource.ResourceStore_BatchProcessClient.
// CloseSend implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) CloseSend() error {
return w.writer.Close()
}
// Context implements resource.ResourceStore_BatchProcessClient.
// Context implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) Context() context.Context {
return w.ctx
}
// Header implements resource.ResourceStore_BatchProcessClient.
// Header implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) Header() (metadata.MD, error) {
return nil, errUnimplemented
}
// RecvMsg implements resource.ResourceStore_BatchProcessClient.
// RecvMsg implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) RecvMsg(m any) error {
return errUnimplemented
}
// SendMsg implements resource.ResourceStore_BatchProcessClient.
// SendMsg implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) SendMsg(m any) error {
return errUnimplemented
}
// Trailer implements resource.ResourceStore_BatchProcessClient.
// Trailer implements resource.BulkStore_BulkProcessClient.
func (w *writerClient) Trailer() metadata.MD {
return nil
}

@ -10,10 +10,10 @@ import (
)
var (
_ resource.BatchRequestIterator = (*parquetReader)(nil)
_ resource.BulkRequestIterator = (*parquetReader)(nil)
)
func NewParquetReader(inputPath string, batchSize int64) (resource.BatchRequestIterator, error) {
func NewParquetReader(inputPath string, batchSize int64) (resource.BulkRequestIterator, error) {
return newResourceReader(inputPath, batchSize)
}
@ -39,17 +39,17 @@ type parquetReader struct {
bufferIndex int
rowGroupIDX int
req *resource.BatchRequest
req *resource.BulkRequest
err error
}
// Next implements resource.BatchRequestIterator.
// Next implements resource.BulkRequestIterator.
func (r *parquetReader) Next() bool {
r.req = nil
for r.err == nil && r.reader != nil {
if r.bufferIndex >= r.bufferSize && r.value.reader.HasNext() {
r.bufferIndex = 0
r.err = r.readBatch()
r.err = r.readBulk()
if r.err != nil {
return false
}
@ -60,14 +60,14 @@ func (r *parquetReader) Next() bool {
i := r.bufferIndex
r.bufferIndex++
r.req = &resource.BatchRequest{
r.req = &resource.BulkRequest{
Key: &resource.ResourceKey{
Group: r.group.buffer[i].String(),
Resource: r.resource.buffer[i].String(),
Namespace: r.namespace.buffer[i].String(),
Name: r.name.buffer[i].String(),
},
Action: resource.BatchRequest_Action(r.action.buffer[i]),
Action: resource.BulkRequest_Action(r.action.buffer[i]),
Value: r.value.buffer[i].Bytes(),
Folder: r.folder.buffer[i].String(),
}
@ -87,12 +87,12 @@ func (r *parquetReader) Next() bool {
return false
}
// Request implements resource.BatchRequestIterator.
func (r *parquetReader) Request() *resource.BatchRequest {
// Request implements resource.BulkRequestIterator.
func (r *parquetReader) Request() *resource.BulkRequest {
return r.req
}
// RollbackRequested implements resource.BatchRequestIterator.
// RollbackRequested implements resource.BulkRequestIterator.
func (r *parquetReader) RollbackRequested() bool {
return r.err != nil
}
@ -163,7 +163,7 @@ func newResourceReader(inputPath string, batchSize int64) (*parquetReader, error
}
// get the first batch
err = reader.readBatch()
err = reader.readBulk()
if err != nil {
_ = rdr.Close()
return nil, err
@ -182,7 +182,7 @@ func (r *parquetReader) open(rgr *file.RowGroupReader) error {
return nil
}
func (r *parquetReader) readBatch() error {
func (r *parquetReader) readBulk() error {
r.bufferIndex = 0
r.bufferSize = 0
for i, c := range r.columns {

@ -18,7 +18,7 @@ import (
)
var (
_ resource.BatchResourceWriter = (*parquetWriter)(nil)
_ resource.BulkResourceWriter = (*parquetWriter)(nil)
)
// Write resources into a parquet file
@ -28,8 +28,8 @@ func NewParquetWriter(f io.Writer) (*parquetWriter, error) {
schema: newSchema(nil),
buffer: 1024 * 10 * 100 * 10, // 10MB
logger: logging.DefaultLogger.With("logger", "parquet.writer"),
rsp: &resource.BatchResponse{},
summary: make(map[string]*resource.BatchResponse_Summary),
rsp: &resource.BulkResponse{},
summary: make(map[string]*resource.BulkResponse_Summary),
}
props := parquet.NewWriterProperties(
@ -43,8 +43,8 @@ func NewParquetWriter(f io.Writer) (*parquetWriter, error) {
return w, w.init()
}
// ProcessBatch implements resource.BatchProcessingBackend.
func (w *parquetWriter) ProcessBatch(ctx context.Context, setting resource.BatchSettings, iter resource.BatchRequestIterator) *resource.BatchResponse {
// ProcessBulk implements resource.BulkProcessingBackend.
func (w *parquetWriter) ProcessBulk(ctx context.Context, setting resource.BulkSettings, iter resource.BulkRequestIterator) *resource.BulkResponse {
defer func() { _ = w.Close() }()
var err error
@ -66,7 +66,7 @@ func (w *parquetWriter) ProcessBatch(ctx context.Context, setting resource.Batch
w.logger.Warn("error closing parquet file", "err", err)
}
if rsp == nil {
rsp = &resource.BatchResponse{}
rsp = &resource.BulkResponse{}
}
if err != nil {
rsp.Error = resource.AsErrorResult(err)
@ -92,11 +92,11 @@ type parquetWriter struct {
action *array.Int8Builder
value *array.StringBuilder
rsp *resource.BatchResponse
summary map[string]*resource.BatchResponse_Summary
rsp *resource.BulkResponse
summary map[string]*resource.BulkResponse_Summary
}
func (w *parquetWriter) CloseWithResults() (*resource.BatchResponse, error) {
func (w *parquetWriter) CloseWithResults() (*resource.BulkResponse, error) {
err := w.Close()
return w.rsp, err
}
@ -181,14 +181,14 @@ func (w *parquetWriter) Write(ctx context.Context, key *resource.ResourceKey, va
return w.flush()
}
summary := w.summary[key.BatchID()]
summary := w.summary[key.NSGR()]
if summary == nil {
summary = &resource.BatchResponse_Summary{
summary = &resource.BulkResponse_Summary{
Namespace: key.Namespace,
Group: key.Group,
Resource: key.Resource,
}
w.summary[key.BatchID()] = summary
w.summary[key.NSGR()] = summary
w.rsp.Summary = append(w.rsp.Summary, summary)
}
summary.Count++

@ -21,30 +21,30 @@ func grpcMetaValueIsTrue(vals []string) bool {
return len(vals) == 1 && vals[0] == "true"
}
type BatchRequestIterator interface {
type BulkRequestIterator interface {
Next() bool
// The next event we should process
Request() *BatchRequest
Request() *BulkRequest
// Rollback requested
RollbackRequested() bool
}
type BatchProcessingBackend interface {
ProcessBatch(ctx context.Context, setting BatchSettings, iter BatchRequestIterator) *BatchResponse
type BulkProcessingBackend interface {
ProcessBulk(ctx context.Context, setting BulkSettings, iter BulkRequestIterator) *BulkResponse
}
type BatchResourceWriter interface {
type BulkResourceWriter interface {
io.Closer
Write(ctx context.Context, key *ResourceKey, value []byte) error
// Called when finished writing
CloseWithResults() (*BatchResponse, error)
CloseWithResults() (*BulkResponse, error)
}
type BatchSettings struct {
type BulkSettings struct {
// All requests will be within this namespace/group/resource
Collection []*ResourceKey
@ -56,7 +56,7 @@ type BatchSettings struct {
SkipValidation bool
}
func (x *BatchSettings) ToMD() metadata.MD {
func (x *BulkSettings) ToMD() metadata.MD {
md := make(metadata.MD)
if len(x.Collection) > 0 {
for _, v := range x.Collection {
@ -72,8 +72,8 @@ func (x *BatchSettings) ToMD() metadata.MD {
return md
}
func NewBatchSettings(md metadata.MD) (BatchSettings, error) {
settings := BatchSettings{}
func NewBulkSettings(md metadata.MD) (BulkSettings, error) {
settings := BulkSettings{}
for k, v := range md {
switch k {
case grpcMetaKeyCollection:
@ -94,13 +94,13 @@ func NewBatchSettings(md metadata.MD) (BatchSettings, error) {
return settings, nil
}
// BatchWrite implements ResourceServer.
// BulkProcess implements ResourceServer.
// All requests must be to the same NAMESPACE/GROUP/RESOURCE
func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
func (s *server) BulkProcess(stream BulkStore_BulkProcessServer) error {
ctx := stream.Context()
user, ok := authlib.AuthInfoFrom(ctx)
if !ok || user == nil {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: "no user found in context",
Code: http.StatusUnauthorized,
@ -110,7 +110,7 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: "unable to read metadata gRPC request",
Code: http.StatusPreconditionFailed,
@ -121,9 +121,9 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
checker: make(map[string]authlib.ItemChecker), // Can create
stream: stream,
}
settings, err := NewBatchSettings(md)
settings, err := NewBulkSettings(md)
if err != nil {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: "error reading settings",
Reason: err.Error(),
@ -133,7 +133,7 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
}
if len(settings.Collection) < 1 {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: "Missing target collection(s) in request header",
Code: http.StatusBadRequest,
@ -154,7 +154,7 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
Verb: utils.VerbDeleteCollection,
})
if err != nil || !rsp.Allowed {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: fmt.Sprintf("Requester must be able to: %s", utils.VerbDeleteCollection),
Code: http.StatusForbidden,
@ -163,14 +163,14 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
}
// This will be called for each request -- with the folder ID
runner.checker[k.BatchID()], err = access.Compile(ctx, user, authlib.ListRequest{
runner.checker[k.NSGR()], err = access.Compile(ctx, user, authlib.ListRequest{
Namespace: k.Namespace,
Group: k.Group,
Resource: k.Resource,
Verb: utils.VerbCreate,
})
if err != nil {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: "Unable to check `create` permission",
Code: http.StatusForbidden,
@ -179,17 +179,17 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
}
}
} else {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: "Batch currently only supports RebuildCollection",
Message: "Bulk currently only supports RebuildCollection",
Code: http.StatusBadRequest,
},
})
}
backend, ok := s.backend.(BatchProcessingBackend)
backend, ok := s.backend.(BulkProcessingBackend)
if !ok {
return stream.SendAndClose(&BatchResponse{
return stream.SendAndClose(&BulkResponse{
Error: &ErrorResult{
Message: "The server backend does not support batch processing",
Code: http.StatusNotImplemented,
@ -197,10 +197,10 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
})
}
// BatchProcess requests
rsp := backend.ProcessBatch(ctx, settings, runner)
// BulkProcess requests
rsp := backend.ProcessBulk(ctx, settings, runner)
if rsp == nil {
rsp = &BatchResponse{
rsp = &BulkResponse{
Error: &ErrorResult{
Code: http.StatusInternalServerError,
Message: "Nothing returned from process batch",
@ -233,18 +233,18 @@ func (s *server) BatchProcess(stream BatchStore_BatchProcessServer) error {
}
var (
_ BatchRequestIterator = (*batchRunner)(nil)
_ BulkRequestIterator = (*batchRunner)(nil)
)
type batchRunner struct {
stream BatchStore_BatchProcessServer
stream BulkStore_BulkProcessServer
rollback bool
request *BatchRequest
request *BulkRequest
err error
checker map[string]authlib.ItemChecker
}
// Next implements BatchRequestIterator.
// Next implements BulkRequestIterator.
func (b *batchRunner) Next() bool {
if b.rollback {
return true
@ -265,7 +265,7 @@ func (b *batchRunner) Next() bool {
if b.request != nil {
key := b.request.Key
k := key.BatchID()
k := key.NSGR()
checker, ok := b.checker[k]
if !ok {
b.err = fmt.Errorf("missing access control for: %s", k)
@ -279,15 +279,15 @@ func (b *batchRunner) Next() bool {
return false
}
// Request implements BatchRequestIterator.
func (b *batchRunner) Request() *BatchRequest {
// Request implements BulkRequestIterator.
func (b *batchRunner) Request() *BulkRequest {
if b.rollback {
return nil
}
return b.request
}
// RollbackRequested implements BatchRequestIterator.
// RollbackRequested implements BulkRequestIterator.
func (b *batchRunner) RollbackRequested() bool {
if b.rollback {
b.rollback = false // break iterator

@ -25,7 +25,7 @@ type ResourceClient interface {
ResourceStoreClient
ResourceIndexClient
RepositoryIndexClient
BatchStoreClient
BulkStoreClient
BlobStoreClient
DiagnosticsClient
}
@ -35,7 +35,7 @@ type resourceClient struct {
ResourceStoreClient
ResourceIndexClient
RepositoryIndexClient
BatchStoreClient
BulkStoreClient
BlobStoreClient
DiagnosticsClient
}
@ -46,7 +46,7 @@ func NewLegacyResourceClient(channel *grpc.ClientConn) ResourceClient {
ResourceStoreClient: NewResourceStoreClient(cc),
ResourceIndexClient: NewResourceIndexClient(cc),
RepositoryIndexClient: NewRepositoryIndexClient(cc),
BatchStoreClient: NewBatchStoreClient(cc),
BulkStoreClient: NewBulkStoreClient(cc),
BlobStoreClient: NewBlobStoreClient(cc),
DiagnosticsClient: NewDiagnosticsClient(cc),
}
@ -62,7 +62,7 @@ func NewLocalResourceClient(server ResourceServer) ResourceClient {
&ResourceIndex_ServiceDesc,
&RepositoryIndex_ServiceDesc,
&BlobStore_ServiceDesc,
&BatchStore_ServiceDesc,
&BulkStore_ServiceDesc,
&Diagnostics_ServiceDesc,
} {
channel.RegisterService(
@ -85,7 +85,7 @@ func NewLocalResourceClient(server ResourceServer) ResourceClient {
ResourceStoreClient: NewResourceStoreClient(cc),
ResourceIndexClient: NewResourceIndexClient(cc),
RepositoryIndexClient: NewRepositoryIndexClient(cc),
BatchStoreClient: NewBatchStoreClient(cc),
BulkStoreClient: NewBulkStoreClient(cc),
BlobStoreClient: NewBlobStoreClient(cc),
DiagnosticsClient: NewDiagnosticsClient(cc),
}
@ -127,7 +127,7 @@ func NewRemoteResourceClient(tracer tracing.Tracer, conn *grpc.ClientConn, cfg R
ResourceStoreClient: NewResourceStoreClient(cc),
ResourceIndexClient: NewResourceIndexClient(cc),
BlobStoreClient: NewBlobStoreClient(cc),
BatchStoreClient: NewBatchStoreClient(cc),
BulkStoreClient: NewBulkStoreClient(cc),
RepositoryIndexClient: NewRepositoryIndexClient(cc),
DiagnosticsClient: NewDiagnosticsClient(cc),
}, nil

@ -75,7 +75,7 @@ func (x *ResourceKey) ReadSearchID(v string) error {
}
// The namespace/group/resource
func (x *ResourceKey) BatchID() string {
func (x *ResourceKey) NSGR() string {
var sb strings.Builder
if x.Namespace == "" {
sb.WriteString(clusterNamespace)

File diff suppressed because it is too large Load Diff

@ -318,7 +318,7 @@ message WatchEvent {
Resource previous = 4;
}
message BatchRequest {
message BulkRequest {
enum Action {
// will be an error
UNKNOWN = 0;
@ -342,7 +342,7 @@ message BatchRequest {
string folder = 4;
}
message BatchResponse {
message BulkResponse {
message Summary {
string namespace = 1;
string group = 2;
@ -359,7 +359,7 @@ message BatchResponse {
// Collect a few invalid messages
message Rejected {
ResourceKey key = 1;
BatchRequest.Action action = 2;
BulkRequest.Action action = 2;
string error = 3;
}
@ -818,11 +818,11 @@ service ResourceStore {
rpc Watch(WatchRequest) returns (stream WatchEvent);
}
service BatchStore {
service BulkStore {
// Write multiple resources to the same Namespace/Group/Resource
// Events will not be sent until the stream is complete
// Only the *create* permission is checked
rpc BatchProcess(stream BatchRequest) returns (BatchResponse);
rpc BulkProcess(stream BulkRequest) returns (BulkResponse);
}
// Unlike the ResourceStore, this service can be exposed to clients directly

@ -386,128 +386,128 @@ var ResourceStore_ServiceDesc = grpc.ServiceDesc{
}
const (
BatchStore_BatchProcess_FullMethodName = "/resource.BatchStore/BatchProcess"
BulkStore_BulkProcess_FullMethodName = "/resource.BulkStore/BulkProcess"
)
// BatchStoreClient is the client API for BatchStore service.
// BulkStoreClient is the client API for BulkStore service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type BatchStoreClient interface {
type BulkStoreClient interface {
// Write multiple resources to the same Namespace/Group/Resource
// Events will not be sent until the stream is complete
// Only the *create* permission is checked
BatchProcess(ctx context.Context, opts ...grpc.CallOption) (BatchStore_BatchProcessClient, error)
BulkProcess(ctx context.Context, opts ...grpc.CallOption) (BulkStore_BulkProcessClient, error)
}
type batchStoreClient struct {
type bulkStoreClient struct {
cc grpc.ClientConnInterface
}
func NewBatchStoreClient(cc grpc.ClientConnInterface) BatchStoreClient {
return &batchStoreClient{cc}
func NewBulkStoreClient(cc grpc.ClientConnInterface) BulkStoreClient {
return &bulkStoreClient{cc}
}
func (c *batchStoreClient) BatchProcess(ctx context.Context, opts ...grpc.CallOption) (BatchStore_BatchProcessClient, error) {
func (c *bulkStoreClient) BulkProcess(ctx context.Context, opts ...grpc.CallOption) (BulkStore_BulkProcessClient, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &BatchStore_ServiceDesc.Streams[0], BatchStore_BatchProcess_FullMethodName, cOpts...)
stream, err := c.cc.NewStream(ctx, &BulkStore_ServiceDesc.Streams[0], BulkStore_BulkProcess_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &batchStoreBatchProcessClient{ClientStream: stream}
x := &bulkStoreBulkProcessClient{ClientStream: stream}
return x, nil
}
type BatchStore_BatchProcessClient interface {
Send(*BatchRequest) error
CloseAndRecv() (*BatchResponse, error)
type BulkStore_BulkProcessClient interface {
Send(*BulkRequest) error
CloseAndRecv() (*BulkResponse, error)
grpc.ClientStream
}
type batchStoreBatchProcessClient struct {
type bulkStoreBulkProcessClient struct {
grpc.ClientStream
}
func (x *batchStoreBatchProcessClient) Send(m *BatchRequest) error {
func (x *bulkStoreBulkProcessClient) Send(m *BulkRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *batchStoreBatchProcessClient) CloseAndRecv() (*BatchResponse, error) {
func (x *bulkStoreBulkProcessClient) CloseAndRecv() (*BulkResponse, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(BatchResponse)
m := new(BulkResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// BatchStoreServer is the server API for BatchStore service.
// All implementations should embed UnimplementedBatchStoreServer
// BulkStoreServer is the server API for BulkStore service.
// All implementations should embed UnimplementedBulkStoreServer
// for forward compatibility
type BatchStoreServer interface {
type BulkStoreServer interface {
// Write multiple resources to the same Namespace/Group/Resource
// Events will not be sent until the stream is complete
// Only the *create* permission is checked
BatchProcess(BatchStore_BatchProcessServer) error
BulkProcess(BulkStore_BulkProcessServer) error
}
// UnimplementedBatchStoreServer should be embedded to have forward compatible implementations.
type UnimplementedBatchStoreServer struct {
// UnimplementedBulkStoreServer should be embedded to have forward compatible implementations.
type UnimplementedBulkStoreServer struct {
}
func (UnimplementedBatchStoreServer) BatchProcess(BatchStore_BatchProcessServer) error {
return status.Errorf(codes.Unimplemented, "method BatchProcess not implemented")
func (UnimplementedBulkStoreServer) BulkProcess(BulkStore_BulkProcessServer) error {
return status.Errorf(codes.Unimplemented, "method BulkProcess not implemented")
}
// UnsafeBatchStoreServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to BatchStoreServer will
// UnsafeBulkStoreServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to BulkStoreServer will
// result in compilation errors.
type UnsafeBatchStoreServer interface {
mustEmbedUnimplementedBatchStoreServer()
type UnsafeBulkStoreServer interface {
mustEmbedUnimplementedBulkStoreServer()
}
func RegisterBatchStoreServer(s grpc.ServiceRegistrar, srv BatchStoreServer) {
s.RegisterService(&BatchStore_ServiceDesc, srv)
func RegisterBulkStoreServer(s grpc.ServiceRegistrar, srv BulkStoreServer) {
s.RegisterService(&BulkStore_ServiceDesc, srv)
}
func _BatchStore_BatchProcess_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(BatchStoreServer).BatchProcess(&batchStoreBatchProcessServer{ServerStream: stream})
func _BulkStore_BulkProcess_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(BulkStoreServer).BulkProcess(&bulkStoreBulkProcessServer{ServerStream: stream})
}
type BatchStore_BatchProcessServer interface {
SendAndClose(*BatchResponse) error
Recv() (*BatchRequest, error)
type BulkStore_BulkProcessServer interface {
SendAndClose(*BulkResponse) error
Recv() (*BulkRequest, error)
grpc.ServerStream
}
type batchStoreBatchProcessServer struct {
type bulkStoreBulkProcessServer struct {
grpc.ServerStream
}
func (x *batchStoreBatchProcessServer) SendAndClose(m *BatchResponse) error {
func (x *bulkStoreBulkProcessServer) SendAndClose(m *BulkResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *batchStoreBatchProcessServer) Recv() (*BatchRequest, error) {
m := new(BatchRequest)
func (x *bulkStoreBulkProcessServer) Recv() (*BulkRequest, error) {
m := new(BulkRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// BatchStore_ServiceDesc is the grpc.ServiceDesc for BatchStore service.
// BulkStore_ServiceDesc is the grpc.ServiceDesc for BulkStore service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var BatchStore_ServiceDesc = grpc.ServiceDesc{
ServiceName: "resource.BatchStore",
HandlerType: (*BatchStoreServer)(nil),
var BulkStore_ServiceDesc = grpc.ServiceDesc{
ServiceName: "resource.BulkStore",
HandlerType: (*BulkStoreServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "BatchProcess",
Handler: _BatchStore_BatchProcess_Handler,
StreamName: "BulkProcess",
Handler: _BulkStore_BulkProcess_Handler,
ClientStreams: true,
},
},

@ -26,7 +26,7 @@ import (
// ResourceServer implements all gRPC services
type ResourceServer interface {
ResourceStoreServer
BatchStoreServer
BulkStoreServer
ResourceIndexServer
RepositoryIndexServer
BlobStoreServer

@ -69,7 +69,7 @@ func NewBackend(opts BackendOptions) (Backend, error) {
dbProvider: opts.DBProvider,
pollingInterval: opts.PollingInterval,
watchBufferSize: opts.WatchBufferSize,
batchLock: &batchLock{running: make(map[string]bool)},
bulkLock: &bulkLock{running: make(map[string]bool)},
simulatedNetworkLatency: opts.SimulatedNetworkLatency,
}, nil
}
@ -92,7 +92,7 @@ type backend struct {
dbProvider db.DBProvider
db db.DB
dialect sqltemplate.Dialect
batchLock *batchLock
bulkLock *bulkLock
// watch streaming
//stream chan *resource.WatchEvent

@ -23,23 +23,24 @@ import (
)
var (
_ resource.BatchProcessingBackend = (*backend)(nil)
_ resource.BulkProcessingBackend = (*backend)(nil)
)
type batchRV struct {
type bulkRV struct {
max int64
counter int64
}
func newBatchRV() *batchRV {
// When executing a bulk import we can fake the RV values
func newBulkRV() *bulkRV {
t := time.Now().Truncate(time.Second * 10)
return &batchRV{
return &bulkRV{
max: (t.UnixMicro() / 10000000) * 10000000,
counter: 0,
}
}
func (x *batchRV) next(obj metav1.Object) int64 {
func (x *bulkRV) next(obj metav1.Object) int64 {
ts := obj.GetCreationTimestamp().UnixMicro()
anno := obj.GetAnnotations()
if anno != nil {
@ -56,23 +57,23 @@ func (x *batchRV) next(obj metav1.Object) int64 {
return (ts/10000000)*10000000 + x.counter
}
type batchLock struct {
type bulkLock struct {
running map[string]bool
mu sync.Mutex
}
func (x *batchLock) Start(keys []*resource.ResourceKey) error {
func (x *bulkLock) Start(keys []*resource.ResourceKey) error {
x.mu.Lock()
defer x.mu.Unlock()
// First verify that it is not already running
ids := make([]string, len(keys))
for i, k := range keys {
id := k.BatchID()
id := k.NSGR()
if x.running[id] {
return &apierrors.StatusError{ErrStatus: metav1.Status{
Code: http.StatusPreconditionFailed,
Message: "batch export is already running",
Message: "bulk export is already running",
}}
}
ids[i] = id
@ -85,47 +86,47 @@ func (x *batchLock) Start(keys []*resource.ResourceKey) error {
return nil
}
func (x *batchLock) Finish(keys []*resource.ResourceKey) {
func (x *bulkLock) Finish(keys []*resource.ResourceKey) {
x.mu.Lock()
defer x.mu.Unlock()
for _, k := range keys {
delete(x.running, k.BatchID())
delete(x.running, k.NSGR())
}
}
func (x *batchLock) Active() bool {
func (x *bulkLock) Active() bool {
x.mu.Lock()
defer x.mu.Unlock()
return len(x.running) > 0
}
func (b *backend) ProcessBatch(ctx context.Context, setting resource.BatchSettings, iter resource.BatchRequestIterator) *resource.BatchResponse {
err := b.batchLock.Start(setting.Collection)
func (b *backend) ProcessBulk(ctx context.Context, setting resource.BulkSettings, iter resource.BulkRequestIterator) *resource.BulkResponse {
err := b.bulkLock.Start(setting.Collection)
if err != nil {
return &resource.BatchResponse{
return &resource.BulkResponse{
Error: resource.AsErrorResult(err),
}
}
defer b.batchLock.Finish(setting.Collection)
defer b.bulkLock.Finish(setting.Collection)
// We may want to first write parquet, then read parquet
if b.dialect.DialectName() == "sqlite" {
file, err := os.CreateTemp("", "grafana-batch-export-*.parquet")
file, err := os.CreateTemp("", "grafana-bulk-export-*.parquet")
if err != nil {
return &resource.BatchResponse{
return &resource.BulkResponse{
Error: resource.AsErrorResult(err),
}
}
writer, err := parquet.NewParquetWriter(file)
if err != nil {
return &resource.BatchResponse{
return &resource.BulkResponse{
Error: resource.AsErrorResult(err),
}
}
// write batch to parquet
rsp := writer.ProcessBatch(ctx, setting, iter)
// write bulk to parquet
rsp := writer.ProcessBulk(ctx, setting, iter)
if rsp.Error != nil {
return rsp
}
@ -135,18 +136,18 @@ func (b *backend) ProcessBatch(ctx context.Context, setting resource.BatchSettin
// Replace the iterator with one from parquet
iter, err = parquet.NewParquetReader(file.Name(), 50)
if err != nil {
return &resource.BatchResponse{
return &resource.BulkResponse{
Error: resource.AsErrorResult(err),
}
}
}
return b.processBatch(ctx, setting, iter)
return b.processBulk(ctx, setting, iter)
}
// internal batch process
func (b *backend) processBatch(ctx context.Context, setting resource.BatchSettings, iter resource.BatchRequestIterator) *resource.BatchResponse {
rsp := &resource.BatchResponse{}
// internal bulk process
func (b *backend) processBulk(ctx context.Context, setting resource.BulkSettings, iter resource.BulkRequestIterator) *resource.BulkResponse {
rsp := &resource.BulkResponse{}
err := b.db.WithTx(ctx, ReadCommitted, func(ctx context.Context, tx db.Tx) error {
rollbackWithError := func(err error) error {
txerr := tx.Rollback()
@ -157,7 +158,7 @@ func (b *backend) processBatch(ctx context.Context, setting resource.BatchSettin
}
return err
}
batch := &batchWroker{
bulk := &bulkWroker{
ctx: ctx,
tx: tx,
dialect: b.dialect,
@ -165,18 +166,18 @@ func (b *backend) processBatch(ctx context.Context, setting resource.BatchSettin
}
// Calculate the RV based on incoming request timestamps
rv := newBatchRV()
rv := newBulkRV()
summaries := make(map[string]*resource.BatchResponse_Summary, len(setting.Collection)*4)
summaries := make(map[string]*resource.BulkResponse_Summary, len(setting.Collection)*4)
// First clear everything in the transaction
if setting.RebuildCollection {
for _, key := range setting.Collection {
summary, err := batch.deleteCollection(key)
summary, err := bulk.deleteCollection(key)
if err != nil {
return rollbackWithError(err)
}
summaries[key.BatchID()] = summary
summaries[key.NSGR()] = summary
rsp.Summary = append(rsp.Summary, summary)
}
}
@ -194,8 +195,8 @@ func (b *backend) processBatch(ctx context.Context, setting resource.BatchSettin
}
rsp.Processed++
if req.Action == resource.BatchRequest_UNKNOWN {
rsp.Rejected = append(rsp.Rejected, &resource.BatchResponse_Rejected{
if req.Action == resource.BulkRequest_UNKNOWN {
rsp.Rejected = append(rsp.Rejected, &resource.BulkResponse_Rejected{
Key: req.Key,
Action: req.Action,
Error: "unknown action",
@ -205,7 +206,7 @@ func (b *backend) processBatch(ctx context.Context, setting resource.BatchSettin
err := obj.UnmarshalJSON(req.Value)
if err != nil {
rsp.Rejected = append(rsp.Rejected, &resource.BatchResponse_Rejected{
rsp.Rejected = append(rsp.Rejected, &resource.BulkResponse_Rejected{
Key: req.Key,
Action: req.Action,
Error: "unable to unmarshal json",
@ -238,7 +239,7 @@ func (b *backend) processBatch(ctx context.Context, setting resource.BatchSettin
return rollbackWithError(fmt.Errorf("missing summary key for: %s", k))
}
err := batch.syncCollection(key, summary)
err := bulk.syncCollection(key, summary)
if err != nil {
return err
}
@ -257,7 +258,7 @@ func (b *backend) processBatch(ctx context.Context, setting resource.BatchSettin
return rsp
}
type batchWroker struct {
type bulkWroker struct {
ctx context.Context
tx db.ContextExecer
dialect sqltemplate.Dialect
@ -265,8 +266,8 @@ type batchWroker struct {
}
// This will remove everything from the `resource` and `resource_history` tables for a given namespace/group/resource
func (w *batchWroker) deleteCollection(key *resource.ResourceKey) (*resource.BatchResponse_Summary, error) {
summary := &resource.BatchResponse_Summary{
func (w *bulkWroker) deleteCollection(key *resource.ResourceKey) (*resource.BulkResponse_Summary, error) {
summary := &resource.BulkResponse_Summary{
Namespace: key.Namespace,
Group: key.Group,
Resource: key.Resource,
@ -303,8 +304,8 @@ func (w *batchWroker) deleteCollection(key *resource.ResourceKey) (*resource.Bat
}
// Copy the latest value from history into the active resource table
func (w *batchWroker) syncCollection(key *resource.ResourceKey, summary *resource.BatchResponse_Summary) error {
w.logger.Info("synchronize collection", "key", key.BatchID())
func (w *bulkWroker) syncCollection(key *resource.ResourceKey, summary *resource.BulkResponse_Summary) error {
w.logger.Info("synchronize collection", "key", key.NSGR())
_, err := dbutil.Exec(w.ctx, w.tx, sqlResourceInsertFromHistory, &sqlResourceInsertFromHistoryRequest{
SQLTemplate: sqltemplate.New(w.dialect),
Key: key,
@ -313,7 +314,7 @@ func (w *batchWroker) syncCollection(key *resource.ResourceKey, summary *resourc
return err
}
w.logger.Info("get stats (still in transaction)", "key", key.BatchID())
w.logger.Info("get stats (still in transaction)", "key", key.NSGR())
rows, err := dbutil.QueryRows(w.ctx, w.tx, sqlResourceStats, &sqlStatsRequest{
SQLTemplate: sqltemplate.New(w.dialect),
Namespace: key.Namespace,

@ -13,7 +13,7 @@ func TestBatch(t *testing.T) {
t.Run("rv iterator", func(t *testing.T) {
t.Parallel()
rv := newBatchRV()
rv := newBulkRV()
v0 := rv.next(&unstructured.Unstructured{})
v1 := rv.next(&unstructured.Unstructured{})
v2 := rv.next(&unstructured.Unstructured{})

@ -29,7 +29,7 @@ func newNotifier(b *backend) (eventNotifier, error) {
watchBufferSize: b.watchBufferSize,
log: b.log,
tracer: b.tracer,
batchLock: b.batchLock,
bulkLock: b.bulkLock,
listLatestRVs: b.listLatestRVs,
historyPoll: func(ctx context.Context, grp string, res string, since int64) ([]*historyPollResponse, error) {
var records []*historyPollResponse

@ -5,17 +5,18 @@ import (
"fmt"
"time"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
"go.opentelemetry.io/otel/trace"
)
var (
// Validation errors.
errHistoryPollRequired = fmt.Errorf("historyPoll is required")
errListLatestRVsRequired = fmt.Errorf("listLatestRVs is required")
errBatchLockRequired = fmt.Errorf("batchLock is required")
errBulkLockRequired = fmt.Errorf("bulkLock is required")
errTracerRequired = fmt.Errorf("tracer is required")
errLogRequired = fmt.Errorf("log is required")
errInvalidWatchBufferSize = fmt.Errorf("watchBufferSize must be greater than 0")
@ -33,7 +34,7 @@ type pollingNotifier struct {
log log.Logger
tracer trace.Tracer
batchLock *batchLock
bulkLock *bulkLock
listLatestRVs func(ctx context.Context) (groupResourceRV, error)
historyPoll func(ctx context.Context, grp string, res string, since int64) ([]*historyPollResponse, error)
@ -48,7 +49,7 @@ type pollingNotifierConfig struct {
log log.Logger
tracer trace.Tracer
batchLock *batchLock
bulkLock *bulkLock
listLatestRVs func(ctx context.Context) (groupResourceRV, error)
historyPoll func(ctx context.Context, grp string, res string, since int64) ([]*historyPollResponse, error)
@ -62,8 +63,8 @@ func (cfg *pollingNotifierConfig) validate() error {
if cfg.listLatestRVs == nil {
return errListLatestRVsRequired
}
if cfg.batchLock == nil {
return errBatchLockRequired
if cfg.bulkLock == nil {
return errBulkLockRequired
}
if cfg.tracer == nil {
return errTracerRequired
@ -96,7 +97,7 @@ func newPollingNotifier(cfg *pollingNotifierConfig) (*pollingNotifier, error) {
watchBufferSize: cfg.watchBufferSize,
log: cfg.log,
tracer: cfg.tracer,
batchLock: cfg.batchLock,
bulkLock: cfg.bulkLock,
listLatestRVs: cfg.listLatestRVs,
historyPoll: cfg.historyPoll,
done: cfg.done,

@ -5,11 +5,12 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/trace/noop"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/sql/sqltemplate"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/trace/noop"
)
func TestPollingNotifierConfig(t *testing.T) {
@ -27,7 +28,7 @@ func TestPollingNotifierConfig(t *testing.T) {
return nil, nil
},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
log: log.NewNopLogger(),
watchBufferSize: 10,
@ -41,7 +42,7 @@ func TestPollingNotifierConfig(t *testing.T) {
name: "missing historyPoll",
config: &pollingNotifierConfig{
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
log: log.NewNopLogger(),
watchBufferSize: 10,
@ -57,7 +58,7 @@ func TestPollingNotifierConfig(t *testing.T) {
historyPoll: func(ctx context.Context, grp string, res string, since int64) ([]*historyPollResponse, error) {
return nil, nil
},
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
log: log.NewNopLogger(),
watchBufferSize: 10,
@ -68,7 +69,7 @@ func TestPollingNotifierConfig(t *testing.T) {
expectedErr: errListLatestRVsRequired,
},
{
name: "missing batchLock",
name: "missing bulkLock",
config: &pollingNotifierConfig{
historyPoll: func(ctx context.Context, grp string, res string, since int64) ([]*historyPollResponse, error) {
return nil, nil
@ -81,7 +82,7 @@ func TestPollingNotifierConfig(t *testing.T) {
done: make(chan struct{}),
dialect: sqltemplate.SQLite,
},
expectedErr: errBatchLockRequired,
expectedErr: errBulkLockRequired,
},
{
name: "missing tracer",
@ -90,7 +91,7 @@ func TestPollingNotifierConfig(t *testing.T) {
return nil, nil
},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
log: log.NewNopLogger(),
watchBufferSize: 10,
pollingInterval: time.Second,
@ -106,7 +107,7 @@ func TestPollingNotifierConfig(t *testing.T) {
return nil, nil
},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
watchBufferSize: 10,
pollingInterval: time.Second,
@ -122,7 +123,7 @@ func TestPollingNotifierConfig(t *testing.T) {
return nil, nil
},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
log: log.NewNopLogger(),
watchBufferSize: 0,
@ -139,7 +140,7 @@ func TestPollingNotifierConfig(t *testing.T) {
return nil, nil
},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
log: log.NewNopLogger(),
watchBufferSize: 10,
@ -156,7 +157,7 @@ func TestPollingNotifierConfig(t *testing.T) {
return nil, nil
},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
log: log.NewNopLogger(),
watchBufferSize: 10,
@ -172,7 +173,7 @@ func TestPollingNotifierConfig(t *testing.T) {
return nil, nil
},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
batchLock: &batchLock{},
bulkLock: &bulkLock{},
tracer: noop.NewTracerProvider().Tracer("test"),
log: log.NewNopLogger(),
watchBufferSize: 10,
@ -244,7 +245,7 @@ func TestPollingNotifier(t *testing.T) {
watchBufferSize: 10,
log: log.NewNopLogger(),
tracer: noop.NewTracerProvider().Tracer("test"),
batchLock: &batchLock{},
bulkLock: &bulkLock{},
listLatestRVs: listLatestRVs,
historyPoll: historyPoll,
done: done,
@ -298,7 +299,7 @@ func TestPollingNotifier(t *testing.T) {
watchBufferSize: 10,
log: log.NewNopLogger(),
tracer: noop.NewTracerProvider().Tracer("test"),
batchLock: &batchLock{},
bulkLock: &bulkLock{},
listLatestRVs: listLatestRVs,
historyPoll: historyPoll,
done: done,
@ -332,7 +333,7 @@ func TestPollingNotifier(t *testing.T) {
watchBufferSize: 10,
log: log.NewNopLogger(),
tracer: noop.NewTracerProvider().Tracer("test"),
batchLock: &batchLock{},
bulkLock: &bulkLock{},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
historyPoll: func(ctx context.Context, grp string, res string, since int64) ([]*historyPollResponse, error) {
return nil, nil
@ -369,7 +370,7 @@ func TestPollingNotifier(t *testing.T) {
watchBufferSize: 10,
log: log.NewNopLogger(),
tracer: noop.NewTracerProvider().Tracer("test"),
batchLock: &batchLock{},
bulkLock: &bulkLock{},
listLatestRVs: func(ctx context.Context) (groupResourceRV, error) { return nil, nil },
historyPoll: func(ctx context.Context, grp string, res string, since int64) ([]*historyPollResponse, error) {
return nil, nil

@ -128,7 +128,7 @@ func (s *service) start(ctx context.Context) error {
srv := s.handler.GetServer()
resource.RegisterResourceStoreServer(srv, server)
resource.RegisterBatchStoreServer(srv, server)
resource.RegisterBulkStoreServer(srv, server)
resource.RegisterResourceIndexServer(srv, server)
resource.RegisterRepositoryIndexServer(srv, server)
resource.RegisterBlobStoreServer(srv, server)

Loading…
Cancel
Save