diff --git a/pkg/engine/internal/proto/wirepb/wirepb.pb.go b/pkg/engine/internal/proto/wirepb/wirepb.pb.go new file mode 100644 index 0000000000..38f9db44b1 --- /dev/null +++ b/pkg/engine/internal/proto/wirepb/wirepb.pb.go @@ -0,0 +1,6788 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/engine/internal/proto/wirepb/wirepb.proto + +package wirepb + +import ( + bytes "bytes" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + _ "github.com/gogo/protobuf/types" + _ "github.com/grafana/loki/v3/pkg/engine/internal/proto/expressionpb" + physicalpb "github.com/grafana/loki/v3/pkg/engine/internal/proto/physicalpb" + _ "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid" + github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid" + _ "google.golang.org/protobuf/types/known/durationpb" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// TaskState represents the execution state of a task. +type TaskState int32 + +const ( + TASK_STATE_INVALID TaskState = 0 + TASK_STATE_CREATED TaskState = 1 + TASK_STATE_PENDING TaskState = 2 + TASK_STATE_RUNNING TaskState = 3 + TASK_STATE_COMPLETED TaskState = 4 + TASK_STATE_CANCELLED TaskState = 5 + TASK_STATE_FAILED TaskState = 6 +) + +var TaskState_name = map[int32]string{ + 0: "TASK_STATE_INVALID", + 1: "TASK_STATE_CREATED", + 2: "TASK_STATE_PENDING", + 3: "TASK_STATE_RUNNING", + 4: "TASK_STATE_COMPLETED", + 5: "TASK_STATE_CANCELLED", + 6: "TASK_STATE_FAILED", +} + +var TaskState_value = map[string]int32{ + "TASK_STATE_INVALID": 0, + "TASK_STATE_CREATED": 1, + "TASK_STATE_PENDING": 2, + "TASK_STATE_RUNNING": 3, + "TASK_STATE_COMPLETED": 4, + "TASK_STATE_CANCELLED": 5, + "TASK_STATE_FAILED": 6, +} + +func (TaskState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{0} +} + +// StreamState represents the state of a stream. 
+type StreamState int32 + +const ( + STREAM_STATE_INVALID StreamState = 0 + STREAM_STATE_IDLE StreamState = 1 + STREAM_STATE_OPEN StreamState = 2 + STREAM_STATE_BLOCKED StreamState = 3 + STREAM_STATE_CLOSED StreamState = 4 +) + +var StreamState_name = map[int32]string{ + 0: "STREAM_STATE_INVALID", + 1: "STREAM_STATE_IDLE", + 2: "STREAM_STATE_OPEN", + 3: "STREAM_STATE_BLOCKED", + 4: "STREAM_STATE_CLOSED", +} + +var StreamState_value = map[string]int32{ + "STREAM_STATE_INVALID": 0, + "STREAM_STATE_IDLE": 1, + "STREAM_STATE_OPEN": 2, + "STREAM_STATE_BLOCKED": 3, + "STREAM_STATE_CLOSED": 4, +} + +func (StreamState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{1} +} + +type Frame struct { + // Types that are valid to be assigned to Kind: + // *Frame_Ack + // *Frame_Nack + // *Frame_Discard + // *Frame_Message + Kind isFrame_Kind `protobuf_oneof:"kind"` +} + +func (m *Frame) Reset() { *m = Frame{} } +func (*Frame) ProtoMessage() {} +func (*Frame) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{0} +} +func (m *Frame) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Frame.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Frame) XXX_Merge(src proto.Message) { + xxx_messageInfo_Frame.Merge(m, src) +} +func (m *Frame) XXX_Size() int { + return m.Size() +} +func (m *Frame) XXX_DiscardUnknown() { + xxx_messageInfo_Frame.DiscardUnknown(m) +} + +var xxx_messageInfo_Frame proto.InternalMessageInfo + +type isFrame_Kind interface { + isFrame_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type Frame_Ack struct { + Ack *AckFrame `protobuf:"bytes,1,opt,name=ack,proto3,oneof"` +} +type Frame_Nack struct { + Nack *NackFrame `protobuf:"bytes,2,opt,name=nack,proto3,oneof"` +} +type Frame_Discard struct { + Discard *DiscardFrame `protobuf:"bytes,3,opt,name=discard,proto3,oneof"` +} +type Frame_Message struct { + Message *MessageFrame `protobuf:"bytes,4,opt,name=message,proto3,oneof"` +} + +func (*Frame_Ack) isFrame_Kind() {} +func (*Frame_Nack) isFrame_Kind() {} +func (*Frame_Discard) isFrame_Kind() {} +func (*Frame_Message) isFrame_Kind() {} + +func (m *Frame) GetKind() isFrame_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Frame) GetAck() *AckFrame { + if x, ok := m.GetKind().(*Frame_Ack); ok { + return x.Ack + } + return nil +} + +func (m *Frame) GetNack() *NackFrame { + if x, ok := m.GetKind().(*Frame_Nack); ok { + return x.Nack + } + return nil +} + +func (m *Frame) GetDiscard() *DiscardFrame { + if x, ok := m.GetKind().(*Frame_Discard); ok { + return x.Discard + } + return nil +} + +func (m *Frame) GetMessage() *MessageFrame { + if x, ok := m.GetKind().(*Frame_Message); ok { + return x.Message + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Frame) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Frame_Ack)(nil), + (*Frame_Nack)(nil), + (*Frame_Discard)(nil), + (*Frame_Message)(nil), + } +} + +type AckFrame struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *AckFrame) Reset() { *m = AckFrame{} } +func (*AckFrame) ProtoMessage() {} +func (*AckFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{1} +} +func (m *AckFrame) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AckFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AckFrame.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AckFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_AckFrame.Merge(m, src) +} +func (m *AckFrame) XXX_Size() int { + return m.Size() +} +func (m *AckFrame) XXX_DiscardUnknown() { + xxx_messageInfo_AckFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_AckFrame proto.InternalMessageInfo + +func (m *AckFrame) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +type NackFrame struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *NackFrame) Reset() { *m = NackFrame{} } +func (*NackFrame) ProtoMessage() {} +func (*NackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{2} +} +func (m *NackFrame) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NackFrame.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_NackFrame.Merge(m, src) +} +func (m *NackFrame) XXX_Size() int { + return m.Size() +} +func (m *NackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_NackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_NackFrame proto.InternalMessageInfo + +func (m *NackFrame) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *NackFrame) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type DiscardFrame struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *DiscardFrame) Reset() { *m = DiscardFrame{} } +func (*DiscardFrame) ProtoMessage() {} +func (*DiscardFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{3} +} +func (m *DiscardFrame) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DiscardFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DiscardFrame.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DiscardFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_DiscardFrame.Merge(m, src) +} +func (m *DiscardFrame) XXX_Size() int { + return m.Size() +} +func (m *DiscardFrame) XXX_DiscardUnknown() { + xxx_messageInfo_DiscardFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_DiscardFrame proto.InternalMessageInfo + +func (m *DiscardFrame) GetId() uint64 { + if m 
!= nil { + return m.Id + } + return 0 +} + +type MessageFrame struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to Kind: + // *MessageFrame_WorkerReady + // *MessageFrame_TaskAssign + // *MessageFrame_TaskCancel + // *MessageFrame_TaskFlag + // *MessageFrame_TaskStatus + // *MessageFrame_StreamBind + // *MessageFrame_StreamData + // *MessageFrame_StreamStatus + Kind isMessageFrame_Kind `protobuf_oneof:"kind"` +} + +func (m *MessageFrame) Reset() { *m = MessageFrame{} } +func (*MessageFrame) ProtoMessage() {} +func (*MessageFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{4} +} +func (m *MessageFrame) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MessageFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MessageFrame.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MessageFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageFrame.Merge(m, src) +} +func (m *MessageFrame) XXX_Size() int { + return m.Size() +} +func (m *MessageFrame) XXX_DiscardUnknown() { + xxx_messageInfo_MessageFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageFrame proto.InternalMessageInfo + +type isMessageFrame_Kind interface { + isMessageFrame_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type MessageFrame_WorkerReady struct { + WorkerReady *WorkerReadyMessage `protobuf:"bytes,2,opt,name=worker_ready,json=workerReady,proto3,oneof"` +} +type MessageFrame_TaskAssign struct { + TaskAssign *TaskAssignMessage `protobuf:"bytes,3,opt,name=task_assign,json=taskAssign,proto3,oneof"` +} +type MessageFrame_TaskCancel struct { + TaskCancel *TaskCancelMessage `protobuf:"bytes,4,opt,name=task_cancel,json=taskCancel,proto3,oneof"` +} +type MessageFrame_TaskFlag struct { + TaskFlag *TaskFlagMessage `protobuf:"bytes,5,opt,name=task_flag,json=taskFlag,proto3,oneof"` +} +type MessageFrame_TaskStatus struct { + TaskStatus *TaskStatusMessage `protobuf:"bytes,6,opt,name=task_status,json=taskStatus,proto3,oneof"` +} +type MessageFrame_StreamBind struct { + StreamBind *StreamBindMessage `protobuf:"bytes,7,opt,name=stream_bind,json=streamBind,proto3,oneof"` +} +type MessageFrame_StreamData struct { + StreamData *StreamDataMessage `protobuf:"bytes,8,opt,name=stream_data,json=streamData,proto3,oneof"` +} +type MessageFrame_StreamStatus struct { + StreamStatus *StreamStatusMessage `protobuf:"bytes,9,opt,name=stream_status,json=streamStatus,proto3,oneof"` +} + +func (*MessageFrame_WorkerReady) isMessageFrame_Kind() {} +func (*MessageFrame_TaskAssign) isMessageFrame_Kind() {} +func (*MessageFrame_TaskCancel) isMessageFrame_Kind() {} +func (*MessageFrame_TaskFlag) isMessageFrame_Kind() {} +func (*MessageFrame_TaskStatus) isMessageFrame_Kind() {} +func (*MessageFrame_StreamBind) isMessageFrame_Kind() {} +func (*MessageFrame_StreamData) isMessageFrame_Kind() {} +func (*MessageFrame_StreamStatus) isMessageFrame_Kind() {} + +func (m *MessageFrame) GetKind() isMessageFrame_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *MessageFrame) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MessageFrame) GetWorkerReady() *WorkerReadyMessage { + if x, ok := m.GetKind().(*MessageFrame_WorkerReady); ok { + return x.WorkerReady + } + return nil +} + 
+func (m *MessageFrame) GetTaskAssign() *TaskAssignMessage { + if x, ok := m.GetKind().(*MessageFrame_TaskAssign); ok { + return x.TaskAssign + } + return nil +} + +func (m *MessageFrame) GetTaskCancel() *TaskCancelMessage { + if x, ok := m.GetKind().(*MessageFrame_TaskCancel); ok { + return x.TaskCancel + } + return nil +} + +func (m *MessageFrame) GetTaskFlag() *TaskFlagMessage { + if x, ok := m.GetKind().(*MessageFrame_TaskFlag); ok { + return x.TaskFlag + } + return nil +} + +func (m *MessageFrame) GetTaskStatus() *TaskStatusMessage { + if x, ok := m.GetKind().(*MessageFrame_TaskStatus); ok { + return x.TaskStatus + } + return nil +} + +func (m *MessageFrame) GetStreamBind() *StreamBindMessage { + if x, ok := m.GetKind().(*MessageFrame_StreamBind); ok { + return x.StreamBind + } + return nil +} + +func (m *MessageFrame) GetStreamData() *StreamDataMessage { + if x, ok := m.GetKind().(*MessageFrame_StreamData); ok { + return x.StreamData + } + return nil +} + +func (m *MessageFrame) GetStreamStatus() *StreamStatusMessage { + if x, ok := m.GetKind().(*MessageFrame_StreamStatus); ok { + return x.StreamStatus + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*MessageFrame) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*MessageFrame_WorkerReady)(nil), + (*MessageFrame_TaskAssign)(nil), + (*MessageFrame_TaskCancel)(nil), + (*MessageFrame_TaskFlag)(nil), + (*MessageFrame_TaskStatus)(nil), + (*MessageFrame_StreamBind)(nil), + (*MessageFrame_StreamData)(nil), + (*MessageFrame_StreamStatus)(nil), + } +} + +type WorkerReadyMessage struct { +} + +func (m *WorkerReadyMessage) Reset() { *m = WorkerReadyMessage{} } +func (*WorkerReadyMessage) ProtoMessage() {} +func (*WorkerReadyMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{5} +} +func (m *WorkerReadyMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkerReadyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkerReadyMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkerReadyMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkerReadyMessage.Merge(m, src) +} +func (m *WorkerReadyMessage) XXX_Size() int { + return m.Size() +} +func (m *WorkerReadyMessage) XXX_DiscardUnknown() { + xxx_messageInfo_WorkerReadyMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkerReadyMessage proto.InternalMessageInfo + +// TaskAssignMessage is sent by the scheduler to a worker when there is a +// task to run. +type TaskAssignMessage struct { + Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + // StreamStates holds the most recent state of each stream that the task + // reads from. The key is the stream ULID. 
+ StreamStates map[string]StreamState `protobuf:"bytes,2,rep,name=stream_states,json=streamStates,proto3" json:"stream_states,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=loki.wire.StreamState"` +} + +func (m *TaskAssignMessage) Reset() { *m = TaskAssignMessage{} } +func (*TaskAssignMessage) ProtoMessage() {} +func (*TaskAssignMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{6} +} +func (m *TaskAssignMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskAssignMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskAssignMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskAssignMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskAssignMessage.Merge(m, src) +} +func (m *TaskAssignMessage) XXX_Size() int { + return m.Size() +} +func (m *TaskAssignMessage) XXX_DiscardUnknown() { + xxx_messageInfo_TaskAssignMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskAssignMessage proto.InternalMessageInfo + +func (m *TaskAssignMessage) GetTask() *Task { + if m != nil { + return m.Task + } + return nil +} + +func (m *TaskAssignMessage) GetStreamStates() map[string]StreamState { + if m != nil { + return m.StreamStates + } + return nil +} + +// TaskCancelMessage is sent by the scheduler to a worker when a task is no +// longer needed. +type TaskCancelMessage struct { + Id github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=id,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"id"` +} + +func (m *TaskCancelMessage) Reset() { *m = TaskCancelMessage{} } +func (*TaskCancelMessage) ProtoMessage() {} +func (*TaskCancelMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{7} +} +func (m *TaskCancelMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskCancelMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskCancelMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskCancelMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskCancelMessage.Merge(m, src) +} +func (m *TaskCancelMessage) XXX_Size() int { + return m.Size() +} +func (m *TaskCancelMessage) XXX_DiscardUnknown() { + xxx_messageInfo_TaskCancelMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskCancelMessage proto.InternalMessageInfo + +// TaskFlagMessage is sent by the scheduler to update the runtime flags of a task. +type TaskFlagMessage struct { + Id github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=id,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"id"` + // Interruptible indicates that tasks blocked on writing or reading to a + // stream can be paused, and that worker can accept new tasks to run. 
+ Interruptible bool `protobuf:"varint,2,opt,name=interruptible,proto3" json:"interruptible,omitempty"` +} + +func (m *TaskFlagMessage) Reset() { *m = TaskFlagMessage{} } +func (*TaskFlagMessage) ProtoMessage() {} +func (*TaskFlagMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{8} +} +func (m *TaskFlagMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskFlagMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskFlagMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskFlagMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskFlagMessage.Merge(m, src) +} +func (m *TaskFlagMessage) XXX_Size() int { + return m.Size() +} +func (m *TaskFlagMessage) XXX_DiscardUnknown() { + xxx_messageInfo_TaskFlagMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskFlagMessage proto.InternalMessageInfo + +func (m *TaskFlagMessage) GetInterruptible() bool { + if m != nil { + return m.Interruptible + } + return false +} + +// TaskStatusMessage is sent by the worker to the scheduler to inform the +// scheduler of the current status of a task. +type TaskStatusMessage struct { + Id github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=id,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"id"` + Status TaskStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status"` +} + +func (m *TaskStatusMessage) Reset() { *m = TaskStatusMessage{} } +func (*TaskStatusMessage) ProtoMessage() {} +func (*TaskStatusMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{9} +} +func (m *TaskStatusMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskStatusMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskStatusMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskStatusMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatusMessage.Merge(m, src) +} +func (m *TaskStatusMessage) XXX_Size() int { + return m.Size() +} +func (m *TaskStatusMessage) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatusMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatusMessage proto.InternalMessageInfo + +func (m *TaskStatusMessage) GetStatus() TaskStatus { + if m != nil { + return m.Status + } + return TaskStatus{} +} + +// StreamBindMessage is sent by the scheduler to a worker to inform the +// worker about the location of a stream receiver. +type StreamBindMessage struct { + StreamId github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"stream_id"` + // Receiver is the network address of the stream receiver. 
+ Receiver string `protobuf:"bytes,2,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *StreamBindMessage) Reset() { *m = StreamBindMessage{} } +func (*StreamBindMessage) ProtoMessage() {} +func (*StreamBindMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{10} +} +func (m *StreamBindMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamBindMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamBindMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamBindMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamBindMessage.Merge(m, src) +} +func (m *StreamBindMessage) XXX_Size() int { + return m.Size() +} +func (m *StreamBindMessage) XXX_DiscardUnknown() { + xxx_messageInfo_StreamBindMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamBindMessage proto.InternalMessageInfo + +func (m *StreamBindMessage) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +// StreamDataMessage is sent by a worker to a stream receiver to provide +// payload data for a stream. +type StreamDataMessage struct { + StreamId github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"stream_id"` + // Data is the serialized Arrow record payload. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *StreamDataMessage) Reset() { *m = StreamDataMessage{} } +func (*StreamDataMessage) ProtoMessage() {} +func (*StreamDataMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{11} +} +func (m *StreamDataMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamDataMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamDataMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamDataMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamDataMessage.Merge(m, src) +} +func (m *StreamDataMessage) XXX_Size() int { + return m.Size() +} +func (m *StreamDataMessage) XXX_DiscardUnknown() { + xxx_messageInfo_StreamDataMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamDataMessage proto.InternalMessageInfo + +func (m *StreamDataMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// StreamStatusMessage communicates the status of the sending side of a stream. 
+type StreamStatusMessage struct { + StreamId github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"stream_id"` + State StreamState `protobuf:"varint,2,opt,name=state,proto3,enum=loki.wire.StreamState" json:"state,omitempty"` +} + +func (m *StreamStatusMessage) Reset() { *m = StreamStatusMessage{} } +func (*StreamStatusMessage) ProtoMessage() {} +func (*StreamStatusMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{12} +} +func (m *StreamStatusMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamStatusMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamStatusMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamStatusMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamStatusMessage.Merge(m, src) +} +func (m *StreamStatusMessage) XXX_Size() int { + return m.Size() +} +func (m *StreamStatusMessage) XXX_DiscardUnknown() { + xxx_messageInfo_StreamStatusMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamStatusMessage proto.InternalMessageInfo + +func (m *StreamStatusMessage) GetState() StreamState { + if m != nil { + return m.State + } + return STREAM_STATE_INVALID +} + +// Task is a single unit of work within a workflow. +type Task struct { + Ulid github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=ulid,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"ulid"` + TenantId string `protobuf:"bytes,2,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + // Fragment is the local physical plan that this task represents. + Fragment *physicalpb.Plan `protobuf:"bytes,3,opt,name=fragment,proto3" json:"fragment,omitempty"` + // Sources defines which streams physical nodes read from. + // The key is the node ID string representation. + Sources map[string]*StreamList `protobuf:"bytes,4,rep,name=sources,proto3" json:"sources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Sinks defines which streams physical nodes write to. + // The key is the node ID string representation. 
+ Sinks map[string]*StreamList `protobuf:"bytes,5,rep,name=sinks,proto3" json:"sinks,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Task) Reset() { *m = Task{} } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{13} +} +func (m *Task) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Task.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Task) XXX_Merge(src proto.Message) { + xxx_messageInfo_Task.Merge(m, src) +} +func (m *Task) XXX_Size() int { + return m.Size() +} +func (m *Task) XXX_DiscardUnknown() { + xxx_messageInfo_Task.DiscardUnknown(m) +} + +var xxx_messageInfo_Task proto.InternalMessageInfo + +func (m *Task) GetTenantId() string { + if m != nil { + return m.TenantId + } + return "" +} + +func (m *Task) GetFragment() *physicalpb.Plan { + if m != nil { + return m.Fragment + } + return nil +} + +func (m *Task) GetSources() map[string]*StreamList { + if m != nil { + return m.Sources + } + return nil +} + +func (m *Task) GetSinks() map[string]*StreamList { + if m != nil { + return m.Sinks + } + return nil +} + +// StreamList is a list of streams, used in Task's sources and sinks maps. +type StreamList struct { + Streams []*Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"` +} + +func (m *StreamList) Reset() { *m = StreamList{} } +func (*StreamList) ProtoMessage() {} +func (*StreamList) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{14} +} +func (m *StreamList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamList.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamList.Merge(m, src) +} +func (m *StreamList) XXX_Size() int { + return m.Size() +} +func (m *StreamList) XXX_DiscardUnknown() { + xxx_messageInfo_StreamList.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamList proto.InternalMessageInfo + +func (m *StreamList) GetStreams() []*Stream { + if m != nil { + return m.Streams + } + return nil +} + +// Stream is an abstract representation of how data flows across task boundaries. 
+type Stream struct { + Ulid github_com_grafana_loki_v3_pkg_engine_internal_proto_ulid.ULID `protobuf:"bytes,1,opt,name=ulid,proto3,customtype=github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID" json:"ulid"` + TenantId string `protobuf:"bytes,2,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` +} + +func (m *Stream) Reset() { *m = Stream{} } +func (*Stream) ProtoMessage() {} +func (*Stream) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{15} +} +func (m *Stream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Stream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Stream) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stream.Merge(m, src) +} +func (m *Stream) XXX_Size() int { + return m.Size() +} +func (m *Stream) XXX_DiscardUnknown() { + xxx_messageInfo_Stream.DiscardUnknown(m) +} + +var xxx_messageInfo_Stream proto.InternalMessageInfo + +func (m *Stream) GetTenantId() string { + if m != nil { + return m.TenantId + } + return "" +} + +// TaskStatus represents the current status of a task. +type TaskStatus struct { + State TaskState `protobuf:"varint,1,opt,name=state,proto3,enum=loki.wire.TaskState" json:"state,omitempty"` + // Error is set only when state is TASK_STATE_FAILED. + Error *TaskError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (m *TaskStatus) Reset() { *m = TaskStatus{} } +func (*TaskStatus) ProtoMessage() {} +func (*TaskStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{16} +} +func (m *TaskStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskStatus.Merge(m, src) +} +func (m *TaskStatus) XXX_Size() int { + return m.Size() +} +func (m *TaskStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TaskStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskStatus proto.InternalMessageInfo + +func (m *TaskStatus) GetState() TaskState { + if m != nil { + return m.State + } + return TASK_STATE_INVALID +} + +func (m *TaskStatus) GetError() *TaskError { + if m != nil { + return m.Error + } + return nil +} + +type TaskError struct { + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *TaskError) Reset() { *m = TaskError{} } +func (*TaskError) ProtoMessage() {} +func (*TaskError) Descriptor() ([]byte, []int) { + return fileDescriptor_9956cb67d4b0d2a4, []int{17} +} +func (m *TaskError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TaskError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TaskError.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TaskError) XXX_Merge(src proto.Message) { + xxx_messageInfo_TaskError.Merge(m, src) +} +func (m *TaskError) XXX_Size() int { 
+ return m.Size() +} +func (m *TaskError) XXX_DiscardUnknown() { + xxx_messageInfo_TaskError.DiscardUnknown(m) +} + +var xxx_messageInfo_TaskError proto.InternalMessageInfo + +func (m *TaskError) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterEnum("loki.wire.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("loki.wire.StreamState", StreamState_name, StreamState_value) + proto.RegisterType((*Frame)(nil), "loki.wire.Frame") + proto.RegisterType((*AckFrame)(nil), "loki.wire.AckFrame") + proto.RegisterType((*NackFrame)(nil), "loki.wire.NackFrame") + proto.RegisterType((*DiscardFrame)(nil), "loki.wire.DiscardFrame") + proto.RegisterType((*MessageFrame)(nil), "loki.wire.MessageFrame") + proto.RegisterType((*WorkerReadyMessage)(nil), "loki.wire.WorkerReadyMessage") + proto.RegisterType((*TaskAssignMessage)(nil), "loki.wire.TaskAssignMessage") + proto.RegisterMapType((map[string]StreamState)(nil), "loki.wire.TaskAssignMessage.StreamStatesEntry") + proto.RegisterType((*TaskCancelMessage)(nil), "loki.wire.TaskCancelMessage") + proto.RegisterType((*TaskFlagMessage)(nil), "loki.wire.TaskFlagMessage") + proto.RegisterType((*TaskStatusMessage)(nil), "loki.wire.TaskStatusMessage") + proto.RegisterType((*StreamBindMessage)(nil), "loki.wire.StreamBindMessage") + proto.RegisterType((*StreamDataMessage)(nil), "loki.wire.StreamDataMessage") + proto.RegisterType((*StreamStatusMessage)(nil), "loki.wire.StreamStatusMessage") + proto.RegisterType((*Task)(nil), "loki.wire.Task") + proto.RegisterMapType((map[string]*StreamList)(nil), "loki.wire.Task.SinksEntry") + proto.RegisterMapType((map[string]*StreamList)(nil), "loki.wire.Task.SourcesEntry") + proto.RegisterType((*StreamList)(nil), "loki.wire.StreamList") + proto.RegisterType((*Stream)(nil), "loki.wire.Stream") + proto.RegisterType((*TaskStatus)(nil), "loki.wire.TaskStatus") + proto.RegisterType((*TaskError)(nil), "loki.wire.TaskError") +} + +func init() { + proto.RegisterFile("pkg/engine/internal/proto/wirepb/wirepb.proto", fileDescriptor_9956cb67d4b0d2a4) +} + +var fileDescriptor_9956cb67d4b0d2a4 = []byte{ + // 1229 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xc1, 0x6f, 0xdb, 0x54, + 0x18, 0x8f, 0x13, 0x27, 0x4d, 0xbe, 0x64, 0x5b, 0xfa, 0x96, 0x6d, 0x56, 0x00, 0x6f, 0x32, 0x48, + 0x8c, 0x6e, 0x4b, 0xd8, 0x2a, 0x10, 0x03, 0x09, 0x94, 0x34, 0x2e, 0x8b, 0x96, 0xa6, 0xc5, 0xc9, + 0x98, 0xc4, 0xa5, 0x7a, 0x89, 0x5f, 0x3d, 0x2b, 0x89, 0x1d, 0xfc, 0x9c, 0x96, 0x4a, 0x1c, 0x38, + 0x70, 0xe0, 0xc8, 0x99, 0x3b, 0xd2, 0x2e, 0x1c, 0xb8, 0x20, 0x71, 0xe4, 0xb6, 0x13, 0xea, 0x8d, + 0x89, 0xc3, 0x44, 0x53, 0x09, 0x71, 0xdc, 0x9f, 0x80, 0xde, 0xb3, 0xe3, 0x38, 0x76, 0x3a, 0x24, + 0x84, 0x3a, 0x2e, 0xad, 0xfd, 0xfb, 0xfd, 0xbe, 0xdf, 0xfb, 0xde, 0xe7, 0xef, 0x7d, 0xb1, 0xe1, + 0xd6, 0x78, 0x60, 0x54, 0x89, 0x65, 0x98, 0x16, 0xa9, 0x9a, 0x96, 0x4b, 0x1c, 0x0b, 0x0f, 0xab, + 0x63, 0xc7, 0x76, 0xed, 0xea, 0x81, 0xe9, 0x90, 0x71, 0xcf, 0xff, 0x57, 0xe1, 0x18, 0xca, 0x0d, + 0xed, 0x81, 0x59, 0x61, 0x50, 0xb9, 0x64, 0xd8, 0x86, 0xed, 0x29, 0xd9, 0x95, 0x27, 0x28, 0xcb, + 0x86, 0x6d, 0x1b, 0x43, 0xe2, 0x59, 0xf4, 0x26, 0x7b, 0x55, 0x7d, 0xe2, 0x60, 0xd7, 0xb4, 0x2d, + 0x9f, 0xbf, 0x1a, 0xe5, 0x5d, 0x73, 0x44, 0xa8, 0x8b, 0x47, 0x63, 0x5f, 0x70, 0xf7, 0xf4, 0x84, + 0xc8, 0x17, 0x63, 0x87, 0x50, 0x6a, 0xda, 0xd6, 0xb8, 0xb7, 0x70, 0xe3, 0x87, 0xbe, 0x73, 0x7a, + 0xe8, 0xf8, 0xd1, 0x21, 0x35, 0xfb, 0x78, 0x38, 0xee, 0x85, 0x2e, 
0xfd, 0xb0, 0xb7, 0x4e, 0x0f, + 0x9b, 0x0c, 0x4d, 0x9d, 0xff, 0xf1, 0xa4, 0xca, 0xaf, 0x02, 0xa4, 0x37, 0x1d, 0x3c, 0x22, 0xe8, + 0x4d, 0x48, 0xe1, 0xfe, 0x40, 0x12, 0xae, 0x09, 0xd7, 0xf3, 0x77, 0x2e, 0x56, 0x82, 0xb2, 0x54, + 0x6a, 0xfd, 0x01, 0x57, 0xdc, 0x4b, 0x68, 0x4c, 0x81, 0xd6, 0x40, 0xb4, 0x98, 0x32, 0xc9, 0x95, + 0xa5, 0x90, 0xb2, 0x8d, 0xe7, 0x52, 0xae, 0x41, 0xeb, 0xb0, 0xa2, 0x9b, 0xb4, 0x8f, 0x1d, 0x5d, + 0x4a, 0x71, 0xf9, 0x95, 0x90, 0xbc, 0xe1, 0x31, 0xb3, 0x88, 0x99, 0x92, 0x05, 0x8d, 0x08, 0xa5, + 0xd8, 0x20, 0x92, 0x18, 0x0b, 0xda, 0xf2, 0x98, 0x20, 0xc8, 0x57, 0xd6, 0x33, 0x20, 0x0e, 0x4c, + 0x4b, 0x57, 0xca, 0x90, 0x9d, 0x25, 0x8c, 0xce, 0x43, 0xd2, 0xd4, 0xf9, 0x8e, 0x44, 0x2d, 0x69, + 0xea, 0xca, 0x6d, 0xc8, 0x05, 0x29, 0x46, 0x49, 0x54, 0x82, 0x34, 0x71, 0x1c, 0xdb, 0xe1, 0xfb, + 0xca, 0x69, 0xde, 0x8d, 0x22, 0x43, 0x21, 0x9c, 0x66, 0xcc, 0xf2, 0x47, 0x11, 0x0a, 0xe1, 0x94, + 0x62, 0xb6, 0x75, 0x28, 0x1c, 0xd8, 0xce, 0x80, 0x38, 0xbb, 0x0e, 0xc1, 0xfa, 0xa1, 0x5f, 0xb5, + 0xd7, 0x42, 0x3b, 0x7a, 0xc8, 0x69, 0x8d, 0xb1, 0xbe, 0xd3, 0xbd, 0x84, 0x96, 0x3f, 0x98, 0xa3, + 0xe8, 0x23, 0xc8, 0xbb, 0x98, 0x0e, 0x76, 0x31, 0xa5, 0xa6, 0x61, 0xf9, 0x95, 0x7c, 0x35, 0x64, + 0xd1, 0xc5, 0x74, 0x50, 0xe3, 0xe4, 0xdc, 0x01, 0xdc, 0x00, 0x0c, 0x0c, 0xfa, 0xd8, 0xea, 0x93, + 0xa1, 0x5f, 0xd5, 0xa8, 0xc1, 0x06, 0x27, 0x23, 0x06, 0x1e, 0x88, 0xee, 0x42, 0x8e, 0x1b, 0xec, + 0x0d, 0xb1, 0x21, 0xa5, 0x79, 0x78, 0x39, 0x12, 0xbe, 0x39, 0xc4, 0xc6, 0x3c, 0x38, 0xeb, 0xfa, + 0x50, 0xb0, 0x36, 0x75, 0xb1, 0x3b, 0xa1, 0x52, 0x66, 0xe9, 0xda, 0x1d, 0x4e, 0x46, 0xd6, 0xf6, + 0x40, 0x66, 0x40, 0x5d, 0x87, 0xe0, 0xd1, 0x6e, 0xcf, 0xb4, 0x74, 0x69, 0x25, 0x66, 0xd0, 0xe1, + 0x6c, 0xdd, 0xb4, 0xf4, 0x90, 0x01, 0x0d, 0xc0, 0x90, 0x81, 0x8e, 0x5d, 0x2c, 0x65, 0x4f, 0x31, + 0x68, 0x60, 0x17, 0xc7, 0x0c, 0x18, 0x88, 0x54, 0x38, 0xe7, 0x1b, 0xf8, 0x9b, 0xc8, 0x71, 0x0b, + 0x39, 0x66, 0x11, 0xdd, 0x46, 0x81, 0x86, 0xe0, 0xa0, 0x45, 0x4b, 0x80, 0xe2, 0xcf, 0x5c, 0xf9, + 0x53, 0x80, 0xd5, 0xd8, 0x73, 0x44, 0xaf, 0x83, 0xc8, 0x4a, 0xe1, 0x1f, 0xcb, 0x0b, 0x91, 0xb2, + 0x69, 0x9c, 0x44, 0x9d, 0x85, 0xfc, 0x08, 0x95, 0x92, 0xd7, 0x52, 0xd7, 0xf3, 0x77, 0x2a, 0x2f, + 0xea, 0x90, 0x50, 0xc6, 0x84, 0xaa, 0x96, 0xeb, 0x1c, 0x86, 0xb3, 0x25, 0xb4, 0xfc, 0x10, 0x56, + 0x63, 0x12, 0x54, 0x84, 0xd4, 0x80, 0x1c, 0xf2, 0x6c, 0x72, 0x1a, 0xbb, 0x44, 0x37, 0x21, 0xbd, + 0x8f, 0x87, 0x13, 0xc2, 0x1b, 0xfb, 0xfc, 0x9d, 0xcb, 0x4b, 0x6b, 0x42, 0x34, 0x4f, 0xf4, 0x7e, + 0xf2, 0x3d, 0x41, 0xf9, 0xda, 0xdf, 0xe8, 0x42, 0xbf, 0x21, 0x3b, 0x38, 0x37, 0xec, 0xbc, 0xf3, + 0x09, 0xc5, 0x9d, 0xf6, 0x6f, 0x57, 0x76, 0xd8, 0xa4, 0x7a, 0xd0, 0x6a, 0x36, 0xea, 0x9b, 0x4f, + 0x9e, 0x5d, 0x4d, 0xfc, 0xfe, 0xec, 0xea, 0x87, 0x86, 0xe9, 0x3e, 0x9a, 0xf4, 0x2a, 0x7d, 0x7b, + 0x54, 0x35, 0x1c, 0xbc, 0x87, 0x2d, 0x5c, 0x65, 0xea, 0xea, 0xfe, 0x7a, 0xf5, 0xc5, 0xb3, 0xaf, + 0xc2, 0x7c, 0xf8, 0xc9, 0x7d, 0x2c, 0xc0, 0x85, 0x48, 0xdf, 0x9e, 0x79, 0x12, 0xe8, 0x0d, 0x38, + 0xc7, 0x79, 0x67, 0x32, 0x76, 0xcd, 0xde, 0xd0, 0xab, 0x62, 0x56, 0x5b, 0x04, 0x95, 0x9f, 0xfd, + 0x8a, 0x2d, 0xb4, 0xd7, 0xd9, 0x27, 0xbb, 0x0e, 0x19, 0xbf, 0xff, 0xbd, 0x21, 0x76, 0x69, 0xe9, + 0x21, 0xae, 0x8b, 0x6c, 0x49, 0xcd, 0x97, 0x2a, 0x3f, 0x08, 0xb3, 0x3e, 0x0a, 0x1d, 0x50, 0xf4, + 0x25, 0xe4, 0xfc, 0x8e, 0x3d, 0xbb, 0x2d, 0x64, 0xbd, 0x15, 0x9b, 0x3a, 0x2a, 0x43, 0xd6, 0x21, + 0x7d, 0x62, 0xee, 0x93, 0xd9, 0xb4, 0x0f, 0xee, 0x95, 0xef, 0x83, 0x7c, 0x43, 0xf3, 0xe0, 0x25, + 0xe7, 0x8b, 0x40, 0xe4, 0x93, 0x8b, 0xe5, 0x5a, 0xd0, 0xf8, 0xb5, 0xf2, 0x8b, 0x00, 0x17, 
0x97, + 0x0c, 0x9d, 0x97, 0x9c, 0xe9, 0x4d, 0x48, 0xf3, 0x11, 0xf4, 0x4f, 0xd3, 0x80, 0x8b, 0x94, 0xdf, + 0x52, 0x20, 0xb2, 0xc6, 0x41, 0x14, 0x44, 0xe6, 0x76, 0x56, 0xf9, 0xf2, 0xc5, 0xd0, 0x2b, 0x90, + 0x73, 0x89, 0x85, 0x2d, 0x97, 0x55, 0xca, 0x6f, 0x03, 0x0f, 0x68, 0xea, 0xa8, 0x0a, 0xd9, 0x3d, + 0x07, 0x1b, 0x23, 0x62, 0xb9, 0xfe, 0xef, 0xad, 0xff, 0x4a, 0x34, 0x7b, 0xd9, 0xaa, 0xec, 0x0c, + 0xb1, 0xa5, 0x05, 0x22, 0xf4, 0x2e, 0xac, 0x50, 0x7b, 0xe2, 0xf4, 0x09, 0x95, 0x44, 0x3e, 0x7d, + 0xa3, 0x3f, 0x71, 0x95, 0x8e, 0x47, 0x7b, 0xb3, 0x76, 0x26, 0x46, 0x6f, 0x43, 0x9a, 0x9a, 0xd6, + 0x80, 0x4a, 0x69, 0x1e, 0x55, 0x8e, 0x45, 0x31, 0xd2, 0x8b, 0xf1, 0x84, 0xe5, 0x4f, 0xa0, 0x10, + 0xb6, 0x5a, 0x32, 0x93, 0x6f, 0x84, 0x67, 0xf2, 0xe2, 0x39, 0xf5, 0x9e, 0x42, 0xcb, 0xa4, 0x6e, + 0x68, 0x24, 0x97, 0xb7, 0x01, 0xe6, 0xeb, 0xfc, 0x07, 0x86, 0xca, 0x5d, 0x80, 0x39, 0x81, 0x6e, + 0xc0, 0x8a, 0xd7, 0x21, 0x54, 0x12, 0xf8, 0x2e, 0x57, 0x63, 0x06, 0xda, 0x4c, 0xa1, 0x7c, 0x27, + 0x40, 0xc6, 0xc3, 0xfe, 0x7f, 0x6d, 0xa1, 0xe8, 0x00, 0xf3, 0x49, 0x87, 0xd6, 0x66, 0xdd, 0x2e, + 0xf0, 0x6e, 0x2f, 0x2d, 0x99, 0x87, 0xb3, 0x5e, 0x67, 0xda, 0xf9, 0xeb, 0x65, 0x3e, 0xa6, 0x55, + 0x19, 0x37, 0x7b, 0xe9, 0xbc, 0x05, 0xb9, 0x00, 0x43, 0xd7, 0x20, 0xaf, 0x13, 0xda, 0x77, 0xcc, + 0x31, 0xfb, 0xe8, 0xf0, 0x9f, 0x4a, 0x18, 0x5a, 0xfb, 0x49, 0xf0, 0xf4, 0x7c, 0x3d, 0x74, 0x19, + 0x50, 0xb7, 0xd6, 0xb9, 0xbf, 0xdb, 0xe9, 0xd6, 0xba, 0xea, 0x6e, 0xb3, 0xfd, 0x69, 0xad, 0xd5, + 0x6c, 0x14, 0x13, 0x11, 0x7c, 0x43, 0x53, 0x6b, 0x5d, 0xb5, 0x51, 0x14, 0x22, 0xf8, 0x8e, 0xda, + 0x6e, 0x34, 0xdb, 0x1f, 0x17, 0x93, 0x11, 0x5c, 0x7b, 0xd0, 0x6e, 0x33, 0x3c, 0x85, 0x24, 0x28, + 0x85, 0x7d, 0xb6, 0xb7, 0x76, 0x5a, 0x2a, 0x73, 0x12, 0xa3, 0x4c, 0xad, 0xbd, 0xa1, 0xb6, 0x5a, + 0x6a, 0xa3, 0x98, 0x46, 0x97, 0x60, 0x35, 0xc4, 0x6c, 0xd6, 0x9a, 0x0c, 0xce, 0xac, 0x7d, 0x23, + 0x40, 0x3e, 0x34, 0x16, 0x98, 0x41, 0xa7, 0xab, 0xa9, 0xb5, 0xad, 0x58, 0xf2, 0x97, 0x60, 0x75, + 0x91, 0x69, 0xb4, 0xd4, 0xa2, 0x10, 0x83, 0xb7, 0x77, 0xd4, 0x76, 0x31, 0x19, 0xf3, 0xa9, 0xb7, + 0xb6, 0x37, 0xee, 0xab, 0x8d, 0x62, 0x0a, 0x5d, 0x81, 0x8b, 0x0b, 0xcc, 0x46, 0x6b, 0xbb, 0xc3, + 0x72, 0xaf, 0x7f, 0x7e, 0x74, 0x2c, 0x27, 0x9e, 0x1e, 0xcb, 0x89, 0xe7, 0xc7, 0xb2, 0xf0, 0xd5, + 0x54, 0x16, 0x1e, 0x4f, 0x65, 0xe1, 0xc9, 0x54, 0x16, 0x8e, 0xa6, 0xb2, 0xf0, 0xc7, 0x54, 0x16, + 0xfe, 0x9a, 0xca, 0x89, 0xe7, 0x53, 0x59, 0xf8, 0xf6, 0x44, 0x4e, 0x1c, 0x9d, 0xc8, 0x89, 0xa7, + 0x27, 0x72, 0xe2, 0xb3, 0x0f, 0xfe, 0x55, 0xc7, 0x79, 0xdf, 0x9f, 0xbd, 0x0c, 0xbf, 0x5b, 0xff, + 0x3b, 0x00, 0x00, 0xff, 0xff, 0x46, 0x61, 0xd1, 0xc4, 0xb1, 0x0e, 0x00, 0x00, +} + +func (x TaskState) String() string { + s, ok := TaskState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x StreamState) String() string { + s, ok := StreamState_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Frame) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Frame) + if !ok { + that2, ok := that.(Frame) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + return true +} +func (this *Frame_Ack) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Frame_Ack) + if !ok 
{ + that2, ok := that.(Frame_Ack) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Ack.Equal(that1.Ack) { + return false + } + return true +} +func (this *Frame_Nack) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Frame_Nack) + if !ok { + that2, ok := that.(Frame_Nack) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Nack.Equal(that1.Nack) { + return false + } + return true +} +func (this *Frame_Discard) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Frame_Discard) + if !ok { + that2, ok := that.(Frame_Discard) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Discard.Equal(that1.Discard) { + return false + } + return true +} +func (this *Frame_Message) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Frame_Message) + if !ok { + that2, ok := that.(Frame_Message) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Message.Equal(that1.Message) { + return false + } + return true +} +func (this *AckFrame) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AckFrame) + if !ok { + that2, ok := that.(AckFrame) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + return true +} +func (this *NackFrame) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*NackFrame) + if !ok { + that2, ok := that.(NackFrame) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + if this.Error != that1.Error { + return false + } + return true +} +func (this *DiscardFrame) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DiscardFrame) + if !ok { + that2, ok := that.(DiscardFrame) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + return true +} +func (this *MessageFrame) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame) + if !ok { + that2, ok := that.(MessageFrame) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Id != that1.Id { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + return true +} +func (this *MessageFrame_WorkerReady) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_WorkerReady) + if !ok { + that2, ok := that.(MessageFrame_WorkerReady) + if ok { + that1 = &that2 + } else { + return false + } + } + 
if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.WorkerReady.Equal(that1.WorkerReady) { + return false + } + return true +} +func (this *MessageFrame_TaskAssign) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_TaskAssign) + if !ok { + that2, ok := that.(MessageFrame_TaskAssign) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TaskAssign.Equal(that1.TaskAssign) { + return false + } + return true +} +func (this *MessageFrame_TaskCancel) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_TaskCancel) + if !ok { + that2, ok := that.(MessageFrame_TaskCancel) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TaskCancel.Equal(that1.TaskCancel) { + return false + } + return true +} +func (this *MessageFrame_TaskFlag) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_TaskFlag) + if !ok { + that2, ok := that.(MessageFrame_TaskFlag) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TaskFlag.Equal(that1.TaskFlag) { + return false + } + return true +} +func (this *MessageFrame_TaskStatus) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_TaskStatus) + if !ok { + that2, ok := that.(MessageFrame_TaskStatus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.TaskStatus.Equal(that1.TaskStatus) { + return false + } + return true +} +func (this *MessageFrame_StreamBind) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_StreamBind) + if !ok { + that2, ok := that.(MessageFrame_StreamBind) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamBind.Equal(that1.StreamBind) { + return false + } + return true +} +func (this *MessageFrame_StreamData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_StreamData) + if !ok { + that2, ok := that.(MessageFrame_StreamData) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamData.Equal(that1.StreamData) { + return false + } + return true +} +func (this *MessageFrame_StreamStatus) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MessageFrame_StreamStatus) + if !ok { + that2, ok := that.(MessageFrame_StreamStatus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamStatus.Equal(that1.StreamStatus) { + return false + } + return true +} +func (this *WorkerReadyMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*WorkerReadyMessage) + if !ok { + that2, ok := that.(WorkerReadyMessage) + if ok { + that1 = &that2 + } 
else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *TaskAssignMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskAssignMessage) + if !ok { + that2, ok := that.(TaskAssignMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Task.Equal(that1.Task) { + return false + } + if len(this.StreamStates) != len(that1.StreamStates) { + return false + } + for i := range this.StreamStates { + if this.StreamStates[i] != that1.StreamStates[i] { + return false + } + } + return true +} +func (this *TaskCancelMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskCancelMessage) + if !ok { + that2, ok := that.(TaskCancelMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Id.Equal(that1.Id) { + return false + } + return true +} +func (this *TaskFlagMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskFlagMessage) + if !ok { + that2, ok := that.(TaskFlagMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Id.Equal(that1.Id) { + return false + } + if this.Interruptible != that1.Interruptible { + return false + } + return true +} +func (this *TaskStatusMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskStatusMessage) + if !ok { + that2, ok := that.(TaskStatusMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Id.Equal(that1.Id) { + return false + } + if !this.Status.Equal(&that1.Status) { + return false + } + return true +} +func (this *StreamBindMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamBindMessage) + if !ok { + that2, ok := that.(StreamBindMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamId.Equal(that1.StreamId) { + return false + } + if this.Receiver != that1.Receiver { + return false + } + return true +} +func (this *StreamDataMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamDataMessage) + if !ok { + that2, ok := that.(StreamDataMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamId.Equal(that1.StreamId) { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *StreamStatusMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamStatusMessage) + if !ok { + that2, ok := that.(StreamStatusMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamId.Equal(that1.StreamId) { + return false + } + if this.State != that1.State { + return false + 
} + return true +} +func (this *Task) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Task) + if !ok { + that2, ok := that.(Task) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Ulid.Equal(that1.Ulid) { + return false + } + if this.TenantId != that1.TenantId { + return false + } + if !this.Fragment.Equal(that1.Fragment) { + return false + } + if len(this.Sources) != len(that1.Sources) { + return false + } + for i := range this.Sources { + if !this.Sources[i].Equal(that1.Sources[i]) { + return false + } + } + if len(this.Sinks) != len(that1.Sinks) { + return false + } + for i := range this.Sinks { + if !this.Sinks[i].Equal(that1.Sinks[i]) { + return false + } + } + return true +} +func (this *StreamList) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamList) + if !ok { + that2, ok := that.(StreamList) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Streams) != len(that1.Streams) { + return false + } + for i := range this.Streams { + if !this.Streams[i].Equal(that1.Streams[i]) { + return false + } + } + return true +} +func (this *Stream) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Stream) + if !ok { + that2, ok := that.(Stream) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Ulid.Equal(that1.Ulid) { + return false + } + if this.TenantId != that1.TenantId { + return false + } + return true +} +func (this *TaskStatus) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskStatus) + if !ok { + that2, ok := that.(TaskStatus) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.State != that1.State { + return false + } + if !this.Error.Equal(that1.Error) { + return false + } + return true +} +func (this *TaskError) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TaskError) + if !ok { + that2, ok := that.(TaskError) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Description != that1.Description { + return false + } + return true +} +func (this *Frame) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&wirepb.Frame{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Frame_Ack) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.Frame_Ack{` + + `Ack:` + fmt.Sprintf("%#v", this.Ack) + `}`}, ", ") + return s +} +func (this *Frame_Nack) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.Frame_Nack{` + + `Nack:` + fmt.Sprintf("%#v", this.Nack) + `}`}, ", ") + return s +} +func (this *Frame_Discard) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.Frame_Discard{` + + `Discard:` + fmt.Sprintf("%#v", this.Discard) + `}`}, 
", ") + return s +} +func (this *Frame_Message) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.Frame_Message{` + + `Message:` + fmt.Sprintf("%#v", this.Message) + `}`}, ", ") + return s +} +func (this *AckFrame) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&wirepb.AckFrame{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NackFrame) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.NackFrame{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DiscardFrame) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&wirepb.DiscardFrame{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageFrame) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&wirepb.MessageFrame{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageFrame_WorkerReady) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_WorkerReady{` + + `WorkerReady:` + fmt.Sprintf("%#v", this.WorkerReady) + `}`}, ", ") + return s +} +func (this *MessageFrame_TaskAssign) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_TaskAssign{` + + `TaskAssign:` + fmt.Sprintf("%#v", this.TaskAssign) + `}`}, ", ") + return s +} +func (this *MessageFrame_TaskCancel) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_TaskCancel{` + + `TaskCancel:` + fmt.Sprintf("%#v", this.TaskCancel) + `}`}, ", ") + return s +} +func (this *MessageFrame_TaskFlag) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_TaskFlag{` + + `TaskFlag:` + fmt.Sprintf("%#v", this.TaskFlag) + `}`}, ", ") + return s +} +func (this *MessageFrame_TaskStatus) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_TaskStatus{` + + `TaskStatus:` + fmt.Sprintf("%#v", this.TaskStatus) + `}`}, ", ") + return s +} +func (this *MessageFrame_StreamBind) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_StreamBind{` + + `StreamBind:` + fmt.Sprintf("%#v", this.StreamBind) + `}`}, ", ") + return s +} +func (this *MessageFrame_StreamData) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_StreamData{` + + `StreamData:` + fmt.Sprintf("%#v", this.StreamData) + `}`}, ", ") + return s +} +func (this *MessageFrame_StreamStatus) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&wirepb.MessageFrame_StreamStatus{` + + `StreamStatus:` + fmt.Sprintf("%#v", this.StreamStatus) + `}`}, ", ") + return s +} +func (this *WorkerReadyMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, 
"&wirepb.WorkerReadyMessage{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskAssignMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.TaskAssignMessage{") + if this.Task != nil { + s = append(s, "Task: "+fmt.Sprintf("%#v", this.Task)+",\n") + } + keysForStreamStates := make([]string, 0, len(this.StreamStates)) + for k, _ := range this.StreamStates { + keysForStreamStates = append(keysForStreamStates, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStreamStates) + mapStringForStreamStates := "map[string]StreamState{" + for _, k := range keysForStreamStates { + mapStringForStreamStates += fmt.Sprintf("%#v: %#v,", k, this.StreamStates[k]) + } + mapStringForStreamStates += "}" + if this.StreamStates != nil { + s = append(s, "StreamStates: "+mapStringForStreamStates+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskCancelMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&wirepb.TaskCancelMessage{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskFlagMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.TaskFlagMessage{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "Interruptible: "+fmt.Sprintf("%#v", this.Interruptible)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskStatusMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.TaskStatusMessage{") + s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") + s = append(s, "Status: "+strings.Replace(this.Status.GoString(), `&`, ``, 1)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StreamBindMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.StreamBindMessage{") + s = append(s, "StreamId: "+fmt.Sprintf("%#v", this.StreamId)+",\n") + s = append(s, "Receiver: "+fmt.Sprintf("%#v", this.Receiver)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StreamDataMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.StreamDataMessage{") + s = append(s, "StreamId: "+fmt.Sprintf("%#v", this.StreamId)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StreamStatusMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.StreamStatusMessage{") + s = append(s, "StreamId: "+fmt.Sprintf("%#v", this.StreamId)+",\n") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Task) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&wirepb.Task{") + s = append(s, "Ulid: "+fmt.Sprintf("%#v", this.Ulid)+",\n") + s = append(s, "TenantId: "+fmt.Sprintf("%#v", this.TenantId)+",\n") + if this.Fragment != nil { + s = append(s, "Fragment: "+fmt.Sprintf("%#v", this.Fragment)+",\n") + } + keysForSources := make([]string, 0, len(this.Sources)) + for k, _ := range this.Sources { + keysForSources = append(keysForSources, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForSources) + mapStringForSources := "map[string]*StreamList{" + for _, k := range keysForSources { + mapStringForSources += fmt.Sprintf("%#v: %#v,", k, this.Sources[k]) + } + mapStringForSources += "}" + if this.Sources != nil { + s = append(s, "Sources: "+mapStringForSources+",\n") + } + keysForSinks := make([]string, 0, len(this.Sinks)) + for k, _ := range this.Sinks { + keysForSinks = append(keysForSinks, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSinks) + mapStringForSinks := "map[string]*StreamList{" + for _, k := range keysForSinks { + mapStringForSinks += fmt.Sprintf("%#v: %#v,", k, this.Sinks[k]) + } + mapStringForSinks += "}" + if this.Sinks != nil { + s = append(s, "Sinks: "+mapStringForSinks+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StreamList) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&wirepb.StreamList{") + if this.Streams != nil { + s = append(s, "Streams: "+fmt.Sprintf("%#v", this.Streams)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Stream) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.Stream{") + s = append(s, "Ulid: "+fmt.Sprintf("%#v", this.Ulid)+",\n") + s = append(s, "TenantId: "+fmt.Sprintf("%#v", this.TenantId)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskStatus) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&wirepb.TaskStatus{") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + if this.Error != nil { + s = append(s, "Error: "+fmt.Sprintf("%#v", this.Error)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TaskError) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&wirepb.TaskError{") + s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWirepb(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Frame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Frame) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Frame) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Frame_Ack) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Frame_Ack) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ack != nil { + { + size, err := m.Ack.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Frame_Nack) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Frame_Nack) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Nack != nil { + { + size, err := m.Nack.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Frame_Discard) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Frame_Discard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Discard != nil { + { + size, err := m.Discard.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Frame_Message) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Frame_Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Message != nil { + { + size, err := m.Message.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *AckFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AckFrame) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AckFrame) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintWirepb(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NackFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NackFrame) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NackFrame) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintWirepb(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintWirepb(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DiscardFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscardFrame) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DiscardFrame) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintWirepb(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MessageFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MessageFrame) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MessageFrame) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Id != 0 { + i = encodeVarintWirepb(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MessageFrame_WorkerReady) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_WorkerReady) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.WorkerReady != nil { + { + size, err := m.WorkerReady.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *MessageFrame_TaskAssign) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_TaskAssign) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TaskAssign != nil { + { + size, err := m.TaskAssign.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *MessageFrame_TaskCancel) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_TaskCancel) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TaskCancel != nil { + { + size, err := m.TaskCancel.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *MessageFrame_TaskFlag) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_TaskFlag) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TaskFlag != nil { + { + size, err := m.TaskFlag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *MessageFrame_TaskStatus) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_TaskStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.TaskStatus != nil { + { + size, err := m.TaskStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *MessageFrame_StreamBind) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_StreamBind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamBind != nil { + { + size, err := m.StreamBind.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *MessageFrame_StreamData) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_StreamData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamData != nil { + { + size, err := m.StreamData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} +func (m *MessageFrame_StreamStatus) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *MessageFrame_StreamStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamStatus != nil { + { + size, err := m.StreamStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *WorkerReadyMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerReadyMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkerReadyMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *TaskAssignMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskAssignMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskAssignMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StreamStates) > 0 { + for k := range m.StreamStates { + v := m.StreamStates[k] + baseI := i + i = encodeVarintWirepb(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWirepb(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWirepb(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Task != nil { + { + size, err := m.Task.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TaskCancelMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskCancelMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskCancelMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Id.Size() + i -= size + if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TaskFlagMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskFlagMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskFlagMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Interruptible { + i-- + if m.Interruptible { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size := m.Id.Size() + i -= size 
+ if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TaskStatusMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskStatusMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskStatusMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.Id.Size() + i -= size + if _, err := m.Id.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StreamBindMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamBindMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamBindMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintWirepb(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x12 + } + { + size := m.StreamId.Size() + i -= size + if _, err := m.StreamId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StreamDataMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamDataMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamDataMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintWirepb(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + { + size := m.StreamId.Size() + i -= size + if _, err := m.StreamId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StreamStatusMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamStatusMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamStatusMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.State != 0 { + i = encodeVarintWirepb(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size := m.StreamId.Size() + i -= size + if _, err := m.StreamId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = 
encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Task) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Sinks) > 0 { + for k := range m.Sinks { + v := m.Sinks[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWirepb(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWirepb(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Sources) > 0 { + for k := range m.Sources { + v := m.Sources[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWirepb(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWirepb(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.Fragment != nil { + { + size, err := m.Fragment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = encodeVarintWirepb(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0x12 + } + { + size := m.Ulid.Size() + i -= size + if _, err := m.Ulid.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StreamList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Streams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Stream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Stream) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Stream) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = encodeVarintWirepb(dAtA, i, uint64(len(m.TenantId))) + i-- + dAtA[i] = 0x12 + } + { + size := m.Ulid.Size() + i -= size + if _, err := 
m.Ulid.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TaskStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWirepb(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.State != 0 { + i = encodeVarintWirepb(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TaskError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TaskError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintWirepb(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWirepb(dAtA []byte, offset int, v uint64) int { + offset -= sovWirepb(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Frame) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + return n +} + +func (m *Frame_Ack) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ack != nil { + l = m.Ack.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *Frame_Nack) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Nack != nil { + l = m.Nack.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *Frame_Discard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Discard != nil { + l = m.Discard.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *Frame_Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *AckFrame) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovWirepb(uint64(m.Id)) + } + return n +} + +func (m *NackFrame) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovWirepb(uint64(m.Id)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} + +func (m *DiscardFrame) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovWirepb(uint64(m.Id)) + } + return n +} + +func (m *MessageFrame) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovWirepb(uint64(m.Id)) + } + if m.Kind != nil { + n += m.Kind.Size() + } + return n +} + +func 
(m *MessageFrame_WorkerReady) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WorkerReady != nil { + l = m.WorkerReady.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *MessageFrame_TaskAssign) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TaskAssign != nil { + l = m.TaskAssign.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *MessageFrame_TaskCancel) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TaskCancel != nil { + l = m.TaskCancel.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *MessageFrame_TaskFlag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TaskFlag != nil { + l = m.TaskFlag.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *MessageFrame_TaskStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TaskStatus != nil { + l = m.TaskStatus.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *MessageFrame_StreamBind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamBind != nil { + l = m.StreamBind.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *MessageFrame_StreamData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamData != nil { + l = m.StreamData.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *MessageFrame_StreamStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamStatus != nil { + l = m.StreamStatus.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} +func (m *WorkerReadyMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TaskAssignMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + if len(m.StreamStates) > 0 { + for k, v := range m.StreamStates { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovWirepb(uint64(len(k))) + 1 + sovWirepb(uint64(v)) + n += mapEntrySize + 1 + sovWirepb(uint64(mapEntrySize)) + } + } + return n +} + +func (m *TaskCancelMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Id.Size() + n += 1 + l + sovWirepb(uint64(l)) + return n +} + +func (m *TaskFlagMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Id.Size() + n += 1 + l + sovWirepb(uint64(l)) + if m.Interruptible { + n += 2 + } + return n +} + +func (m *TaskStatusMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Id.Size() + n += 1 + l + sovWirepb(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovWirepb(uint64(l)) + return n +} + +func (m *StreamBindMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.StreamId.Size() + n += 1 + l + sovWirepb(uint64(l)) + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} + +func (m *StreamDataMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.StreamId.Size() + n += 1 + l + sovWirepb(uint64(l)) + l = len(m.Data) + if l > 0 { + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} + +func (m *StreamStatusMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.StreamId.Size() + n += 1 + l + sovWirepb(uint64(l)) + if m.State != 0 { + n += 1 + sovWirepb(uint64(m.State)) + } + return n +} + +func (m 
*Task) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Ulid.Size() + n += 1 + l + sovWirepb(uint64(l)) + l = len(m.TenantId) + if l > 0 { + n += 1 + l + sovWirepb(uint64(l)) + } + if m.Fragment != nil { + l = m.Fragment.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + if len(m.Sources) > 0 { + for k, v := range m.Sources { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovWirepb(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovWirepb(uint64(len(k))) + l + n += mapEntrySize + 1 + sovWirepb(uint64(mapEntrySize)) + } + } + if len(m.Sinks) > 0 { + for k, v := range m.Sinks { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovWirepb(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovWirepb(uint64(len(k))) + l + n += mapEntrySize + 1 + sovWirepb(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StreamList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + } + return n +} + +func (m *Stream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Ulid.Size() + n += 1 + l + sovWirepb(uint64(l)) + l = len(m.TenantId) + if l > 0 { + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} + +func (m *TaskStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovWirepb(uint64(m.State)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} + +func (m *TaskError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + if l > 0 { + n += 1 + l + sovWirepb(uint64(l)) + } + return n +} + +func sovWirepb(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWirepb(x uint64) (n int) { + return sovWirepb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Frame) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Frame{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Frame_Ack) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Frame_Ack{`, + `Ack:` + strings.Replace(fmt.Sprintf("%v", this.Ack), "AckFrame", "AckFrame", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Frame_Nack) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Frame_Nack{`, + `Nack:` + strings.Replace(fmt.Sprintf("%v", this.Nack), "NackFrame", "NackFrame", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Frame_Discard) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Frame_Discard{`, + `Discard:` + strings.Replace(fmt.Sprintf("%v", this.Discard), "DiscardFrame", "DiscardFrame", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Frame_Message) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Frame_Message{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "MessageFrame", "MessageFrame", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AckFrame) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AckFrame{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `}`, + }, "") + return s +} +func (this *NackFrame) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NackFrame{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Error:` + fmt.Sprintf("%v", 
this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *DiscardFrame) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DiscardFrame{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_WorkerReady) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_WorkerReady{`, + `WorkerReady:` + strings.Replace(fmt.Sprintf("%v", this.WorkerReady), "WorkerReadyMessage", "WorkerReadyMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_TaskAssign) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_TaskAssign{`, + `TaskAssign:` + strings.Replace(fmt.Sprintf("%v", this.TaskAssign), "TaskAssignMessage", "TaskAssignMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_TaskCancel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_TaskCancel{`, + `TaskCancel:` + strings.Replace(fmt.Sprintf("%v", this.TaskCancel), "TaskCancelMessage", "TaskCancelMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_TaskFlag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_TaskFlag{`, + `TaskFlag:` + strings.Replace(fmt.Sprintf("%v", this.TaskFlag), "TaskFlagMessage", "TaskFlagMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_TaskStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_TaskStatus{`, + `TaskStatus:` + strings.Replace(fmt.Sprintf("%v", this.TaskStatus), "TaskStatusMessage", "TaskStatusMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_StreamBind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_StreamBind{`, + `StreamBind:` + strings.Replace(fmt.Sprintf("%v", this.StreamBind), "StreamBindMessage", "StreamBindMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_StreamData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_StreamData{`, + `StreamData:` + strings.Replace(fmt.Sprintf("%v", this.StreamData), "StreamDataMessage", "StreamDataMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MessageFrame_StreamStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MessageFrame_StreamStatus{`, + `StreamStatus:` + strings.Replace(fmt.Sprintf("%v", this.StreamStatus), "StreamStatusMessage", "StreamStatusMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkerReadyMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkerReadyMessage{`, + `}`, + }, "") + return s +} +func (this *TaskAssignMessage) String() string { + if this == nil { + return "nil" + } + keysForStreamStates := make([]string, 0, len(this.StreamStates)) + for k, _ := range this.StreamStates { + keysForStreamStates = append(keysForStreamStates, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStreamStates) + mapStringForStreamStates := "map[string]StreamState{" + for _, k := range keysForStreamStates { + 
mapStringForStreamStates += fmt.Sprintf("%v: %v,", k, this.StreamStates[k]) + } + mapStringForStreamStates += "}" + s := strings.Join([]string{`&TaskAssignMessage{`, + `Task:` + strings.Replace(this.Task.String(), "Task", "Task", 1) + `,`, + `StreamStates:` + mapStringForStreamStates + `,`, + `}`, + }, "") + return s +} +func (this *TaskCancelMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskCancelMessage{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `}`, + }, "") + return s +} +func (this *TaskFlagMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskFlagMessage{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Interruptible:` + fmt.Sprintf("%v", this.Interruptible) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatusMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatusMessage{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TaskStatus", "TaskStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StreamBindMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamBindMessage{`, + `StreamId:` + fmt.Sprintf("%v", this.StreamId) + `,`, + `Receiver:` + fmt.Sprintf("%v", this.Receiver) + `,`, + `}`, + }, "") + return s +} +func (this *StreamDataMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamDataMessage{`, + `StreamId:` + fmt.Sprintf("%v", this.StreamId) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *StreamStatusMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamStatusMessage{`, + `StreamId:` + fmt.Sprintf("%v", this.StreamId) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *Task) String() string { + if this == nil { + return "nil" + } + keysForSources := make([]string, 0, len(this.Sources)) + for k, _ := range this.Sources { + keysForSources = append(keysForSources, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSources) + mapStringForSources := "map[string]*StreamList{" + for _, k := range keysForSources { + mapStringForSources += fmt.Sprintf("%v: %v,", k, this.Sources[k]) + } + mapStringForSources += "}" + keysForSinks := make([]string, 0, len(this.Sinks)) + for k, _ := range this.Sinks { + keysForSinks = append(keysForSinks, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSinks) + mapStringForSinks := "map[string]*StreamList{" + for _, k := range keysForSinks { + mapStringForSinks += fmt.Sprintf("%v: %v,", k, this.Sinks[k]) + } + mapStringForSinks += "}" + s := strings.Join([]string{`&Task{`, + `Ulid:` + fmt.Sprintf("%v", this.Ulid) + `,`, + `TenantId:` + fmt.Sprintf("%v", this.TenantId) + `,`, + `Fragment:` + strings.Replace(fmt.Sprintf("%v", this.Fragment), "Plan", "physicalpb.Plan", 1) + `,`, + `Sources:` + mapStringForSources + `,`, + `Sinks:` + mapStringForSinks + `,`, + `}`, + }, "") + return s +} +func (this *StreamList) String() string { + if this == nil { + return "nil" + } + repeatedStringForStreams := "[]*Stream{" + for _, f := range this.Streams { + repeatedStringForStreams += strings.Replace(f.String(), "Stream", "Stream", 1) + "," + } + repeatedStringForStreams += "}" + s := strings.Join([]string{`&StreamList{`, + `Streams:` + repeatedStringForStreams + `,`, + `}`, + 
}, "") + return s +} +func (this *Stream) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Stream{`, + `Ulid:` + fmt.Sprintf("%v", this.Ulid) + `,`, + `TenantId:` + fmt.Sprintf("%v", this.TenantId) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Error:` + strings.Replace(this.Error.String(), "TaskError", "TaskError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskError{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func valueToStringWirepb(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Frame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Frame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Frame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ack", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &AckFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Frame_Ack{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nack", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NackFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Frame_Nack{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Discard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + v := &DiscardFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Frame_Discard{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &MessageFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Frame_Message{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AckFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AckFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AckFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NackFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NackFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NackFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiscardFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiscardFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiscardFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MessageFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MessageFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MessageFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkerReady", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &WorkerReadyMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_WorkerReady{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskAssign", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TaskAssignMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_TaskAssign{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskCancel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TaskCancelMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_TaskCancel{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskFlag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TaskFlagMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_TaskFlag{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TaskStatusMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_TaskStatus{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamBind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StreamBindMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_StreamBind{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StreamDataMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_StreamData{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StreamStatusMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &MessageFrame_StreamStatus{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerReadyMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerReadyMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerReadyMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := 
skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskAssignMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskAssignMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskAssignMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamStates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StreamStates == nil { + m.StreamStates = make(map[string]StreamState) + } + var mapkey string + var mapvalue StreamState + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWirepb + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWirepb + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= StreamState(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.StreamStates[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskCancelMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskCancelMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskCancelMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskFlagMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskFlagMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskFlagMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Interruptible", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Interruptible = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStatusMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatusMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatusMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamBindMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamBindMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamBindMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StreamId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamDataMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamDataMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamDataMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StreamId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamStatusMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamStatusMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamStatusMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StreamId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= StreamState(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ulid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Ulid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fragment == nil { + m.Fragment = &physicalpb.Plan{} + } + if err := m.Fragment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sources == nil { + m.Sources = make(map[string]*StreamList) + } + var mapkey string + var mapvalue *StreamList + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWirepb + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWirepb + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthWirepb + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthWirepb + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StreamList{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Sources[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sinks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sinks == nil { + m.Sinks = make(map[string]*StreamList) + } + var mapkey string + var mapvalue *StreamList + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWirepb + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWirepb + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthWirepb + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthWirepb + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StreamList{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Sinks[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, &Stream{}) + if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Stream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Stream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: Stream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ulid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Ulid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= TaskState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &TaskError{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWirepb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWirepb + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWirepb + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWirepb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWirepb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWirepb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWirepb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWirepb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWirepb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWirepb + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthWirepb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWirepb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWirepb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthWirepb + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWirepb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWirepb = fmt.Errorf("proto: integer overflow") +) diff --git a/pkg/engine/internal/proto/wirepb/wirepb.proto b/pkg/engine/internal/proto/wirepb/wirepb.proto new file mode 100644 index 0000000000..13089577a9 --- /dev/null +++ b/pkg/engine/internal/proto/wirepb/wirepb.proto @@ -0,0 +1,199 @@ +// wirepb.proto holds types for the wire protocol used for communication +// between scheduler and workers in the query engine. +syntax = "proto3"; + +package loki.wire; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "pkg/engine/internal/proto/expressionpb/expressionpb.proto"; +import "pkg/engine/internal/proto/physicalpb/physicalpb.proto"; +import "pkg/engine/internal/proto/ulid/ulid.proto"; + +option go_package = "github.com/grafana/loki/v3/pkg/engine/internal/proto/wirepb"; + +message Frame { + oneof kind { + AckFrame ack = 1; + NackFrame nack = 2; + DiscardFrame discard = 3; + MessageFrame message = 4; + } +} + +message AckFrame { + uint64 id = 1; +} + +message NackFrame { + uint64 id = 1; + string error = 2; +} + +message DiscardFrame { + uint64 id = 1; +} + +message MessageFrame { + uint64 id = 1; + + oneof kind { + WorkerReadyMessage worker_ready = 2; + + TaskAssignMessage task_assign = 3; + TaskCancelMessage task_cancel = 4; + TaskFlagMessage task_flag = 5; + TaskStatusMessage task_status = 6; + + StreamBindMessage stream_bind = 7; + StreamDataMessage stream_data = 8; + StreamStatusMessage stream_status = 9; + } +} + +message WorkerReadyMessage {} + +// TaskAssignMessage is sent by the scheduler to a worker when there is a +// task to run. +message TaskAssignMessage { + Task task = 1; + + // StreamStates holds the most recent state of each stream that the task + // reads from. The key is the stream ULID. + map<string, StreamState> stream_states = 2; +} + +// TaskCancelMessage is sent by the scheduler to a worker when a task is no +// longer needed. +message TaskCancelMessage { + ulid.loki.v1.ProtoULID id = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; +} + +// TaskFlagMessage is sent by the scheduler to update the runtime flags of a task. +message TaskFlagMessage { + ulid.loki.v1.ProtoULID id = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; + + // Interruptible indicates that tasks blocked on reading from or writing to a + // stream can be paused, and that the worker can accept new tasks to run. + bool interruptible = 2; +} + +// TaskStatusMessage is sent by the worker to the scheduler to inform the +// scheduler of the current status of a task.
+message TaskStatusMessage { + ulid.loki.v1.ProtoULID id = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; + + TaskStatus status = 2 [(gogoproto.nullable) = false]; +} + +// StreamBindMessage is sent by the scheduler to a worker to inform the +// worker about the location of a stream receiver. +message StreamBindMessage { + ulid.loki.v1.ProtoULID stream_id = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; + + // Receiver is the network address of the stream receiver. + string receiver = 2; +} + +// StreamDataMessage is sent by a worker to a stream receiver to provide +// payload data for a stream. +message StreamDataMessage { + ulid.loki.v1.ProtoULID stream_id = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; + + // Data is the serialized Arrow record payload. + bytes data = 2; +} + +// StreamStatusMessage communicates the status of the sending side of a stream. +message StreamStatusMessage { + ulid.loki.v1.ProtoULID stream_id = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; + + StreamState state = 2; +} + +// Task is a single unit of work within a workflow. +message Task { + ulid.loki.v1.ProtoULID ulid = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; + + string tenant_id = 2; + + // Fragment is the local physical plan that this task represents. + loki.physical.Plan fragment = 3; + + // Sources defines which streams physical nodes read from. + // The key is the node ID string representation. + map<string, StreamList> sources = 4; + + // Sinks defines which streams physical nodes write to. + // The key is the node ID string representation. + map<string, StreamList> sinks = 5; +} + +// StreamList is a list of streams, used in Task's sources and sinks maps. +message StreamList { + repeated Stream streams = 1; +} + +// Stream is an abstract representation of how data flows across task boundaries. +message Stream { + ulid.loki.v1.ProtoULID ulid = 1 [ + (gogoproto.customtype) = "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid.ULID", + (gogoproto.nullable) = false + ]; + + string tenant_id = 2; +} + +// TaskStatus represents the current status of a task. +message TaskStatus { + TaskState state = 1; + + // Error is set only when state is TASK_STATE_FAILED. + TaskError error = 2; +} + +message TaskError { + string description = 1; +} + +// TaskState represents the execution state of a task. +enum TaskState { + TASK_STATE_INVALID = 0; // Invalid/unspecified state. + TASK_STATE_CREATED = 1; // Created but not given to Runner. + TASK_STATE_PENDING = 2; // Pending execution. + TASK_STATE_RUNNING = 3; // Currently being executed. + TASK_STATE_COMPLETED = 4; // Completed successfully. + TASK_STATE_CANCELLED = 5; // Cancelled. + TASK_STATE_FAILED = 6; // Failed during execution. +} + +// StreamState represents the state of a stream. +enum StreamState { + STREAM_STATE_INVALID = 0; // Invalid/unspecified state. + STREAM_STATE_IDLE = 1; // Stream waiting for sender and receiver. + STREAM_STATE_OPEN = 2; // Stream open and transmitting data. + STREAM_STATE_BLOCKED = 3; // Stream blocked by backpressure. + STREAM_STATE_CLOSED = 4; // Stream closed, no longer transmitting.
+} diff --git a/pkg/engine/internal/scheduler/wire/addr.go b/pkg/engine/internal/scheduler/wire/addr.go new file mode 100644 index 0000000000..7126e8af89 --- /dev/null +++ b/pkg/engine/internal/scheduler/wire/addr.go @@ -0,0 +1,15 @@ +package wire + +import ( + "fmt" + "net" + "net/netip" +) + +func addrPortStrToAddr(addrPortStr string) (*net.TCPAddr, error) { + addrPort, err := netip.ParseAddrPort(addrPortStr) + if err != nil { + return nil, fmt.Errorf("parse addr port from %s: %w", addrPortStr, err) + } + return net.TCPAddrFromAddrPort(addrPort), nil +} diff --git a/pkg/engine/internal/scheduler/wire/codec.go b/pkg/engine/internal/scheduler/wire/codec.go new file mode 100644 index 0000000000..dc3777cd68 --- /dev/null +++ b/pkg/engine/internal/scheduler/wire/codec.go @@ -0,0 +1,646 @@ +package wire + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/ipc" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/gogo/protobuf/proto" + "github.com/oklog/ulid/v2" + + "github.com/grafana/loki/v3/pkg/engine/internal/planner/physical" + "github.com/grafana/loki/v3/pkg/engine/internal/proto/physicalpb" + protoUlid "github.com/grafana/loki/v3/pkg/engine/internal/proto/ulid" + "github.com/grafana/loki/v3/pkg/engine/internal/proto/wirepb" + "github.com/grafana/loki/v3/pkg/engine/internal/workflow" +) + +// protobufCodec implements a protobuf-based codec for frames. +// Messages are length-prefixed: [uvarint length][protobuf payload] +type protobufCodec struct { + allocator memory.Allocator +} + +// byteReaderAdapter adapts an io.Reader to io.ByteReader without buffering. +// This is used to read uvarint length prefixes byte-by-byte without +// consuming extra data that might be needed for subsequent reads. +type byteReaderAdapter struct { + r io.Reader +} + +func (br *byteReaderAdapter) ReadByte() (byte, error) { + var b [1]byte + _, err := io.ReadFull(br.r, b[:]) + return b[0], err +} + +// EncodeTo encodes a frame as protobuf and writes it to the writer. +// Format: [uvarint length][protobuf payload] +func (c *protobufCodec) EncodeTo(w io.Writer, frame Frame) error { + // Convert wire.Frame to protobuf + pbFrame, err := c.frameToPbFrame(frame) + if err != nil { + return fmt.Errorf("failed to convert frame to protobuf: %w", err) + } + + // Marshal to bytes + data, err := proto.Marshal(pbFrame) + if err != nil { + return fmt.Errorf("failed to marshal protobuf: %w", err) + } + + // Write length prefix (uvarint) + length := uint64(len(data)) + buf := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(buf, length) + if _, err := w.Write(buf[:n]); err != nil { + return fmt.Errorf("failed to write length prefix: %w", err) + } + + // Write payload + written, err := w.Write(data) + if err != nil { + return fmt.Errorf("failed to write payload: %w", err) + } + if written != len(data) { + return fmt.Errorf("incomplete write: wrote %d bytes, expected %d", written, len(data)) + } + + return nil +} + +// DecodeFrom reads and decodes a frame from the bound reader. 
+// Format: [uvarint length][protobuf payload] +func (c *protobufCodec) DecodeFrom(r io.Reader) (Frame, error) { + // Read length prefix (uvarint) + // binary.ReadUvarint requires a ByteReader, so we wrap if needed + byteReader, ok := r.(io.ByteReader) + if !ok { + byteReader = &byteReaderAdapter{r: r} + } + + length, err := binary.ReadUvarint(byteReader) + if err != nil { + return nil, fmt.Errorf("failed to read length prefix: %w", err) + } + + // Read payload + data := make([]byte, length) + n, err := io.ReadFull(r, data) + if err != nil { + return nil, fmt.Errorf("failed to read payload: %w", err) + } + if uint64(n) != length { + return nil, fmt.Errorf("incomplete read: read %d bytes, expected %d", n, length) + } + + // Unmarshal protobuf + pbFrame := &wirepb.Frame{} + if err := proto.Unmarshal(data, pbFrame); err != nil { + return nil, fmt.Errorf("failed to unmarshal protobuf: %w", err) + } + + // Convert protobuf to wire.Frame + frame, err := c.frameFromPbFrame(pbFrame) + if err != nil { + return nil, fmt.Errorf("failed to convert protobuf to frame: %w", err) + } + + return frame, nil +} + +func (c *protobufCodec) frameFromPbFrame(f *wirepb.Frame) (Frame, error) { + if f == nil { + return nil, errors.New("nil frame") + } + + switch k := f.Kind.(type) { + case *wirepb.Frame_Ack: + return AckFrame{ID: k.Ack.Id}, nil + + case *wirepb.Frame_Nack: + var err error + if k.Nack.Error != "" { + err = errors.New(k.Nack.Error) + } + return NackFrame{ + ID: k.Nack.Id, + Error: err, + }, nil + + case *wirepb.Frame_Discard: + return DiscardFrame{ID: k.Discard.Id}, nil + + case *wirepb.Frame_Message: + msg, err := c.messageFromPbMessage(k.Message) + if err != nil { + return nil, err + } + return MessageFrame{ + ID: k.Message.Id, + Message: msg, + }, nil + + default: + return nil, fmt.Errorf("unknown frame kind: %T", k) + } +} + +func (c *protobufCodec) messageFromPbMessage(mf *wirepb.MessageFrame) (Message, error) { + if mf == nil { + return nil, errors.New("nil message frame") + } + + switch k := mf.Kind.(type) { + case *wirepb.MessageFrame_WorkerReady: + return WorkerReadyMessage{}, nil + + case *wirepb.MessageFrame_TaskAssign: + task, err := c.taskFromPbTask(k.TaskAssign.Task) + if err != nil { + return nil, err + } + + streamStates := make(map[ulid.ULID]workflow.StreamState) + for idStr, statePb := range k.TaskAssign.StreamStates { + id, err := ulid.Parse(idStr) + if err != nil { + return nil, fmt.Errorf("invalid stream ID %q: %w", idStr, err) + } + state, err := c.streamStateFromPbStreamState(statePb) + if err != nil { + return nil, fmt.Errorf("stream state from pb stream state (%s): %w", idStr, err) + } + streamStates[id] = state + } + + return TaskAssignMessage{ + Task: task, + StreamStates: streamStates, + }, nil + + case *wirepb.MessageFrame_TaskCancel: + return TaskCancelMessage{ + ID: ulid.ULID(k.TaskCancel.Id), + }, nil + + case *wirepb.MessageFrame_TaskFlag: + return TaskFlagMessage{ + ID: ulid.ULID(k.TaskFlag.Id), + Interruptible: k.TaskFlag.Interruptible, + }, nil + + case *wirepb.MessageFrame_TaskStatus: + status, err := c.taskStatusFromPbTaskStatus(&k.TaskStatus.Status) + if err != nil { + return nil, err + } + return TaskStatusMessage{ + ID: ulid.ULID(k.TaskStatus.Id), + Status: status, + }, nil + + case *wirepb.MessageFrame_StreamBind: + addr, err := addrPortStrToAddr(k.StreamBind.Receiver) + if err != nil { + return nil, fmt.Errorf("invalid receiver address %s: %w", k.StreamBind.Receiver, err) + } + return StreamBindMessage{ + StreamID: ulid.ULID(k.StreamBind.StreamId), + 
Receiver: addr, + }, nil + + case *wirepb.MessageFrame_StreamData: + record, err := c.deserializeArrowRecord(k.StreamData.Data) + if err != nil { + return nil, fmt.Errorf("failed to deserialize arrow record: %w", err) + } + return StreamDataMessage{ + StreamID: ulid.ULID(k.StreamData.StreamId), + Data: record, + }, nil + + case *wirepb.MessageFrame_StreamStatus: + streamState, err := c.streamStateFromPbStreamState(k.StreamStatus.State) + if err != nil { + return nil, fmt.Errorf("stream state from pb stream state: %w", err) + } + return StreamStatusMessage{ + StreamID: ulid.ULID(k.StreamStatus.StreamId), + State: streamState, + }, nil + + default: + return nil, fmt.Errorf("unknown message kind: %T", k) + } +} + +func (c *protobufCodec) taskFromPbTask(t *wirepb.Task) (*workflow.Task, error) { + if t == nil { + return nil, fmt.Errorf("nil task") + } + + fragment, err := t.Fragment.MarshalPhysical() + if err != nil { + return nil, fmt.Errorf("failed to marshal fragment: %w", err) + } + + sources, err := c.nodeStreamMapFromPbNodeStreamList(t.Sources, fragment) + if err != nil { + return nil, fmt.Errorf("failed to marshal sources: %w", err) + } + + sinks, err := c.nodeStreamMapFromPbNodeStreamList(t.Sinks, fragment) + if err != nil { + return nil, fmt.Errorf("failed to marshal sinks: %w", err) + } + + return &workflow.Task{ + ULID: ulid.ULID(t.Ulid), + TenantID: t.TenantId, + Fragment: fragment, + Sources: sources, + Sinks: sinks, + }, nil +} + +func (c *protobufCodec) taskStatusFromPbTaskStatus(ts *wirepb.TaskStatus) (workflow.TaskStatus, error) { + if ts == nil { + return workflow.TaskStatus{}, fmt.Errorf("nil task status") + } + + state, err := c.taskStateFromPbTaskState(ts.State) + if err != nil { + return workflow.TaskStatus{}, err + } + + status := workflow.TaskStatus{State: state} + pbErr := ts.GetError() + if pbErr != nil { + status.Error = errors.New(pbErr.Description) + } + + return status, nil +} + +func (c *protobufCodec) taskStateFromPbTaskState(state wirepb.TaskState) (workflow.TaskState, error) { + switch state { + case wirepb.TASK_STATE_CREATED: + return workflow.TaskStateCreated, nil + case wirepb.TASK_STATE_PENDING: + return workflow.TaskStatePending, nil + case wirepb.TASK_STATE_RUNNING: + return workflow.TaskStateRunning, nil + case wirepb.TASK_STATE_COMPLETED: + return workflow.TaskStateCompleted, nil + case wirepb.TASK_STATE_CANCELLED: + return workflow.TaskStateCancelled, nil + case wirepb.TASK_STATE_FAILED: + return workflow.TaskStateFailed, nil + default: + return workflow.TaskStateCancelled, fmt.Errorf("task state %v is unknown", state) + } +} + +func (c *protobufCodec) streamStateFromPbStreamState(state wirepb.StreamState) (workflow.StreamState, error) { + switch state { + case wirepb.STREAM_STATE_IDLE: + return workflow.StreamStateIdle, nil + case wirepb.STREAM_STATE_OPEN: + return workflow.StreamStateOpen, nil + case wirepb.STREAM_STATE_BLOCKED: + return workflow.StreamStateBlocked, nil + case wirepb.STREAM_STATE_CLOSED: + return workflow.StreamStateClosed, nil + default: + return workflow.StreamStateIdle, fmt.Errorf("stream state %v is unknown", state) + } +} + +func (c *protobufCodec) nodeStreamMapFromPbNodeStreamList(pbMap map[string]*wirepb.StreamList, fragment *physical.Plan) (map[physical.Node][]*workflow.Stream, error) { + result := make(map[physical.Node][]*workflow.Stream) + + // Build a map of node IDs to nodes from the fragment + nodeByID := make(map[ulid.ULID]physical.Node) + for node := range fragment.Graph().Nodes() { + nodeByID[node.ID()] = node + } + 
+ for nodeIDStr, streamList := range pbMap { + // Parse the node ID string back to ULID + nodeID, err := ulid.Parse(nodeIDStr) + if err != nil { + return nil, fmt.Errorf("invalid node ID %q: %w", nodeIDStr, err) + } + + // Look up the actual node + node, ok := nodeByID[nodeID] + if !ok { + return nil, fmt.Errorf("node ID %q not found in fragment", nodeIDStr) + } + + streams := make([]*workflow.Stream, len(streamList.Streams)) + for i, s := range streamList.Streams { + streams[i] = &workflow.Stream{ + ULID: ulid.ULID(s.Ulid), + TenantID: s.TenantId, + } + } + + result[node] = streams + } + + return result, nil +} + +func (c *protobufCodec) frameToPbFrame(from Frame) (*wirepb.Frame, error) { + if from == nil { + return nil, errors.New("nil frame") + } + + f := &wirepb.Frame{} + + switch v := from.(type) { + case AckFrame: + f.Kind = &wirepb.Frame_Ack{ + Ack: &wirepb.AckFrame{Id: v.ID}, + } + + case NackFrame: + var errStr string + if v.Error != nil { + errStr = v.Error.Error() + } + f.Kind = &wirepb.Frame_Nack{ + Nack: &wirepb.NackFrame{ + Id: v.ID, + Error: errStr, + }, + } + + case DiscardFrame: + f.Kind = &wirepb.Frame_Discard{ + Discard: &wirepb.DiscardFrame{Id: v.ID}, + } + + case MessageFrame: + mf, err := c.messageToPbMessage(v.Message) + if err != nil { + return nil, err + } + mf.Id = v.ID + f.Kind = &wirepb.Frame_Message{Message: mf} + + default: + return nil, fmt.Errorf("unknown frame type: %T", v) + } + + return f, nil +} + +func (c *protobufCodec) messageToPbMessage(from Message) (*wirepb.MessageFrame, error) { + if from == nil { + return nil, errors.New("nil message") + } + + mf := &wirepb.MessageFrame{} + + switch v := from.(type) { + case WorkerReadyMessage: + mf.Kind = &wirepb.MessageFrame_WorkerReady{ + WorkerReady: &wirepb.WorkerReadyMessage{}, + } + + case TaskAssignMessage: + task, err := c.taskToPbTask(v.Task) + if err != nil { + return nil, err + } + + streamStates := make(map[string]wirepb.StreamState) + for id, state := range v.StreamStates { + streamStates[id.String()] = c.streamStateToPbStreamState(state) + } + + mf.Kind = &wirepb.MessageFrame_TaskAssign{ + TaskAssign: &wirepb.TaskAssignMessage{ + Task: task, + StreamStates: streamStates, + }, + } + + case TaskCancelMessage: + mf.Kind = &wirepb.MessageFrame_TaskCancel{ + TaskCancel: &wirepb.TaskCancelMessage{ + Id: protoUlid.ULID(v.ID), + }, + } + + case TaskFlagMessage: + mf.Kind = &wirepb.MessageFrame_TaskFlag{ + TaskFlag: &wirepb.TaskFlagMessage{ + Id: protoUlid.ULID(v.ID), + Interruptible: v.Interruptible, + }, + } + + case TaskStatusMessage: + status, err := c.taskStatusToPbTaskStatus(v.Status) + if err != nil { + return nil, err + } + mf.Kind = &wirepb.MessageFrame_TaskStatus{ + TaskStatus: &wirepb.TaskStatusMessage{ + Id: protoUlid.ULID(v.ID), + Status: *status, + }, + } + + case StreamBindMessage: + mf.Kind = &wirepb.MessageFrame_StreamBind{ + StreamBind: &wirepb.StreamBindMessage{ + StreamId: protoUlid.ULID(v.StreamID), + Receiver: v.Receiver.String(), + }, + } + + case StreamDataMessage: + // Serialize Arrow record to bytes + data, err := c.serializeArrowRecord(v.Data) + if err != nil { + return nil, fmt.Errorf("failed to serialize arrow record: %w", err) + } + mf.Kind = &wirepb.MessageFrame_StreamData{ + StreamData: &wirepb.StreamDataMessage{ + StreamId: protoUlid.ULID(v.StreamID), + Data: data, + }, + } + + case StreamStatusMessage: + mf.Kind = &wirepb.MessageFrame_StreamStatus{ + StreamStatus: &wirepb.StreamStatusMessage{ + StreamId: protoUlid.ULID(v.StreamID), + State: 
c.streamStateToPbStreamState(v.State), + }, + } + + default: + return nil, fmt.Errorf("unknown message type: %T", v) + } + + return mf, nil +} + +func (c *protobufCodec) taskToPbTask(from *workflow.Task) (*wirepb.Task, error) { + if from == nil { + return nil, errors.New("nil task") + } + + fragment := &physicalpb.Plan{} + if err := fragment.UnmarshalPhysical(from.Fragment); err != nil { + return nil, fmt.Errorf("failed to unmarshal fragment: %w", err) + } + + sources, err := c.nodeStreamMapToPbNodeStreamList(from.Sources) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sources: %w", err) + } + + sinks, err := c.nodeStreamMapToPbNodeStreamList(from.Sinks) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sinks: %w", err) + } + + return &wirepb.Task{ + Ulid: protoUlid.ULID(from.ULID), + TenantId: from.TenantID, + Fragment: fragment, + Sources: sources, + Sinks: sinks, + }, nil +} + +func (c *protobufCodec) taskStatusToPbTaskStatus(from workflow.TaskStatus) (*wirepb.TaskStatus, error) { + ts := &wirepb.TaskStatus{ + State: c.taskStateToPbTaskState(from.State), + } + + if from.Error != nil { + ts.Error = &wirepb.TaskError{Description: from.Error.Error()} + } + + return ts, nil +} + +func (c *protobufCodec) taskStateToPbTaskState(state workflow.TaskState) wirepb.TaskState { + switch state { + case workflow.TaskStateCreated: + return wirepb.TASK_STATE_CREATED + case workflow.TaskStatePending: + return wirepb.TASK_STATE_PENDING + case workflow.TaskStateRunning: + return wirepb.TASK_STATE_RUNNING + case workflow.TaskStateCompleted: + return wirepb.TASK_STATE_COMPLETED + case workflow.TaskStateCancelled: + return wirepb.TASK_STATE_CANCELLED + case workflow.TaskStateFailed: + return wirepb.TASK_STATE_FAILED + default: + return wirepb.TASK_STATE_INVALID + } +} + +func (c *protobufCodec) streamStateToPbStreamState(state workflow.StreamState) wirepb.StreamState { + switch state { + case workflow.StreamStateIdle: + return wirepb.STREAM_STATE_IDLE + case workflow.StreamStateOpen: + return wirepb.STREAM_STATE_OPEN + case workflow.StreamStateBlocked: + return wirepb.STREAM_STATE_BLOCKED + case workflow.StreamStateClosed: + return wirepb.STREAM_STATE_CLOSED + default: + return wirepb.STREAM_STATE_INVALID + } +} + +func (c *protobufCodec) nodeStreamMapToPbNodeStreamList(nodeMap map[physical.Node][]*workflow.Stream) (map[string]*wirepb.StreamList, error) { + result := make(map[string]*wirepb.StreamList) + + for node, streams := range nodeMap { + // Get the node ID + nodeID := node.ID() + nodeIDStr := nodeID.String() + + pbStreams := make([]*wirepb.Stream, len(streams)) + for i, s := range streams { + pbStreams[i] = &wirepb.Stream{ + Ulid: protoUlid.ULID(s.ULID), + TenantId: s.TenantID, + } + } + + result[nodeIDStr] = &wirepb.StreamList{ + Streams: pbStreams, + } + } + + return result, nil +} + +// serializeArrowRecord serializes an Arrow record to bytes using IPC format. +func (c *protobufCodec) serializeArrowRecord(record arrow.Record) ([]byte, error) { + if record == nil { + return nil, errors.New("nil arrow record") + } + + var buf bytes.Buffer + writer := ipc.NewWriter(&buf, + ipc.WithSchema(record.Schema()), + ipc.WithAllocator(c.allocator), + ) + defer writer.Close() + + if err := writer.Write(record); err != nil { + return nil, err + } + + if err := writer.Close(); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// deserializeArrowRecord deserializes an Arrow record from bytes using IPC format. 
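+// The payload is expected to contain a single Arrow IPC stream holding exactly
+// one record, as produced by serializeArrowRecord; only the first record is read.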
+func (c *protobufCodec) deserializeArrowRecord(data []byte) (arrow.Record, error) { + if len(data) == 0 { + return nil, errors.New("empty arrow data") + } + + reader, err := ipc.NewReader( + bytes.NewReader(data), + ipc.WithAllocator(c.allocator), + ) + if err != nil { + return nil, err + } + + if !reader.Next() { + if err := reader.Err(); err != nil { + return nil, err + } + return nil, errors.New("no record in arrow data") + } + + rec := reader.Record() + return rec, nil +} diff --git a/pkg/engine/internal/scheduler/wire/codec_test.go b/pkg/engine/internal/scheduler/wire/codec_test.go new file mode 100644 index 0000000000..834700f75e --- /dev/null +++ b/pkg/engine/internal/scheduler/wire/codec_test.go @@ -0,0 +1,422 @@ +package wire + +import ( + "errors" + "net" + "net/netip" + "testing" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/engine/internal/planner/physical" + "github.com/grafana/loki/v3/pkg/engine/internal/workflow" +) + +func TestProtobufCodec_Frames(t *testing.T) { + tests := map[string]struct { + frame Frame + }{ + "AckFrame": { + frame: AckFrame{ID: 42}, + }, + "NackFrame without error": { + frame: NackFrame{ID: 43}, + }, + "NackFrame with error": { + frame: NackFrame{ + ID: 44, + Error: errors.New("test error"), + }, + }, + "DiscardFrame": { + frame: DiscardFrame{ID: 45}, + }, + } + + codec := &protobufCodec{memory.DefaultAllocator} + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + pbFrame, err := codec.frameToPbFrame(tt.frame) + require.NoError(t, err) + + actualFrame, err := codec.frameFromPbFrame(pbFrame) + require.NoError(t, err) + + assert.Equal(t, tt.frame, actualFrame) + }) + } +} + +func TestProtobufCodec_Messages(t *testing.T) { + taskULID := ulid.Make() + streamULID := ulid.Make() + addrPort, err := netip.ParseAddrPort("192.168.0.1:12345") + require.NoError(t, err) + addr := net.TCPAddrFromAddrPort(addrPort) + + tests := map[string]struct { + message Message + }{ + "WorkerReadyMessage": { + message: WorkerReadyMessage{}, + }, + "TaskAssignMessage without StreamStates": { + message: TaskAssignMessage{ + Task: &workflow.Task{ + ULID: taskULID, + TenantID: "test-tenant", + Fragment: &physical.Plan{}, + Sources: map[physical.Node][]*workflow.Stream{}, + Sinks: map[physical.Node][]*workflow.Stream{}, + }, + StreamStates: map[ulid.ULID]workflow.StreamState{}, + }, + }, + "TaskAssignMessage with StreamStates": { + message: TaskAssignMessage{ + Task: &workflow.Task{ + ULID: taskULID, + TenantID: "test-tenant", + Fragment: &physical.Plan{}, + Sources: map[physical.Node][]*workflow.Stream{}, + Sinks: map[physical.Node][]*workflow.Stream{}, + }, + StreamStates: map[ulid.ULID]workflow.StreamState{ + streamULID: workflow.StreamStateOpen, + }, + }, + }, + "TaskCancelMessage": { + message: TaskCancelMessage{ID: taskULID}, + }, + "TaskFlagMessage not interruptible": { + message: TaskFlagMessage{ + ID: taskULID, + Interruptible: false, + }, + }, + "TaskFlagMessage interruptible": { + message: TaskFlagMessage{ + ID: taskULID, + Interruptible: true, + }, + }, + "TaskStatusMessage with Created state": { + message: TaskStatusMessage{ + ID: taskULID, + Status: workflow.TaskStatus{ + State: workflow.TaskStateCreated, + }, + }, + }, + "TaskStatusMessage with Running state": { + message: TaskStatusMessage{ + ID: taskULID, + Status: 
workflow.TaskStatus{ + State: workflow.TaskStateRunning, + }, + }, + }, + "TaskStatusMessage with Completed state": { + message: TaskStatusMessage{ + ID: taskULID, + Status: workflow.TaskStatus{ + State: workflow.TaskStateCompleted, + }, + }, + }, + "TaskStatusMessage with Failed state and error": { + message: TaskStatusMessage{ + ID: taskULID, + Status: workflow.TaskStatus{ + State: workflow.TaskStateFailed, + Error: errors.New("task failed"), + }, + }, + }, + "StreamBindMessage": { + message: StreamBindMessage{ + StreamID: streamULID, + Receiver: addr, + }, + }, + "StreamStatusMessage with Idle state": { + message: StreamStatusMessage{ + StreamID: streamULID, + State: workflow.StreamStateIdle, + }, + }, + "StreamStatusMessage with Open state": { + message: StreamStatusMessage{ + StreamID: streamULID, + State: workflow.StreamStateOpen, + }, + }, + "StreamStatusMessage with Blocked state": { + message: StreamStatusMessage{ + StreamID: streamULID, + State: workflow.StreamStateBlocked, + }, + }, + "StreamStatusMessage with Closed state": { + message: StreamStatusMessage{ + StreamID: streamULID, + State: workflow.StreamStateClosed, + }, + }, + } + + codec := &protobufCodec{memory.DefaultAllocator} + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + frame := MessageFrame{ + ID: 100, + Message: tt.message, + } + + pbFrame, err := codec.frameToPbFrame(frame) + require.NoError(t, err) + + actualFrame, err := codec.frameFromPbFrame(pbFrame) + require.NoError(t, err) + + assert.Equal(t, frame, actualFrame) + }) + } +} + +func TestProtobufCodec_StreamDataMessage(t *testing.T) { + streamULID := ulid.Make() + codec := &protobufCodec{memory.DefaultAllocator} + + originalRecord := createTestArrowRecord() + + message := StreamDataMessage{ + StreamID: streamULID, + Data: originalRecord, + } + + frame := MessageFrame{ + ID: 100, + Message: message, + } + + pbFrame, err := codec.frameToPbFrame(frame) + require.NoError(t, err) + + actualFrame, err := codec.frameFromPbFrame(pbFrame) + require.NoError(t, err) + + actualMessage := actualFrame.(MessageFrame).Message.(StreamDataMessage) + + assert.Equal(t, frame.ID, actualFrame.(MessageFrame).ID) + assert.Equal(t, streamULID, actualMessage.StreamID) + + assert.NotNil(t, actualMessage.Data) + assert.True(t, originalRecord.Schema().Equal(actualMessage.Data.Schema())) + assert.Equal(t, originalRecord.NumRows(), actualMessage.Data.NumRows()) + assert.Equal(t, originalRecord.NumCols(), actualMessage.Data.NumCols()) +} + +func TestProtobufCodec_TaskStates(t *testing.T) { + taskULID := ulid.Make() + + states := []workflow.TaskState{ + workflow.TaskStateCreated, + workflow.TaskStatePending, + workflow.TaskStateRunning, + workflow.TaskStateCompleted, + workflow.TaskStateCancelled, + workflow.TaskStateFailed, + } + + codec := &protobufCodec{memory.DefaultAllocator} + + for _, state := range states { + t.Run(state.String(), func(t *testing.T) { + message := TaskStatusMessage{ + ID: taskULID, + Status: workflow.TaskStatus{ + State: state, + }, + } + + frame := MessageFrame{ + ID: 1, + Message: message, + } + + pbFrame, err := codec.frameToPbFrame(frame) + require.NoError(t, err) + + actualFrame, err := codec.frameFromPbFrame(pbFrame) + require.NoError(t, err) + + actualMessage := actualFrame.(MessageFrame).Message.(TaskStatusMessage) + assert.Equal(t, state, actualMessage.Status.State) + }) + } +} + +func TestProtobufCodec_StreamStates(t *testing.T) { + streamULID := ulid.Make() + + states := []workflow.StreamState{ + workflow.StreamStateIdle, + 
workflow.StreamStateOpen, + workflow.StreamStateBlocked, + workflow.StreamStateClosed, + } + + codec := &protobufCodec{memory.DefaultAllocator} + + for _, state := range states { + t.Run(state.String(), func(t *testing.T) { + message := StreamStatusMessage{ + StreamID: streamULID, + State: state, + } + + frame := MessageFrame{ + ID: 1, + Message: message, + } + + pbFrame, err := codec.frameToPbFrame(frame) + require.NoError(t, err) + + actualFrame, err := codec.frameFromPbFrame(pbFrame) + require.NoError(t, err) + + actualMessage := actualFrame.(MessageFrame).Message.(StreamStatusMessage) + assert.Equal(t, state, actualMessage.State) + }) + } +} + +func TestProtobufCodec_ErrorCases(t *testing.T) { + codec := &protobufCodec{memory.DefaultAllocator} + + t.Run("nil frame to protobuf", func(t *testing.T) { + _, err := codec.frameToPbFrame(nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "nil frame") + }) + + t.Run("nil frame from protobuf", func(t *testing.T) { + _, err := codec.frameFromPbFrame(nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "nil frame") + }) + + t.Run("nil message to protobuf", func(t *testing.T) { + _, err := codec.messageToPbMessage(nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "nil message") + }) + + t.Run("nil task to protobuf", func(t *testing.T) { + _, err := codec.taskToPbTask(nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "nil task") + }) + + t.Run("nil arrow record serialization", func(t *testing.T) { + _, err := codec.serializeArrowRecord(nil) + assert.Error(t, err) + assert.Contains(t, err.Error(), "nil arrow record") + }) + + t.Run("empty arrow data deserialization", func(t *testing.T) { + _, err := codec.deserializeArrowRecord([]byte{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "empty arrow data") + }) +} + +func TestProtobufCodec_ArrowRecordSerialization(t *testing.T) { + codec := &protobufCodec{memory.DefaultAllocator} + + tests := map[string]struct { + createRecord func() arrow.Record + }{ + "simple int64 record": { + createRecord: createTestArrowRecord, + }, + "empty record": { + createRecord: func() arrow.Record { + schema := arrow.NewSchema([]arrow.Field{ + {Name: "id", Type: arrow.PrimitiveTypes.Int64, Nullable: false}, + }, nil) + + builder := array.NewInt64Builder(memory.DefaultAllocator) + data := builder.NewArray() + + return array.NewRecord(schema, []arrow.Array{data}, 0) + }, + }, + "multiple columns": { + createRecord: func() arrow.Record { + schema := arrow.NewSchema([]arrow.Field{ + {Name: "id", Type: arrow.PrimitiveTypes.Int64, Nullable: false}, + {Name: "value", Type: arrow.PrimitiveTypes.Float64, Nullable: false}, + }, nil) + + idBuilder := array.NewInt64Builder(memory.DefaultAllocator) + idBuilder.Append(1) + idBuilder.Append(2) + + valBuilder := array.NewFloat64Builder(memory.DefaultAllocator) + valBuilder.Append(1.5) + valBuilder.Append(2.5) + + idData := idBuilder.NewArray() + + valData := valBuilder.NewArray() + + return array.NewRecord(schema, []arrow.Array{idData, valData}, 2) + }, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + original := tt.createRecord() + + data, err := codec.serializeArrowRecord(original) + require.NoError(t, err) + require.NotEmpty(t, data) + + deserialized, err := codec.deserializeArrowRecord(data) + require.NoError(t, err) + require.NotNil(t, deserialized) + + assert.True(t, original.Schema().Equal(deserialized.Schema())) + assert.Equal(t, original.NumRows(), deserialized.NumRows()) + assert.Equal(t, 
original.NumCols(), deserialized.NumCols()) + }) + } +} + +func createTestArrowRecord() arrow.Record { + schema := arrow.NewSchema([]arrow.Field{ + {Name: "id", Type: arrow.PrimitiveTypes.Int64, Nullable: false}, + }, nil) + + builder := array.NewInt64Builder(memory.DefaultAllocator) + + builder.Append(1) + builder.Append(2) + builder.Append(3) + + data := builder.NewArray() + + return array.NewRecord(schema, []arrow.Array{data}, 3) +} diff --git a/pkg/engine/internal/scheduler/wire/wire_http2.go b/pkg/engine/internal/scheduler/wire/wire_http2.go new file mode 100644 index 0000000000..41c42a97e3 --- /dev/null +++ b/pkg/engine/internal/scheduler/wire/wire_http2.go @@ -0,0 +1,368 @@ +package wire + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/http/httptrace" + "sync" + + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "golang.org/x/net/http2" +) + +// HTTP2Listener implements Listener for HTTP/2-based connections. +type HTTP2Listener struct { + logger log.Logger + addr net.Addr + + connCh chan *incomingHTTP2Conn + closeOnce sync.Once + closed chan struct{} + codec *protobufCodec +} + +type incomingHTTP2Conn struct { + conn *http2Conn + w http.ResponseWriter +} + +var ( + _ Listener = (*HTTP2Listener)(nil) + _ http.Handler = (*HTTP2Listener)(nil) +) + +type http2ListenerOpts struct { + // MaxPendingConns defines the maximum number of pending connections (which are not Accepted yet). + MaxPendingConns uint + + // Logger is used for logging. + Logger log.Logger +} + +type HTTP2ListenerOptFunc func(*http2ListenerOpts) + +func WithHTTP2ListenerMaxPendingConns(maxPendingConns uint) HTTP2ListenerOptFunc { + return func(o *http2ListenerOpts) { + o.MaxPendingConns = maxPendingConns + } +} + +func WithHTTP2ListenerLogger(logger log.Logger) HTTP2ListenerOptFunc { + return func(o *http2ListenerOpts) { + o.Logger = logger + } +} + +// NewHTTP2Listener creates a new HTTP/2 listener on the specified address. +func NewHTTP2Listener( + addr net.Addr, + allocator memory.Allocator, + optFuncs ...HTTP2ListenerOptFunc, +) *HTTP2Listener { + opts := http2ListenerOpts{ + MaxPendingConns: 10, + Logger: log.NewNopLogger(), + } + for _, optFunc := range optFuncs { + optFunc(&opts) + } + + l := &HTTP2Listener{ + addr: addr, + logger: opts.Logger, + + connCh: make(chan *incomingHTTP2Conn, opts.MaxPendingConns), + closed: make(chan struct{}), + codec: &protobufCodec{allocator: allocator}, + } + + return l +} + +// ServeHTTP handles incoming connections. 
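+// Each POST request is treated as one long-lived bidirectional stream: the
+// request body carries client-to-server frames and the response body carries
+// server-to-client frames. The handler requires HTTP/2 (so the response can be
+// streamed while the request body is still open) and does not return until the
+// connection is closed or the request context is cancelled.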
+func (l *HTTP2Listener) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "streaming not supported", http.StatusInternalServerError) + return + } + + if r.ProtoMajor != 2 { + http.Error(w, "codec not supported", http.StatusHTTPVersionNotSupported) + return + } + + remoteAddr, err := addrPortStrToAddr(r.RemoteAddr) + if err != nil { + http.Error(w, "invalid remote addr", http.StatusBadRequest) + return + } + conn := newHTTP2Conn(l.Addr(), remoteAddr, r.Body, w, flusher, l.codec) + incomingConn := &incomingHTTP2Conn{conn: conn, w: w} + + // Try to enqueue the connection without blocking indefinitely + select { + case <-l.closed: + err := conn.Close() + if err != nil { + level.Error(l.logger).Log("msg", "failed to close connection on listener close", "err", err.Error()) + http.Error(w, "failed to close connection", http.StatusInternalServerError) + return + } + + http.Error(w, "listener closed", http.StatusServiceUnavailable) + case l.connCh <- incomingConn: + // read loop exits if a connection is closed or the context is canceled + conn.readLoop(r.Context()) + } +} + +// Accept waits for and returns the next connection to the listener. +func (l *HTTP2Listener) Accept(ctx context.Context) (Conn, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-l.closed: + return nil, net.ErrClosed + case incomingConn := <-l.connCh: + incomingConn.w.WriteHeader(http.StatusOK) + incomingConn.conn.flusher.Flush() + return incomingConn.conn, nil + } +} + +// Close closes the listener. +func (l *HTTP2Listener) Close(_ context.Context) error { + l.closeOnce.Do(func() { + close(l.closed) + }) + return nil +} + +// Addr returns the listener's network address. +func (l *HTTP2Listener) Addr() net.Addr { + return l.addr +} + +// http2Conn implements Conn for HTTP/2-based connections. +type http2Conn struct { + localAddr net.Addr + remoteAddr net.Addr + + codec *protobufCodec + reader io.ReadCloser + writer io.Writer + flusher http.Flusher + cleanup func() // Optional cleanup function + + writeMu sync.Mutex + closeOnce sync.Once + closed chan struct{} + + incomingCh chan incomingFrame +} + +type incomingFrame struct { + frame Frame + err error +} + +var _ Conn = (*http2Conn)(nil) + +// newHTTP2Conn creates a new HTTP/2 connection. +func newHTTP2Conn( + localAddr net.Addr, + remoteAddr net.Addr, + reader io.ReadCloser, + writer io.Writer, + flusher http.Flusher, + codec *protobufCodec, +) *http2Conn { + c := &http2Conn{ + localAddr: localAddr, + remoteAddr: remoteAddr, + codec: codec, + reader: reader, + writer: writer, + flusher: flusher, + closed: make(chan struct{}), + incomingCh: make(chan incomingFrame), + } + return c +} + +func (c *http2Conn) readLoop(ctx context.Context) { + for { + frame, err := c.codec.DecodeFrom(c.reader) + incoming := incomingFrame{frame: frame, err: err} + select { + case <-ctx.Done(): + return + case <-c.closed: + return + case c.incomingCh <- incoming: + } + } +} + +// Send sends a frame over the connection. 
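+// Send is safe for concurrent use: writes are serialized with a mutex, and on
+// the server side the response is flushed after each frame so the peer does not
+// have to wait for buffered output.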
+func (c *http2Conn) Send(ctx context.Context, frame Frame) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.closed: + return ErrConnClosed + default: + } + + c.writeMu.Lock() + defer c.writeMu.Unlock() + + if err := c.codec.EncodeTo(c.writer, frame); err != nil { + return fmt.Errorf("write frame: %w", err) + } + + // Flush after each frame to ensure immediate delivery + if c.flusher != nil { + c.flusher.Flush() + } + + return nil +} + +// Recv receives a frame from the connection. +func (c *http2Conn) Recv(ctx context.Context) (Frame, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.closed: + return nil, ErrConnClosed + case f := <-c.incomingCh: + return f.frame, f.err + } +} + +// Close closes the connection. +func (c *http2Conn) Close() error { + var err error + c.closeOnce.Do(func() { + close(c.closed) + err = c.reader.Close() + if c.cleanup != nil { + c.cleanup() + } + }) + return err +} + +// LocalAddr returns the local network address. +func (c *http2Conn) LocalAddr() net.Addr { + return c.localAddr +} + +// RemoteAddr returns the remote network address. +func (c *http2Conn) RemoteAddr() net.Addr { + return c.remoteAddr +} + +// HTTP2Dialer holds an http client to pool the connections. +type HTTP2Dialer struct { + client *http.Client + codec *protobufCodec + path string +} + +// NewHTTP2Dialer creates a new HTTP/2 dialer that can open HTTP/2 connections to the specified address. +func NewHTTP2Dialer( + allocator memory.Allocator, + path string, +) *HTTP2Dialer { + return &HTTP2Dialer{ + client: &http.Client{ + Transport: &http2.Transport{ + // No TLS + AllowHTTP: true, + DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, addr) + }, + }, + // Context is used for cancellation, no timeout + Timeout: 0, + }, + codec: &protobufCodec{allocator}, + path: path, + } +} + +// Dial establishes an HTTP/2 connection to the specified address. 
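+// The request body is an io.Pipe that stays open for the lifetime of the
+// connection: frames written via Send flow through the pipe to the server,
+// while frames from the server are read from the response body.
+//
+// Illustrative usage (given a context ctx; address and path are placeholders,
+// error handling elided):
+//
+//	dialer := NewHTTP2Dialer(memory.DefaultAllocator, "stream")
+//	conn, _ := dialer.Dial(ctx, "127.0.0.1:9095")
+//	defer conn.Close()
+//	_ = conn.Send(ctx, AckFrame{ID: 1})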
+func (d *HTTP2Dialer) Dial(ctx context.Context, addrStr string) (Conn, error) { + addr, err := addrPortStrToAddr(addrStr) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + + var localAddr net.Addr + trace := &httptrace.ClientTrace{ + GotConn: func(info httptrace.GotConnInfo) { + localAddr = info.Conn.LocalAddr() + }, + } + ctx = httptrace.WithClientTrace(ctx, trace) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://%s/%s", addrStr, d.path), pr) + if err != nil { + return nil, fmt.Errorf("create request: %w", err) + } + + resp, err := d.client.Do(req) + if err != nil { + _ = pw.Close() + return nil, err + } + + if resp.StatusCode != http.StatusOK { + // Drain and close response body to allow connection reuse + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + _ = pw.Close() + return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + // Create connection + conn := newHTTP2Conn( + localAddr, + addr, + resp.Body, + pw, + nil, // client doesn't need flusher, it's handled by the pipe writer + d.codec, + ) + + readLoopWg := sync.WaitGroup{} + readLoopWg.Add(1) + go func() { + defer readLoopWg.Done() + conn.readLoop(ctx) + }() + + // when the connection is closed, close the pipe writer and wait until the reader loop exits + conn.cleanup = func() { + _ = pw.Close() + readLoopWg.Wait() + } + + return conn, nil +} diff --git a/pkg/engine/internal/scheduler/wire/wire_http2_test.go b/pkg/engine/internal/scheduler/wire/wire_http2_test.go new file mode 100644 index 0000000000..9de3191fb4 --- /dev/null +++ b/pkg/engine/internal/scheduler/wire/wire_http2_test.go @@ -0,0 +1,593 @@ +package wire_test + +import ( + "context" + "errors" + "net" + "net/http" + "sync" + "testing" + "time" + + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/go-kit/log" + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/require" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + + "github.com/grafana/loki/v3/pkg/engine/internal/planner/physical" + "github.com/grafana/loki/v3/pkg/engine/internal/scheduler/wire" + "github.com/grafana/loki/v3/pkg/engine/internal/types" + "github.com/grafana/loki/v3/pkg/engine/internal/util/dag" + "github.com/grafana/loki/v3/pkg/engine/internal/workflow" +) + +// TestHTTP2BasicConnectivity tests basic connection establishment and communication. 
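+// It dials the listener, exchanges an AckFrame in each direction over a single
+// HTTP/2 stream, and checks that the server connection reports the listener's
+// address as its local address.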
+func TestHTTP2BasicConnectivity(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Start HTTP/2 listener + listener, shutdown := prepareHTTP2Listener(t) + defer shutdown() + + addr := listener.Addr().String() + t.Logf("Server listening on %s", addr) + + // Accept connection in goroutine + var serverConn wire.Conn + acceptErr := make(chan error, 1) + go func() { + var err error + serverConn, err = listener.Accept(ctx) + acceptErr <- err + }() + + // Dial from client + clientConn, err := wire.NewHTTP2Dialer(memory.DefaultAllocator, "stream").Dial(ctx, addr) + require.NoError(t, err) + defer clientConn.Close() + + // Wait for server to accept + require.NoError(t, <-acceptErr) + require.NotNil(t, serverConn) + defer serverConn.Close() + + // Verify addresses + require.Equal(t, listener.Addr(), serverConn.LocalAddr()) + t.Logf("Client local: %s, remote: %s", clientConn.LocalAddr(), clientConn.RemoteAddr()) + t.Logf("Server local: %s, remote: %s", serverConn.LocalAddr(), serverConn.RemoteAddr()) + + // Send a frame from client to server + testFrame := wire.AckFrame{ID: 42} + err = clientConn.Send(ctx, testFrame) + require.NoError(t, err) + + // Receive on server + receivedFrame, err := serverConn.Recv(ctx) + require.NoError(t, err) + require.Equal(t, testFrame, receivedFrame) + + // Send a frame back from server to client + responseFrame := wire.AckFrame{ID: 43} + err = serverConn.Send(ctx, responseFrame) + require.NoError(t, err) + + // Receive on client + receivedResponse, err := clientConn.Recv(ctx) + require.NoError(t, err) + require.Equal(t, responseFrame, receivedResponse) +} + +// TestHTTP2WithPeers demonstrates using wire.Peer for bidirectional communication. +func TestHTTP2WithPeers(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Start HTTP/2 listener + listener, shutdown := prepareHTTP2Listener(t) + defer shutdown() + + addr := listener.Addr().String() + t.Logf("Server listening on %s", addr) + + // Track received messages + var ( + serverReceivedMu sync.Mutex + serverReceived []wire.Message + clientReceivedMu sync.Mutex + clientReceived []wire.Message + ) + + // Server handler + serverHandler := func(ctx context.Context, peer *wire.Peer, message wire.Message) error { + serverReceivedMu.Lock() + serverReceived = append(serverReceived, message) + serverReceivedMu.Unlock() + + t.Logf("Server received: %T %+v", message, message) + + // Echo back a WorkerReadyMessage + if _, ok := message.(wire.TaskStatusMessage); ok { + return peer.SendMessageAsync(ctx, wire.WorkerReadyMessage{}) + } + return nil + } + + // Client handler + clientHandler := func(_ context.Context, _ *wire.Peer, message wire.Message) error { + clientReceivedMu.Lock() + clientReceived = append(clientReceived, message) + clientReceivedMu.Unlock() + + t.Logf("Client received: %T %+v", message, message) + return nil + } + + // Accept server connection in goroutine + var serverConn wire.Conn + acceptErr := make(chan error, 1) + go func() { + var err error + serverConn, err = listener.Accept(ctx) + acceptErr <- err + }() + + // Dial from client + clientConn, err := wire.NewHTTP2Dialer(memory.DefaultAllocator, "stream").Dial(ctx, addr) + require.NoError(t, err) + defer clientConn.Close() + + // Wait for server to accept + require.NoError(t, <-acceptErr) + require.NotNil(t, serverConn) + defer serverConn.Close() + + // Create server peer + serverPeer := &wire.Peer{ + Logger: log.NewNopLogger(), + 
Conn: serverConn, + Handler: serverHandler, + Buffer: 10, + } + + // Create client peer + clientPeer := &wire.Peer{ + Logger: log.NewNopLogger(), + Conn: clientConn, + Handler: clientHandler, + Buffer: 10, + } + + // Start both peers + peerCtx, peerCancel := context.WithCancel(ctx) + defer peerCancel() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + _ = serverPeer.Serve(peerCtx) + }() + + go func() { + defer wg.Done() + _ = clientPeer.Serve(peerCtx) + }() + + // Give peers time to start + time.Sleep(100 * time.Millisecond) + + // Send message from client to server (synchronous) + err = clientPeer.SendMessage(ctx, wire.TaskStatusMessage{}) + require.NoError(t, err) + + // Wait for message to be processed + require.Eventually(t, func() bool { + serverReceivedMu.Lock() + defer serverReceivedMu.Unlock() + return len(serverReceived) > 0 + }, 2*time.Second, 50*time.Millisecond) + + // Verify server received the message + serverReceivedMu.Lock() + require.Len(t, serverReceived, 1) + require.IsType(t, wire.TaskStatusMessage{}, serverReceived[0]) + serverReceivedMu.Unlock() + + // Wait for echo response + require.Eventually(t, func() bool { + clientReceivedMu.Lock() + defer clientReceivedMu.Unlock() + return len(clientReceived) > 0 + }, 2*time.Second, 50*time.Millisecond) + + // Verify client received the echo + clientReceivedMu.Lock() + require.Len(t, clientReceived, 1) + require.IsType(t, wire.WorkerReadyMessage{}, clientReceived[0]) + clientReceivedMu.Unlock() + + // Clean shutdown + peerCancel() + wg.Wait() +} + +// TestHTTP2MultipleClients demonstrates multiple clients connecting to a single server. +func TestHTTP2MultipleClients(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + // Start HTTP/2 listener + listener, shutdown := prepareHTTP2Listener(t) + defer shutdown() + + addr := listener.Addr().String() + t.Logf("Server listening on %s", addr) + + const numClients = 3 + + // Track messages received by server from each client + var ( + serverReceivedMu sync.Mutex + serverReceived = make(map[string][]wire.Message) + ) + + // Server handler + serverHandler := func(ctx context.Context, peer *wire.Peer, message wire.Message) error { + remoteAddr := peer.RemoteAddr().String() + serverReceivedMu.Lock() + serverReceived[remoteAddr] = append(serverReceived[remoteAddr], message) + count := len(serverReceived[remoteAddr]) + serverReceivedMu.Unlock() + + t.Logf("Server received from %s: %T (total: %d)", remoteAddr, message, count) + + // Send acknowledgment back + return peer.SendMessageAsync(ctx, wire.WorkerReadyMessage{}) + } + + // Accept connections and create server peers + var serverPeers []*wire.Peer + var serverWg sync.WaitGroup + + peerCtx, peerCancel := context.WithCancel(ctx) + defer peerCancel() + + go func() { + for i := 0; i < numClients; i++ { + conn, err := listener.Accept(peerCtx) + if err != nil { + if peerCtx.Err() != nil { + return + } + t.Errorf("Accept failed: %v", err) + return + } + + peer := &wire.Peer{ + Logger: log.NewNopLogger(), + Conn: conn, + Handler: serverHandler, + Buffer: 10, + } + serverPeers = append(serverPeers, peer) + + serverWg.Add(1) + go func(p *wire.Peer) { + defer serverWg.Done() + _ = p.Serve(peerCtx) + }(peer) + + t.Logf("Server accepted connection %d from %s", i+1, conn.RemoteAddr()) + } + }() + + // Create multiple clients + var clientWg sync.WaitGroup + clientReceivedCounts := make([]int, numClients) + var clientReceivedMu sync.Mutex + + for i := 0; i < numClients; 
i++ { + clientIdx := i + clientWg.Add(1) + + go func() { + defer clientWg.Done() + + // Connect + conn, err := wire.NewHTTP2Dialer(memory.DefaultAllocator, "stream").Dial(ctx, addr) + if err != nil { + t.Errorf("Client %d dial failed: %v", clientIdx, err) + return + } + defer conn.Close() + + // Handler to count received messages + handler := func(_ context.Context, _ *wire.Peer, message wire.Message) error { + clientReceivedMu.Lock() + clientReceivedCounts[clientIdx]++ + count := clientReceivedCounts[clientIdx] + clientReceivedMu.Unlock() + + t.Logf("Client %d received: %T (total: %d)", clientIdx, message, count) + return nil + } + + peer := &wire.Peer{ + Logger: log.NewNopLogger(), + Conn: conn, + Handler: handler, + Buffer: 10, + } + + // Start peer + peerDoneCtx, peerDone := context.WithCancel(peerCtx) + defer peerDone() + + var peerWg sync.WaitGroup + peerWg.Add(1) + go func() { + defer peerWg.Done() + _ = peer.Serve(peerDoneCtx) + }() + + // Give peer time to start + time.Sleep(100 * time.Millisecond) + + // Send messages + for j := 0; j < 3; j++ { + msg := wire.TaskStatusMessage{} + err := peer.SendMessage(ctx, msg) + if err != nil { + t.Errorf("Client %d send failed: %v", clientIdx, err) + return + } + t.Logf("Client %d sent message %d", clientIdx, j+1) + } + + // Wait a bit for responses + time.Sleep(500 * time.Millisecond) + + peerDone() + peerWg.Wait() + }() + } + + // Wait for all clients to finish + clientWg.Wait() + + // Verify messages were received + serverReceivedMu.Lock() + totalReceived := 0 + for addr, messages := range serverReceived { + t.Logf("Server received %d messages from %s", len(messages), addr) + totalReceived += len(messages) + } + serverReceivedMu.Unlock() + + require.Equal(t, numClients*3, totalReceived, "server should receive all messages from all clients") + + // Verify clients received responses + clientReceivedMu.Lock() + for i, count := range clientReceivedCounts { + t.Logf("Client %d received %d messages", i, count) + require.Equal(t, 3, count, "each client should receive 3 acknowledgments") + } + clientReceivedMu.Unlock() + + // Clean shutdown + peerCancel() + serverWg.Wait() +} + +// TestHTTP2ErrorHandling tests error scenarios. 
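+// It verifies that an error returned by the server-side handler is propagated
+// back to the client's synchronous SendMessage call.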
+func TestHTTP2ErrorHandling(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Start HTTP/2 listener + listener, shutdown := prepareHTTP2Listener(t) + defer shutdown() + + addr := listener.Addr().String() + + // Handler that returns an error + errorHandler := func(_ context.Context, _ *wire.Peer, _ wire.Message) error { + return errors.New("simulated error") + } + + // Accept connection + var serverConn wire.Conn + acceptErr := make(chan error, 1) + go func() { + var err error + serverConn, err = listener.Accept(ctx) + acceptErr <- err + }() + + // Dial from client + clientConn, err := wire.NewHTTP2Dialer(memory.DefaultAllocator, "stream").Dial(ctx, addr) + require.NoError(t, err) + defer clientConn.Close() + + // Wait for server to accept + require.NoError(t, <-acceptErr) + require.NotNil(t, serverConn) + defer serverConn.Close() + + // Create peers + serverPeer := &wire.Peer{ + Logger: log.NewNopLogger(), + Conn: serverConn, + Handler: errorHandler, + Buffer: 10, + } + + clientPeer := &wire.Peer{ + Logger: log.NewNopLogger(), + Conn: clientConn, + Handler: func(_ context.Context, _ *wire.Peer, _ wire.Message) error { return nil }, + Buffer: 10, + } + + // Start peers + peerCtx, peerCancel := context.WithCancel(ctx) + defer peerCancel() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + _ = serverPeer.Serve(peerCtx) + }() + + go func() { + defer wg.Done() + _ = clientPeer.Serve(peerCtx) + }() + + // Give peers time to start + time.Sleep(100 * time.Millisecond) + + // Send message that will trigger error + err = clientPeer.SendMessage(ctx, wire.WorkerReadyMessage{}) + require.Error(t, err) + require.Contains(t, err.Error(), "simulated error") + + // Clean shutdown + peerCancel() + wg.Wait() +} + +// TestHTTP2MessageFrameSerialization tests that MessageFrames with different message types serialize correctly. 
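+// Each message type is round-tripped over a real HTTP/2 connection rather than
+// through the codec alone, including a TaskAssignMessage carrying a physical plan.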
+func TestHTTP2MessageFrameSerialization(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Start HTTP/2 listener + listener, shutdown := prepareHTTP2Listener(t) + defer shutdown() + + addr := listener.Addr().String() + + // Accept connection + var serverConn wire.Conn + acceptErr := make(chan error, 1) + go func() { + var err error + serverConn, err = listener.Accept(ctx) + acceptErr <- err + }() + + // Dial from client + clientConn, err := wire.NewHTTP2Dialer(memory.DefaultAllocator, "stream").Dial(ctx, addr) + require.NoError(t, err) + defer clientConn.Close() + + // Wait for server to accept + require.NoError(t, <-acceptErr) + require.NotNil(t, serverConn) + defer serverConn.Close() + + expectedPlan := &physical.Plan{} + parallelize := expectedPlan.Graph().Add(&physical.Parallelize{NodeID: ulid.Make()}) + compat := expectedPlan.Graph().Add(&physical.ColumnCompat{NodeID: ulid.Make(), Source: types.ColumnTypeMetadata, Destination: types.ColumnTypeMetadata, Collision: types.ColumnTypeLabel}) + scanSet := expectedPlan.Graph().Add(&physical.ScanSet{ + NodeID: ulid.Make(), + Targets: []*physical.ScanTarget{ + {Type: physical.ScanTypeDataObject, DataObject: &physical.DataObjScan{NodeID: ulid.Make(), Location: "obj1", Section: 3, StreamIDs: []int64{1, 2}}}, + {Type: physical.ScanTypeDataObject, DataObject: &physical.DataObjScan{NodeID: ulid.Make(), Location: "obj2", Section: 1, StreamIDs: []int64{3, 4}}}, + {Type: physical.ScanTypeDataObject, DataObject: &physical.DataObjScan{NodeID: ulid.Make(), Location: "obj3", Section: 2, StreamIDs: []int64{5, 1}}}, + {Type: physical.ScanTypeDataObject, DataObject: &physical.DataObjScan{NodeID: ulid.Make(), Location: "obj3", Section: 3, StreamIDs: []int64{5, 1}}}, + }, + }) + + _ = expectedPlan.Graph().AddEdge(dag.Edge[physical.Node]{Parent: parallelize, Child: compat}) + _ = expectedPlan.Graph().AddEdge(dag.Edge[physical.Node]{Parent: compat, Child: scanSet}) + + testCases := []struct { + name string + message wire.Message + }{ + {"WorkerReadyMessage", wire.WorkerReadyMessage{}}, + {"TaskCancelMessage", wire.TaskCancelMessage{}}, + {"TaskFlagMessage", wire.TaskFlagMessage{Interruptible: true}}, + {"TaskStatusMessage", wire.TaskStatusMessage{}}, + {"TaskAssignMessage", wire.TaskAssignMessage{ + Task: &workflow.Task{ + ULID: ulid.Make(), + TenantID: "fake", + Fragment: expectedPlan, + Sources: map[physical.Node][]*workflow.Stream{ + compat: { + {ULID: ulid.Make(), TenantID: "fake"}, + }, + }, + Sinks: map[physical.Node][]*workflow.Stream{ + scanSet: { + {ULID: ulid.Make(), TenantID: "fake"}, + }, + }, + }, + StreamStates: nil, + }}, + {"StreamStatusMessage", wire.StreamStatusMessage{}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Send message frame + frame := wire.MessageFrame{ID: 123, Message: tc.message} + err := clientConn.Send(ctx, frame) + require.NoError(t, err) + + // Receive and verify + received, err := serverConn.Recv(ctx) + require.NoError(t, err) + + receivedFrame, ok := received.(wire.MessageFrame) + require.True(t, ok, "expected MessageFrame") + require.Equal(t, uint64(123), receivedFrame.ID) + require.IsType(t, tc.message, receivedFrame.Message) + }) + } +} + +func prepareHTTP2Listener(t *testing.T) (*wire.HTTP2Listener, func()) { + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + listener := wire.NewHTTP2Listener( + l.Addr(), + memory.DefaultAllocator, + wire.WithHTTP2ListenerMaxPendingConns(1), + 
wire.WithHTTP2ListenerLogger(log.NewNopLogger()), + ) + + mux := http.NewServeMux() + mux.HandleFunc("/stream", listener.ServeHTTP) + + server := &http.Server{ + Handler: h2c.NewHandler(mux, &http2.Server{}), + } + wgServe := sync.WaitGroup{} + wgServe.Add(1) + + go func() { + defer wgServe.Done() + + err = server.Serve(l) + require.Error(t, err, http.ErrServerClosed) + }() + + return listener, func() { + ctx := context.Background() + require.NoError(t, listener.Close(ctx)) + require.NoError(t, server.Shutdown(ctx)) + wgServe.Wait() + } +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/arrio/arrio.go b/vendor/github.com/apache/arrow-go/v18/arrow/arrio/arrio.go new file mode 100644 index 0000000000..22fabc220c --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/arrio/arrio.go @@ -0,0 +1,92 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package arrio exposes functions to manipulate records, exposing and using +// interfaces not unlike the ones defined in the stdlib io package. +package arrio + +import ( + "errors" + "io" + + "github.com/apache/arrow-go/v18/arrow" +) + +// Reader is the interface that wraps the Read method. +type Reader interface { + // Read reads the current record from the underlying stream and an error, if any. + // When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF). + Read() (arrow.Record, error) +} + +// ReaderAt is the interface that wraps the ReadAt method. +type ReaderAt interface { + // ReadAt reads the i-th record from the underlying stream and an error, if any. + ReadAt(i int64) (arrow.Record, error) +} + +// Writer is the interface that wraps the Write method. +type Writer interface { + Write(rec arrow.Record) error +} + +// Copy copies all the records available from src to dst. +// Copy returns the number of records copied and the first error +// encountered while copying, if any. +// +// A successful Copy returns err == nil, not err == EOF. Because Copy is +// defined to read from src until EOF, it does not treat an EOF from Read as an +// error to be reported. +func Copy(dst Writer, src Reader) (n int64, err error) { + for { + rec, err := src.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return n, nil + } + return n, err + } + err = dst.Write(rec) + if err != nil { + return n, err + } + n++ + } +} + +// CopyN copies n records (or until an error) from src to dst. It returns the +// number of records copied and the earliest error encountered while copying. On +// return, written == n if and only if err == nil. 
+func CopyN(dst Writer, src Reader, n int64) (written int64, err error) { + for ; written < n; written++ { + rec, err := src.Read() + if err != nil { + if errors.Is(err, io.EOF) && written == n { + return written, nil + } + return written, err + } + err = dst.Write(rec) + if err != nil { + return written, err + } + } + + if written != n && err == nil { + err = io.EOF + } + return written, err +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/internal/dictutils/dict.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/dictutils/dict.go new file mode 100644 index 0000000000..184e29c025 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/dictutils/dict.go @@ -0,0 +1,411 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dictutils + +import ( + "errors" + "fmt" + "hash/maphash" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/memory" +) + +type Kind int8 + +const ( + KindNew Kind = iota + KindDelta + KindReplacement +) + +type FieldPos struct { + parent *FieldPos + index, depth int32 +} + +func NewFieldPos() FieldPos { return FieldPos{index: -1} } + +func (f *FieldPos) Child(index int32) FieldPos { + return FieldPos{parent: f, index: index, depth: f.depth + 1} +} + +func (f *FieldPos) Path() []int32 { + path := make([]int32, f.depth) + cur := f + for i := f.depth - 1; i >= 0; i-- { + path[i] = int32(cur.index) + cur = cur.parent + } + return path +} + +type Mapper struct { + pathToID map[uint64]int64 + hasher maphash.Hash +} + +func (d *Mapper) NumDicts() int { + unique := make(map[int64]bool) + for _, id := range d.pathToID { + unique[id] = true + } + return len(unique) +} + +func (d *Mapper) AddField(id int64, fieldPath []int32) error { + d.hasher.Write(arrow.Int32Traits.CastToBytes(fieldPath)) + defer d.hasher.Reset() + + sum := d.hasher.Sum64() + if _, ok := d.pathToID[sum]; ok { + return errors.New("field already mapped to id") + } + + d.pathToID[sum] = id + return nil +} + +func (d *Mapper) GetFieldID(fieldPath []int32) (int64, error) { + d.hasher.Write(arrow.Int32Traits.CastToBytes(fieldPath)) + defer d.hasher.Reset() + + id, ok := d.pathToID[d.hasher.Sum64()] + if !ok { + return -1, errors.New("arrow/ipc: dictionary field not found") + } + return id, nil +} + +func (d *Mapper) NumFields() int { + return len(d.pathToID) +} + +func (d *Mapper) InsertPath(pos FieldPos) { + id := len(d.pathToID) + d.hasher.Write(arrow.Int32Traits.CastToBytes(pos.Path())) + + d.pathToID[d.hasher.Sum64()] = int64(id) + d.hasher.Reset() +} + +func (d *Mapper) ImportField(pos FieldPos, field arrow.Field) { + dt := field.Type + if dt.ID() == arrow.EXTENSION { + dt = dt.(arrow.ExtensionType).StorageType() + } + + if dt.ID() == 
arrow.DICTIONARY { + d.InsertPath(pos) + // import nested dicts + if nested, ok := dt.(*arrow.DictionaryType).ValueType.(arrow.NestedType); ok { + d.ImportFields(pos, nested.Fields()) + } + return + } + + if nested, ok := dt.(arrow.NestedType); ok { + d.ImportFields(pos, nested.Fields()) + } +} + +func (d *Mapper) ImportFields(pos FieldPos, fields []arrow.Field) { + for i := range fields { + d.ImportField(pos.Child(int32(i)), fields[i]) + } +} + +func (d *Mapper) ImportSchema(schema *arrow.Schema) { + d.pathToID = make(map[uint64]int64) + // This code path intentionally avoids calling ImportFields with + // schema.Fields to avoid allocations. + pos := NewFieldPos() + for i := 0; i < schema.NumFields(); i++ { + d.ImportField(pos.Child(int32(i)), schema.Field(i)) + } +} + +func hasUnresolvedNestedDict(data arrow.ArrayData) bool { + d := data.(*array.Data) + if d.DataType().ID() == arrow.DICTIONARY { + if d.Dictionary().(*array.Data) == nil { + return true + } + if hasUnresolvedNestedDict(d.Dictionary()) { + return true + } + } + for _, c := range d.Children() { + if hasUnresolvedNestedDict(c) { + return true + } + } + return false +} + +type dictpair struct { + ID int64 + Dict arrow.Array +} + +type dictCollector struct { + dictionaries []dictpair + mapper *Mapper +} + +func (d *dictCollector) visitChildren(pos FieldPos, typ arrow.DataType, arr arrow.Array) error { + for i, c := range arr.Data().Children() { + child := array.MakeFromData(c) + defer child.Release() + if err := d.visit(pos.Child(int32(i)), child); err != nil { + return err + } + } + return nil +} + +func (d *dictCollector) visit(pos FieldPos, arr arrow.Array) error { + dt := arr.DataType() + if dt.ID() == arrow.EXTENSION { + dt = dt.(arrow.ExtensionType).StorageType() + arr = arr.(array.ExtensionArray).Storage() + } + + if dt.ID() == arrow.DICTIONARY { + dictarr := arr.(*array.Dictionary) + dict := dictarr.Dictionary() + + // traverse the dictionary to first gather any nested dictionaries + // so they appear in the output before their respective parents + dictType := dt.(*arrow.DictionaryType) + d.visitChildren(pos, dictType.ValueType, dict) + + id, err := d.mapper.GetFieldID(pos.Path()) + if err != nil { + return err + } + dict.Retain() + d.dictionaries = append(d.dictionaries, dictpair{ID: id, Dict: dict}) + return nil + } + return d.visitChildren(pos, dt, arr) +} + +func (d *dictCollector) collect(batch arrow.Record) error { + var ( + pos = NewFieldPos() + schema = batch.Schema() + ) + d.dictionaries = make([]dictpair, 0, d.mapper.NumFields()) + for i := range schema.Fields() { + if err := d.visit(pos.Child(int32(i)), batch.Column(i)); err != nil { + return err + } + } + return nil +} + +type dictMap map[int64][]arrow.ArrayData +type dictTypeMap map[int64]arrow.DataType + +type Memo struct { + Mapper Mapper + dict2id map[arrow.ArrayData]int64 + + id2type dictTypeMap + id2dict dictMap // map of dictionary ID to dictionary array +} + +func NewMemo() Memo { + return Memo{ + dict2id: make(map[arrow.ArrayData]int64), + id2dict: make(dictMap), + id2type: make(dictTypeMap), + Mapper: Mapper{ + pathToID: make(map[uint64]int64), + }, + } +} + +func (memo *Memo) Len() int { return len(memo.id2dict) } + +func (memo *Memo) Clear() { + for id, v := range memo.id2dict { + delete(memo.id2dict, id) + for _, d := range v { + delete(memo.dict2id, d) + d.Release() + } + } +} + +func (memo *Memo) reify(id int64, mem memory.Allocator) (arrow.ArrayData, error) { + v, ok := memo.id2dict[id] + if !ok { + return nil, fmt.Errorf("arrow/ipc: no 
dictionaries found for id=%d", id) + } + + if len(v) == 1 { + return v[0], nil + } + + // there are deltas we need to concatenate them with the first dictionary + toCombine := make([]arrow.Array, 0, len(v)) + // NOTE: at this point the dictionary data may not be trusted. it needs to + // be validated as concatenation can crash on invalid or corrupted data. + for _, data := range v { + if hasUnresolvedNestedDict(data) { + return nil, fmt.Errorf("arrow/ipc: delta dict with unresolved nested dictionary not implemented") + } + arr := array.MakeFromData(data) + defer arr.Release() + + toCombine = append(toCombine, arr) + defer data.Release() + } + + combined, err := array.Concatenate(toCombine, mem) + if err != nil { + return nil, err + } + defer combined.Release() + combined.Data().Retain() + + memo.id2dict[id] = []arrow.ArrayData{combined.Data()} + return combined.Data(), nil +} + +func (memo *Memo) Dict(id int64, mem memory.Allocator) (arrow.ArrayData, error) { + return memo.reify(id, mem) +} + +func (memo *Memo) AddType(id int64, typ arrow.DataType) error { + if existing, dup := memo.id2type[id]; dup && !arrow.TypeEqual(existing, typ) { + return fmt.Errorf("arrow/ipc: conflicting dictionary types for id %d", id) + } + + memo.id2type[id] = typ + return nil +} + +func (memo *Memo) Type(id int64) (arrow.DataType, bool) { + t, ok := memo.id2type[id] + return t, ok +} + +// func (memo *dictMemo) ID(v arrow.Array) int64 { +// id, ok := memo.dict2id[v] +// if ok { +// return id +// } + +// v.Retain() +// id = int64(len(memo.dict2id)) +// memo.dict2id[v] = id +// memo.id2dict[id] = v +// return id +// } + +func (memo Memo) HasDict(v arrow.ArrayData) bool { + _, ok := memo.dict2id[v] + return ok +} + +func (memo Memo) HasID(id int64) bool { + _, ok := memo.id2dict[id] + return ok +} + +func (memo *Memo) Add(id int64, v arrow.ArrayData) { + if _, dup := memo.id2dict[id]; dup { + panic(fmt.Errorf("arrow/ipc: duplicate id=%d", id)) + } + v.Retain() + memo.id2dict[id] = []arrow.ArrayData{v} + memo.dict2id[v] = id +} + +func (memo *Memo) AddDelta(id int64, v arrow.ArrayData) { + d, ok := memo.id2dict[id] + if !ok { + panic(fmt.Errorf("arrow/ipc: adding delta to non-existing id=%d", id)) + } + v.Retain() + memo.id2dict[id] = append(d, v) +} + +// AddOrReplace puts the provided dictionary into the memo table. If it +// already exists, then the new data will replace it. Otherwise it is added +// to the memo table. 
+func (memo *Memo) AddOrReplace(id int64, v arrow.ArrayData) bool { + d, ok := memo.id2dict[id] + if ok { + // replace the dictionary and release any existing ones + for _, dict := range d { + dict.Release() + } + d[0] = v + d = d[:1] + } else { + d = []arrow.ArrayData{v} + } + v.Retain() + memo.id2dict[id] = d + return !ok +} + +func CollectDictionaries(batch arrow.Record, mapper *Mapper) (out []dictpair, err error) { + collector := dictCollector{mapper: mapper} + err = collector.collect(batch) + out = collector.dictionaries + return +} + +func ResolveFieldDict(memo *Memo, data arrow.ArrayData, pos FieldPos, mem memory.Allocator) error { + typ := data.DataType() + if typ.ID() == arrow.EXTENSION { + typ = typ.(arrow.ExtensionType).StorageType() + } + if typ.ID() == arrow.DICTIONARY { + id, err := memo.Mapper.GetFieldID(pos.Path()) + if err != nil { + return err + } + dictData, err := memo.Dict(id, mem) + if err != nil { + return err + } + data.(*array.Data).SetDictionary(dictData) + if err := ResolveFieldDict(memo, dictData, pos, mem); err != nil { + return err + } + } + return ResolveDictionaries(memo, data.Children(), pos, mem) +} + +func ResolveDictionaries(memo *Memo, cols []arrow.ArrayData, parentPos FieldPos, mem memory.Allocator) error { + for i, c := range cols { + if c == nil { + continue + } + if err := ResolveFieldDict(memo, c, parentPos.Child(int32(i)), mem); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/internal/utils.go b/vendor/github.com/apache/arrow-go/v18/arrow/internal/utils.go new file mode 100644 index 0000000000..d471e62ad0 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/internal/utils.go @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" +) + +const CurMetadataVersion = flatbuf.MetadataVersionV5 + +// DefaultHasValidityBitmap is a convenience function equivalent to +// calling HasValidityBitmap with CurMetadataVersion. +func DefaultHasValidityBitmap(id arrow.Type) bool { return HasValidityBitmap(id, CurMetadataVersion) } + +// HasValidityBitmap returns whether the given type at the provided version is +// expected to have a validity bitmap in it's representation. +// +// Typically this is necessary because of the change between V4 and V5 +// where union types no longer have validity bitmaps. 
+func HasValidityBitmap(id arrow.Type, version flatbuf.MetadataVersion) bool { + // in <=V4 Null types had no validity bitmap + // in >=V5 Null and Union types have no validity bitmap + if version < flatbuf.MetadataVersionV5 { + return id != arrow.NULL + } + + switch id { + case arrow.NULL, arrow.DENSE_UNION, arrow.SPARSE_UNION, arrow.RUN_END_ENCODED: + return false + } + return true +} + +// HasBufferSizesBuffer returns whether a given type has an extra buffer +// in the C ABI to store the sizes of other buffers. Currently this is only +// StringView and BinaryView. +func HasBufferSizesBuffer(id arrow.Type) bool { + switch id { + case arrow.STRING_VIEW, arrow.BINARY_VIEW: + return true + default: + return false + } +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/compression.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/compression.go new file mode 100644 index 0000000000..abdf631326 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/compression.go @@ -0,0 +1,137 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ipc + +import ( + "io" + + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" +) + +type compressor interface { + MaxCompressedLen(n int) int + Reset(io.Writer) + io.WriteCloser + Type() flatbuf.CompressionType +} + +type lz4Compressor struct { + *lz4.Writer +} + +func (lz4Compressor) MaxCompressedLen(n int) int { + return lz4.CompressBlockBound(n) +} + +func (lz4Compressor) Type() flatbuf.CompressionType { + return flatbuf.CompressionTypeLZ4_FRAME +} + +type zstdCompressor struct { + *zstd.Encoder +} + +// from zstd.h, ZSTD_COMPRESSBOUND +func (zstdCompressor) MaxCompressedLen(len int) int { + debug.Assert(len >= 0, "MaxCompressedLen called with len less than 0") + extra := uint((uint(128<<10) - uint(len)) >> 11) + if len >= (128 << 10) { + extra = 0 + } + return int(uint(len+(len>>8)) + extra) +} + +func (zstdCompressor) Type() flatbuf.CompressionType { + return flatbuf.CompressionTypeZSTD +} + +func getCompressor(codec flatbuf.CompressionType) compressor { + switch codec { + case flatbuf.CompressionTypeLZ4_FRAME: + w := lz4.NewWriter(nil) + // options here chosen in order to match the C++ implementation + w.Apply(lz4.ChecksumOption(false), lz4.BlockSizeOption(lz4.Block64Kb)) + return &lz4Compressor{w} + case flatbuf.CompressionTypeZSTD: + enc, err := zstd.NewWriter(nil) + if err != nil { + panic(err) + } + return zstdCompressor{enc} + } + return nil +} + +type decompressor interface { + io.Reader + Reset(io.Reader) + Close() +} + +type zstdDecompressor struct { + *zstd.Decoder +} + +func (z *zstdDecompressor) Reset(r io.Reader) { + if err := z.Decoder.Reset(r); err != nil { + panic(err) + } +} + +func (z *zstdDecompressor) Close() { + z.Decoder.Close() +} + +type lz4Decompressor struct { + *lz4.Reader +} + +func (z *lz4Decompressor) Close() { + z.Reset(nil) +} + +func getDecompressor(codec flatbuf.CompressionType) decompressor { + switch codec { + case flatbuf.CompressionTypeLZ4_FRAME: + return &lz4Decompressor{lz4.NewReader(nil)} + case flatbuf.CompressionTypeZSTD: + dec, err := zstd.NewReader(nil) + if err != nil { + panic(err) + } + return &zstdDecompressor{dec} + } + return nil +} + +type bufferWriter struct { + buf *memory.Buffer + pos int +} + +func (bw *bufferWriter) Write(p []byte) (n int, err error) { + if bw.pos+len(p) >= bw.buf.Cap() { + bw.buf.Reserve(bw.pos + len(p)) + } + n = copy(bw.buf.Buf()[bw.pos:], p) + bw.pos += n + return +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/endian_swap.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/endian_swap.go new file mode 100644 index 0000000000..80d8b3cc53 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/endian_swap.go @@ -0,0 +1,166 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "errors" + "fmt" + "math/bits" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/memory" +) + +// swap the endianness of the array's buffers as needed in-place to save +// the cost of reallocation. +// +// assumes that nested data buffers are never re-used, if an *array.Data +// child is re-used among the children or the dictionary then this might +// end up double-swapping (putting it back into the original endianness). +// if it is needed to support re-using the buffers, then this can be +// re-factored to instead return a NEW array.Data object with newly +// allocated buffers, rather than doing it in place. +// +// For now this is intended to be used by the IPC readers after loading +// arrays from an IPC message which currently is guaranteed to not re-use +// buffers between arrays. +func swapEndianArrayData(data *array.Data) error { + if data.Offset() != 0 { + return errors.New("unsupported data format: data.offset != 0") + } + if err := swapType(data.DataType(), data); err != nil { + return err + } + return swapChildren(data.Children()) +} + +func swapChildren(children []arrow.ArrayData) (err error) { + for i := range children { + if err = swapEndianArrayData(children[i].(*array.Data)); err != nil { + break + } + } + return +} + +func swapType(dt arrow.DataType, data *array.Data) (err error) { + switch dt.ID() { + case arrow.BINARY, arrow.STRING: + swapOffsets(1, 32, data) + return + case arrow.LARGE_BINARY, arrow.LARGE_STRING: + swapOffsets(1, 64, data) + return + case arrow.NULL, arrow.BOOL, arrow.INT8, arrow.UINT8, + arrow.FIXED_SIZE_BINARY, arrow.FIXED_SIZE_LIST, arrow.STRUCT: + return + } + + switch dt := dt.(type) { + case *arrow.Decimal128Type: + rawdata := arrow.Uint64Traits.CastFromBytes(data.Buffers()[1].Bytes()) + length := data.Buffers()[1].Len() / arrow.Decimal128SizeBytes + for i := 0; i < length; i++ { + idx := i * 2 + tmp := bits.ReverseBytes64(rawdata[idx]) + rawdata[idx] = bits.ReverseBytes64(rawdata[idx+1]) + rawdata[idx+1] = tmp + } + case *arrow.Decimal256Type: + rawdata := arrow.Uint64Traits.CastFromBytes(data.Buffers()[1].Bytes()) + length := data.Buffers()[1].Len() / arrow.Decimal256SizeBytes + for i := 0; i < length; i++ { + idx := i * 4 + tmp0 := bits.ReverseBytes64(rawdata[idx]) + tmp1 := bits.ReverseBytes64(rawdata[idx+1]) + tmp2 := bits.ReverseBytes64(rawdata[idx+2]) + rawdata[idx] = bits.ReverseBytes64(rawdata[idx+3]) + rawdata[idx+1] = tmp2 + rawdata[idx+2] = tmp1 + rawdata[idx+3] = tmp0 + } + case arrow.UnionType: + if dt.Mode() == arrow.DenseMode { + swapOffsets(2, 32, data) + } + case *arrow.ListType: + swapOffsets(1, 32, data) + case *arrow.LargeListType: + swapOffsets(1, 64, data) + case *arrow.MapType: + swapOffsets(1, 32, data) + case *arrow.DayTimeIntervalType: + byteSwapBuffer(32, data.Buffers()[1]) + case *arrow.MonthDayNanoIntervalType: + rawdata := arrow.MonthDayNanoIntervalTraits.CastFromBytes(data.Buffers()[1].Bytes()) + for i, tmp := range rawdata { + rawdata[i].Days = int32(bits.ReverseBytes32(uint32(tmp.Days))) + 
rawdata[i].Months = int32(bits.ReverseBytes32(uint32(tmp.Months))) + rawdata[i].Nanoseconds = int64(bits.ReverseBytes64(uint64(tmp.Nanoseconds))) + } + case arrow.ExtensionType: + return swapType(dt.StorageType(), data) + case *arrow.DictionaryType: + // dictionary itself was already swapped in ReadDictionary calls + return swapType(dt.IndexType, data) + case arrow.FixedWidthDataType: + byteSwapBuffer(dt.BitWidth(), data.Buffers()[1]) + default: + err = fmt.Errorf("%w: swapping endianness of %s", arrow.ErrNotImplemented, dt) + } + + return +} + +// this can get called on an invalid Array Data object by the IPC reader, +// so we won't rely on the data.length and will instead rely on the buffer's +// own size instead. +func byteSwapBuffer(bw int, buf *memory.Buffer) { + if bw == 1 || buf == nil { + // if byte width == 1, no need to swap anything + return + } + + switch bw { + case 16: + data := arrow.Uint16Traits.CastFromBytes(buf.Bytes()) + for i := range data { + data[i] = bits.ReverseBytes16(data[i]) + } + case 32: + data := arrow.Uint32Traits.CastFromBytes(buf.Bytes()) + for i := range data { + data[i] = bits.ReverseBytes32(data[i]) + } + case 64: + data := arrow.Uint64Traits.CastFromBytes(buf.Bytes()) + for i := range data { + data[i] = bits.ReverseBytes64(data[i]) + } + } +} + +func swapOffsets(index int, bitWidth int, data *array.Data) { + if data.Buffers()[index] == nil || data.Buffers()[index].Len() == 0 { + return + } + + // other than unions, offset has one more element than the data.length + // don't yet implement large types, so hardcode 32bit offsets for now + byteSwapBuffer(bitWidth, data.Buffers()[index]) +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_reader.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_reader.go new file mode 100644 index 0000000000..9135529d39 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_reader.go @@ -0,0 +1,902 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
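file_reader.go below provides two entry points: NewFileReader for any ReadAtSeeker, and NewMappedFileReader for zero-copy access over an existing byte slice (for example an mmapped region). A rough usage sketch for the ReadAtSeeker path, with a hypothetical file name and error handling reduced to log.Fatal:

package main

import (
	"log"
	"os"

	"github.com/apache/arrow-go/v18/arrow/ipc"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	f, err := os.Open("data.arrow") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// *os.File satisfies ReadAtSeeker (io.Reader + io.Seeker + io.ReaderAt).
	r, err := ipc.NewFileReader(f, ipc.WithAllocator(memory.DefaultAllocator))
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	for i := 0; i < r.NumRecords(); i++ {
		rec, err := r.Record(i) // valid until the next call to Record
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("record %d: %d rows", i, rec.NumRows())
	}
}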
+ +package ipc + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" +) + +type readerImpl interface { + getFooterEnd() (int64, error) + getBytes(offset, length int64) ([]byte, error) + dict(memory.Allocator, *footerBlock, int) (dataBlock, error) + block(memory.Allocator, *footerBlock, int) (dataBlock, error) +} + +type footerBlock struct { + offset int64 + buffer *memory.Buffer + data *flatbuf.Footer +} + +type dataBlock interface { + Offset() int64 + Meta() int32 + Body() int64 + NewMessage() (*Message, error) +} + +const footerSizeLen = 4 + +var minimumOffsetSize = int64(len(Magic)*2 + footerSizeLen) + +type basicReaderImpl struct { + r ReadAtSeeker +} + +func (r *basicReaderImpl) getBytes(offset, len int64) ([]byte, error) { + buf := make([]byte, len) + n, err := r.r.ReadAt(buf, offset) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read %d bytes at offset %d: %w", len, offset, err) + } + if int64(n) != len { + return nil, fmt.Errorf("arrow/ipc: could not read %d bytes at offset %d", len, offset) + } + return buf, nil +} + +func (r *basicReaderImpl) getFooterEnd() (int64, error) { + return r.r.Seek(0, io.SeekEnd) +} + +func (r *basicReaderImpl) block(mem memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.RecordBatches(&blk, i) { + return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract file block %d", i) + } + + return fileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + r: r.r, + mem: mem, + }, nil +} + +func (r *basicReaderImpl) dict(mem memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.Dictionaries(&blk, i) { + return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract dictionary block %d", i) + } + + return fileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + r: r.r, + mem: mem, + }, nil +} + +type mappedReaderImpl struct { + data []byte +} + +func (r *mappedReaderImpl) getBytes(offset, length int64) ([]byte, error) { + if offset < 0 || offset+int64(length) > int64(len(r.data)) { + return nil, fmt.Errorf("arrow/ipc: invalid offset=%d or length=%d", offset, length) + } + + return r.data[offset : offset+length], nil +} + +func (r *mappedReaderImpl) getFooterEnd() (int64, error) { return int64(len(r.data)), nil } + +func (r *mappedReaderImpl) block(_ memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.RecordBatches(&blk, i) { + return mappedFileBlock{}, fmt.Errorf("arrow/ipc: could not extract file block %d", i) + } + + return mappedFileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + data: r.data, + }, nil +} + +func (r *mappedReaderImpl) dict(_ memory.Allocator, f *footerBlock, i int) (dataBlock, error) { + var blk flatbuf.Block + if !f.data.Dictionaries(&blk, i) { + return mappedFileBlock{}, fmt.Errorf("arrow/ipc: could not extract dictionary block %d", i) + } + + return mappedFileBlock{ + offset: blk.Offset(), + meta: blk.MetaDataLength(), + body: blk.BodyLength(), + data: r.data, + }, 
nil +} + +// FileReader is an Arrow file reader. +type FileReader struct { + r readerImpl + + footer footerBlock + + // fields dictTypeMap + memo dictutils.Memo + + schema *arrow.Schema + record arrow.Record + + irec int // current record index. used for the arrio.Reader interface + err error // last error + + mem memory.Allocator + swapEndianness bool +} + +// NewMappedFileReader is like NewFileReader but instead of using a ReadAtSeeker, +// which will force copies through the Read/ReadAt methods, it uses a byte slice +// and pulls slices directly from the data. This is useful specifically when +// dealing with mmapped data so that you can lazily load the buffers and avoid +// extraneous copies. The slices used for the record column buffers will simply +// reference the existing data instead of performing copies via ReadAt/Read. +// +// For example, syscall.Mmap returns a byte slice which could be referencing +// a shared memory region or otherwise a memory-mapped file. +func NewMappedFileReader(data []byte, opts ...Option) (*FileReader, error) { + var ( + cfg = newConfig(opts...) + f = FileReader{ + r: &mappedReaderImpl{data: data}, + mem: cfg.alloc, + } + ) + + if err := f.init(cfg); err != nil { + return nil, err + } + return &f, nil +} + +// NewFileReader opens an Arrow file using the provided reader r. +func NewFileReader(r ReadAtSeeker, opts ...Option) (*FileReader, error) { + var ( + cfg = newConfig(opts...) + f = FileReader{ + r: &basicReaderImpl{r: r}, + memo: dictutils.NewMemo(), + mem: cfg.alloc, + } + ) + + if err := f.init(cfg); err != nil { + return nil, err + } + return &f, nil +} + +func (f *FileReader) init(cfg *config) error { + var err error + if cfg.footer.offset <= 0 { + cfg.footer.offset, err = f.r.getFooterEnd() + if err != nil { + return fmt.Errorf("arrow/ipc: could retrieve footer offset: %w", err) + } + } + f.footer.offset = cfg.footer.offset + + err = f.readFooter() + if err != nil { + return fmt.Errorf("arrow/ipc: could not decode footer: %w", err) + } + + err = f.readSchema(cfg.ensureNativeEndian) + if err != nil { + return fmt.Errorf("arrow/ipc: could not decode schema: %w", err) + } + + if cfg.schema != nil && !cfg.schema.Equal(f.schema) { + return fmt.Errorf("arrow/ipc: inconsistent schema for reading (got: %v, want: %v)", f.schema, cfg.schema) + } + + return err +} + +func (f *FileReader) readSchema(ensureNativeEndian bool) error { + var ( + err error + kind dictutils.Kind + ) + + schema := f.footer.data.Schema(nil) + if schema == nil { + return fmt.Errorf("arrow/ipc: could not load schema from flatbuffer data") + } + f.schema, err = schemaFromFB(schema, &f.memo) + if err != nil { + return fmt.Errorf("arrow/ipc: could not read schema: %w", err) + } + + if ensureNativeEndian && !f.schema.IsNativeEndian() { + f.swapEndianness = true + f.schema = f.schema.WithEndianness(endian.NativeEndian) + } + + for i := 0; i < f.NumDictionaries(); i++ { + blk, err := f.r.dict(f.mem, &f.footer, i) + if err != nil { + return fmt.Errorf("arrow/ipc: could not read dictionary[%d]: %w", i, err) + } + switch { + case !bitutil.IsMultipleOf8(blk.Offset()): + return fmt.Errorf("arrow/ipc: invalid file offset=%d for dictionary %d", blk.Offset(), i) + case !bitutil.IsMultipleOf8(int64(blk.Meta())): + return fmt.Errorf("arrow/ipc: invalid file metadata=%d position for dictionary %d", blk.Meta(), i) + case !bitutil.IsMultipleOf8(blk.Body()): + return fmt.Errorf("arrow/ipc: invalid file body=%d position for dictionary %d", blk.Body(), i) + } + + msg, err := blk.NewMessage() + if err != 
nil { + return err + } + + kind, err = readDictionary(&f.memo, msg.meta, msg.body, f.swapEndianness, f.mem) + if err != nil { + return err + } + if kind == dictutils.KindReplacement { + return errors.New("arrow/ipc: unsupported dictionary replacement in IPC file") + } + } + + return err +} + +func (f *FileReader) readFooter() error { + if f.footer.offset <= minimumOffsetSize { + return fmt.Errorf("arrow/ipc: file too small (size=%d)", f.footer.offset) + } + + eof := int64(len(Magic) + footerSizeLen) + buf, err := f.r.getBytes(f.footer.offset-eof, eof) + if err != nil { + return err + } + + if !bytes.Equal(buf[4:], Magic) { + return errNotArrowFile + } + + size := int64(binary.LittleEndian.Uint32(buf[:footerSizeLen])) + if size <= 0 || size+minimumOffsetSize > f.footer.offset { + return errInconsistentFileMetadata + } + + buf, err = f.r.getBytes(f.footer.offset-size-eof, size) + if err != nil { + return err + } + + f.footer.buffer = memory.NewBufferBytes(buf) + f.footer.data = flatbuf.GetRootAsFooter(buf, 0) + return nil +} + +func (f *FileReader) Schema() *arrow.Schema { + return f.schema +} + +func (f *FileReader) NumDictionaries() int { + if f.footer.data == nil { + return 0 + } + return f.footer.data.DictionariesLength() +} + +func (f *FileReader) NumRecords() int { + return f.footer.data.RecordBatchesLength() +} + +func (f *FileReader) Version() MetadataVersion { + return MetadataVersion(f.footer.data.Version()) +} + +// Close cleans up resources used by the File. +// Close does not close the underlying reader. +func (f *FileReader) Close() error { + if f.footer.data != nil { + f.footer.data = nil + } + + if f.footer.buffer != nil { + f.footer.buffer.Release() + f.footer.buffer = nil + } + + if f.record != nil { + f.record.Release() + f.record = nil + } + return nil +} + +// Record returns the i-th record from the file. +// The returned value is valid until the next call to Record. +// Users need to call Retain on that Record to keep it valid for longer. +func (f *FileReader) Record(i int) (arrow.Record, error) { + record, err := f.RecordAt(i) + if err != nil { + return nil, err + } + + if f.record != nil { + f.record.Release() + } + + f.record = record + return record, nil +} + +// Record returns the i-th record from the file. Ownership is transferred to the +// caller and must call Release() to free the memory. This method is safe to +// call concurrently. +func (f *FileReader) RecordAt(i int) (arrow.Record, error) { + if i < 0 || i > f.NumRecords() { + panic("arrow/ipc: record index out of bounds") + } + + blk, err := f.r.block(f.mem, &f.footer, i) + if err != nil { + return nil, err + } + switch { + case !bitutil.IsMultipleOf8(blk.Offset()): + return nil, fmt.Errorf("arrow/ipc: invalid file offset=%d for record %d", blk.Offset(), i) + case !bitutil.IsMultipleOf8(int64(blk.Meta())): + return nil, fmt.Errorf("arrow/ipc: invalid file metadata=%d position for record %d", blk.Meta(), i) + case !bitutil.IsMultipleOf8(blk.Body()): + return nil, fmt.Errorf("arrow/ipc: invalid file body=%d position for record %d", blk.Body(), i) + } + + msg, err := blk.NewMessage() + if err != nil { + return nil, err + } + defer msg.Release() + + if msg.Type() != MessageRecordBatch { + return nil, fmt.Errorf("arrow/ipc: message %d is not a Record", i) + } + + return newRecord(f.schema, &f.memo, msg.meta, msg.body, f.swapEndianness, f.mem), nil +} + +// Read reads the current record from the underlying stream and an error, if any. 
+// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF). +// +// The returned record value is valid until the next call to Read. +// Users need to call Retain on that Record to keep it valid for longer. +func (f *FileReader) Read() (rec arrow.Record, err error) { + if f.irec == f.NumRecords() { + return nil, io.EOF + } + rec, f.err = f.Record(f.irec) + f.irec++ + return rec, f.err +} + +// ReadAt reads the i-th record from the underlying stream and an error, if any. +func (f *FileReader) ReadAt(i int64) (arrow.Record, error) { + return f.Record(int(i)) +} + +func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, body *memory.Buffer, swapEndianness bool, mem memory.Allocator) arrow.Record { + var ( + msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0) + md flatbuf.RecordBatch + codec decompressor + ) + initFB(&md, msg.Header) + rows := md.Length() + + bodyCompress := md.Compression(nil) + if bodyCompress != nil { + codec = getDecompressor(bodyCompress.Codec()) + defer codec.Close() + } + + ctx := &arrayLoaderContext{ + src: ipcSource{ + meta: &md, + rawBytes: body, + codec: codec, + mem: mem, + }, + memo: memo, + max: kMaxNestingDepth, + version: MetadataVersion(msg.Version()), + } + + pos := dictutils.NewFieldPos() + cols := make([]arrow.Array, schema.NumFields()) + for i := 0; i < schema.NumFields(); i++ { + data := ctx.loadArray(schema.Field(i).Type) + defer data.Release() + + if err := dictutils.ResolveFieldDict(memo, data, pos.Child(int32(i)), mem); err != nil { + panic(err) + } + + if swapEndianness { + swapEndianArrayData(data.(*array.Data)) + } + + cols[i] = array.MakeFromData(data) + defer cols[i].Release() + } + + return array.NewRecord(schema, cols, rows) +} + +type ipcSource struct { + meta *flatbuf.RecordBatch + rawBytes *memory.Buffer + codec decompressor + mem memory.Allocator +} + +func (src *ipcSource) buffer(i int) *memory.Buffer { + var buf flatbuf.Buffer + if !src.meta.Buffers(&buf, i) { + panic("arrow/ipc: buffer index out of bound") + } + + if buf.Length() == 0 { + return memory.NewBufferBytes(nil) + } + + var raw *memory.Buffer + if src.codec == nil { + raw = memory.SliceBuffer(src.rawBytes, int(buf.Offset()), int(buf.Length())) + } else { + body := src.rawBytes.Bytes()[buf.Offset() : buf.Offset()+buf.Length()] + uncompressedSize := int64(binary.LittleEndian.Uint64(body[:8])) + + // check for an uncompressed buffer + if uncompressedSize != -1 { + raw = memory.NewResizableBuffer(src.mem) + raw.Resize(int(uncompressedSize)) + src.codec.Reset(bytes.NewReader(body[8:])) + if _, err := io.ReadFull(src.codec, raw.Bytes()); err != nil { + panic(err) + } + } else { + raw = memory.SliceBuffer(src.rawBytes, int(buf.Offset())+8, int(buf.Length())-8) + } + } + + return raw +} + +func (src *ipcSource) fieldMetadata(i int) *flatbuf.FieldNode { + var node flatbuf.FieldNode + if !src.meta.Nodes(&node, i) { + panic("arrow/ipc: field metadata out of bound") + } + return &node +} + +func (src *ipcSource) variadicCount(i int) int64 { + return src.meta.VariadicBufferCounts(i) +} + +type arrayLoaderContext struct { + src ipcSource + ifield int + ibuffer int + ivariadic int + max int + memo *dictutils.Memo + version MetadataVersion +} + +func (ctx *arrayLoaderContext) field() *flatbuf.FieldNode { + field := ctx.src.fieldMetadata(ctx.ifield) + ctx.ifield++ + return field +} + +func (ctx *arrayLoaderContext) buffer() *memory.Buffer { + buf := ctx.src.buffer(ctx.ibuffer) + ctx.ibuffer++ + return buf +} + +func (ctx *arrayLoaderContext) 
variadic() int64 { + v := ctx.src.variadicCount(ctx.ivariadic) + ctx.ivariadic++ + return v +} + +func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData { + switch dt := dt.(type) { + case *arrow.NullType: + return ctx.loadNull() + + case *arrow.DictionaryType: + indices := ctx.loadPrimitive(dt.IndexType) + defer indices.Release() + return array.NewData(dt, indices.Len(), indices.Buffers(), indices.Children(), indices.NullN(), indices.Offset()) + + case *arrow.BooleanType, + *arrow.Int8Type, *arrow.Int16Type, *arrow.Int32Type, *arrow.Int64Type, + *arrow.Uint8Type, *arrow.Uint16Type, *arrow.Uint32Type, *arrow.Uint64Type, + *arrow.Float16Type, *arrow.Float32Type, *arrow.Float64Type, + arrow.DecimalType, + *arrow.Time32Type, *arrow.Time64Type, + *arrow.TimestampType, + *arrow.Date32Type, *arrow.Date64Type, + *arrow.MonthIntervalType, *arrow.DayTimeIntervalType, *arrow.MonthDayNanoIntervalType, + *arrow.DurationType: + return ctx.loadPrimitive(dt) + + case *arrow.BinaryType, *arrow.StringType, *arrow.LargeStringType, *arrow.LargeBinaryType: + return ctx.loadBinary(dt) + + case arrow.BinaryViewDataType: + return ctx.loadBinaryView(dt) + + case *arrow.FixedSizeBinaryType: + return ctx.loadFixedSizeBinary(dt) + + case *arrow.ListType: + return ctx.loadList(dt) + + case *arrow.LargeListType: + return ctx.loadList(dt) + + case *arrow.ListViewType: + return ctx.loadListView(dt) + + case *arrow.LargeListViewType: + return ctx.loadListView(dt) + + case *arrow.FixedSizeListType: + return ctx.loadFixedSizeList(dt) + + case *arrow.StructType: + return ctx.loadStruct(dt) + + case *arrow.MapType: + return ctx.loadMap(dt) + + case arrow.ExtensionType: + storage := ctx.loadArray(dt.StorageType()) + defer storage.Release() + return array.NewData(dt, storage.Len(), storage.Buffers(), storage.Children(), storage.NullN(), storage.Offset()) + + case *arrow.RunEndEncodedType: + field, buffers := ctx.loadCommon(dt.ID(), 1) + defer memory.ReleaseBuffers(buffers) + + runEnds := ctx.loadChild(dt.RunEnds()) + defer runEnds.Release() + values := ctx.loadChild(dt.Encoded()) + defer values.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{runEnds, values}, int(field.NullCount()), 0) + + case arrow.UnionType: + return ctx.loadUnion(dt) + + default: + panic(fmt.Errorf("arrow/ipc: array type %T not handled yet", dt)) + } +} + +func (ctx *arrayLoaderContext) loadCommon(typ arrow.Type, nbufs int) (*flatbuf.FieldNode, []*memory.Buffer) { + buffers := make([]*memory.Buffer, 0, nbufs) + field := ctx.field() + + var buf *memory.Buffer + + if internal.HasValidityBitmap(typ, flatbuf.MetadataVersion(ctx.version)) { + switch field.NullCount() { + case 0: + ctx.ibuffer++ + default: + buf = ctx.buffer() + } + } + buffers = append(buffers, buf) + + return field, buffers +} + +func (ctx *arrayLoaderContext) loadChild(dt arrow.DataType) arrow.ArrayData { + if ctx.max == 0 { + panic("arrow/ipc: nested type limit reached") + } + ctx.max-- + sub := ctx.loadArray(dt) + ctx.max++ + return sub +} + +func (ctx *arrayLoaderContext) loadNull() arrow.ArrayData { + field := ctx.field() + return array.NewData(arrow.Null, int(field.Length()), nil, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadPrimitive(dt arrow.DataType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + + switch field.Length() { + case 0: + buffers = append(buffers, nil) + ctx.ibuffer++ + default: + buffers = append(buffers, ctx.buffer()) + } + + defer memory.ReleaseBuffers(buffers) + 
+ return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadBinary(dt arrow.DataType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 3) + buffers = append(buffers, ctx.buffer(), ctx.buffer()) + defer memory.ReleaseBuffers(buffers) + + return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadBinaryView(dt arrow.DataType) arrow.ArrayData { + nVariadicBufs := ctx.variadic() + field, buffers := ctx.loadCommon(dt.ID(), 2+int(nVariadicBufs)) + buffers = append(buffers, ctx.buffer()) + for i := 0; i < int(nVariadicBufs); i++ { + buffers = append(buffers, ctx.buffer()) + } + defer memory.ReleaseBuffers(buffers) + + return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadFixedSizeBinary(dt *arrow.FixedSizeBinaryType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + buffers = append(buffers, ctx.buffer()) + defer memory.ReleaseBuffers(buffers) + + return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadMap(dt *arrow.MapType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + buffers = append(buffers, ctx.buffer()) + defer memory.ReleaseBuffers(buffers) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadList(dt arrow.ListLikeType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 2) + buffers = append(buffers, ctx.buffer()) + defer memory.ReleaseBuffers(buffers) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadListView(dt arrow.VarLenListLikeType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 3) + buffers = append(buffers, ctx.buffer(), ctx.buffer()) + defer memory.ReleaseBuffers(buffers) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadFixedSizeList(dt *arrow.FixedSizeListType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 1) + defer memory.ReleaseBuffers(buffers) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadStruct(dt *arrow.StructType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 1) + defer memory.ReleaseBuffers(buffers) + + subs := make([]arrow.ArrayData, dt.NumFields()) + for i, f := range dt.Fields() { + subs[i] = ctx.loadChild(f.Type) + } + defer func() { + for i := range subs { + subs[i].Release() + } + }() + + return array.NewData(dt, int(field.Length()), buffers, subs, int(field.NullCount()), 0) +} + +func (ctx *arrayLoaderContext) loadUnion(dt arrow.UnionType) arrow.ArrayData { + // Sparse unions have 2 buffers (a nil validity bitmap, and the type ids) + nBuffers := 2 + // Dense unions have a third buffer, the offsets + if dt.Mode() == arrow.DenseMode { + nBuffers = 3 + } + + field, buffers := ctx.loadCommon(dt.ID(), nBuffers) + if field.NullCount() != 0 && buffers[0] != 
nil { + panic("arrow/ipc: cannot read pre-1.0.0 union array with top-level validity bitmap") + } + + switch field.Length() { + case 0: + buffers = append(buffers, memory.NewBufferBytes([]byte{})) + ctx.ibuffer++ + if dt.Mode() == arrow.DenseMode { + buffers = append(buffers, nil) + ctx.ibuffer++ + } + default: + buffers = append(buffers, ctx.buffer()) + if dt.Mode() == arrow.DenseMode { + buffers = append(buffers, ctx.buffer()) + } + } + + defer memory.ReleaseBuffers(buffers) + subs := make([]arrow.ArrayData, dt.NumFields()) + for i, f := range dt.Fields() { + subs[i] = ctx.loadChild(f.Type) + } + defer func() { + for i := range subs { + subs[i].Release() + } + }() + return array.NewData(dt, int(field.Length()), buffers, subs, 0, 0) +} + +func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body *memory.Buffer, swapEndianness bool, mem memory.Allocator) (dictutils.Kind, error) { + var ( + msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0) + md flatbuf.DictionaryBatch + data flatbuf.RecordBatch + codec decompressor + ) + initFB(&md, msg.Header) + + md.Data(&data) + bodyCompress := data.Compression(nil) + if bodyCompress != nil { + codec = getDecompressor(bodyCompress.Codec()) + defer codec.Close() + } + + id := md.Id() + // look up the dictionary value type, which must have been added to the + // memo already before calling this function + valueType, ok := memo.Type(id) + if !ok { + return 0, fmt.Errorf("arrow/ipc: no dictionary type found with id: %d", id) + } + + ctx := &arrayLoaderContext{ + src: ipcSource{ + meta: &data, + codec: codec, + rawBytes: body, + mem: mem, + }, + memo: memo, + max: kMaxNestingDepth, + } + + dict := ctx.loadArray(valueType) + defer dict.Release() + + if swapEndianness { + swapEndianArrayData(dict.(*array.Data)) + } + + if md.IsDelta() { + memo.AddDelta(id, dict) + return dictutils.KindDelta, nil + } + if memo.AddOrReplace(id, dict) { + return dictutils.KindNew, nil + } + return dictutils.KindReplacement, nil +} + +type mappedFileBlock struct { + offset int64 + meta int32 + body int64 + + data []byte +} + +func (blk mappedFileBlock) Offset() int64 { return blk.offset } +func (blk mappedFileBlock) Meta() int32 { return blk.meta } +func (blk mappedFileBlock) Body() int64 { return blk.body } + +func (blk mappedFileBlock) section() []byte { + return blk.data[blk.offset : blk.offset+int64(blk.meta)+blk.body] +} + +func (blk mappedFileBlock) NewMessage() (*Message, error) { + var ( + body *memory.Buffer + meta *memory.Buffer + buf = blk.section() + ) + + metaBytes := buf[:blk.meta] + + prefix := 0 + switch binary.LittleEndian.Uint32(metaBytes) { + case 0: + case kIPCContToken: + prefix = 8 + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + prefix = 4 + } + + meta = memory.NewBufferBytes(metaBytes[prefix:]) + body = memory.NewBufferBytes(buf[blk.meta : int64(blk.meta)+blk.body]) + return NewMessage(meta, body), nil +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_writer.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_writer.go new file mode 100644 index 0000000000..2aa0c9b446 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/file_writer.go @@ -0,0 +1,360 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" +) + +// PayloadWriter is an interface for injecting a different payloadwriter +// allowing more reusability with the Writer object with other scenarios, +// such as with Flight data +type PayloadWriter interface { + Start() error + WritePayload(Payload) error + Close() error +} + +type fileWriter struct { + streamWriter + + schema *arrow.Schema + dicts []dataBlock + recs []dataBlock +} + +func (w *fileWriter) Start() error { + var err error + + // only necessary to align to 8-byte boundary at the start of the file + _, err = w.Write(Magic) + if err != nil { + return fmt.Errorf("arrow/ipc: could not write magic Arrow bytes: %w", err) + } + + err = w.align(kArrowIPCAlignment) + if err != nil { + return fmt.Errorf("arrow/ipc: could not align start block: %w", err) + } + + return w.streamWriter.Start() +} + +func (w *fileWriter) WritePayload(p Payload) error { + blk := fileBlock{offset: w.pos, meta: 0, body: p.size} + n, err := writeIPCPayload(w, p) + if err != nil { + return err + } + + blk.meta = int32(n) + + switch flatbuf.MessageHeader(p.msg) { + case flatbuf.MessageHeaderDictionaryBatch: + w.dicts = append(w.dicts, blk) + case flatbuf.MessageHeaderRecordBatch: + w.recs = append(w.recs, blk) + } + + return nil +} + +func (w *fileWriter) Close() error { + var err error + + if err = w.streamWriter.Close(); err != nil { + return err + } + + pos := w.pos + if err = writeFileFooter(w.schema, w.dicts, w.recs, w); err != nil { + return fmt.Errorf("arrow/ipc: could not write file footer: %w", err) + } + + size := w.pos - pos + if size <= 0 { + return fmt.Errorf("arrow/ipc: invalid file footer size (size=%d)", size) + } + + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, uint32(size)) + _, err = w.Write(buf) + if err != nil { + return fmt.Errorf("arrow/ipc: could not write file footer size: %w", err) + } + + _, err = w.Write(Magic) + if err != nil { + return fmt.Errorf("arrow/ipc: could not write Arrow magic bytes: %w", err) + } + + return nil +} + +func (w *fileWriter) align(align int32) error { + remainder := paddedLength(w.pos, align) - w.pos + if remainder == 0 { + return nil + } + + _, err := w.Write(paddingBytes[:int(remainder)]) + return err +} + +func writeIPCPayload(w io.Writer, p Payload) (int, error) { + n, err := writeMessage(p.meta, kArrowIPCAlignment, w) + if err != nil { + return n, err + } + + // now write the buffers + for _, buf := range p.body { + var ( + size int64 + padding int64 + ) + + // the buffer might be null if we are handling zero row lengths. 
+ if buf != nil { + size = int64(buf.Len()) + padding = bitutil.CeilByte64(size) - size + } + + if size > 0 { + _, err = w.Write(buf.Bytes()) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write payload message body: %w", err) + } + } + + if padding > 0 { + _, err = w.Write(paddingBytes[:padding]) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write payload message padding: %w", err) + } + } + } + + return n, err +} + +// Payload is the underlying message object which is passed to the payload writer +// for actually writing out ipc messages +type Payload struct { + msg MessageType + meta *memory.Buffer + body []*memory.Buffer + size int64 // length of body +} + +// Meta returns the buffer containing the metadata for this payload, +// callers must call Release on the buffer +func (p *Payload) Meta() *memory.Buffer { + if p.meta != nil { + p.meta.Retain() + } + return p.meta +} + +// SerializeBody serializes the body buffers and writes them to the provided +// writer. +func (p *Payload) SerializeBody(w io.Writer) error { + for _, data := range p.body { + if data == nil { + continue + } + + size := int64(data.Len()) + padding := bitutil.CeilByte64(size) - size + if size > 0 { + if _, err := w.Write(data.Bytes()); err != nil { + return fmt.Errorf("arrow/ipc: could not write payload message body: %w", err) + } + + if padding > 0 { + if _, err := w.Write(paddingBytes[:padding]); err != nil { + return fmt.Errorf("arrow/ipc: could not write payload message padding bytes: %w", err) + } + } + } + } + return nil +} + +// WritePayload serializes the payload in IPC format +// into the provided writer. +func (p *Payload) WritePayload(w io.Writer) (int, error) { + return writeIPCPayload(w, *p) +} + +func (p *Payload) Release() { + if p.meta != nil { + p.meta.Release() + p.meta = nil + } + for i, b := range p.body { + if b == nil { + continue + } + b.Release() + p.body[i] = nil + } +} + +type payloads []Payload + +func (ps payloads) Release() { + for i := range ps { + ps[i].Release() + } +} + +// FileWriter is an Arrow file writer. +type FileWriter struct { + w io.Writer + + mem memory.Allocator + + headerStarted bool + footerWritten bool + + pw PayloadWriter + + schema *arrow.Schema + mapper dictutils.Mapper + codec flatbuf.CompressionType + compressNP int + compressors []compressor + minSpaceSavings *float64 + + // map of the last written dictionaries by id + // so we can avoid writing the same dictionary over and over + // also needed for correctness when writing IPC format which + // does not allow replacements or deltas. + lastWrittenDicts map[int64]arrow.Array +} + +// NewFileWriter opens an Arrow file using the provided writer w. +func NewFileWriter(w io.Writer, opts ...Option) (*FileWriter, error) { + var ( + cfg = newConfig(opts...) 
+ err error + ) + + f := FileWriter{ + w: w, + pw: &fileWriter{streamWriter: streamWriter{w: w}, schema: cfg.schema}, + mem: cfg.alloc, + schema: cfg.schema, + codec: cfg.codec, + compressNP: cfg.compressNP, + minSpaceSavings: cfg.minSpaceSavings, + compressors: make([]compressor, cfg.compressNP), + } + + return &f, err +} + +func (f *FileWriter) Close() error { + err := f.checkStarted() + if err != nil { + return fmt.Errorf("arrow/ipc: could not write empty file: %w", err) + } + + if f.footerWritten { + return nil + } + + err = f.pw.Close() + if err != nil { + return fmt.Errorf("arrow/ipc: could not close payload writer: %w", err) + } + f.footerWritten = true + + return nil +} + +func (f *FileWriter) Write(rec arrow.Record) error { + schema := rec.Schema() + if schema == nil || !schema.Equal(f.schema) { + return errInconsistentSchema + } + + if err := f.checkStarted(); err != nil { + return fmt.Errorf("arrow/ipc: could not write header: %w", err) + } + + const allow64b = true + var ( + data = Payload{msg: MessageRecordBatch} + enc = newRecordEncoder( + f.mem, 0, kMaxNestingDepth, allow64b, f.codec, f.compressNP, f.minSpaceSavings, f.compressors, + ) + ) + defer data.Release() + + err := writeDictionaryPayloads(f.mem, rec, true, false, &f.mapper, f.lastWrittenDicts, f.pw, enc) + if err != nil { + return fmt.Errorf("arrow/ipc: failure writing dictionary batches: %w", err) + } + + enc.reset() + if err := enc.Encode(&data, rec); err != nil { + return fmt.Errorf("arrow/ipc: could not encode record to payload: %w", err) + } + + return f.pw.WritePayload(data) +} + +func (f *FileWriter) checkStarted() error { + if !f.headerStarted { + return f.start() + } + return nil +} + +func (f *FileWriter) start() error { + f.headerStarted = true + err := f.pw.Start() + if err != nil { + return err + } + + f.mapper.ImportSchema(f.schema) + f.lastWrittenDicts = make(map[int64]arrow.Array) + + // write out schema payloads + ps := payloadFromSchema(f.schema, f.mem, &f.mapper) + defer ps.Release() + + for _, data := range ps { + err = f.pw.WritePayload(data) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/ipc.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/ipc.go new file mode 100644 index 0000000000..c4589da6cb --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/ipc.go @@ -0,0 +1,203 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
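The FileWriter above is configured entirely through the functional options declared in ipc.go below; each option mutates a single shared config value in order. A hedged sketch of how a writer is typically composed, assuming a placeholder schema and output path:

package main

import (
	"log"
	"os"

	"github.com/apache/arrow-go/v18/arrow"
	"github.com/apache/arrow-go/v18/arrow/ipc"
	"github.com/apache/arrow-go/v18/arrow/memory"
)

func main() {
	// Placeholder schema; a real caller would build this from its data.
	schema := arrow.NewSchema([]arrow.Field{
		{Name: "id", Type: arrow.PrimitiveTypes.Int64},
	}, nil)

	out, err := os.Create("out.arrow") // hypothetical output path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	w, err := ipc.NewFileWriter(out,
		ipc.WithSchema(schema),
		ipc.WithZstd(),                 // ZSTD-compress record body buffers
		ipc.WithCompressConcurrency(4), // compress buffers on up to 4 goroutines
		ipc.WithAllocator(memory.DefaultAllocator),
	)
	if err != nil {
		log.Fatal(err)
	}
	// Records would be written here with w.Write(rec); Close writes the footer.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}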
+ +package ipc + +import ( + "io" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/arrio" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" +) + +const ( + errNotArrowFile = errString("arrow/ipc: not an Arrow file") + errInconsistentFileMetadata = errString("arrow/ipc: file is smaller than indicated metadata size") + errInconsistentSchema = errString("arrow/ipc: tried to write record batch with different schema") + errMaxRecursion = errString("arrow/ipc: max recursion depth reached") + errBigArray = errString("arrow/ipc: array larger than 2^31-1 in length") + + kArrowAlignment = 64 // buffers are padded to 64b boundaries (for SIMD) + kTensorAlignment = 64 // tensors are padded to 64b boundaries + kArrowIPCAlignment = 8 // align on 8b boundaries in IPC +) + +var ( + paddingBytes [kArrowAlignment]byte + kEOS = [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0, 0, 0, 0} // end of stream message + kIPCContToken uint32 = 0xFFFFFFFF // 32b continuation indicator for FlatBuffers 8b alignment +) + +func paddedLength(nbytes int64, alignment int32) int64 { + align := int64(alignment) + return ((nbytes + align - 1) / align) * align +} + +type errString string + +func (s errString) Error() string { + return string(s) +} + +type ReadAtSeeker interface { + io.Reader + io.Seeker + io.ReaderAt +} + +type config struct { + alloc memory.Allocator + schema *arrow.Schema + footer struct { + offset int64 + } + codec flatbuf.CompressionType + compressNP int + ensureNativeEndian bool + noAutoSchema bool + emitDictDeltas bool + minSpaceSavings *float64 +} + +func newConfig(opts ...Option) *config { + cfg := &config{ + alloc: memory.NewGoAllocator(), + codec: -1, // uncompressed + ensureNativeEndian: true, + compressNP: 1, + } + + for _, opt := range opts { + opt(cfg) + } + + return cfg +} + +// Option is a functional option to configure opening or creating Arrow files +// and streams. +type Option func(*config) + +// WithFooterOffset specifies the Arrow footer position in bytes. +func WithFooterOffset(offset int64) Option { + return func(cfg *config) { + cfg.footer.offset = offset + } +} + +// WithAllocator specifies the Arrow memory allocator used while building records. +func WithAllocator(mem memory.Allocator) Option { + return func(cfg *config) { + cfg.alloc = mem + } +} + +// WithSchema specifies the Arrow schema to be used for reading or writing. +func WithSchema(schema *arrow.Schema) Option { + return func(cfg *config) { + cfg.schema = schema + } +} + +// WithLZ4 tells the writer to use LZ4 Frame compression on the data +// buffers before writing. Requires >= Arrow 1.0.0 to read/decompress +func WithLZ4() Option { + return func(cfg *config) { + cfg.codec = flatbuf.CompressionTypeLZ4_FRAME + } +} + +// WithZstd tells the writer to use ZSTD compression on the data +// buffers before writing. Requires >= Arrow 1.0.0 to read/decompress +func WithZstd() Option { + return func(cfg *config) { + cfg.codec = flatbuf.CompressionTypeZSTD + } +} + +// WithCompressConcurrency specifies a number of goroutines to spin up for +// concurrent compression of the body buffers when writing compress IPC records. +// If n <= 1 then compression will be done serially without goroutine +// parallelization. Default is 1. 
+func WithCompressConcurrency(n int) Option { + return func(cfg *config) { + if n <= 0 { + n = 1 + } + cfg.compressNP = n + } +} + +// WithEnsureNativeEndian specifies whether or not to automatically byte-swap +// buffers with endian-sensitive data if the schema's endianness is not the +// platform-native endianness. This includes all numeric types, temporal types, +// decimal types, as well as the offset buffers of variable-sized binary and +// list-like types. +// +// This is only relevant to ipc Reader objects, not to writers. This defaults +// to true. +func WithEnsureNativeEndian(v bool) Option { + return func(cfg *config) { + cfg.ensureNativeEndian = v + } +} + +// WithDelayedReadSchema alters the ipc.Reader behavior to delay attempting +// to read the schema from the stream until the first call to Next instead +// of immediately attempting to read a schema from the stream when created. +func WithDelayReadSchema(v bool) Option { + return func(cfg *config) { + cfg.noAutoSchema = v + } +} + +// WithDictionaryDeltas specifies whether or not to emit dictionary deltas. +func WithDictionaryDeltas(v bool) Option { + return func(cfg *config) { + cfg.emitDictDeltas = v + } +} + +// WithMinSpaceSavings specifies a percentage of space savings for +// compression to be applied to buffers. +// +// Space savings is calculated as (1.0 - compressedSize / uncompressedSize). +// +// For example, if minSpaceSavings = 0.1, a 100-byte body buffer won't +// undergo compression if its expected compressed size exceeds 90 bytes. +// If this option is unset, compression will be used indiscriminately. If +// no codec was supplied, this option is ignored. +// +// Values outside of the range [0,1] are handled as errors. +// +// Note that enabling this option may result in unreadable data for Arrow +// Go and C++ versions prior to 12.0.0. +func WithMinSpaceSavings(savings float64) Option { + return func(cfg *config) { + cfg.minSpaceSavings = &savings + } +} + +var ( + _ arrio.Reader = (*Reader)(nil) + _ arrio.Writer = (*Writer)(nil) + _ arrio.Reader = (*FileReader)(nil) + _ arrio.Writer = (*FileWriter)(nil) + + _ arrio.ReaderAt = (*FileReader)(nil) +) diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/message.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/message.go new file mode 100644 index 0000000000..c96869ec0e --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/message.go @@ -0,0 +1,246 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
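message.go below holds the low-level Message type and the MessageReader that the stream reader uses to pull one length-prefixed IPC message at a time. A rough sketch of a consumer's read loop, written as a standalone helper with the input stream left as an assumption:

package example

import (
	"errors"
	"io"
	"log"

	"github.com/apache/arrow-go/v18/arrow/ipc"
)

// drainMessages walks every IPC message in a raw stream and logs its header
// type and body length. io.EOF signals the end-of-stream marker.
func drainMessages(stream io.Reader) error {
	mr := ipc.NewMessageReader(stream)
	defer mr.Release()

	for {
		msg, err := mr.Message() // valid until the next call to Message
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("message type=%s bodyLen=%d", msg.Type(), msg.BodyLen())
	}
}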
+ +package ipc + +import ( + "encoding/binary" + "fmt" + "io" + "sync/atomic" + + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" +) + +// MetadataVersion represents the Arrow metadata version. +type MetadataVersion flatbuf.MetadataVersion + +const ( + MetadataV1 = MetadataVersion(flatbuf.MetadataVersionV1) // version for Arrow Format-0.1.0 + MetadataV2 = MetadataVersion(flatbuf.MetadataVersionV2) // version for Arrow Format-0.2.0 + MetadataV3 = MetadataVersion(flatbuf.MetadataVersionV3) // version for Arrow Format-0.3.0 to 0.7.1 + MetadataV4 = MetadataVersion(flatbuf.MetadataVersionV4) // version for >= Arrow Format-0.8.0 + MetadataV5 = MetadataVersion(flatbuf.MetadataVersionV5) // version for >= Arrow Format-1.0.0, backward compatible with v4 +) + +func (m MetadataVersion) String() string { + if v, ok := flatbuf.EnumNamesMetadataVersion[flatbuf.MetadataVersion(m)]; ok { + return v + } + return fmt.Sprintf("MetadataVersion(%d)", int16(m)) +} + +// MessageType represents the type of Message in an Arrow format. +type MessageType flatbuf.MessageHeader + +const ( + MessageNone = MessageType(flatbuf.MessageHeaderNONE) + MessageSchema = MessageType(flatbuf.MessageHeaderSchema) + MessageDictionaryBatch = MessageType(flatbuf.MessageHeaderDictionaryBatch) + MessageRecordBatch = MessageType(flatbuf.MessageHeaderRecordBatch) + MessageTensor = MessageType(flatbuf.MessageHeaderTensor) + MessageSparseTensor = MessageType(flatbuf.MessageHeaderSparseTensor) +) + +func (m MessageType) String() string { + if v, ok := flatbuf.EnumNamesMessageHeader[flatbuf.MessageHeader(m)]; ok { + return v + } + return fmt.Sprintf("MessageType(%d)", int(m)) +} + +// Message is an IPC message, including metadata and body. +type Message struct { + refCount atomic.Int64 + msg *flatbuf.Message + meta *memory.Buffer + body *memory.Buffer +} + +// NewMessage creates a new message from the metadata and body buffers. +// NewMessage panics if any of these buffers is nil. +func NewMessage(meta, body *memory.Buffer) *Message { + if meta == nil || body == nil { + panic("arrow/ipc: nil buffers") + } + meta.Retain() + body.Retain() + m := &Message{ + msg: flatbuf.GetRootAsMessage(meta.Bytes(), 0), + meta: meta, + body: body, + } + m.refCount.Add(1) + return m +} + +func newMessageFromFB(meta *flatbuf.Message, body *memory.Buffer) *Message { + if meta == nil || body == nil { + panic("arrow/ipc: nil buffers") + } + body.Retain() + m := &Message{ + msg: meta, + meta: memory.NewBufferBytes(meta.Table().Bytes), + body: body, + } + m.refCount.Add(1) + return m +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (msg *Message) Retain() { + msg.refCount.Add(1) +} + +// Release decreases the reference count by 1. +// Release may be called simultaneously from multiple goroutines. +// When the reference count goes to zero, the memory is freed. 
+func (msg *Message) Release() { + debug.Assert(msg.refCount.Load() > 0, "too many releases") + + if msg.refCount.Add(-1) == 0 { + msg.meta.Release() + msg.body.Release() + msg.msg = nil + msg.meta = nil + msg.body = nil + } +} + +func (msg *Message) Version() MetadataVersion { + return MetadataVersion(msg.msg.Version()) +} + +func (msg *Message) Type() MessageType { + return MessageType(msg.msg.HeaderType()) +} + +func (msg *Message) BodyLen() int64 { + return msg.msg.BodyLength() +} + +type MessageReader interface { + Message() (*Message, error) + Release() + Retain() +} + +// MessageReader reads messages from an io.Reader. +type messageReader struct { + r io.Reader + + refCount atomic.Int64 + msg *Message + + mem memory.Allocator +} + +// NewMessageReader returns a reader that reads messages from an input stream. +func NewMessageReader(r io.Reader, opts ...Option) MessageReader { + cfg := newConfig() + for _, opt := range opts { + opt(cfg) + } + + mr := &messageReader{r: r, mem: cfg.alloc} + mr.refCount.Add(1) + return mr +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (r *messageReader) Retain() { + r.refCount.Add(1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (r *messageReader) Release() { + debug.Assert(r.refCount.Load() > 0, "too many releases") + + if r.refCount.Add(-1) == 0 { + if r.msg != nil { + r.msg.Release() + r.msg = nil + } + } +} + +// Message returns the current message that has been extracted from the +// underlying stream. +// It is valid until the next call to Message. +func (r *messageReader) Message() (*Message, error) { + buf := make([]byte, 4) + _, err := io.ReadFull(r.r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read continuation indicator: %w", err) + } + var ( + cid = binary.LittleEndian.Uint32(buf) + msgLen int32 + ) + switch cid { + case 0: + // EOS message. + return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error? + case kIPCContToken: + _, err = io.ReadFull(r.r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message length: %w", err) + } + msgLen = int32(binary.LittleEndian.Uint32(buf)) + if msgLen == 0 { + // optional 0 EOS control message + return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error? 
+ } + + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + msgLen = int32(cid) + } + + buf = make([]byte, msgLen) + _, err = io.ReadFull(r.r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message metadata: %w", err) + } + + meta := flatbuf.GetRootAsMessage(buf, 0) + bodyLen := meta.BodyLength() + + body := memory.NewResizableBuffer(r.mem) + defer body.Release() + body.Resize(int(bodyLen)) + + _, err = io.ReadFull(r.r, body.Bytes()) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message body: %w", err) + } + + if r.msg != nil { + r.msg.Release() + r.msg = nil + } + r.msg = newMessageFromFB(meta, body) + + return r.msg, nil +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/metadata.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/metadata.go new file mode 100644 index 0000000000..b83c1a845c --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/metadata.go @@ -0,0 +1,1317 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "sort" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" + flatbuffers "github.com/google/flatbuffers/go" +) + +// Magic string identifying an Apache Arrow file. +var Magic = []byte("ARROW1") + +const ( + currentMetadataVersion = MetadataV5 + minMetadataVersion = MetadataV4 + + // constants for the extension type metadata keys for the type name and + // any extension metadata to be passed to deserialize. + ExtensionTypeKeyName = "ARROW:extension:name" + ExtensionMetadataKeyName = "ARROW:extension:metadata" + + // ARROW-109: We set this number arbitrarily to help catch user mistakes. 
For + // deeply nested schemas, it is expected the user will indicate explicitly the + // maximum allowed recursion depth + kMaxNestingDepth = 64 +) + +type startVecFunc func(b *flatbuffers.Builder, n int) flatbuffers.UOffsetT + +type fieldMetadata struct { + Len int64 + Nulls int64 + Offset int64 +} + +type bufferMetadata struct { + Offset int64 // relative offset into the memory page to the starting byte of the buffer + Len int64 // absolute length in bytes of the buffer +} + +type fileBlock struct { + offset int64 + meta int32 + body int64 + + r io.ReaderAt + mem memory.Allocator +} + +func (blk fileBlock) Offset() int64 { return blk.offset } +func (blk fileBlock) Meta() int32 { return blk.meta } +func (blk fileBlock) Body() int64 { return blk.body } + +func fileBlocksToFB(b *flatbuffers.Builder, blocks []dataBlock, start startVecFunc) flatbuffers.UOffsetT { + start(b, len(blocks)) + for i := len(blocks) - 1; i >= 0; i-- { + blk := blocks[i] + flatbuf.CreateBlock(b, blk.Offset(), blk.Meta(), blk.Body()) + } + + return b.EndVector(len(blocks)) +} + +func (blk fileBlock) NewMessage() (*Message, error) { + var ( + err error + buf []byte + body *memory.Buffer + meta *memory.Buffer + r = blk.section() + ) + + meta = memory.NewResizableBuffer(blk.mem) + meta.Resize(int(blk.meta)) + defer meta.Release() + + buf = meta.Bytes() + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message metadata: %w", err) + } + + prefix := 0 + switch binary.LittleEndian.Uint32(buf) { + case 0: + case kIPCContToken: + prefix = 8 + default: + // ARROW-6314: backwards compatibility for reading old IPC + // messages produced prior to version 0.15.0 + prefix = 4 + } + + // drop buf-size already known from blk.Meta + meta = memory.SliceBuffer(meta, prefix, int(blk.meta)-prefix) + defer meta.Release() + + body = memory.NewResizableBuffer(blk.mem) + defer body.Release() + body.Resize(int(blk.body)) + buf = body.Bytes() + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not read message body: %w", err) + } + + return NewMessage(meta, body), nil +} + +func (blk fileBlock) section() io.Reader { + return io.NewSectionReader(blk.r, blk.offset, int64(blk.meta)+blk.body) +} + +func unitFromFB(unit flatbuf.TimeUnit) arrow.TimeUnit { + switch unit { + case flatbuf.TimeUnitSECOND: + return arrow.Second + case flatbuf.TimeUnitMILLISECOND: + return arrow.Millisecond + case flatbuf.TimeUnitMICROSECOND: + return arrow.Microsecond + case flatbuf.TimeUnitNANOSECOND: + return arrow.Nanosecond + default: + panic(fmt.Errorf("arrow/ipc: invalid flatbuf.TimeUnit(%d) value", unit)) + } +} + +func unitToFB(unit arrow.TimeUnit) flatbuf.TimeUnit { + switch unit { + case arrow.Second: + return flatbuf.TimeUnitSECOND + case arrow.Millisecond: + return flatbuf.TimeUnitMILLISECOND + case arrow.Microsecond: + return flatbuf.TimeUnitMICROSECOND + case arrow.Nanosecond: + return flatbuf.TimeUnitNANOSECOND + default: + panic(fmt.Errorf("arrow/ipc: invalid arrow.TimeUnit(%d) value", unit)) + } +} + +// initFB is a helper function to handle flatbuffers' polymorphism. 
+func initFB(t interface { + Table() flatbuffers.Table + Init([]byte, flatbuffers.UOffsetT) +}, f func(tbl *flatbuffers.Table) bool) { + tbl := t.Table() + if !f(&tbl) { + panic(fmt.Errorf("arrow/ipc: could not initialize %T from flatbuffer", t)) + } + t.Init(tbl.Bytes, tbl.Pos) +} + +func fieldFromFB(field *flatbuf.Field, pos dictutils.FieldPos, memo *dictutils.Memo) (arrow.Field, error) { + var ( + err error + o arrow.Field + ) + + o.Name = string(field.Name()) + o.Nullable = field.Nullable() + o.Metadata, err = metadataFromFB(field) + if err != nil { + return o, err + } + + n := field.ChildrenLength() + children := make([]arrow.Field, n) + for i := range children { + var childFB flatbuf.Field + if !field.Children(&childFB, i) { + return o, fmt.Errorf("arrow/ipc: could not load field child %d", i) + + } + child, err := fieldFromFB(&childFB, pos.Child(int32(i)), memo) + if err != nil { + return o, fmt.Errorf("arrow/ipc: could not convert field child %d: %w", i, err) + } + children[i] = child + } + + o.Type, err = typeFromFB(field, pos, children, &o.Metadata, memo) + if err != nil { + return o, fmt.Errorf("arrow/ipc: could not convert field type: %w", err) + } + + return o, nil +} + +func fieldToFB(b *flatbuffers.Builder, pos dictutils.FieldPos, field arrow.Field, memo *dictutils.Mapper) flatbuffers.UOffsetT { + var visitor = fieldVisitor{b: b, memo: memo, pos: pos, meta: make(map[string]string)} + return visitor.result(field) +} + +type fieldVisitor struct { + b *flatbuffers.Builder + memo *dictutils.Mapper + pos dictutils.FieldPos + dtype flatbuf.Type + offset flatbuffers.UOffsetT + kids []flatbuffers.UOffsetT + meta map[string]string +} + +func (fv *fieldVisitor) visit(field arrow.Field) { + dt := field.Type + switch dt := dt.(type) { + case *arrow.NullType: + fv.dtype = flatbuf.TypeNull + flatbuf.NullStart(fv.b) + fv.offset = flatbuf.NullEnd(fv.b) + + case *arrow.BooleanType: + fv.dtype = flatbuf.TypeBool + flatbuf.BoolStart(fv.b) + fv.offset = flatbuf.BoolEnd(fv.b) + + case *arrow.Uint8Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint16Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint32Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Uint64Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false) + + case *arrow.Int8Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int16Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int32Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Int64Type: + fv.dtype = flatbuf.TypeInt + fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true) + + case *arrow.Float16Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Float32Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case *arrow.Float64Type: + fv.dtype = flatbuf.TypeFloatingPoint + fv.offset = floatToFB(fv.b, int32(dt.BitWidth())) + + case arrow.DecimalType: + fv.dtype = flatbuf.TypeDecimal + flatbuf.DecimalStart(fv.b) + flatbuf.DecimalAddPrecision(fv.b, dt.GetPrecision()) + flatbuf.DecimalAddScale(fv.b, dt.GetScale()) + flatbuf.DecimalAddBitWidth(fv.b, int32(dt.BitWidth())) + fv.offset = flatbuf.DecimalEnd(fv.b) 
+ + case *arrow.FixedSizeBinaryType: + fv.dtype = flatbuf.TypeFixedSizeBinary + flatbuf.FixedSizeBinaryStart(fv.b) + flatbuf.FixedSizeBinaryAddByteWidth(fv.b, int32(dt.ByteWidth)) + fv.offset = flatbuf.FixedSizeBinaryEnd(fv.b) + + case *arrow.BinaryType: + fv.dtype = flatbuf.TypeBinary + flatbuf.BinaryStart(fv.b) + fv.offset = flatbuf.BinaryEnd(fv.b) + + case *arrow.LargeBinaryType: + fv.dtype = flatbuf.TypeLargeBinary + flatbuf.LargeBinaryStart(fv.b) + fv.offset = flatbuf.LargeBinaryEnd(fv.b) + + case *arrow.StringType: + fv.dtype = flatbuf.TypeUtf8 + flatbuf.Utf8Start(fv.b) + fv.offset = flatbuf.Utf8End(fv.b) + + case *arrow.LargeStringType: + fv.dtype = flatbuf.TypeLargeUtf8 + flatbuf.LargeUtf8Start(fv.b) + fv.offset = flatbuf.LargeUtf8End(fv.b) + + case *arrow.BinaryViewType: + fv.dtype = flatbuf.TypeBinaryView + flatbuf.BinaryViewStart(fv.b) + fv.offset = flatbuf.BinaryViewEnd(fv.b) + + case *arrow.StringViewType: + fv.dtype = flatbuf.TypeUtf8View + flatbuf.Utf8ViewStart(fv.b) + fv.offset = flatbuf.Utf8ViewEnd(fv.b) + + case *arrow.Date32Type: + fv.dtype = flatbuf.TypeDate + flatbuf.DateStart(fv.b) + flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitDAY) + fv.offset = flatbuf.DateEnd(fv.b) + + case *arrow.Date64Type: + fv.dtype = flatbuf.TypeDate + flatbuf.DateStart(fv.b) + flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitMILLISECOND) + fv.offset = flatbuf.DateEnd(fv.b) + + case *arrow.Time32Type: + fv.dtype = flatbuf.TypeTime + flatbuf.TimeStart(fv.b) + flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit)) + flatbuf.TimeAddBitWidth(fv.b, 32) + fv.offset = flatbuf.TimeEnd(fv.b) + + case *arrow.Time64Type: + fv.dtype = flatbuf.TypeTime + flatbuf.TimeStart(fv.b) + flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit)) + flatbuf.TimeAddBitWidth(fv.b, 64) + fv.offset = flatbuf.TimeEnd(fv.b) + + case *arrow.TimestampType: + fv.dtype = flatbuf.TypeTimestamp + unit := unitToFB(dt.Unit) + var tz flatbuffers.UOffsetT + if dt.TimeZone != "" { + tz = fv.b.CreateString(dt.TimeZone) + } + flatbuf.TimestampStart(fv.b) + flatbuf.TimestampAddUnit(fv.b, unit) + flatbuf.TimestampAddTimezone(fv.b, tz) + fv.offset = flatbuf.TimestampEnd(fv.b) + + case *arrow.StructType: + fv.dtype = flatbuf.TypeStruct_ + offsets := make([]flatbuffers.UOffsetT, dt.NumFields()) + for i, field := range dt.Fields() { + offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo) + } + flatbuf.Struct_Start(fv.b) + for i := len(offsets) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(offsets[i]) + } + fv.offset = flatbuf.Struct_End(fv.b) + fv.kids = append(fv.kids, offsets...) 
+ + case *arrow.ListType: + fv.dtype = flatbuf.TypeList + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.ListStart(fv.b) + fv.offset = flatbuf.ListEnd(fv.b) + + case *arrow.LargeListType: + fv.dtype = flatbuf.TypeLargeList + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.LargeListStart(fv.b) + fv.offset = flatbuf.LargeListEnd(fv.b) + + case *arrow.ListViewType: + fv.dtype = flatbuf.TypeListView + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.ListViewStart(fv.b) + fv.offset = flatbuf.ListViewEnd(fv.b) + + case *arrow.LargeListViewType: + fv.dtype = flatbuf.TypeLargeListView + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.LargeListViewStart(fv.b) + fv.offset = flatbuf.LargeListViewEnd(fv.b) + + case *arrow.FixedSizeListType: + fv.dtype = flatbuf.TypeFixedSizeList + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.FixedSizeListStart(fv.b) + flatbuf.FixedSizeListAddListSize(fv.b, dt.Len()) + fv.offset = flatbuf.FixedSizeListEnd(fv.b) + + case *arrow.MonthIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitYEAR_MONTH) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.DayTimeIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitDAY_TIME) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.MonthDayNanoIntervalType: + fv.dtype = flatbuf.TypeInterval + flatbuf.IntervalStart(fv.b) + flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitMONTH_DAY_NANO) + fv.offset = flatbuf.IntervalEnd(fv.b) + + case *arrow.DurationType: + fv.dtype = flatbuf.TypeDuration + unit := unitToFB(dt.Unit) + flatbuf.DurationStart(fv.b) + flatbuf.DurationAddUnit(fv.b, unit) + fv.offset = flatbuf.DurationEnd(fv.b) + + case *arrow.MapType: + fv.dtype = flatbuf.TypeMap + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.MapStart(fv.b) + flatbuf.MapAddKeysSorted(fv.b, dt.KeysSorted) + fv.offset = flatbuf.MapEnd(fv.b) + + case *arrow.RunEndEncodedType: + fv.dtype = flatbuf.TypeRunEndEncoded + var offsets [2]flatbuffers.UOffsetT + offsets[0] = fieldToFB(fv.b, fv.pos.Child(0), + arrow.Field{Name: "run_ends", Type: dt.RunEnds()}, fv.memo) + offsets[1] = fieldToFB(fv.b, fv.pos.Child(1), + arrow.Field{Name: "values", Type: dt.Encoded(), Nullable: true}, fv.memo) + flatbuf.RunEndEncodedStart(fv.b) + fv.b.PrependUOffsetT(offsets[1]) + fv.b.PrependUOffsetT(offsets[0]) + fv.offset = flatbuf.RunEndEncodedEnd(fv.b) + fv.kids = append(fv.kids, offsets[0], offsets[1]) + + case arrow.ExtensionType: + field.Type = dt.StorageType() + fv.visit(field) + fv.meta[ExtensionTypeKeyName] = dt.ExtensionName() + fv.meta[ExtensionMetadataKeyName] = string(dt.Serialize()) + + case *arrow.DictionaryType: + field.Type = dt.ValueType + fv.visit(field) + + case arrow.UnionType: + fv.dtype = flatbuf.TypeUnion + offsets := make([]flatbuffers.UOffsetT, dt.NumFields()) + for i, field := range dt.Fields() { + offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo) + } + + codes := dt.TypeCodes() + flatbuf.UnionStartTypeIdsVector(fv.b, len(codes)) + + for i := len(codes) - 1; i >= 0; i-- { + fv.b.PlaceInt32(int32(codes[i])) + } + fbTypeIDs := fv.b.EndVector(len(dt.TypeCodes())) + flatbuf.UnionStart(fv.b) + switch 
dt.Mode() { + case arrow.SparseMode: + flatbuf.UnionAddMode(fv.b, flatbuf.UnionModeSparse) + case arrow.DenseMode: + flatbuf.UnionAddMode(fv.b, flatbuf.UnionModeDense) + default: + panic("invalid union mode") + } + flatbuf.UnionAddTypeIds(fv.b, fbTypeIDs) + fv.offset = flatbuf.UnionEnd(fv.b) + fv.kids = append(fv.kids, offsets...) + + default: + err := fmt.Errorf("arrow/ipc: invalid data type %v", dt) + panic(err) // FIXME(sbinet): implement all data-types. + } +} + +func (fv *fieldVisitor) result(field arrow.Field) flatbuffers.UOffsetT { + nameFB := fv.b.CreateString(field.Name) + + fv.visit(field) + + flatbuf.FieldStartChildrenVector(fv.b, len(fv.kids)) + for i := len(fv.kids) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(fv.kids[i]) + } + kidsFB := fv.b.EndVector(len(fv.kids)) + + storageType := field.Type + if storageType.ID() == arrow.EXTENSION { + storageType = storageType.(arrow.ExtensionType).StorageType() + } + + var dictFB flatbuffers.UOffsetT + if storageType.ID() == arrow.DICTIONARY { + idxType := field.Type.(*arrow.DictionaryType).IndexType.(arrow.FixedWidthDataType) + + dictID, err := fv.memo.GetFieldID(fv.pos.Path()) + if err != nil { + panic(err) + } + var signed bool + switch idxType.ID() { + case arrow.UINT8, arrow.UINT16, arrow.UINT32, arrow.UINT64: + signed = false + case arrow.INT8, arrow.INT16, arrow.INT32, arrow.INT64: + signed = true + } + indexTypeOffset := intToFB(fv.b, int32(idxType.BitWidth()), signed) + flatbuf.DictionaryEncodingStart(fv.b) + flatbuf.DictionaryEncodingAddId(fv.b, dictID) + flatbuf.DictionaryEncodingAddIndexType(fv.b, indexTypeOffset) + flatbuf.DictionaryEncodingAddIsOrdered(fv.b, field.Type.(*arrow.DictionaryType).Ordered) + dictFB = flatbuf.DictionaryEncodingEnd(fv.b) + } + + var ( + metaFB flatbuffers.UOffsetT + kvs []flatbuffers.UOffsetT + ) + for i, k := range field.Metadata.Keys() { + v := field.Metadata.Values()[i] + kk := fv.b.CreateString(k) + vv := fv.b.CreateString(v) + flatbuf.KeyValueStart(fv.b) + flatbuf.KeyValueAddKey(fv.b, kk) + flatbuf.KeyValueAddValue(fv.b, vv) + kvs = append(kvs, flatbuf.KeyValueEnd(fv.b)) + } + { + keys := make([]string, 0, len(fv.meta)) + for k := range fv.meta { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := fv.meta[k] + kk := fv.b.CreateString(k) + vv := fv.b.CreateString(v) + flatbuf.KeyValueStart(fv.b) + flatbuf.KeyValueAddKey(fv.b, kk) + flatbuf.KeyValueAddValue(fv.b, vv) + kvs = append(kvs, flatbuf.KeyValueEnd(fv.b)) + } + } + if len(kvs) > 0 { + flatbuf.FieldStartCustomMetadataVector(fv.b, len(kvs)) + for i := len(kvs) - 1; i >= 0; i-- { + fv.b.PrependUOffsetT(kvs[i]) + } + metaFB = fv.b.EndVector(len(kvs)) + } + + flatbuf.FieldStart(fv.b) + flatbuf.FieldAddName(fv.b, nameFB) + flatbuf.FieldAddNullable(fv.b, field.Nullable) + flatbuf.FieldAddTypeType(fv.b, fv.dtype) + flatbuf.FieldAddType(fv.b, fv.offset) + flatbuf.FieldAddDictionary(fv.b, dictFB) + flatbuf.FieldAddChildren(fv.b, kidsFB) + flatbuf.FieldAddCustomMetadata(fv.b, metaFB) + + offset := flatbuf.FieldEnd(fv.b) + + return offset +} + +func typeFromFB(field *flatbuf.Field, pos dictutils.FieldPos, children []arrow.Field, md *arrow.Metadata, memo *dictutils.Memo) (arrow.DataType, error) { + var data flatbuffers.Table + if !field.Type(&data) { + return nil, fmt.Errorf("arrow/ipc: could not load field type data") + } + + dt, err := concreteTypeFromFB(field.TypeType(), data, children) + if err != nil { + return dt, err + } + + var ( + dictID = int64(-1) + dictValueType arrow.DataType + encoding = 
field.Dictionary(nil) + ) + if encoding != nil { + var idt flatbuf.Int + encoding.IndexType(&idt) + idxType, err := intFromFB(idt) + if err != nil { + return nil, err + } + + dictValueType = dt + dt = &arrow.DictionaryType{IndexType: idxType, ValueType: dictValueType, Ordered: encoding.IsOrdered()} + dictID = encoding.Id() + + if err = memo.Mapper.AddField(dictID, pos.Path()); err != nil { + return dt, err + } + if err = memo.AddType(dictID, dictValueType); err != nil { + return dt, err + } + + } + + // look for extension metadata in custom metadata field. + if md.Len() > 0 { + i := md.FindKey(ExtensionTypeKeyName) + if i < 0 { + return dt, err + } + + extType := arrow.GetExtensionType(md.Values()[i]) + if extType == nil { + // if the extension type is unknown, we do not error here. + // simply return the storage type. + return dt, err + } + + var ( + data string + dataIdx int + ) + + if dataIdx = md.FindKey(ExtensionMetadataKeyName); dataIdx >= 0 { + data = md.Values()[dataIdx] + } + + dt, err = extType.Deserialize(dt, data) + if err != nil { + return dt, err + } + + mdkeys := md.Keys() + mdvals := md.Values() + if dataIdx < 0 { + // if there was no extension metadata, just the name, we only have to + // remove the extension name metadata key/value to ensure roundtrip + // metadata consistency + *md = arrow.NewMetadata(append(mdkeys[:i], mdkeys[i+1:]...), append(mdvals[:i], mdvals[i+1:]...)) + } else { + // if there was extension metadata, we need to remove both the type name + // and the extension metadata keys and values. + newkeys := make([]string, 0, md.Len()-2) + newvals := make([]string, 0, md.Len()-2) + for j := range mdkeys { + if j != i && j != dataIdx { // copy everything except the extension metadata keys/values + newkeys = append(newkeys, mdkeys[j]) + newvals = append(newvals, mdvals[j]) + } + } + *md = arrow.NewMetadata(newkeys, newvals) + } + } + + return dt, err +} + +func concreteTypeFromFB(typ flatbuf.Type, data flatbuffers.Table, children []arrow.Field) (arrow.DataType, error) { + switch typ { + case flatbuf.TypeNONE: + return nil, fmt.Errorf("arrow/ipc: Type metadata cannot be none") + + case flatbuf.TypeNull: + return arrow.Null, nil + + case flatbuf.TypeInt: + var dt flatbuf.Int + dt.Init(data.Bytes, data.Pos) + return intFromFB(dt) + + case flatbuf.TypeFloatingPoint: + var dt flatbuf.FloatingPoint + dt.Init(data.Bytes, data.Pos) + return floatFromFB(dt) + + case flatbuf.TypeDecimal: + var dt flatbuf.Decimal + dt.Init(data.Bytes, data.Pos) + return decimalFromFB(dt) + + case flatbuf.TypeBinary: + return arrow.BinaryTypes.Binary, nil + + case flatbuf.TypeFixedSizeBinary: + var dt flatbuf.FixedSizeBinary + dt.Init(data.Bytes, data.Pos) + return &arrow.FixedSizeBinaryType{ByteWidth: int(dt.ByteWidth())}, nil + + case flatbuf.TypeUtf8: + return arrow.BinaryTypes.String, nil + + case flatbuf.TypeLargeBinary: + return arrow.BinaryTypes.LargeBinary, nil + + case flatbuf.TypeLargeUtf8: + return arrow.BinaryTypes.LargeString, nil + + case flatbuf.TypeUtf8View: + return arrow.BinaryTypes.StringView, nil + + case flatbuf.TypeBinaryView: + return arrow.BinaryTypes.BinaryView, nil + + case flatbuf.TypeBool: + return arrow.FixedWidthTypes.Boolean, nil + + case flatbuf.TypeList: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: List must have exactly 1 child field (got=%d)", len(children)) + } + dt := arrow.ListOfField(children[0]) + return dt, nil + + case flatbuf.TypeLargeList: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: LargeList must have exactly 
1 child field (got=%d)", len(children)) + } + dt := arrow.LargeListOfField(children[0]) + return dt, nil + + case flatbuf.TypeListView: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: ListView must have exactly 1 child field (got=%d)", len(children)) + } + dt := arrow.ListViewOfField(children[0]) + return dt, nil + + case flatbuf.TypeLargeListView: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: LargeListView must have exactly 1 child field (got=%d)", len(children)) + } + dt := arrow.LargeListViewOfField(children[0]) + return dt, nil + + case flatbuf.TypeFixedSizeList: + var dt flatbuf.FixedSizeList + dt.Init(data.Bytes, data.Pos) + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: FixedSizeList must have exactly 1 child field (got=%d)", len(children)) + } + ret := arrow.FixedSizeListOfField(dt.ListSize(), children[0]) + return ret, nil + + case flatbuf.TypeStruct_: + return arrow.StructOf(children...), nil + + case flatbuf.TypeUnion: + var dt flatbuf.Union + dt.Init(data.Bytes, data.Pos) + var ( + mode arrow.UnionMode + typeIDs []arrow.UnionTypeCode + ) + + switch dt.Mode() { + case flatbuf.UnionModeSparse: + mode = arrow.SparseMode + case flatbuf.UnionModeDense: + mode = arrow.DenseMode + } + + typeIDLen := dt.TypeIdsLength() + + if typeIDLen == 0 { + for i := range children { + typeIDs = append(typeIDs, int8(i)) + } + } else { + for i := 0; i < typeIDLen; i++ { + id := dt.TypeIds(i) + code := arrow.UnionTypeCode(id) + if int32(code) != id { + return nil, errors.New("union type id out of bounds") + } + typeIDs = append(typeIDs, code) + } + } + + return arrow.UnionOf(mode, children, typeIDs), nil + + case flatbuf.TypeTime: + var dt flatbuf.Time + dt.Init(data.Bytes, data.Pos) + return timeFromFB(dt) + + case flatbuf.TypeTimestamp: + var dt flatbuf.Timestamp + dt.Init(data.Bytes, data.Pos) + return timestampFromFB(dt) + + case flatbuf.TypeDate: + var dt flatbuf.Date + dt.Init(data.Bytes, data.Pos) + return dateFromFB(dt) + + case flatbuf.TypeInterval: + var dt flatbuf.Interval + dt.Init(data.Bytes, data.Pos) + return intervalFromFB(dt) + + case flatbuf.TypeDuration: + var dt flatbuf.Duration + dt.Init(data.Bytes, data.Pos) + return durationFromFB(dt) + + case flatbuf.TypeMap: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: Map must have exactly 1 child field") + } + + if children[0].Nullable || children[0].Type.ID() != arrow.STRUCT || len(children[0].Type.(*arrow.StructType).Fields()) != 2 { + return nil, fmt.Errorf("arrow/ipc: Map's key-item pairs must be non-nullable structs") + } + + pairType := children[0].Type.(*arrow.StructType) + if pairType.Field(0).Nullable { + return nil, fmt.Errorf("arrow/ipc: Map's keys must be non-nullable") + } + + var dt flatbuf.Map + dt.Init(data.Bytes, data.Pos) + ret := arrow.MapOf(pairType.Field(0).Type, pairType.Field(1).Type) + ret.SetItemNullable(pairType.Field(1).Nullable) + ret.KeysSorted = dt.KeysSorted() + return ret, nil + + case flatbuf.TypeRunEndEncoded: + if len(children) != 2 { + return nil, fmt.Errorf("%w: arrow/ipc: RunEndEncoded must have exactly 2 child fields", arrow.ErrInvalid) + } + switch children[0].Type.ID() { + case arrow.INT16, arrow.INT32, arrow.INT64: + default: + return nil, fmt.Errorf("%w: arrow/ipc: run-end encoded run_ends field must be one of int16, int32, or int64 type", arrow.ErrInvalid) + } + return arrow.RunEndEncodedOf(children[0].Type, children[1].Type), nil + + default: + panic(fmt.Errorf("arrow/ipc: type %v not implemented", flatbuf.EnumNamesType[typ])) + } +} 
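+
+// As a minimal sketch of how the schema helpers in this file compose (assuming
+// the generated flatbuf.GetRootAsSchema accessor; roundTripped is a
+// hypothetical variable name), a schema can be round-tripped through its
+// flatbuffer form roughly as follows:
+//
+//	var mapper dictutils.Mapper
+//	mapper.ImportSchema(schema)
+//	b := flatbuffers.NewBuilder(1024)
+//	b.Finish(schemaToFB(b, schema, &mapper))
+//	memo := dictutils.NewMemo()
+//	roundTripped, err := schemaFromFB(flatbuf.GetRootAsSchema(b.FinishedBytes(), 0), &memo)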
+ +func intFromFB(data flatbuf.Int) (arrow.DataType, error) { + bw := data.BitWidth() + if bw > 64 { + return nil, fmt.Errorf("arrow/ipc: integers with more than 64 bits not implemented (bits=%d)", bw) + } + if bw < 8 { + return nil, fmt.Errorf("arrow/ipc: integers with less than 8 bits not implemented (bits=%d)", bw) + } + + switch bw { + case 8: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint8, nil + } + return arrow.PrimitiveTypes.Int8, nil + + case 16: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint16, nil + } + return arrow.PrimitiveTypes.Int16, nil + + case 32: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint32, nil + } + return arrow.PrimitiveTypes.Int32, nil + + case 64: + if !data.IsSigned() { + return arrow.PrimitiveTypes.Uint64, nil + } + return arrow.PrimitiveTypes.Int64, nil + default: + return nil, fmt.Errorf("arrow/ipc: integers not in cstdint are not implemented") + } +} + +func intToFB(b *flatbuffers.Builder, bw int32, isSigned bool) flatbuffers.UOffsetT { + flatbuf.IntStart(b) + flatbuf.IntAddBitWidth(b, bw) + flatbuf.IntAddIsSigned(b, isSigned) + return flatbuf.IntEnd(b) +} + +func floatFromFB(data flatbuf.FloatingPoint) (arrow.DataType, error) { + switch p := data.Precision(); p { + case flatbuf.PrecisionHALF: + return arrow.FixedWidthTypes.Float16, nil + case flatbuf.PrecisionSINGLE: + return arrow.PrimitiveTypes.Float32, nil + case flatbuf.PrecisionDOUBLE: + return arrow.PrimitiveTypes.Float64, nil + default: + return nil, fmt.Errorf("arrow/ipc: floating point type with %d precision not implemented", p) + } +} + +func floatToFB(b *flatbuffers.Builder, bw int32) flatbuffers.UOffsetT { + switch bw { + case 16: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionHALF) + return flatbuf.FloatingPointEnd(b) + case 32: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionSINGLE) + return flatbuf.FloatingPointEnd(b) + case 64: + flatbuf.FloatingPointStart(b) + flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionDOUBLE) + return flatbuf.FloatingPointEnd(b) + default: + panic(fmt.Errorf("arrow/ipc: invalid floating point precision %d-bits", bw)) + } +} + +func decimalFromFB(data flatbuf.Decimal) (arrow.DataType, error) { + switch data.BitWidth() { + case 32: + return &arrow.Decimal32Type{Precision: data.Precision(), Scale: data.Scale()}, nil + case 64: + return &arrow.Decimal64Type{Precision: data.Precision(), Scale: data.Scale()}, nil + case 128: + return &arrow.Decimal128Type{Precision: data.Precision(), Scale: data.Scale()}, nil + case 256: + return &arrow.Decimal256Type{Precision: data.Precision(), Scale: data.Scale()}, nil + default: + return nil, fmt.Errorf("arrow/ipc: invalid decimal bitwidth: %d", data.BitWidth()) + } +} + +func timeFromFB(data flatbuf.Time) (arrow.DataType, error) { + bw := data.BitWidth() + unit := unitFromFB(data.Unit()) + + switch bw { + case 32: + switch unit { + case arrow.Millisecond: + return arrow.FixedWidthTypes.Time32ms, nil + case arrow.Second: + return arrow.FixedWidthTypes.Time32s, nil + default: + return nil, fmt.Errorf("arrow/ipc: Time32 type with %v unit not implemented", unit) + } + case 64: + switch unit { + case arrow.Nanosecond: + return arrow.FixedWidthTypes.Time64ns, nil + case arrow.Microsecond: + return arrow.FixedWidthTypes.Time64us, nil + default: + return nil, fmt.Errorf("arrow/ipc: Time64 type with %v unit not implemented", unit) + } + default: + return nil, fmt.Errorf("arrow/ipc: Time type with %d bitwidth not 
implemented", bw) + } +} + +func timestampFromFB(data flatbuf.Timestamp) (arrow.DataType, error) { + unit := unitFromFB(data.Unit()) + tz := string(data.Timezone()) + return &arrow.TimestampType{Unit: unit, TimeZone: tz}, nil +} + +func dateFromFB(data flatbuf.Date) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.DateUnitDAY: + return arrow.FixedWidthTypes.Date32, nil + case flatbuf.DateUnitMILLISECOND: + return arrow.FixedWidthTypes.Date64, nil + } + return nil, fmt.Errorf("arrow/ipc: Date type with %d unit not implemented", data.Unit()) +} + +func intervalFromFB(data flatbuf.Interval) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.IntervalUnitYEAR_MONTH: + return arrow.FixedWidthTypes.MonthInterval, nil + case flatbuf.IntervalUnitDAY_TIME: + return arrow.FixedWidthTypes.DayTimeInterval, nil + case flatbuf.IntervalUnitMONTH_DAY_NANO: + return arrow.FixedWidthTypes.MonthDayNanoInterval, nil + } + return nil, fmt.Errorf("arrow/ipc: Interval type with %d unit not implemented", data.Unit()) +} + +func durationFromFB(data flatbuf.Duration) (arrow.DataType, error) { + switch data.Unit() { + case flatbuf.TimeUnitSECOND: + return arrow.FixedWidthTypes.Duration_s, nil + case flatbuf.TimeUnitMILLISECOND: + return arrow.FixedWidthTypes.Duration_ms, nil + case flatbuf.TimeUnitMICROSECOND: + return arrow.FixedWidthTypes.Duration_us, nil + case flatbuf.TimeUnitNANOSECOND: + return arrow.FixedWidthTypes.Duration_ns, nil + } + return nil, fmt.Errorf("arrow/ipc: Duration type with %d unit not implemented", data.Unit()) +} + +type customMetadataer interface { + CustomMetadataLength() int + CustomMetadata(*flatbuf.KeyValue, int) bool +} + +func metadataFromFB(md customMetadataer) (arrow.Metadata, error) { + var ( + keys = make([]string, md.CustomMetadataLength()) + vals = make([]string, md.CustomMetadataLength()) + ) + + for i := range keys { + var kv flatbuf.KeyValue + if !md.CustomMetadata(&kv, i) { + return arrow.Metadata{}, fmt.Errorf("arrow/ipc: could not read key-value %d from flatbuffer", i) + } + keys[i] = string(kv.Key()) + vals[i] = string(kv.Value()) + } + + return arrow.NewMetadata(keys, vals), nil +} + +func metadataToFB(b *flatbuffers.Builder, meta arrow.Metadata, start startVecFunc) flatbuffers.UOffsetT { + if meta.Len() == 0 { + return 0 + } + + n := meta.Len() + kvs := make([]flatbuffers.UOffsetT, n) + for i := range kvs { + k := b.CreateString(meta.Keys()[i]) + v := b.CreateString(meta.Values()[i]) + flatbuf.KeyValueStart(b) + flatbuf.KeyValueAddKey(b, k) + flatbuf.KeyValueAddValue(b, v) + kvs[i] = flatbuf.KeyValueEnd(b) + } + + start(b, n) + for i := n - 1; i >= 0; i-- { + b.PrependUOffsetT(kvs[i]) + } + return b.EndVector(n) +} + +func schemaFromFB(schema *flatbuf.Schema, memo *dictutils.Memo) (*arrow.Schema, error) { + var ( + err error + fields = make([]arrow.Field, schema.FieldsLength()) + pos = dictutils.NewFieldPos() + ) + + for i := range fields { + var field flatbuf.Field + if !schema.Fields(&field, i) { + return nil, fmt.Errorf("arrow/ipc: could not read field %d from schema", i) + } + + fields[i], err = fieldFromFB(&field, pos.Child(int32(i)), memo) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not convert field %d from flatbuf: %w", i, err) + } + } + + md, err := metadataFromFB(schema) + if err != nil { + return nil, fmt.Errorf("arrow/ipc: could not convert schema metadata from flatbuf: %w", err) + } + + return arrow.NewSchemaWithEndian(fields, &md, endian.Endianness(schema.Endianness())), nil +} + +func schemaToFB(b 
*flatbuffers.Builder, schema *arrow.Schema, memo *dictutils.Mapper) flatbuffers.UOffsetT { + fields := make([]flatbuffers.UOffsetT, schema.NumFields()) + pos := dictutils.NewFieldPos() + for i := 0; i < schema.NumFields(); i++ { + fields[i] = fieldToFB(b, pos.Child(int32(i)), schema.Field(i), memo) + } + + flatbuf.SchemaStartFieldsVector(b, len(fields)) + for i := len(fields) - 1; i >= 0; i-- { + b.PrependUOffsetT(fields[i]) + } + fieldsFB := b.EndVector(len(fields)) + + metaFB := metadataToFB(b, schema.Metadata(), flatbuf.SchemaStartCustomMetadataVector) + + flatbuf.SchemaStart(b) + flatbuf.SchemaAddEndianness(b, flatbuf.Endianness(schema.Endianness())) + flatbuf.SchemaAddFields(b, fieldsFB) + flatbuf.SchemaAddCustomMetadata(b, metaFB) + offset := flatbuf.SchemaEnd(b) + + return offset +} + +// payloadFromSchema returns a slice of payloads corresponding to the given schema. +// Callers of payloadFromSchema will need to call Release after use. +func payloadFromSchema(schema *arrow.Schema, mem memory.Allocator, memo *dictutils.Mapper) payloads { + ps := make(payloads, 1) + ps[0].msg = MessageSchema + ps[0].meta = writeSchemaMessage(schema, mem, memo) + + return ps +} + +func writeFBBuilder(b *flatbuffers.Builder, mem memory.Allocator) *memory.Buffer { + raw := b.FinishedBytes() + buf := memory.NewResizableBuffer(mem) + buf.Resize(len(raw)) + copy(buf.Bytes(), raw) + return buf +} + +func writeMessageFB(b *flatbuffers.Builder, mem memory.Allocator, hdrType flatbuf.MessageHeader, hdr flatbuffers.UOffsetT, bodyLen int64) *memory.Buffer { + + flatbuf.MessageStart(b) + flatbuf.MessageAddVersion(b, flatbuf.MetadataVersion(currentMetadataVersion)) + flatbuf.MessageAddHeaderType(b, hdrType) + flatbuf.MessageAddHeader(b, hdr) + flatbuf.MessageAddBodyLength(b, bodyLen) + msg := flatbuf.MessageEnd(b) + b.Finish(msg) + + return writeFBBuilder(b, mem) +} + +func writeSchemaMessage(schema *arrow.Schema, mem memory.Allocator, dict *dictutils.Mapper) *memory.Buffer { + b := flatbuffers.NewBuilder(1024) + schemaFB := schemaToFB(b, schema, dict) + return writeMessageFB(b, mem, flatbuf.MessageHeaderSchema, schemaFB, 0) +} + +func writeFileFooter(schema *arrow.Schema, dicts, recs []dataBlock, w io.Writer) error { + var ( + b = flatbuffers.NewBuilder(1024) + memo dictutils.Mapper + ) + memo.ImportSchema(schema) + + schemaFB := schemaToFB(b, schema, &memo) + dictsFB := fileBlocksToFB(b, dicts, flatbuf.FooterStartDictionariesVector) + recsFB := fileBlocksToFB(b, recs, flatbuf.FooterStartRecordBatchesVector) + + flatbuf.FooterStart(b) + flatbuf.FooterAddVersion(b, flatbuf.MetadataVersion(currentMetadataVersion)) + flatbuf.FooterAddSchema(b, schemaFB) + flatbuf.FooterAddDictionaries(b, dictsFB) + flatbuf.FooterAddRecordBatches(b, recsFB) + footer := flatbuf.FooterEnd(b) + + b.Finish(footer) + + _, err := w.Write(b.FinishedBytes()) + return err +} + +func writeRecordMessage(mem memory.Allocator, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType, variadicCounts []int64) *memory.Buffer { + b := flatbuffers.NewBuilder(0) + recFB := recordToFB(b, size, bodyLength, fields, meta, codec, variadicCounts) + return writeMessageFB(b, mem, flatbuf.MessageHeaderRecordBatch, recFB, bodyLength) +} + +func writeDictionaryMessage(mem memory.Allocator, id int64, isDelta bool, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType, variadicCounts []int64) *memory.Buffer { + b := flatbuffers.NewBuilder(0) + recFB := recordToFB(b, size, 
bodyLength, fields, meta, codec, variadicCounts) + + flatbuf.DictionaryBatchStart(b) + flatbuf.DictionaryBatchAddId(b, id) + flatbuf.DictionaryBatchAddData(b, recFB) + flatbuf.DictionaryBatchAddIsDelta(b, isDelta) + dictFB := flatbuf.DictionaryBatchEnd(b) + return writeMessageFB(b, mem, flatbuf.MessageHeaderDictionaryBatch, dictFB, bodyLength) +} + +func recordToFB(b *flatbuffers.Builder, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType, variadicCounts []int64) flatbuffers.UOffsetT { + fieldsFB := writeFieldNodes(b, fields, flatbuf.RecordBatchStartNodesVector) + metaFB := writeBuffers(b, meta, flatbuf.RecordBatchStartBuffersVector) + var bodyCompressFB flatbuffers.UOffsetT + if codec != -1 { + bodyCompressFB = writeBodyCompression(b, codec) + } + + var vcFB *flatbuffers.UOffsetT + if len(variadicCounts) > 0 { + flatbuf.RecordBatchStartVariadicBufferCountsVector(b, len(variadicCounts)) + for i := len(variadicCounts) - 1; i >= 0; i-- { + b.PrependInt64(variadicCounts[i]) + } + vcFBVal := b.EndVector(len(variadicCounts)) + vcFB = &vcFBVal + } + + flatbuf.RecordBatchStart(b) + flatbuf.RecordBatchAddLength(b, size) + flatbuf.RecordBatchAddNodes(b, fieldsFB) + flatbuf.RecordBatchAddBuffers(b, metaFB) + if vcFB != nil { + flatbuf.RecordBatchAddVariadicBufferCounts(b, *vcFB) + } + + if codec != -1 { + flatbuf.RecordBatchAddCompression(b, bodyCompressFB) + } + + return flatbuf.RecordBatchEnd(b) +} + +func writeFieldNodes(b *flatbuffers.Builder, fields []fieldMetadata, start startVecFunc) flatbuffers.UOffsetT { + + start(b, len(fields)) + for i := len(fields) - 1; i >= 0; i-- { + field := fields[i] + if field.Offset != 0 { + panic(fmt.Errorf("arrow/ipc: field metadata for IPC must have offset 0")) + } + flatbuf.CreateFieldNode(b, field.Len, field.Nulls) + } + + return b.EndVector(len(fields)) +} + +func writeBuffers(b *flatbuffers.Builder, buffers []bufferMetadata, start startVecFunc) flatbuffers.UOffsetT { + start(b, len(buffers)) + for i := len(buffers) - 1; i >= 0; i-- { + buffer := buffers[i] + flatbuf.CreateBuffer(b, buffer.Offset, buffer.Len) + } + return b.EndVector(len(buffers)) +} + +func writeBodyCompression(b *flatbuffers.Builder, codec flatbuf.CompressionType) flatbuffers.UOffsetT { + flatbuf.BodyCompressionStart(b) + flatbuf.BodyCompressionAddCodec(b, codec) + flatbuf.BodyCompressionAddMethod(b, flatbuf.BodyCompressionMethodBUFFER) + return flatbuf.BodyCompressionEnd(b) +} + +func writeMessage(msg *memory.Buffer, alignment int32, w io.Writer) (int, error) { + var ( + n int + err error + ) + + // ARROW-3212: we do not make any assumption on whether the output stream is aligned or not. + paddedMsgLen := int32(msg.Len()) + 8 + remainder := paddedMsgLen % alignment + if remainder != 0 { + paddedMsgLen += alignment - remainder + } + + tmp := make([]byte, 4) + + // write continuation indicator, to address 8-byte alignment requirement from FlatBuffers. 
+ binary.LittleEndian.PutUint32(tmp, kIPCContToken) + _, err = w.Write(tmp) + if err != nil { + return 0, fmt.Errorf("arrow/ipc: could not write continuation bit indicator: %w", err) + } + + // the returned message size includes the length prefix, the flatbuffer, + padding + n = int(paddedMsgLen) + + // write the flatbuffer size prefix, including padding + sizeFB := paddedMsgLen - 8 + binary.LittleEndian.PutUint32(tmp, uint32(sizeFB)) + _, err = w.Write(tmp) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write message flatbuffer size prefix: %w", err) + } + + // write the flatbuffer + _, err = w.Write(msg.Bytes()) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write message flatbuffer: %w", err) + } + + // write any padding + padding := paddedMsgLen - int32(msg.Len()) - 8 + if padding > 0 { + _, err = w.Write(paddingBytes[:padding]) + if err != nil { + return n, fmt.Errorf("arrow/ipc: could not write message padding bytes: %w", err) + } + } + + return n, err +} diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/reader.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/reader.go new file mode 100644 index 0000000000..1934c719b9 --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/reader.go @@ -0,0 +1,284 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "errors" + "fmt" + "io" + "sync/atomic" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/endian" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/utils" +) + +// Reader reads records from an io.Reader. +// Reader expects a schema (plus any dictionaries) as the first messages +// in the stream, followed by records. 
+type Reader struct { + r MessageReader + schema *arrow.Schema + + refCount atomic.Int64 + rec arrow.Record + err error + + // types dictTypeMap + memo dictutils.Memo + readInitialDicts bool + done bool + swapEndianness bool + ensureNativeEndian bool + expectedSchema *arrow.Schema + + mem memory.Allocator +} + +// NewReaderFromMessageReader allows constructing a new reader object with the +// provided MessageReader allowing injection of reading messages other than +// by simple streaming bytes such as Arrow Flight which receives a protobuf message +func NewReaderFromMessageReader(r MessageReader, opts ...Option) (reader *Reader, err error) { + defer func() { + if pErr := recover(); pErr != nil { + err = utils.FormatRecoveredError("arrow/ipc: unknown error while reading", pErr) + } + }() + cfg := newConfig() + for _, opt := range opts { + opt(cfg) + } + + rr := &Reader{ + r: r, + refCount: atomic.Int64{}, + // types: make(dictTypeMap), + memo: dictutils.NewMemo(), + mem: cfg.alloc, + ensureNativeEndian: cfg.ensureNativeEndian, + expectedSchema: cfg.schema, + } + rr.refCount.Add(1) + + if !cfg.noAutoSchema { + if err := rr.readSchema(cfg.schema); err != nil { + return nil, err + } + } + + return rr, nil +} + +// NewReader returns a reader that reads records from an input stream. +func NewReader(r io.Reader, opts ...Option) (*Reader, error) { + return NewReaderFromMessageReader(NewMessageReader(r, opts...), opts...) +} + +// Err returns the last error encountered during the iteration over the +// underlying stream. +func (r *Reader) Err() error { return r.err } + +func (r *Reader) Schema() *arrow.Schema { + if r.schema == nil { + if err := r.readSchema(r.expectedSchema); err != nil { + r.err = fmt.Errorf("arrow/ipc: could not read schema from stream: %w", err) + r.done = true + } + } + return r.schema +} + +func (r *Reader) readSchema(schema *arrow.Schema) error { + msg, err := r.r.Message() + if err != nil { + return fmt.Errorf("arrow/ipc: could not read message schema: %w", err) + } + + if msg.Type() != MessageSchema { + return fmt.Errorf("arrow/ipc: invalid message type (got=%v, want=%v)", msg.Type(), MessageSchema) + } + + // FIXME(sbinet) refactor msg-header handling. + var schemaFB flatbuf.Schema + initFB(&schemaFB, msg.msg.Header) + + r.schema, err = schemaFromFB(&schemaFB, &r.memo) + if err != nil { + return fmt.Errorf("arrow/ipc: could not decode schema from message schema: %w", err) + } + + // check the provided schema match the one read from stream. + if schema != nil && !schema.Equal(r.schema) { + return errInconsistentSchema + } + + if r.ensureNativeEndian && !r.schema.IsNativeEndian() { + r.swapEndianness = true + r.schema = r.schema.WithEndianness(endian.NativeEndian) + } + + return nil +} + +// Retain increases the reference count by 1. +// Retain may be called simultaneously from multiple goroutines. +func (r *Reader) Retain() { + r.refCount.Add(1) +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. +// Release may be called simultaneously from multiple goroutines. +func (r *Reader) Release() { + debug.Assert(r.refCount.Load() > 0, "too many releases") + + if r.refCount.Add(-1) == 0 { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + if r.r != nil { + r.r.Release() + r.r = nil + } + r.memo.Clear() + } +} + +// Next returns whether a Record could be extracted from the underlying stream. 
+func (r *Reader) Next() bool { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + + if r.err != nil || r.done { + return false + } + + return r.next() +} + +func (r *Reader) getInitialDicts() bool { + var msg *Message + // we have to get all dictionaries before reconstructing the first + // record. subsequent deltas and replacements modify the memo + numDicts := r.memo.Mapper.NumDicts() + // there should be numDicts dictionary messages + for i := 0; i < numDicts; i++ { + msg, r.err = r.r.Message() + if r.err != nil { + r.done = true + if r.err == io.EOF { + if i == 0 { + r.err = nil + } else { + r.err = fmt.Errorf("arrow/ipc: IPC stream ended without reading the expected (%d) dictionaries", numDicts) + } + } + return false + } + + if msg.Type() != MessageDictionaryBatch { + r.err = fmt.Errorf("arrow/ipc: IPC stream did not have the expected (%d) dictionaries at the start of the stream", numDicts) + } + if _, err := readDictionary(&r.memo, msg.meta, msg.body, r.swapEndianness, r.mem); err != nil { + r.done = true + r.err = err + return false + } + } + r.readInitialDicts = true + return true +} + +func (r *Reader) next() bool { + defer func() { + if pErr := recover(); pErr != nil { + r.err = utils.FormatRecoveredError("arrow/ipc: unknown error while reading", pErr) + } + }() + if r.schema == nil { + if err := r.readSchema(r.expectedSchema); err != nil { + r.err = fmt.Errorf("arrow/ipc: could not read schema from stream: %w", err) + r.done = true + return false + } + } + + if !r.readInitialDicts && !r.getInitialDicts() { + return false + } + + var msg *Message + msg, r.err = r.r.Message() + + for msg != nil && msg.Type() == MessageDictionaryBatch { + if _, r.err = readDictionary(&r.memo, msg.meta, msg.body, r.swapEndianness, r.mem); r.err != nil { + r.done = true + return false + } + msg, r.err = r.r.Message() + } + if r.err != nil { + r.done = true + if errors.Is(r.err, io.EOF) { + r.err = nil + } + return false + } + + if got, want := msg.Type(), MessageRecordBatch; got != want { + r.err = fmt.Errorf("arrow/ipc: invalid message type (got=%v, want=%v", got, want) + return false + } + + r.rec = newRecord(r.schema, &r.memo, msg.meta, msg.body, r.swapEndianness, r.mem) + return true +} + +// Record returns the current record that has been extracted from the +// underlying stream. +// It is valid until the next call to Next. +func (r *Reader) Record() arrow.Record { + return r.rec +} + +// Read reads the current record from the underlying stream and an error, if any. +// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF). +func (r *Reader) Read() (arrow.Record, error) { + if r.rec != nil { + r.rec.Release() + r.rec = nil + } + + if !r.next() { + if r.done && r.err == nil { + return nil, io.EOF + } + return nil, r.err + } + + return r.rec, nil +} + +var _ array.RecordReader = (*Reader)(nil) diff --git a/vendor/github.com/apache/arrow-go/v18/arrow/ipc/writer.go b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/writer.go new file mode 100644 index 0000000000..96f082fbea --- /dev/null +++ b/vendor/github.com/apache/arrow-go/v18/arrow/ipc/writer.go @@ -0,0 +1,1120 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ipc + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sync" + "unsafe" + + "github.com/apache/arrow-go/v18/arrow" + "github.com/apache/arrow-go/v18/arrow/array" + "github.com/apache/arrow-go/v18/arrow/bitutil" + "github.com/apache/arrow-go/v18/arrow/internal" + "github.com/apache/arrow-go/v18/arrow/internal/debug" + "github.com/apache/arrow-go/v18/arrow/internal/dictutils" + "github.com/apache/arrow-go/v18/arrow/internal/flatbuf" + "github.com/apache/arrow-go/v18/arrow/memory" + "github.com/apache/arrow-go/v18/internal/utils" +) + +type streamWriter struct { + w io.Writer + pos int64 +} + +func (w *streamWriter) Start() error { return nil } +func (w *streamWriter) Close() error { + _, err := w.Write(kEOS[:]) + return err +} + +func (w *streamWriter) WritePayload(p Payload) error { + _, err := writeIPCPayload(w, p) + if err != nil { + return err + } + return nil +} + +func (w *streamWriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.pos += int64(n) + return n, err +} + +func hasNestedDict(data arrow.ArrayData) bool { + if data.DataType().ID() == arrow.DICTIONARY { + return true + } + for _, c := range data.Children() { + if hasNestedDict(c) { + return true + } + } + return false +} + +// Writer is an Arrow stream writer. +type Writer struct { + w io.Writer + + mem memory.Allocator + pw PayloadWriter + + started bool + schema *arrow.Schema + mapper dictutils.Mapper + codec flatbuf.CompressionType + compressNP int + compressors []compressor + minSpaceSavings *float64 + + // map of the last written dictionaries by id + // so we can avoid writing the same dictionary over and over + lastWrittenDicts map[int64]arrow.Array + emitDictDeltas bool +} + +// NewWriterWithPayloadWriter constructs a writer with the provided payload writer +// instead of the default stream payload writer. This makes the writer more +// reusable such as by the Arrow Flight writer. +func NewWriterWithPayloadWriter(pw PayloadWriter, opts ...Option) *Writer { + cfg := newConfig(opts...) + return &Writer{ + mem: cfg.alloc, + pw: pw, + schema: cfg.schema, + codec: cfg.codec, + compressNP: cfg.compressNP, + minSpaceSavings: cfg.minSpaceSavings, + emitDictDeltas: cfg.emitDictDeltas, + compressors: make([]compressor, cfg.compressNP), + } +} + +// NewWriter returns a writer that writes records to the provided output stream. +func NewWriter(w io.Writer, opts ...Option) *Writer { + cfg := newConfig(opts...) 
+ return &Writer{ + w: w, + mem: cfg.alloc, + pw: &streamWriter{w: w}, + schema: cfg.schema, + codec: cfg.codec, + emitDictDeltas: cfg.emitDictDeltas, + compressNP: cfg.compressNP, + compressors: make([]compressor, cfg.compressNP), + } +} + +func (w *Writer) Close() error { + if !w.started { + err := w.start() + if err != nil { + return err + } + } + + if w.pw == nil { + return nil + } + + err := w.pw.Close() + if err != nil { + return fmt.Errorf("arrow/ipc: could not close payload writer: %w", err) + } + w.pw = nil + + for _, d := range w.lastWrittenDicts { + d.Release() + } + + return nil +} + +func (w *Writer) Write(rec arrow.Record) (err error) { + defer func() { + if pErr := recover(); pErr != nil { + err = utils.FormatRecoveredError("arrow/ipc: unknown error while writing", pErr) + } + }() + + incomingSchema := rec.Schema() + + if !w.started { + if w.schema == nil { + w.schema = incomingSchema + } + err := w.start() + if err != nil { + return err + } + } + + if incomingSchema == nil || !incomingSchema.Equal(w.schema) { + return errInconsistentSchema + } + + const allow64b = true + var ( + data = Payload{msg: MessageRecordBatch} + enc = newRecordEncoder( + w.mem, + 0, + kMaxNestingDepth, + allow64b, + w.codec, + w.compressNP, + w.minSpaceSavings, + w.compressors, + ) + ) + defer data.Release() + + err = writeDictionaryPayloads(w.mem, rec, false, w.emitDictDeltas, &w.mapper, w.lastWrittenDicts, w.pw, enc) + if err != nil { + return fmt.Errorf("arrow/ipc: failure writing dictionary batches: %w", err) + } + + enc.reset() + if err := enc.Encode(&data, rec); err != nil { + return fmt.Errorf("arrow/ipc: could not encode record to payload: %w", err) + } + + return w.pw.WritePayload(data) +} + +func writeDictionaryPayloads(mem memory.Allocator, batch arrow.Record, isFileFormat bool, emitDictDeltas bool, mapper *dictutils.Mapper, lastWrittenDicts map[int64]arrow.Array, pw PayloadWriter, encoder *recordEncoder) error { + dictionaries, err := dictutils.CollectDictionaries(batch, mapper) + if err != nil { + return err + } + defer func() { + for _, d := range dictionaries { + d.Dict.Release() + } + }() + + eqopt := array.WithNaNsEqual(true) + for _, pair := range dictionaries { + encoder.reset() + var ( + deltaStart int64 + enc = dictEncoder{encoder} + ) + lastDict, exists := lastWrittenDicts[pair.ID] + if exists { + if lastDict.Data() == pair.Dict.Data() { + continue + } + newLen, lastLen := pair.Dict.Len(), lastDict.Len() + if lastLen == newLen && array.ApproxEqual(lastDict, pair.Dict, eqopt) { + // same dictionary by value + // might cost CPU, but required for IPC file format + continue + } + if isFileFormat { + return errors.New("arrow/ipc: Dictionary replacement detected when writing IPC file format. 
Arrow IPC File only supports single dictionary per field") + } + + if newLen > lastLen && + emitDictDeltas && + !hasNestedDict(pair.Dict.Data()) && + (array.SliceApproxEqual(lastDict, 0, int64(lastLen), pair.Dict, 0, int64(lastLen), eqopt)) { + deltaStart = int64(lastLen) + } + } + + var data = Payload{msg: MessageDictionaryBatch} + defer data.Release() + + dict := pair.Dict + if deltaStart > 0 { + dict = array.NewSlice(dict, deltaStart, int64(dict.Len())) + defer dict.Release() + } + if err := enc.Encode(&data, pair.ID, deltaStart > 0, dict); err != nil { + return err + } + + if err := pw.WritePayload(data); err != nil { + return err + } + + lastWrittenDicts[pair.ID] = pair.Dict + if lastDict != nil { + lastDict.Release() + } + pair.Dict.Retain() + } + return nil +} + +func (w *Writer) start() error { + w.started = true + + w.mapper.ImportSchema(w.schema) + w.lastWrittenDicts = make(map[int64]arrow.Array) + + // write out schema payloads + ps := payloadFromSchema(w.schema, w.mem, &w.mapper) + defer ps.Release() + + for _, data := range ps { + err := w.pw.WritePayload(data) + if err != nil { + return err + } + } + + return nil +} + +type dictEncoder struct { + *recordEncoder +} + +func (d *dictEncoder) encodeMetadata(p *Payload, isDelta bool, id, nrows int64) error { + p.meta = writeDictionaryMessage(d.mem, id, isDelta, nrows, p.size, d.fields, d.meta, d.codec, d.variadicCounts) + return nil +} + +func (d *dictEncoder) Encode(p *Payload, id int64, isDelta bool, dict arrow.Array) error { + d.start = 0 + defer func() { + d.start = 0 + }() + + schema := arrow.NewSchema([]arrow.Field{{Name: "dictionary", Type: dict.DataType(), Nullable: true}}, nil) + batch := array.NewRecord(schema, []arrow.Array{dict}, int64(dict.Len())) + defer batch.Release() + if err := d.encode(p, batch); err != nil { + return err + } + + return d.encodeMetadata(p, isDelta, id, batch.NumRows()) +} + +type recordEncoder struct { + mem memory.Allocator + + fields []fieldMetadata + meta []bufferMetadata + variadicCounts []int64 + + depth int64 + start int64 + allow64b bool + codec flatbuf.CompressionType + compressNP int + compressors []compressor + minSpaceSavings *float64 +} + +func newRecordEncoder( + mem memory.Allocator, + startOffset, + maxDepth int64, + allow64b bool, + codec flatbuf.CompressionType, + compressNP int, + minSpaceSavings *float64, + compressors []compressor, +) *recordEncoder { + return &recordEncoder{ + mem: mem, + start: startOffset, + depth: maxDepth, + allow64b: allow64b, + codec: codec, + compressNP: compressNP, + compressors: compressors, + minSpaceSavings: minSpaceSavings, + } +} + +func (w *recordEncoder) shouldCompress(uncompressed, compressed int) bool { + debug.Assert(uncompressed > 0, "uncompressed size is 0") + if w.minSpaceSavings == nil { + return true + } + + savings := 1.0 - float64(compressed)/float64(uncompressed) + return savings >= *w.minSpaceSavings +} + +func (w *recordEncoder) reset() { + w.start = 0 + w.fields = make([]fieldMetadata, 0) +} + +func (w *recordEncoder) getCompressor(id int) compressor { + if w.compressors[id] == nil { + w.compressors[id] = getCompressor(w.codec) + } + return w.compressors[id] +} + +func (w *recordEncoder) compressBodyBuffers(p *Payload) error { + compress := func(idx int, codec compressor) error { + if p.body[idx] == nil || p.body[idx].Len() == 0 { + return nil + } + + buf := memory.NewResizableBuffer(w.mem) + buf.Reserve(codec.MaxCompressedLen(p.body[idx].Len()) + arrow.Int64SizeBytes) + + binary.LittleEndian.PutUint64(buf.Buf(), 
uint64(p.body[idx].Len())) + bw := &bufferWriter{buf: buf, pos: arrow.Int64SizeBytes} + codec.Reset(bw) + + n, err := codec.Write(p.body[idx].Bytes()) + if err != nil { + return err + } + if err := codec.Close(); err != nil { + return err + } + + finalLen := bw.pos + compressedLen := bw.pos - arrow.Int64SizeBytes + if !w.shouldCompress(n, compressedLen) { + n = copy(buf.Buf()[arrow.Int64SizeBytes:], p.body[idx].Bytes()) + // size of -1 indicates to the reader that the body + // doesn't need to be decompressed + var noprefix int64 = -1 + binary.LittleEndian.PutUint64(buf.Buf(), uint64(noprefix)) + finalLen = n + arrow.Int64SizeBytes + } + bw.buf.Resize(finalLen) + p.body[idx].Release() + p.body[idx] = buf + return nil + } + + if w.compressNP <= 1 { + codec := w.getCompressor(0) + for idx := range p.body { + if err := compress(idx, codec); err != nil { + return err + } + } + return nil + } + + var ( + wg sync.WaitGroup + ch = make(chan int) + errch = make(chan error) + ctx, cancel = context.WithCancel(context.Background()) + ) + defer cancel() + + for workerID := 0; workerID < w.compressNP; workerID++ { + wg.Add(1) + go func(id int) { + defer wg.Done() + codec := w.getCompressor(id) + for { + select { + case idx, ok := <-ch: + if !ok { + // we're done, channel is closed! + return + } + + if err := compress(idx, codec); err != nil { + errch <- err + cancel() + return + } + case <-ctx.Done(): + // cancelled, return early + return + } + } + }(workerID) + } + + for idx := range p.body { + ch <- idx + } + + close(ch) + wg.Wait() + close(errch) + + return <-errch +} + +func (w *recordEncoder) encode(p *Payload, rec arrow.Record) error { + // perform depth-first traversal of the row-batch + for i, col := range rec.Columns() { + err := w.visit(p, col) + if err != nil { + return fmt.Errorf("arrow/ipc: could not encode column %d (%q): %w", i, rec.ColumnName(i), err) + } + } + + if w.codec != -1 { + if w.minSpaceSavings != nil { + pct := *w.minSpaceSavings + if pct < 0 || pct > 1 { + p.Release() + return fmt.Errorf("%w: minSpaceSavings not in range [0,1]. Provided %.05f", + arrow.ErrInvalid, pct) + } + } + w.compressBodyBuffers(p) + } + + // position for the start of a buffer relative to the passed frame of reference. + // may be 0 or some other position in an address space. + offset := w.start + w.meta = make([]bufferMetadata, len(p.body)) + + // construct the metadata for the record batch header + for i, buf := range p.body { + var ( + size int64 + padding int64 + ) + // the buffer might be null if we are handling zero row lengths. + if buf != nil { + size = int64(buf.Len()) + padding = bitutil.CeilByte64(size) - size + } + w.meta[i] = bufferMetadata{ + Offset: offset, + // even though we add padding, we need the Len to be correct + // so that decompressing works properly. 
+ Len: size, + } + offset += size + padding + } + + p.size = offset - w.start + if !bitutil.IsMultipleOf8(p.size) { + panic("not aligned") + } + + return nil +} + +func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { + if w.depth <= 0 { + return errMaxRecursion + } + + if !w.allow64b && arr.Len() > math.MaxInt32 { + return errBigArray + } + + if arr.DataType().ID() == arrow.EXTENSION { + arr := arr.(array.ExtensionArray) + err := w.visit(p, arr.Storage()) + if err != nil { + return fmt.Errorf("failed visiting storage of for array %T: %w", arr, err) + } + return nil + } + + if arr.DataType().ID() == arrow.DICTIONARY { + arr := arr.(*array.Dictionary) + return w.visit(p, arr.Indices()) + } + + // add all common elements + w.fields = append(w.fields, fieldMetadata{ + Len: int64(arr.Len()), + Nulls: int64(arr.NullN()), + Offset: 0, + }) + + if arr.DataType().ID() == arrow.NULL { + return nil + } + + if internal.HasValidityBitmap(arr.DataType().ID(), flatbuf.MetadataVersion(currentMetadataVersion)) { + switch arr.NullN() { + case 0: + // there are no null values, drop the null bitmap + p.body = append(p.body, nil) + default: + data := arr.Data() + var bitmap *memory.Buffer + if data.NullN() == data.Len() { + // every value is null, just use a new zero-initialized bitmap to avoid the expense of copying + bitmap = memory.NewResizableBuffer(w.mem) + minLength := paddedLength(bitutil.BytesForBits(int64(data.Len())), kArrowAlignment) + bitmap.Resize(int(minLength)) + } else { + // otherwise truncate and copy the bits + bitmap = newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[0]) + } + p.body = append(p.body, bitmap) + } + } + + switch dtype := arr.DataType().(type) { + case *arrow.NullType: + // ok. NullArrays are completely empty. + + case *arrow.BooleanType: + var ( + data = arr.Data() + bitm *memory.Buffer + ) + + if data.Len() != 0 { + bitm = newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[1]) + } + p.body = append(p.body, bitm) + + case arrow.FixedWidthDataType: + data := arr.Data() + values := data.Buffers()[1] + arrLen := int64(arr.Len()) + typeWidth := int64(dtype.BitWidth() / 8) + minLength := paddedLength(arrLen*typeWidth, kArrowAlignment) + + switch { + case needTruncate(int64(data.Offset()), values, minLength): + // non-zero offset: slice the buffer + offset := int64(data.Offset()) * typeWidth + // send padding if available + len := min(bitutil.CeilByte64(arrLen*typeWidth), int64(values.Len())-offset) + values = memory.NewBufferBytes(values.Bytes()[offset : offset+len]) + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, values) + + case *arrow.BinaryType, *arrow.LargeBinaryType, *arrow.StringType, *arrow.LargeStringType: + arr := arr.(array.BinaryLike) + voffsets := w.getZeroBasedValueOffsets(arr) + data := arr.Data() + values := data.Buffers()[2] + + var totalDataBytes int64 + if voffsets != nil { + totalDataBytes = int64(len(arr.ValueBytes())) + } + + switch { + case needTruncate(int64(data.Offset()), values, totalDataBytes): + // slice data buffer to include the range we need now. 
+ var ( + beg int64 = 0 + len = min(paddedLength(totalDataBytes, kArrowAlignment), int64(totalDataBytes)) + ) + if arr.Len() > 0 { + beg = arr.ValueOffset64(0) + } + + values = memory.NewBufferBytes(data.Buffers()[2].Bytes()[beg : beg+len]) + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, voffsets) + p.body = append(p.body, values) + + case arrow.BinaryViewDataType: + data := arr.Data() + values := data.Buffers()[1] + arrLen := int64(arr.Len()) + typeWidth := int64(arrow.ViewHeaderSizeBytes) + minLength := paddedLength(arrLen*typeWidth, kArrowAlignment) + + switch { + case needTruncate(int64(data.Offset()), values, minLength): + // non-zero offset: slice the buffer + offset := data.Offset() * int(typeWidth) + // send padding if available + len := int(min(bitutil.CeilByte64(arrLen*typeWidth), int64(values.Len()-offset))) + values = memory.SliceBuffer(values, offset, len) + default: + if values != nil { + values.Retain() + } + } + p.body = append(p.body, values) + + w.variadicCounts = append(w.variadicCounts, int64(len(data.Buffers())-2)) + for _, b := range data.Buffers()[2:] { + b.Retain() + p.body = append(p.body, b) + } + + case *arrow.StructType: + w.depth-- + arr := arr.(*array.Struct) + for i := 0; i < arr.NumField(); i++ { + err := w.visit(p, arr.Field(i)) + if err != nil { + return fmt.Errorf("could not visit field %d of struct-array: %w", i, err) + } + } + w.depth++ + + case *arrow.SparseUnionType: + offset, length := arr.Data().Offset(), arr.Len() + arr := arr.(*array.SparseUnion) + typeCodes := getTruncatedBuffer(int64(offset), int64(length), int32(unsafe.Sizeof(arrow.UnionTypeCode(0))), arr.TypeCodes()) + p.body = append(p.body, typeCodes) + + w.depth-- + for i := 0; i < arr.NumFields(); i++ { + err := w.visit(p, arr.Field(i)) + if err != nil { + return fmt.Errorf("could not visit field %d of sparse union array: %w", i, err) + } + } + w.depth++ + case *arrow.DenseUnionType: + offset, length := arr.Data().Offset(), arr.Len() + arr := arr.(*array.DenseUnion) + typeCodes := getTruncatedBuffer(int64(offset), int64(length), int32(unsafe.Sizeof(arrow.UnionTypeCode(0))), arr.TypeCodes()) + p.body = append(p.body, typeCodes) + + w.depth-- + dt := arr.UnionType() + + // union type codes are not necessarily 0-indexed + maxCode := dt.MaxTypeCode() + + // allocate an array of child offsets. Set all to -1 to indicate we + // haven't observed a first occurrence of a particular child yet + offsets := make([]int32, maxCode+1) + lengths := make([]int32, maxCode+1) + offsets[0], lengths[0] = -1, 0 + for i := 1; i < len(offsets); i *= 2 { + copy(offsets[i:], offsets[:i]) + copy(lengths[i:], lengths[:i]) + } + + var valueOffsets *memory.Buffer + if offset != 0 { + valueOffsets = w.rebaseDenseUnionValueOffsets(arr, offsets, lengths) + } else { + valueOffsets = getTruncatedBuffer(int64(offset), int64(length), int32(arrow.Int32SizeBytes), arr.ValueOffsets()) + } + p.body = append(p.body, valueOffsets) + + // visit children and slice accordingly + for i := range dt.Fields() { + child := arr.Field(i) + // for sliced unions it's tricky to know how much to truncate + // the children. For now we'll truncate the children to be + // no longer than the parent union. 
+ + if offset != 0 { + code := dt.TypeCodes()[i] + childOffset := offsets[code] + childLen := lengths[code] + + if childOffset > 0 { + child = array.NewSlice(child, int64(childOffset), int64(childOffset+childLen)) + defer child.Release() + } else if childLen < int32(child.Len()) { + child = array.NewSlice(child, 0, int64(childLen)) + defer child.Release() + } + } + if err := w.visit(p, child); err != nil { + return fmt.Errorf("could not visit field %d of dense union array: %w", i, err) + } + } + w.depth++ + case *arrow.MapType, *arrow.ListType, *arrow.LargeListType: + arr := arr.(array.ListLike) + voffsets := w.getZeroBasedValueOffsets(arr) + p.body = append(p.body, voffsets) + + w.depth-- + var ( + values = arr.ListValues() + mustRelease = false + values_offset int64 + values_end int64 + ) + defer func() { + if mustRelease { + values.Release() + } + }() + + if arr.Len() > 0 && voffsets != nil { + values_offset, _ = arr.ValueOffsets(0) + _, values_end = arr.ValueOffsets(arr.Len() - 1) + } + + if arr.Len() != 0 || values_end < int64(values.Len()) { + // must also slice the values + values = array.NewSlice(values, values_offset, values_end) + mustRelease = true + } + err := w.visit(p, values) + + if err != nil { + return fmt.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + case *arrow.ListViewType, *arrow.LargeListViewType: + arr := arr.(array.VarLenListLike) + + voffsets, minOffset, maxEnd := w.getZeroBasedListViewOffsets(arr) + vsizes := w.getListViewSizes(arr) + + p.body = append(p.body, voffsets) + p.body = append(p.body, vsizes) + + w.depth-- + var ( + values = arr.ListValues() + ) + + if minOffset != 0 || maxEnd < int64(values.Len()) { + values = array.NewSlice(values, minOffset, maxEnd) + defer values.Release() + } + err := w.visit(p, values) + + if err != nil { + return fmt.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + case *arrow.FixedSizeListType: + arr := arr.(*array.FixedSizeList) + + w.depth-- + + size := int64(arr.DataType().(*arrow.FixedSizeListType).Len()) + beg := int64(arr.Offset()) * size + end := int64(arr.Offset()+arr.Len()) * size + + values := array.NewSlice(arr.ListValues(), beg, end) + defer values.Release() + + err := w.visit(p, values) + + if err != nil { + return fmt.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + case *arrow.RunEndEncodedType: + arr := arr.(*array.RunEndEncoded) + w.depth-- + child := arr.LogicalRunEndsArray(w.mem) + defer child.Release() + if err := w.visit(p, child); err != nil { + return err + } + child = arr.LogicalValuesArray() + defer child.Release() + if err := w.visit(p, child); err != nil { + return err + } + w.depth++ + + default: + panic(fmt.Errorf("arrow/ipc: unknown array %T (dtype=%T)", arr, dtype)) + } + + return nil +} + +func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) *memory.Buffer { + data := arr.Data() + voffsets := data.Buffers()[1] + offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits() + offsetBytesNeeded := offsetTraits.BytesRequired(data.Len() + 1) + + if voffsets == nil || voffsets.Len() == 0 { + return nil + } + + dataTypeWidth := arr.DataType().Layout().Buffers[1].ByteWidth + + // if we have a non-zero offset, then the value offsets do not start at + // zero. 
we must a) create a new offsets array with shifted offsets and + // b) slice the values array accordingly + hasNonZeroOffset := data.Offset() != 0 + + // or if there are more value offsets than values (the array has been sliced) + // we need to trim off the trailing offsets + hasMoreOffsetsThanValues := offsetBytesNeeded < voffsets.Len() + + // or if the offsets do not start from the zero index, we need to shift them + // and slice the values array + var firstOffset int64 + if dataTypeWidth == 8 { + firstOffset = arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[0] + } else { + firstOffset = int64(arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[0]) + } + offsetsDoNotStartFromZero := firstOffset != 0 + + // determine whether the offsets array should be shifted + needsTruncateAndShift := hasNonZeroOffset || hasMoreOffsetsThanValues || offsetsDoNotStartFromZero + + if needsTruncateAndShift { + shiftedOffsets := memory.NewResizableBuffer(w.mem) + shiftedOffsets.Resize(offsetBytesNeeded) + + switch dataTypeWidth { + case 8: + dest := arrow.Int64Traits.CastFromBytes(shiftedOffsets.Bytes()) + offsets := arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()+1] + + startOffset := offsets[0] + for i, o := range offsets { + dest[i] = o - startOffset + } + + default: + debug.Assert(arr.DataType().Layout().Buffers[1].ByteWidth == 4, "invalid offset bytewidth") + dest := arrow.Int32Traits.CastFromBytes(shiftedOffsets.Bytes()) + offsets := arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()+1] + + startOffset := offsets[0] + for i, o := range offsets { + dest[i] = o - startOffset + } + } + + voffsets = shiftedOffsets + } else { + voffsets.Retain() + } + + return voffsets +} + +func getZeroBasedListViewOffsets[OffsetT int32 | int64](mem memory.Allocator, arr array.VarLenListLike) (valueOffsets *memory.Buffer, minOffset, maxEnd OffsetT) { + requiredBytes := int(unsafe.Sizeof(minOffset)) * arr.Len() + if arr.Data().Offset() == 0 { + // slice offsets to used extent, in case we have truncated slice + minOffset, maxEnd = 0, OffsetT(arr.ListValues().Len()) + valueOffsets = arr.Data().Buffers()[1] + if valueOffsets.Len() > requiredBytes { + valueOffsets = memory.SliceBuffer(valueOffsets, 0, requiredBytes) + } else { + valueOffsets.Retain() + } + return + } + + // non-zero offset, it's likely that the smallest offset is not zero + // we must a) create a new offsets array with shifted offsets and + // b) slice the values array accordingly + + valueOffsets = memory.NewResizableBuffer(mem) + valueOffsets.Resize(requiredBytes) + if arr.Len() > 0 { + // max value of int32/int64 based on type + minOffset = (^OffsetT(0)) << ((8 * unsafe.Sizeof(minOffset)) - 1) + for i := 0; i < arr.Len(); i++ { + start, end := arr.ValueOffsets(i) + minOffset = utils.Min(minOffset, OffsetT(start)) + maxEnd = utils.Max(maxEnd, OffsetT(end)) + } + } + + offsets := arrow.GetData[OffsetT](arr.Data().Buffers()[1].Bytes())[arr.Data().Offset():] + destOffset := arrow.GetData[OffsetT](valueOffsets.Bytes()) + for i := 0; i < arr.Len(); i++ { + destOffset[i] = offsets[i] - minOffset + } + return +} + +func getListViewSizes[OffsetT int32 | int64](arr array.VarLenListLike) *memory.Buffer { + var z OffsetT + requiredBytes := int(unsafe.Sizeof(z)) * arr.Len() + sizes := arr.Data().Buffers()[2] + + if arr.Data().Offset() != 0 || sizes.Len() > requiredBytes { + // slice offsets to used extent, in case we have truncated slice + offsetBytes := arr.Data().Offset() * 
int(unsafe.Sizeof(z)) + sizes = memory.SliceBuffer(sizes, offsetBytes, requiredBytes) + } else { + sizes.Retain() + } + return sizes +} + +func (w *recordEncoder) getZeroBasedListViewOffsets(arr array.VarLenListLike) (*memory.Buffer, int64, int64) { + if arr.Len() == 0 { + return nil, 0, 0 + } + + var ( + outOffsets *memory.Buffer + minOff, maxEnd int64 + ) + + switch v := arr.(type) { + case *array.ListView: + voffsets, outOff, outEnd := getZeroBasedListViewOffsets[int32](w.mem, v) + outOffsets = voffsets + minOff, maxEnd = int64(outOff), int64(outEnd) + case *array.LargeListView: + outOffsets, minOff, maxEnd = getZeroBasedListViewOffsets[int64](w.mem, v) + } + return outOffsets, minOff, maxEnd +} + +func (w *recordEncoder) getListViewSizes(arr array.VarLenListLike) *memory.Buffer { + if arr.Len() == 0 { + return nil + } + + switch v := arr.(type) { + case *array.ListView: + return getListViewSizes[int32](v) + case *array.LargeListView: + return getListViewSizes[int64](v) + } + return nil +} + +func (w *recordEncoder) rebaseDenseUnionValueOffsets(arr *array.DenseUnion, offsets, lengths []int32) *memory.Buffer { + // this case sucks. Because the offsets are different for each + // child array, when we have a sliced array, we need to re-base + // the value offsets for each array! ew. + unshiftedOffsets := arr.RawValueOffsets() + codes := arr.RawTypeCodes() + + shiftedOffsetsBuf := memory.NewResizableBuffer(w.mem) + shiftedOffsetsBuf.Resize(arrow.Int32Traits.BytesRequired(arr.Len())) + shiftedOffsets := arrow.Int32Traits.CastFromBytes(shiftedOffsetsBuf.Bytes()) + + // compute shifted offsets by subtracting child offset + for i, c := range codes { + if offsets[c] == -1 { + // offsets are guaranteed to be increasing according to the spec + // so the first offset we find for a child is the initial offset + // and will become the "0" for this child. 
+ offsets[c] = unshiftedOffsets[i] + shiftedOffsets[i] = 0 + } else { + shiftedOffsets[i] = unshiftedOffsets[i] - offsets[c] + } + lengths[c] = max(lengths[c], shiftedOffsets[i]+1) + } + return shiftedOffsetsBuf +} + +func (w *recordEncoder) Encode(p *Payload, rec arrow.Record) error { + if err := w.encode(p, rec); err != nil { + return err + } + return w.encodeMetadata(p, rec.NumRows()) +} + +func (w *recordEncoder) encodeMetadata(p *Payload, nrows int64) error { + p.meta = writeRecordMessage(w.mem, nrows, p.size, w.fields, w.meta, w.codec, w.variadicCounts) + return nil +} + +func newTruncatedBitmap(mem memory.Allocator, offset, length int64, input *memory.Buffer) *memory.Buffer { + if input == nil { + return nil + } + + minLength := paddedLength(bitutil.BytesForBits(length), kArrowAlignment) + switch { + case offset != 0 || minLength < int64(input.Len()): + // with a sliced array / non-zero offset, we must copy the bitmap + buf := memory.NewResizableBuffer(mem) + buf.Resize(int(minLength)) + bitutil.CopyBitmap(input.Bytes(), int(offset), int(length), buf.Bytes(), 0) + return buf + default: + input.Retain() + return input + } +} + +func getTruncatedBuffer(offset, length int64, byteWidth int32, buf *memory.Buffer) *memory.Buffer { + if buf == nil { + return buf + } + + paddedLen := paddedLength(length*int64(byteWidth), kArrowAlignment) + if offset != 0 || paddedLen < int64(buf.Len()) { + return memory.SliceBuffer(buf, int(offset*int64(byteWidth)), int(min(paddedLen, int64(buf.Len())))) + } + buf.Retain() + return buf +} + +func needTruncate(offset int64, buf *memory.Buffer, minLength int64) bool { + if buf == nil { + return false + } + return offset != 0 || minLength < int64(buf.Len()) +} + +// GetRecordBatchPayload produces the ipc payload for a given record batch. +// The resulting payload itself must be released by the caller via the Release +// method after it is no longer needed. +func GetRecordBatchPayload(batch arrow.Record, opts ...Option) (Payload, error) { + cfg := newConfig(opts...) + var ( + data = Payload{msg: MessageRecordBatch} + enc = newRecordEncoder( + cfg.alloc, + 0, + kMaxNestingDepth, + true, + cfg.codec, + cfg.compressNP, + cfg.minSpaceSavings, + make([]compressor, cfg.compressNP), + ) + ) + + err := enc.Encode(&data, batch) + if err != nil { + return Payload{}, err + } + + return data, nil +} + +// GetSchemaPayload produces the ipc payload for a given schema. 
+func GetSchemaPayload(schema *arrow.Schema, mem memory.Allocator) Payload { + var mapper dictutils.Mapper + mapper.ImportSchema(schema) + ps := payloadFromSchema(schema, mem, &mapper) + return ps[0] +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8690c4e346..f6224120fd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -343,6 +343,7 @@ github.com/andybalholm/brotli/matchfinder ## explicit; go 1.23.0 github.com/apache/arrow-go/v18/arrow github.com/apache/arrow-go/v18/arrow/array +github.com/apache/arrow-go/v18/arrow/arrio github.com/apache/arrow-go/v18/arrow/bitutil github.com/apache/arrow-go/v18/arrow/csv github.com/apache/arrow-go/v18/arrow/decimal @@ -351,8 +352,11 @@ github.com/apache/arrow-go/v18/arrow/decimal256 github.com/apache/arrow-go/v18/arrow/encoded github.com/apache/arrow-go/v18/arrow/endian github.com/apache/arrow-go/v18/arrow/float16 +github.com/apache/arrow-go/v18/arrow/internal github.com/apache/arrow-go/v18/arrow/internal/debug +github.com/apache/arrow-go/v18/arrow/internal/dictutils github.com/apache/arrow-go/v18/arrow/internal/flatbuf +github.com/apache/arrow-go/v18/arrow/ipc github.com/apache/arrow-go/v18/arrow/memory github.com/apache/arrow-go/v18/arrow/memory/internal/cgoalloc github.com/apache/arrow-go/v18/arrow/memory/mallocator