mirror of https://github.com/grafana/grafana

InfluxDB: SQL Support (#72167)

* Add influxdbSqlSupport feature toggle
* Add SQL option to the config page
* Add SQL backend
* Add metadata support in config page
* Implement unified querying
* Fix healthcheck query
* fsql tests
* secure grpc by default
* code cleanup
* Query handling for sql mode
* Implement a placeholder sql editor
* Fix query language dropdown
* go mod updates
* make lint-go
* more make lint-go
* remove unused runQuery
* switch statements with default case
* linting again

parent 3172715a02
commit 77e7ae2a1b
@@ -0,0 +1,277 @@
package fsql

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"runtime/debug"
	"time"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/scalar"
	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/data"
	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
	"google.golang.org/grpc/metadata"
)

// TODO: Make this configurable. This is an arbitrary value right
// now. Grafana used to have a 1M-row limit established in open-source. I'll
// let users hit that for now until we decide how to proceed.
const rowLimit = 1_000_000

type recordReader interface {
	Next() bool
	Schema() *arrow.Schema
	Record() arrow.Record
	Err() error
}

// newQueryDataResponse builds a [backend.DataResponse] from a stream of
// [arrow.Record]s.
//
// The backend.DataResponse contains a single [data.Frame].
func newQueryDataResponse(reader recordReader, query sqlutil.Query, headers metadata.MD) backend.DataResponse {
	var resp backend.DataResponse
	frame, err := frameForRecords(reader)
	if err != nil {
		resp.Error = err
	}
	if frame.Rows() == 0 {
		resp.Frames = data.Frames{}
		return resp
	}

	frame.Meta.Custom = map[string]any{
		"headers": headers,
	}
	frame.Meta.ExecutedQueryString = query.RawSQL
	frame.Meta.DataTopic = data.DataTopic(query.RawSQL)

	switch query.Format {
	case sqlutil.FormatOptionTimeSeries:
		if _, idx := frame.FieldByName("time"); idx == -1 {
			resp.Error = fmt.Errorf("no time column found")
			return resp
		}

		if frame.TimeSeriesSchema().Type == data.TimeSeriesTypeLong {
			var err error
			frame, err = data.LongToWide(frame, nil)
			if err != nil {
				resp.Error = err
				return resp
			}
		}
	case sqlutil.FormatOptionTable:
		// No changes to the output. Send it as is.
	case sqlutil.FormatOptionLogs:
		// TODO(brett): We need to find out what this actually is and if it's
		// worth supporting. Pass through as "table" for now.
	default:
		resp.Error = fmt.Errorf("unsupported format")
	}

	resp.Frames = data.Frames{frame}
	return resp
}

// frameForRecords creates a [data.Frame] from a stream of [arrow.Record]s.
func frameForRecords(reader recordReader) (*data.Frame, error) {
	var (
		frame = newFrame(reader.Schema())
		rows  int64
	)
	for reader.Next() {
		record := reader.Record()
		for i, col := range record.Columns() {
			if err := copyData(frame.Fields[i], col); err != nil {
				return frame, err
			}
		}

		rows += record.NumRows()
		if rows > rowLimit {
			frame.AppendNotices(data.Notice{
				Severity: data.NoticeSeverityWarning,
				Text:     fmt.Sprintf("Results have been limited to %v because the SQL row limit was reached", rowLimit),
			})
			return frame, nil
		}

		if err := reader.Err(); err != nil && !errors.Is(err, io.EOF) {
			return frame, err
		}
	}
	return frame, nil
}

// newFrame builds a new Data Frame from an Arrow Schema.
func newFrame(schema *arrow.Schema) *data.Frame {
	fields := schema.Fields()
	df := &data.Frame{
		Fields: make([]*data.Field, len(fields)),
		Meta:   &data.FrameMeta{},
	}
	for i, f := range fields {
		df.Fields[i] = newField(f)
	}
	return df
}

func newField(f arrow.Field) *data.Field {
	switch f.Type.ID() {
	case arrow.STRING:
		return newDataField[string](f)
	case arrow.FLOAT32:
		return newDataField[float32](f)
	case arrow.FLOAT64:
		return newDataField[float64](f)
	case arrow.UINT8:
		return newDataField[uint8](f)
	case arrow.UINT16:
		return newDataField[uint16](f)
	case arrow.UINT32:
		return newDataField[uint32](f)
	case arrow.UINT64:
		return newDataField[uint64](f)
	case arrow.INT8:
		return newDataField[int8](f)
	case arrow.INT16:
		return newDataField[int16](f)
	case arrow.INT32:
		return newDataField[int32](f)
	case arrow.INT64:
		return newDataField[int64](f)
	case arrow.BOOL:
		return newDataField[bool](f)
	case arrow.TIMESTAMP:
		return newDataField[time.Time](f)
	case arrow.DURATION:
		return newDataField[int64](f)
	default:
		return newDataField[json.RawMessage](f)
	}
}

func newDataField[T any](f arrow.Field) *data.Field {
	if f.Nullable {
		var s []*T
		return data.NewField(f.Name, nil, s)
	}
	var s []T
	return data.NewField(f.Name, nil, s)
}

// copyData copies the contents of an Arrow column into a Data Frame field.
func copyData(field *data.Field, col arrow.Array) error {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println(fmt.Errorf("panic: %s %s", r, string(debug.Stack())))
		}
	}()

	colData := col.Data()

	switch col.DataType().ID() {
	case arrow.TIMESTAMP:
		v := array.NewTimestampData(colData)
		for i := 0; i < v.Len(); i++ {
			if field.Nullable() {
				if v.IsNull(i) {
					var t *time.Time
					field.Append(t)
					continue
				}
				t := v.Value(i).ToTime(arrow.Nanosecond)
				field.Append(&t)
				continue
			}
			field.Append(v.Value(i).ToTime(arrow.Nanosecond))
		}
	case arrow.DENSE_UNION:
		v := array.NewDenseUnionData(colData)
		for i := 0; i < v.Len(); i++ {
			sc, err := scalar.GetScalar(v, i)
			if err != nil {
				return err
			}
			value := sc.(*scalar.DenseUnion).ChildValue()

			var d any
			switch value.DataType().ID() {
			case arrow.STRING:
				d = value.(*scalar.String).String()
			case arrow.BOOL:
				d = value.(*scalar.Boolean).Value
			case arrow.INT32:
				d = value.(*scalar.Int32).Value
			case arrow.INT64:
				d = value.(*scalar.Int64).Value
			case arrow.LIST:
				d = value.(*scalar.List).Value
			default:
				d = value.(*scalar.Null)
			}
			b, err := json.Marshal(d)
			if err != nil {
				return err
			}
			field.Append(json.RawMessage(b))
		}
	case arrow.STRING:
		copyBasic[string](field, array.NewStringData(colData))
	case arrow.UINT8:
		copyBasic[uint8](field, array.NewUint8Data(colData))
	case arrow.UINT16:
		copyBasic[uint16](field, array.NewUint16Data(colData))
	case arrow.UINT32:
		copyBasic[uint32](field, array.NewUint32Data(colData))
	case arrow.UINT64:
		copyBasic[uint64](field, array.NewUint64Data(colData))
	case arrow.INT8:
		copyBasic[int8](field, array.NewInt8Data(colData))
	case arrow.INT16:
		copyBasic[int16](field, array.NewInt16Data(colData))
	case arrow.INT32:
		copyBasic[int32](field, array.NewInt32Data(colData))
	case arrow.INT64:
		copyBasic[int64](field, array.NewInt64Data(colData))
	case arrow.FLOAT32:
		copyBasic[float32](field, array.NewFloat32Data(colData))
	case arrow.FLOAT64:
		copyBasic[float64](field, array.NewFloat64Data(colData))
	case arrow.BOOL:
		copyBasic[bool](field, array.NewBooleanData(colData))
	case arrow.DURATION:
		copyBasic[int64](field, array.NewInt64Data(colData))
	default:
		fmt.Printf("datatype %s is unhandled", col.DataType().ID())
	}

	return nil
}

type arrowArray[T any] interface {
	IsNull(int) bool
	Value(int) T
	Len() int
}

func copyBasic[T any, Array arrowArray[T]](dst *data.Field, src Array) {
	for i := 0; i < src.Len(); i++ {
		if dst.Nullable() {
			if src.IsNull(i) {
				var s *T
				dst.Append(s)
				continue
			}
			s := src.Value(i)
			dst.Append(&s)
			continue
		}
		dst.Append(src.Value(i))
	}
}
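For orientation, a minimal sketch (not part of the commit) of how the conversion above is driven: any stream satisfying recordReader can be turned into a backend.DataResponse. The function name exampleResponse and the query values are illustrative assumptions only.

// Hypothetical illustration only: convert an in-memory record stream into a
// DataResponse. The tests below show the same flow with an errReader wrapper.
func exampleResponse(reader recordReader) backend.DataResponse {
	query := sqlutil.Query{
		RawSQL: "select * from cpu",       // echoed into frame metadata
		Format: sqlutil.FormatOptionTable, // pass rows through unchanged
	}
	// Headers normally come from the Flight SQL stream; empty here.
	return newQueryDataResponse(reader, query, metadata.MD{})
}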
@@ -0,0 +1,496 @@
package fsql

import (
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
	"github.com/google/go-cmp/cmp"
	"github.com/grafana/grafana-plugin-sdk-go/data"
	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc/metadata"
)

func TestNewQueryDataResponse(t *testing.T) {
	alloc := memory.DefaultAllocator
	schema := arrow.NewSchema(
		[]arrow.Field{
			{Name: "i8", Type: arrow.PrimitiveTypes.Int8},
			{Name: "i16", Type: arrow.PrimitiveTypes.Int16},
			{Name: "i32", Type: arrow.PrimitiveTypes.Int32},
			{Name: "i64", Type: arrow.PrimitiveTypes.Int64},

			{Name: "u8", Type: arrow.PrimitiveTypes.Uint8},
			{Name: "u16", Type: arrow.PrimitiveTypes.Uint16},
			{Name: "u32", Type: arrow.PrimitiveTypes.Uint32},
			{Name: "u64", Type: arrow.PrimitiveTypes.Uint64},

			{Name: "f32", Type: arrow.PrimitiveTypes.Float32},
			{Name: "f64", Type: arrow.PrimitiveTypes.Float64},

			{Name: "utf8", Type: &arrow.StringType{}},
			{Name: "duration", Type: &arrow.DurationType{}},
			{Name: "timestamp", Type: &arrow.TimestampType{}},
		},
		nil,
	)

	strValues := []jsonArray{
		newJSONArray(`[1, -2, 3]`, arrow.PrimitiveTypes.Int8),
		newJSONArray(`[1, -2, 3]`, arrow.PrimitiveTypes.Int16),
		newJSONArray(`[1, -2, 3]`, arrow.PrimitiveTypes.Int32),
		newJSONArray(`[1, -2, 3]`, arrow.PrimitiveTypes.Int64),

		newJSONArray(`[1, 2, 3]`, arrow.PrimitiveTypes.Uint8),
		newJSONArray(`[1, 2, 3]`, arrow.PrimitiveTypes.Uint16),
		newJSONArray(`[1, 2, 3]`, arrow.PrimitiveTypes.Uint32),
		newJSONArray(`[1, 2, 3]`, arrow.PrimitiveTypes.Uint64),

		newJSONArray(`[1.1, -2.2, 3.0]`, arrow.PrimitiveTypes.Float32),
		newJSONArray(`[1.1, -2.2, 3.0]`, arrow.PrimitiveTypes.Float64),

		newJSONArray(`["foo", "bar", "baz"]`, &arrow.StringType{}),
		newJSONArray(`[0, 1, -2]`, &arrow.DurationType{}),
		newJSONArray(`[0, 1, 2]`, &arrow.TimestampType{}),
	}

	var arr []arrow.Array
	for _, v := range strValues {
		tarr, _, err := array.FromJSON(
			alloc,
			v.dt,
			strings.NewReader(v.json),
		)
		if err != nil {
			t.Fatal(err)
		}
		arr = append(arr, tarr)
	}

	record := array.NewRecord(schema, arr, -1)
	records := []arrow.Record{record}
	reader, err := array.NewRecordReader(schema, records)
	assert.NoError(t, err)

	query := sqlutil.Query{Format: sqlutil.FormatOptionTable}
	resp := newQueryDataResponse(errReader{RecordReader: reader}, query, metadata.MD{})
	assert.NoError(t, resp.Error)
	assert.Len(t, resp.Frames, 1)
	assert.Len(t, resp.Frames[0].Fields, 13)

	frame := resp.Frames[0]
	f0 := frame.Fields[0]
	assert.Equal(t, f0.Name, "i8")
	assert.Equal(t, f0.Type(), data.FieldTypeInt8)
	assert.Equal(t, []int8{1, -2, 3}, extractFieldValues[int8](t, f0))

	f1 := frame.Fields[1]
	assert.Equal(t, f1.Name, "i16")
	assert.Equal(t, f1.Type(), data.FieldTypeInt16)
	assert.Equal(t, []int16{1, -2, 3}, extractFieldValues[int16](t, f1))

	f2 := frame.Fields[2]
	assert.Equal(t, f2.Name, "i32")
	assert.Equal(t, f2.Type(), data.FieldTypeInt32)
	assert.Equal(t, []int32{1, -2, 3}, extractFieldValues[int32](t, f2))

	f3 := frame.Fields[3]
	assert.Equal(t, f3.Name, "i64")
	assert.Equal(t, f3.Type(), data.FieldTypeInt64)
	assert.Equal(t, []int64{1, -2, 3}, extractFieldValues[int64](t, f3))

	f4 := frame.Fields[4]
	assert.Equal(t, f4.Name, "u8")
	assert.Equal(t, f4.Type(), data.FieldTypeUint8)
	assert.Equal(t, []uint8{1, 2, 3}, extractFieldValues[uint8](t, f4))

	f5 := frame.Fields[5]
	assert.Equal(t, f5.Name, "u16")
	assert.Equal(t, f5.Type(), data.FieldTypeUint16)
	assert.Equal(t, []uint16{1, 2, 3}, extractFieldValues[uint16](t, f5))

	f6 := frame.Fields[6]
	assert.Equal(t, f6.Name, "u32")
	assert.Equal(t, f6.Type(), data.FieldTypeUint32)
	assert.Equal(t, []uint32{1, 2, 3}, extractFieldValues[uint32](t, f6))

	f7 := frame.Fields[7]
	assert.Equal(t, f7.Name, "u64")
	assert.Equal(t, f7.Type(), data.FieldTypeUint64)
	assert.Equal(t, []uint64{1, 2, 3}, extractFieldValues[uint64](t, f7))

	f8 := frame.Fields[8]
	assert.Equal(t, f8.Name, "f32")
	assert.Equal(t, f8.Type(), data.FieldTypeFloat32)
	assert.Equal(t, []float32{1.1, -2.2, 3.0}, extractFieldValues[float32](t, f8))

	f9 := frame.Fields[9]
	assert.Equal(t, f9.Name, "f64")
	assert.Equal(t, f9.Type(), data.FieldTypeFloat64)
	assert.Equal(t, []float64{1.1, -2.2, 3.0}, extractFieldValues[float64](t, f9))

	f10 := frame.Fields[10]
	assert.Equal(t, f10.Name, "utf8")
	assert.Equal(t, f10.Type(), data.FieldTypeString)
	assert.Equal(t, []string{"foo", "bar", "baz"}, extractFieldValues[string](t, f10))

	f11 := frame.Fields[11]
	assert.Equal(t, f11.Name, "duration")
	assert.Equal(t, f11.Type(), data.FieldTypeInt64)
	assert.Equal(t, []int64{0, 1, -2}, extractFieldValues[int64](t, f11))

	f12 := frame.Fields[12]
	assert.Equal(t, f12.Name, "timestamp")
	assert.Equal(t, f12.Type(), data.FieldTypeTime)
	assert.Equal(t,
		[]time.Time{
			time.Unix(0, 0).UTC(),
			time.Unix(0, 1).UTC(),
			time.Unix(0, 2).UTC(),
		},
		extractFieldValues[time.Time](t, f12),
	)
}

type jsonArray struct {
	json string
	dt   arrow.DataType
}

func newJSONArray(json string, dt arrow.DataType) jsonArray {
	return jsonArray{json: json, dt: dt}
}

func TestNewQueryDataResponse_Error(t *testing.T) {
	alloc := memory.DefaultAllocator
	schema := arrow.NewSchema(
		[]arrow.Field{
			{Name: "f1-i64", Type: arrow.PrimitiveTypes.Int64},
			{Name: "f2-f64", Type: arrow.PrimitiveTypes.Float64},
		},
		nil,
	)

	i64s, _, err := array.FromJSON(
		alloc,
		&arrow.Int64Type{},
		strings.NewReader(`[1, 2, 3]`),
	)
	assert.NoError(t, err)
	f64s, _, err := array.FromJSON(
		alloc,
		&arrow.Float64Type{},
		strings.NewReader(`[1.1, 2.2, 3.3]`),
	)
	assert.NoError(t, err)

	record := array.NewRecord(schema, []arrow.Array{i64s, f64s}, -1)
	records := []arrow.Record{record}
	reader, err := array.NewRecordReader(schema, records)
	assert.NoError(t, err)

	wrappedReader := errReader{
		RecordReader: reader,
		err:          fmt.Errorf("explosion!"),
	}
	query := sqlutil.Query{Format: sqlutil.FormatOptionTable}
	resp := newQueryDataResponse(wrappedReader, query, metadata.MD{})
	assert.Error(t, resp.Error)
	assert.Equal(t, fmt.Errorf("explosion!"), resp.Error)
}

func TestNewQueryDataResponse_WideTable(t *testing.T) {
	alloc := memory.DefaultAllocator
	schema := arrow.NewSchema(
		[]arrow.Field{
			{Name: "time", Type: &arrow.TimestampType{}},
			{Name: "label", Type: &arrow.StringType{}},
			{Name: "value", Type: arrow.PrimitiveTypes.Int64},
		},
		nil,
	)

	times, _, err := array.FromJSON(
		alloc,
		&arrow.TimestampType{},
		strings.NewReader(`["2023-01-01T00:00:00Z", "2023-01-01T00:00:01Z", "2023-01-01T00:00:02Z"]`),
	)
	assert.NoError(t, err)
	strs, _, err := array.FromJSON(
		alloc,
		&arrow.StringType{},
		strings.NewReader(`["foo", "bar", "baz"]`),
	)
	assert.NoError(t, err)
	i64s, _, err := array.FromJSON(
		alloc,
		arrow.PrimitiveTypes.Int64,
		strings.NewReader(`[1, 2, 3]`),
	)
	assert.NoError(t, err)

	record := array.NewRecord(schema, []arrow.Array{times, strs, i64s}, -1)
	records := []arrow.Record{record}
	reader, err := array.NewRecordReader(schema, records)
	assert.NoError(t, err)

	resp := newQueryDataResponse(errReader{RecordReader: reader}, sqlutil.Query{}, metadata.MD{})
	assert.NoError(t, resp.Error)
	assert.Len(t, resp.Frames, 1)
	assert.Equal(t, 3, resp.Frames[0].Rows())
	assert.Len(t, resp.Frames[0].Fields, 4)

	frame := resp.Frames[0]
	assert.Equal(t, "time", frame.Fields[0].Name)

	// label=bar
	assert.Equal(t, "value", frame.Fields[1].Name)
	assert.Equal(t, data.Labels{"label": "bar"}, frame.Fields[1].Labels)
	assert.Equal(t, []int64{0, 2, 0}, extractFieldValues[int64](t, frame.Fields[1]))

	// label=baz
	assert.Equal(t, "value", frame.Fields[2].Name)
	assert.Equal(t, data.Labels{"label": "baz"}, frame.Fields[2].Labels)
	assert.Equal(t, []int64{0, 0, 3}, extractFieldValues[int64](t, frame.Fields[2]))

	// label=foo
	assert.Equal(t, "value", frame.Fields[3].Name)
	assert.Equal(t, data.Labels{"label": "foo"}, frame.Fields[3].Labels)
	assert.Equal(t, []int64{1, 0, 0}, extractFieldValues[int64](t, frame.Fields[3]))
}

func extractFieldValues[T any](t *testing.T, field *data.Field) []T {
	t.Helper()

	values := make([]T, 0, field.Len())
	for i := 0; i < cap(values); i++ {
		values = append(values, field.CopyAt(i).(T))
	}
	return values
}

type errReader struct {
	array.RecordReader
	err error
}

func (r errReader) Err() error {
	return r.err
}

func TestNewFrame(t *testing.T) {
	schema := arrow.NewSchema([]arrow.Field{
		{
			Name:     "name",
			Type:     &arrow.StringType{},
			Nullable: false,
			Metadata: arrow.NewMetadata(nil, nil),
		},
		{
			Name:     "time",
			Type:     &arrow.TimestampType{},
			Nullable: false,
			Metadata: arrow.NewMetadata(nil, nil),
		},
		{
			Name:     "extra",
			Type:     &arrow.Int64Type{},
			Nullable: true,
			Metadata: arrow.NewMetadata(nil, nil),
		},
	}, nil)

	actual := newFrame(schema)
	expected := &data.Frame{
		Fields: []*data.Field{
			data.NewField("name", nil, []string{}),
			data.NewField("time", nil, []time.Time{}),
			data.NewField("extra", nil, []*int64{}),
		},
	}
	if !cmp.Equal(expected, actual, cmp.Comparer(cmpFrame)) {
		t.Fatal(cmp.Diff(expected, actual))
	}
}

func cmpFrame(a, b data.Frame) bool {
	if len(a.Fields) != len(b.Fields) {
		return false
	}
	for i := 0; i < len(a.Fields); i++ {
		if a.Fields[i].Name != b.Fields[i].Name {
			return false
		}
		if a.Fields[i].Nullable() != b.Fields[i].Nullable() {
			return false
		}
	}
	return true
}

func TestCopyData_String(t *testing.T) {
	field := data.NewField("field", nil, []string{})
	builder := array.NewStringBuilder(memory.DefaultAllocator)
	builder.Append("joe")
	builder.Append("john")
	builder.Append("jackie")
	err := copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, "joe", field.CopyAt(0))
	assert.Equal(t, "john", field.CopyAt(1))
	assert.Equal(t, "jackie", field.CopyAt(2))

	field = data.NewField("field", nil, []*string{})
	builder = array.NewStringBuilder(memory.DefaultAllocator)
	builder.Append("joe")
	builder.AppendNull()
	builder.Append("jackie")
	err = copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, "joe", *(field.CopyAt(0).(*string)))
	assert.Equal(t, (*string)(nil), field.CopyAt(1))
	assert.Equal(t, "jackie", *(field.CopyAt(2).(*string)))
}

func TestCopyData_Timestamp(t *testing.T) {
	start, _ := time.Parse(time.RFC3339, "2023-01-01T01:01:01Z")

	field := data.NewField("field", nil, []time.Time{})
	builder := array.NewTimestampBuilder(memory.DefaultAllocator, &arrow.TimestampType{})
	builder.Append(arrow.Timestamp(start.Add(time.Hour).UnixNano()))
	builder.Append(arrow.Timestamp(start.Add(2 * time.Hour).UnixNano()))
	builder.Append(arrow.Timestamp(start.Add(3 * time.Hour).UnixNano()))
	err := copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, start.Add(time.Hour), field.CopyAt(0))
	assert.Equal(t, start.Add(2*time.Hour), field.CopyAt(1))
	assert.Equal(t, start.Add(3*time.Hour), field.CopyAt(2))

	field = data.NewField("field", nil, []*time.Time{})
	builder = array.NewTimestampBuilder(memory.DefaultAllocator, &arrow.TimestampType{})
	builder.Append(arrow.Timestamp(start.Add(time.Hour).UnixNano()))
	builder.AppendNull()
	builder.Append(arrow.Timestamp(start.Add(3 * time.Hour).UnixNano()))
	err = copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, start.Add(time.Hour), *field.CopyAt(0).(*time.Time))
	assert.Equal(t, (*time.Time)(nil), field.CopyAt(1))
	assert.Equal(t, start.Add(3*time.Hour), *field.CopyAt(2).(*time.Time))
}

func TestCopyData_Boolean(t *testing.T) {
	field := data.NewField("field", nil, []bool{})
	builder := array.NewBooleanBuilder(memory.DefaultAllocator)
	builder.Append(true)
	builder.Append(false)
	builder.Append(true)
	err := copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, true, field.CopyAt(0))
	assert.Equal(t, false, field.CopyAt(1))
	assert.Equal(t, true, field.CopyAt(2))

	field = data.NewField("field", nil, []*bool{})
	builder = array.NewBooleanBuilder(memory.DefaultAllocator)
	builder.Append(true)
	builder.AppendNull()
	builder.Append(true)
	err = copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, true, *field.CopyAt(0).(*bool))
	assert.Equal(t, (*bool)(nil), field.CopyAt(1))
	assert.Equal(t, true, *field.CopyAt(2).(*bool))
}

func TestCopyData_Int64(t *testing.T) {
	field := data.NewField("field", nil, []int64{})
	builder := array.NewInt64Builder(memory.DefaultAllocator)
	builder.Append(1)
	builder.Append(2)
	builder.Append(3)
	err := copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, int64(1), field.CopyAt(0))
	assert.Equal(t, int64(2), field.CopyAt(1))
	assert.Equal(t, int64(3), field.CopyAt(2))

	field = data.NewField("field", nil, []*int64{})
	builder = array.NewInt64Builder(memory.DefaultAllocator)
	builder.Append(1)
	builder.AppendNull()
	builder.Append(3)
	arr := builder.NewArray()
	err = copyData(field, arr)
	assert.NoError(t, err)
	assert.Equal(t, int64(1), *field.CopyAt(0).(*int64))
	assert.Equal(t, (*int64)(nil), field.CopyAt(1))
	assert.Equal(t, int64(3), *field.CopyAt(2).(*int64))
}

func TestCopyData_Float64(t *testing.T) {
	field := data.NewField("field", nil, []float64{})
	builder := array.NewFloat64Builder(memory.DefaultAllocator)
	builder.Append(1.1)
	builder.Append(2.2)
	builder.Append(3.3)
	err := copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, float64(1.1), field.CopyAt(0))
	assert.Equal(t, float64(2.2), field.CopyAt(1))
	assert.Equal(t, float64(3.3), field.CopyAt(2))

	field = data.NewField("field", nil, []*float64{})
	builder = array.NewFloat64Builder(memory.DefaultAllocator)
	builder.Append(1.1)
	builder.AppendNull()
	builder.Append(3.3)
	err = copyData(field, builder.NewArray())
	assert.NoError(t, err)
	assert.Equal(t, float64(1.1), *field.CopyAt(0).(*float64))
	assert.Equal(t, (*float64)(nil), field.CopyAt(1))
	assert.Equal(t, float64(3.3), *field.CopyAt(2).(*float64))
}

func TestCustomMetadata(t *testing.T) {
	schema := arrow.NewSchema([]arrow.Field{
		{
			Name:     "int64",
			Type:     &arrow.Int64Type{},
			Nullable: true,
			Metadata: arrow.NewMetadata(nil, nil),
		},
	}, nil)
	i64s, _, err := array.FromJSON(
		memory.DefaultAllocator,
		arrow.PrimitiveTypes.Int64,
		strings.NewReader(`[1, 2, 3]`),
	)
	assert.NoError(t, err)

	record := array.NewRecord(schema, []arrow.Array{i64s}, -1)
	records := []arrow.Record{record}
	reader, err := array.NewRecordReader(schema, records)
	assert.NoError(t, err)

	md := metadata.MD{}
	md.Set("trace-id", "abc")
	md.Set("trace-sampled", "true")
	query := sqlutil.Query{
		Format: sqlutil.FormatOptionTable,
	}
	resp := newQueryDataResponse(errReader{RecordReader: reader}, query, md)
	assert.NoError(t, resp.Error)

	assert.Equal(t, map[string]any{
		"headers": metadata.MD{
			"trace-id":      []string{"abc"},
			"trace-sampled": []string{"true"},
		},
	}, resp.Frames[0].Meta.Custom)
}
@@ -0,0 +1,117 @@
package fsql

import (
	"context"
	"crypto/x509"
	"fmt"
	"sync"

	"github.com/apache/arrow/go/v12/arrow/flight"
	"github.com/apache/arrow/go/v12/arrow/flight/flightsql"
	"github.com/apache/arrow/go/v12/arrow/ipc"
	"github.com/apache/arrow/go/v12/arrow/memory"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"
)

type client struct {
	*flightsql.Client
	md metadata.MD
}

// FlightClient returns the underlying [flight.Client].
func (c *client) FlightClient() flight.Client {
	return c.Client.Client
}

func newFlightSQLClient(addr string, metadata metadata.MD, secure bool) (*client, error) {
	dialOptions, err := grpcDialOptions(secure)
	if err != nil {
		return nil, fmt.Errorf("grpc dial options: %s", err)
	}
	fsqlClient, err := flightsql.NewClient(addr, nil, nil, dialOptions...)
	if err != nil {
		return nil, err
	}
	return &client{Client: fsqlClient, md: metadata}, nil
}

func grpcDialOptions(secure bool) ([]grpc.DialOption, error) {
	transport := grpc.WithTransportCredentials(insecure.NewCredentials())
	if secure {
		pool, err := x509.SystemCertPool()
		if err != nil {
			return nil, fmt.Errorf("x509: %s", err)
		}
		transport = grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(pool, ""))
	}

	opts := []grpc.DialOption{
		transport,
	}

	return opts, nil
}

// DoGetWithHeaderExtraction performs a normal DoGet, but wraps the stream in a
// mechanism that extracts headers when they become available. At least one
// record should be read from the *flightReader before the headers are
// available.
func (c *client) DoGetWithHeaderExtraction(ctx context.Context, in *flight.Ticket, opts ...grpc.CallOption) (*flightReader, error) {
	stream, err := c.Client.Client.DoGet(ctx, in, opts...)
	if err != nil {
		return nil, err
	}
	return newFlightReader(stream, c.Client.Alloc)
}

// flightReader wraps a [flight.Reader] to expose the headers captured when the
// first read occurs on the stream.
type flightReader struct {
	*flight.Reader
	extractor *headerExtractor
}

// newFlightReader returns a [flightReader].
func newFlightReader(stream flight.FlightService_DoGetClient, alloc memory.Allocator) (*flightReader, error) {
	extractor := &headerExtractor{stream: stream}
	reader, err := flight.NewRecordReader(extractor, ipc.WithAllocator(alloc))
	if err != nil {
		return nil, err
	}
	return &flightReader{
		Reader:    reader,
		extractor: extractor,
	}, nil
}

// Header returns the extracted headers if they exist.
func (s *flightReader) Header() (metadata.MD, error) {
	return s.extractor.Header()
}

// headerExtractor collects the stream's headers on the first call to
// [(*headerExtractor).Recv].
type headerExtractor struct {
	stream flight.FlightService_DoGetClient

	once   sync.Once
	header metadata.MD
	err    error
}

// Header returns the extracted headers if they exist.
func (s *headerExtractor) Header() (metadata.MD, error) {
	return s.header, s.err
}

// Recv reads from the stream. The first invocation will capture the headers.
func (s *headerExtractor) Recv() (*flight.FlightData, error) {
	data, err := s.stream.Recv()
	s.once.Do(func() {
		s.header, s.err = s.stream.Header()
	})
	return data, err
}
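The header-extraction wrapper has one subtlety, spelled out in its doc comment: gRPC headers are only captured on the first Recv, so a caller must pull at least one record before Header() returns anything. A minimal sketch (assumed function name, not part of the commit):

// Hypothetical illustration only: headers become available after the first
// read on the wrapped stream.
func exampleHeaders(ctx context.Context, c *client, ticket *flight.Ticket) (metadata.MD, error) {
	reader, err := c.DoGetWithHeaderExtraction(ctx, ticket)
	if err != nil {
		return nil, err
	}
	defer reader.Release()

	// Pull one record so the underlying Recv runs and captures the headers.
	reader.Next()

	return reader.Header()
}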
@@ -0,0 +1,98 @@
package fsql

import (
	"context"
	"database/sql"
	"encoding/json"
	"testing"

	"github.com/apache/arrow/go/v12/arrow/flight"
	"github.com/apache/arrow/go/v12/arrow/flight/flightsql"
	"github.com/apache/arrow/go/v12/arrow/flight/flightsql/example"
	"github.com/apache/arrow/go/v12/arrow/memory"
	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/grafana/grafana/pkg/tsdb/influxdb/models"
)

func TestIntegration_QueryData(t *testing.T) {
	db, err := example.CreateDB()
	require.NoError(t, err)
	defer func(db *sql.DB) {
		err := db.Close()
		assert.NoError(t, err)
	}(db)

	sqliteServer, err := example.NewSQLiteFlightSQLServer(db)
	require.NoError(t, err)
	sqliteServer.Alloc = memory.NewCheckedAllocator(memory.DefaultAllocator)
	server := flight.NewServerWithMiddleware(nil)
	server.RegisterFlightService(flightsql.NewFlightServer(sqliteServer))
	err = server.Init("localhost:12345")
	require.NoError(t, err)
	go func() {
		err := server.Serve()
		assert.NoError(t, err)
	}()
	defer server.Shutdown()

	resp, err := Query(
		context.Background(),
		&models.DatasourceInfo{
			HTTPClient: nil,
			Token:      "secret",
			URL:        "http://localhost:12345",
			DbName:     "influxdb",
			Version:    "test",
			HTTPMode:   "proxy",
			Metadata: []map[string]string{
				{
					"bucket": "bucket",
				},
			},
			SecureGrpc: false,
		},
		backend.QueryDataRequest{
			Queries: []backend.DataQuery{
				{
					RefID: "A",
					JSON:  mustQueryJSON(t, "A", "select * from intTable"),
				},
				{
					RefID: "B",
					JSON:  mustQueryJSON(t, "B", "select 1"),
				},
			},
		},
	)
	require.NoError(t, err)
	require.Len(t, resp.Responses, 2)

	respA := resp.Responses["A"]
	require.NoError(t, respA.Error)
	frame := respA.Frames[0]

	require.Equal(t, "id", frame.Fields[0].Name)
	require.Equal(t, "keyName", frame.Fields[1].Name)
	require.Equal(t, "value", frame.Fields[2].Name)
	require.Equal(t, "foreignId", frame.Fields[3].Name)
	for _, f := range frame.Fields {
		assert.Equal(t, 4, f.Len())
	}
}

func mustQueryJSON(t *testing.T, refID, sql string) []byte {
	t.Helper()

	b, err := json.Marshal(queryRequest{
		RefID:    refID,
		RawQuery: sql,
		Format:   "table",
	})
	if err != nil {
		panic(err)
	}
	return b
}
@@ -0,0 +1,126 @@
package fsql

import (
	"context"
	"fmt"
	"net"
	"net/url"
	"strings"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"google.golang.org/grpc/metadata"

	"github.com/grafana/grafana/pkg/infra/log"
	"github.com/grafana/grafana/pkg/tsdb/influxdb/models"
)

var (
	glog = log.New("tsdb.influx_flightsql")
)

type SQLOptions struct {
	Addr     string              `json:"host"`
	Metadata []map[string]string `json:"metadata"`
	Token    string              `json:"token"`
}

func Query(ctx context.Context, dsInfo *models.DatasourceInfo, req backend.QueryDataRequest) (
	*backend.QueryDataResponse, error) {
	logger := glog.FromContext(ctx)
	tRes := backend.NewQueryDataResponse()
	r, err := runnerFromDataSource(dsInfo)
	if err != nil {
		return tRes, err
	}
	defer func(client *client) {
		err := client.Close()
		if err != nil {
			logger.Warn("Failed to close fsql client", "err", err)
		}
	}(r.client)

	if r.client.md.Len() != 0 {
		ctx = metadata.NewOutgoingContext(ctx, r.client.md)
	}

	for _, q := range req.Queries {
		qm, err := getQueryModel(q)
		if err != nil {
			tRes.Responses[q.RefID] = backend.ErrDataResponse(backend.StatusInternal, "bad request")
			continue
		}

		info, err := r.client.Execute(ctx, qm.RawSQL)
		if err != nil {
			tRes.Responses[q.RefID] = backend.ErrDataResponse(backend.StatusInternal, fmt.Sprintf("flightsql: %s", err))
			return tRes, nil
		}
		if len(info.Endpoint) != 1 {
			tRes.Responses[q.RefID] = backend.ErrDataResponse(backend.StatusInternal, fmt.Sprintf("unsupported endpoint count in response: %d", len(info.Endpoint)))
			return tRes, nil
		}

		reader, err := r.client.DoGetWithHeaderExtraction(ctx, info.Endpoint[0].Ticket)
		if err != nil {
			tRes.Responses[q.RefID] = backend.ErrDataResponse(backend.StatusInternal, fmt.Sprintf("flightsql: %s", err))
			return tRes, nil
		}
		defer reader.Release()

		headers, err := reader.Header()
		if err != nil {
			logger.Error(fmt.Sprintf("Failed to extract headers: %s", err))
		}

		tRes.Responses[q.RefID] = newQueryDataResponse(reader, *qm.Query, headers)
	}

	return tRes, nil
}

type runner struct {
	client *client
}

// runnerFromDataSource creates a runner from the datasource model (the datasource instance's configuration).
func runnerFromDataSource(dsInfo *models.DatasourceInfo) (*runner, error) {
	if dsInfo.URL == "" {
		return nil, fmt.Errorf("missing URL from datasource configuration")
	}

	u, err := url.Parse(dsInfo.URL)
	if err != nil {
		return nil, fmt.Errorf("bad URL: %s", err)
	}

	host, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		return nil, fmt.Errorf("bad URL: %s", err)
	}
	addr := strings.Join([]string{host, port}, ":")

	md := metadata.MD{}
	for _, m := range dsInfo.Metadata {
		for k, v := range m {
			if _, ok := md[k]; ok {
				return nil, fmt.Errorf("metadata: duplicate key: %s", k)
			}
			if k != "" {
				md.Set(k, v)
			}
		}
	}

	if dsInfo.Token != "" {
		md.Set("Authorization", fmt.Sprintf("Bearer %s", dsInfo.Token))
	}

	fsqlClient, err := newFlightSQLClient(addr, md, dsInfo.SecureGrpc)
	if err != nil {
		return nil, err
	}

	return &runner{
		client: fsqlClient,
	}, nil
}
@@ -0,0 +1,109 @@
package fsql

import (
	"fmt"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
)

var macros = sqlutil.Macros{
	"dateBin":        macroDateBin(""),
	"dateBinAlias":   macroDateBin("_binned"),
	"interval":       macroInterval,
	"timeGroup":      macroTimeGroup,
	"timeGroupAlias": macroTimeGroupAlias,

	// The behaviors of timeFrom and timeTo as defined in the SDK are different
	// from all other Grafana SQL plugins. Instead we'll take their
	// implementations, rename them and define timeFrom and timeTo ourselves.
	"timeRangeFrom": sqlutil.DefaultMacros["timeFrom"],
	"timeRangeTo":   sqlutil.DefaultMacros["timeTo"],
	"timeRange":     sqlutil.DefaultMacros["timeFilter"],
	"timeTo":        macroTo,
	"timeFrom":      macroFrom,
}

func macroTimeGroup(query *sqlutil.Query, args []string) (string, error) {
	if len(args) != 2 {
		return "", fmt.Errorf("%w: expected 2 arguments, received %d", sqlutil.ErrorBadArgumentCount, len(args))
	}

	column := args[0]

	res := ""
	switch args[1] {
	case "minute":
		res += fmt.Sprintf("datepart('minute', %s),", column)
		fallthrough
	case "hour":
		res += fmt.Sprintf("datepart('hour', %s),", column)
		fallthrough
	case "day":
		res += fmt.Sprintf("datepart('day', %s),", column)
		fallthrough
	case "month":
		res += fmt.Sprintf("datepart('month', %s),", column)
		fallthrough
	case "year":
		res += fmt.Sprintf("datepart('year', %s)", column)
	}

	return res, nil
}

func macroTimeGroupAlias(query *sqlutil.Query, args []string) (string, error) {
	if len(args) != 2 {
		return "", fmt.Errorf("%w: expected 2 arguments, received %d", sqlutil.ErrorBadArgumentCount, len(args))
	}

	column := args[0]

	res := ""
	switch args[1] {
	case "minute":
		res += fmt.Sprintf("datepart('minute', %s) as %s_minute,", column, column)
		fallthrough
	case "hour":
		res += fmt.Sprintf("datepart('hour', %s) as %s_hour,", column, column)
		fallthrough
	case "day":
		res += fmt.Sprintf("datepart('day', %s) as %s_day,", column, column)
		fallthrough
	case "month":
		res += fmt.Sprintf("datepart('month', %s) as %s_month,", column, column)
		fallthrough
	case "year":
		res += fmt.Sprintf("datepart('year', %s) as %s_year", column, column)
	}

	return res, nil
}

func macroInterval(query *sqlutil.Query, _ []string) (string, error) {
	return fmt.Sprintf("interval '%d second'", int64(query.Interval.Seconds())), nil
}

func macroFrom(query *sqlutil.Query, _ []string) (string, error) {
	return fmt.Sprintf("cast('%s' as timestamp)", query.TimeRange.From.Format(time.RFC3339)), nil
}

func macroTo(query *sqlutil.Query, _ []string) (string, error) {
	return fmt.Sprintf("cast('%s' as timestamp)", query.TimeRange.To.Format(time.RFC3339)), nil
}

func macroDateBin(suffix string) sqlutil.MacroFunc {
	return func(query *sqlutil.Query, args []string) (string, error) {
		if len(args) != 1 {
			return "", fmt.Errorf("%w: expected 1 argument, received %d", sqlutil.ErrorBadArgumentCount, len(args))
		}
		column := args[0]
		aliasing := func() string {
			if suffix == "" {
				return ""
			}
			return fmt.Sprintf(" as %s%s", column, suffix)
		}()
		return fmt.Sprintf("date_bin(interval '%d second', %s, timestamp '1970-01-01T00:00:00Z')%s", int64(query.Interval.Seconds()), column, aliasing), nil
	}
}
@@ -0,0 +1,75 @@
package fsql

import (
	"testing"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
	"github.com/stretchr/testify/require"
)

func TestMacros(t *testing.T) {
	from, _ := time.Parse(time.RFC3339, "2023-01-01T00:00:00Z")

	query := sqlutil.Query{
		TimeRange: backend.TimeRange{
			From: from,
			To:   from.Add(10 * time.Minute),
		},
		Interval: 10 * time.Second,
	}

	cs := []struct {
		in  string
		out string
	}{
		{
			in:  `select * from x`,
			out: `select * from x`,
		},
		{
			in:  `select date_bin($__interval, time, timestamp '1970-01-01T00:00:00Z')`,
			out: `select date_bin(interval '10 second', time, timestamp '1970-01-01T00:00:00Z')`,
		},
		{
			in:  `select $__dateBin(time)`,
			out: `select date_bin(interval '10 second', time, timestamp '1970-01-01T00:00:00Z')`,
		},
		{
			in:  `select $__dateBinAlias(time)`,
			out: `select date_bin(interval '10 second', time, timestamp '1970-01-01T00:00:00Z') as time_binned`,
		},
		{
			in:  `select * from x where $__timeFilter(time)`,
			out: `select * from x where time >= '2023-01-01T00:00:00Z' AND time <= '2023-01-01T00:10:00Z'`,
		},
		{
			in:  `select * from x where $__timeRangeFrom(time)`,
			out: `select * from x where time >= '2023-01-01T00:00:00Z'`,
		},
		{
			in:  `select * from x where $__timeRangeTo(time)`,
			out: `select * from x where time <= '2023-01-01T00:10:00Z'`,
		},
		{
			in:  `select * from x where $__timeRange(time)`,
			out: `select * from x where time >= '2023-01-01T00:00:00Z' AND time <= '2023-01-01T00:10:00Z'`,
		},
		{
			in:  `select * from x where time >= $__timeFrom`,
			out: `select * from x where time >= cast('2023-01-01T00:00:00Z' as timestamp)`,
		},
		{
			in:  `select * from x where time < $__timeTo`,
			out: `select * from x where time < cast('2023-01-01T00:10:00Z' as timestamp)`,
		},
	}
	for _, c := range cs {
		t.Run(c.in, func(t *testing.T) {
			sql, err := sqlutil.Interpolate(query.WithSQL(c.in), macros)
			require.NoError(t, err)
			require.Equal(t, c.out, sql)
		})
	}
}
@@ -0,0 +1,59 @@
package fsql

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
)

type queryModel struct {
	*sqlutil.Query
}

// queryRequest is an inbound query request as part of a batch of queries sent
// to [(*FlightSQLDatasource).QueryData].
type queryRequest struct {
	RefID                string `json:"refId"`
	RawQuery             string `json:"query"`
	IntervalMilliseconds int    `json:"intervalMs"`
	MaxDataPoints        int64  `json:"maxDataPoints"`
	Format               string `json:"resultFormat"`
}

func getQueryModel(dataQuery backend.DataQuery) (*queryModel, error) {
	var q queryRequest
	if err := json.Unmarshal(dataQuery.JSON, &q); err != nil {
		return nil, fmt.Errorf("unmarshal json: %w", err)
	}

	var format sqlutil.FormatQueryOption
	switch q.Format {
	case "time_series":
		format = sqlutil.FormatOptionTimeSeries
	case "table":
		format = sqlutil.FormatOptionTable
	default:
		format = sqlutil.FormatOptionTimeSeries
	}

	query := &sqlutil.Query{
		RawSQL:        q.RawQuery,
		RefID:         q.RefID,
		MaxDataPoints: q.MaxDataPoints,
		Interval:      time.Duration(q.IntervalMilliseconds) * time.Millisecond,
		TimeRange:     dataQuery.TimeRange,
		Format:        format,
	}

	// Interpolate macros into the raw SQL before it is sent to the server.
	sql, err := sqlutil.Interpolate(query, macros)
	if err != nil {
		return nil, fmt.Errorf("macro interpolation: %w", err)
	}
	query.RawSQL = sql

	return &queryModel{query}, nil
}
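For reference, a sketch (not part of the commit) of the JSON payload the frontend sends for one query and how it flows through getQueryModel; the function name exampleQueryModel and the literal values are illustrative assumptions.

// Hypothetical illustration only: an inbound query document and its parsed
// form. Grafana supplies the surrounding backend.DataQuery, including the
// TimeRange that the time macros expand against.
func exampleQueryModel() (*queryModel, error) {
	dq := backend.DataQuery{
		JSON: []byte(`{
			"refId": "A",
			"query": "select time, value from cpu where $__timeRange(time)",
			"intervalMs": 10000,
			"maxDataPoints": 500,
			"resultFormat": "table"
		}`),
	}
	// The returned model carries the macro-interpolated SQL in RawSQL.
	return getQueryModel(dq)
}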
@@ -0,0 +1,90 @@
import { uniqueId } from 'lodash';
import React from 'react';

import {
  DataSourcePluginOptionsEditorProps,
  onUpdateDatasourceJsonDataOption,
  onUpdateDatasourceSecureJsonDataOption,
  updateDatasourcePluginResetOption,
} from '@grafana/data';
import { InlineFormLabel, LegacyForms } from '@grafana/ui';

import { InfluxOptions, InfluxSecureJsonData } from '../../../types';

const { Input, SecretFormField } = LegacyForms;

export type Props = DataSourcePluginOptionsEditorProps<InfluxOptions, InfluxSecureJsonData>;

export const InfluxFluxConfig = (props: Props) => {
  const {
    options: { jsonData, secureJsonData, secureJsonFields },
  } = props;
  const htmlPrefix = uniqueId('influxdb-flux-config');

  return (
    <>
      <div className="gf-form-inline">
        <div className="gf-form">
          <InlineFormLabel htmlFor={`${htmlPrefix}-org`} className="width-10">
            Organization
          </InlineFormLabel>
          <div className="width-10">
            <Input
              id={`${htmlPrefix}-org`}
              className="width-20"
              value={jsonData.organization || ''}
              onChange={onUpdateDatasourceJsonDataOption(props, 'organization')}
            />
          </div>
        </div>
      </div>
      <div className="gf-form-inline">
        <div className="gf-form">
          <SecretFormField
            isConfigured={Boolean(secureJsonFields && secureJsonFields.token)}
            value={secureJsonData?.token || ''}
            label="Token"
            aria-label="Token"
            labelWidth={10}
            inputWidth={20}
            onReset={() => updateDatasourcePluginResetOption(props, 'token')}
            onChange={onUpdateDatasourceSecureJsonDataOption(props, 'token')}
          />
        </div>
      </div>
      <div className="gf-form-inline">
        <div className="gf-form">
          <InlineFormLabel className="width-10">Default Bucket</InlineFormLabel>
          <div className="width-10">
            <Input
              className="width-20"
              placeholder="default bucket"
              value={jsonData.defaultBucket || ''}
              onChange={onUpdateDatasourceJsonDataOption(props, 'defaultBucket')}
            />
          </div>
        </div>
      </div>

      <div className="gf-form-inline">
        <div className="gf-form">
          <InlineFormLabel
            className="width-10"
            tooltip="A lower limit for the auto group by time interval. Recommended to be set to write frequency,
            for example 1m if your data is written every minute."
          >
            Min time interval
          </InlineFormLabel>
          <div className="width-10">
            <Input
              className="width-20"
              placeholder="10s"
              value={jsonData.timeInterval || ''}
              onChange={onUpdateDatasourceJsonDataOption(props, 'timeInterval')}
            />
          </div>
        </div>
      </div>
    </>
  );
};
@@ -0,0 +1,140 @@
import { uniqueId } from 'lodash';
import React from 'react';

import {
  DataSourcePluginOptionsEditorProps,
  onUpdateDatasourceJsonDataOption,
  onUpdateDatasourceJsonDataOptionSelect,
  onUpdateDatasourceOption,
  onUpdateDatasourceSecureJsonDataOption,
  SelectableValue,
  updateDatasourcePluginResetOption,
} from '@grafana/data';
import { Alert, InlineFormLabel, LegacyForms, Select } from '@grafana/ui';

import { InfluxOptions, InfluxSecureJsonData } from '../../../types';

const { Input, SecretFormField } = LegacyForms;

const httpModes: SelectableValue[] = [
  { label: 'GET', value: 'GET' },
  { label: 'POST', value: 'POST' },
];

export type Props = DataSourcePluginOptionsEditorProps<InfluxOptions, InfluxSecureJsonData>;

export const InfluxInfluxQLConfig = (props: Props) => {
  const { options, onOptionsChange } = props;
  const { database, jsonData, secureJsonData, secureJsonFields } = options;
  const htmlPrefix = uniqueId('influxdb-influxql-config');

  return (
    <>
      <Alert severity="info" title="Database Access">
        <p>
          Setting the database for this datasource does not deny access to other databases. The InfluxDB query syntax
          allows switching the database in the query. For example:
          <code>SHOW MEASUREMENTS ON _internal</code> or
          <code>SELECT * FROM &quot;_internal&quot;..&quot;database&quot; LIMIT 10</code>
          <br />
          <br />
          To support data isolation and security, make sure appropriate permissions are configured in InfluxDB.
        </p>
      </Alert>
      <div className="gf-form-inline">
        <div className="gf-form">
          <InlineFormLabel htmlFor={`${htmlPrefix}-db`} className="width-10">
            Database
          </InlineFormLabel>
          <div className="width-20">
            <Input
              id={`${htmlPrefix}-db`}
              className="width-20"
              value={jsonData.dbName ?? database}
              onChange={(event) => {
                onOptionsChange({
                  ...options,
                  database: '',
                  jsonData: {
                    ...jsonData,
                    dbName: event.target.value,
                  },
                });
              }}
            />
          </div>
        </div>
      </div>
      <div className="gf-form-inline">
        <div className="gf-form">
          <InlineFormLabel htmlFor={`${htmlPrefix}-user`} className="width-10">
            User
          </InlineFormLabel>
          <div className="width-10">
            <Input
              id={`${htmlPrefix}-user`}
              className="width-20"
              value={options.user || ''}
              onChange={onUpdateDatasourceOption(props, 'user')}
            />
          </div>
        </div>
      </div>
      <div className="gf-form-inline">
        <div className="gf-form">
          <SecretFormField
            isConfigured={Boolean(secureJsonFields && secureJsonFields.password)}
            value={secureJsonData?.password || ''}
            label="Password"
            aria-label="Password"
            labelWidth={10}
            inputWidth={20}
            onReset={() => updateDatasourcePluginResetOption(props, 'password')}
            onChange={onUpdateDatasourceSecureJsonDataOption(props, 'password')}
          />
        </div>
      </div>
      <div className="gf-form-inline">
        <div className="gf-form">
          <InlineFormLabel
            htmlFor={`${htmlPrefix}-http-method`}
            className="width-10"
            tooltip="You can use either GET or POST HTTP method to query your InfluxDB database. The POST
            method allows you to perform heavy requests (with lots of WHERE clauses) while the GET method
            will restrict you and return an error if the query is too large."
          >
            HTTP Method
          </InlineFormLabel>
          <Select
            inputId={`${htmlPrefix}-http-method`}
            className="width-20"
            value={httpModes.find((httpMode) => httpMode.value === options.jsonData.httpMode)}
            options={httpModes}
            defaultValue={options.jsonData.httpMode}
            onChange={onUpdateDatasourceJsonDataOptionSelect(props, 'httpMode')}
          />
        </div>
      </div>

      <div className="gf-form-inline">
        <div className="gf-form">
          <InlineFormLabel
            className="width-10"
            tooltip="A lower limit for the auto group by time interval. Recommended to be set to write frequency,
            for example 1m if your data is written every minute."
          >
            Min time interval
          </InlineFormLabel>
          <div className="width-10">
            <Input
              className="width-20"
              placeholder="10s"
              value={options.jsonData.timeInterval || ''}
              onChange={onUpdateDatasourceJsonDataOption(props, 'timeInterval')}
            />
          </div>
        </div>
      </div>
    </>
  );
};
@@ -0,0 +1,138 @@
import React, { useEffect, useState } from 'react';

import {
  DataSourcePluginOptionsEditorProps,
  onUpdateDatasourceSecureJsonDataOption,
  updateDatasourcePluginResetOption,
} from '@grafana/data';
import { InlineField, SecretInput, Input, InlineFieldRow, InlineLabel } from '@grafana/ui';

import { InfluxOptions, InfluxSecureJsonData } from '../../../types';

export type Props = DataSourcePluginOptionsEditorProps<InfluxOptions, InfluxSecureJsonData>;

type MetadataState = Array<{ key: string; value: string }>;

export const addMetaData = (setMetaData: (val: MetadataState) => void, metaDataArr: MetadataState) => {
  setMetaData([...metaDataArr, { key: '', value: '' }]);
};

export const removeMetaData = (i: number, setMetaData: (val: MetadataState) => void, metaDataArr: MetadataState) => {
  const newMetaValues = [...metaDataArr];
  newMetaValues.splice(i, 1);
  setMetaData(newMetaValues);
};

export const onKeyChange = (
  key: string,
  metaDataArr: MetadataState,
  index: number,
  setMetaData: (val: MetadataState) => void
) => {
  const newMetaValues = [...metaDataArr];
  newMetaValues[index]['key'] = key;
  setMetaData(newMetaValues);
};

export const onValueChange = (
  value: string,
  metaDataArr: MetadataState,
  index: number,
  setMetaData: (val: MetadataState) => void
) => {
  const newMetaValues = [...metaDataArr];
  newMetaValues[index]['value'] = value;
  setMetaData(newMetaValues);
};

export const InfluxSqlConfig = (props: Props) => {
  const {
    options: { jsonData, secureJsonData, secureJsonFields },
  } = props;

  const existingMetadata: MetadataState = jsonData?.metadata?.length
    ? jsonData?.metadata?.map((md) => ({ key: Object.keys(md)[0], value: Object.values(md)[0] }))
    : [{ key: 'bucket-name', value: '' }];
  const [metaDataArr, setMetaData] = useState<MetadataState>(existingMetadata);

  useEffect(() => {
    const { onOptionsChange, options } = props;
    const mapData = metaDataArr?.map((m) => ({ [m.key]: m.value }));
    const jsonData = {
      ...options.jsonData,
      metadata: mapData,
    };
    onOptionsChange({
      ...options,
      jsonData,
    });
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [metaDataArr]);

  return (
    <div>
      <div className="gf-form">
        <h6>Token</h6>
      </div>
      <div>
        <InlineField labelWidth={20} label="Token">
          <SecretInput
            width={40}
            name="token"
            type="text"
            value={secureJsonData?.token || ''}
            onReset={() => updateDatasourcePluginResetOption(props, 'token')}
            onChange={onUpdateDatasourceSecureJsonDataOption(props, 'token')}
            isConfigured={secureJsonFields?.token}
          />
        </InlineField>
      </div>
      <div>
        <div className="gf-form">
          <h6>MetaData</h6>
        </div>
        {metaDataArr?.map((_, i) => (
          <InlineFieldRow key={i} style={{ flexFlow: 'row' }}>
            <InlineField labelWidth={20} label="Key">
              <Input
                key={i}
                width={40}
                name="key"
                type="text"
                value={metaDataArr[i]?.key || ''}
                placeholder="key"
                onChange={(e) => onKeyChange(e.currentTarget.value, metaDataArr, i, setMetaData)}
              ></Input>
            </InlineField>
            <InlineField labelWidth={20} label="Value">
              <Input
                key={i}
                width={40}
                name="value"
                type="text"
                value={metaDataArr[i]?.value?.toString() ?? ''}
                placeholder="value"
                onChange={(e) => onValueChange(e.currentTarget.value, metaDataArr, i, setMetaData)}
              ></Input>
            </InlineField>
            {i + 1 >= metaDataArr.length && (
              <InlineLabel as="button" className="" onClick={() => addMetaData(setMetaData, metaDataArr)} width="auto">
                +
              </InlineLabel>
            )}
            {i > 0 && (
              <InlineLabel
                as="button"
                className=""
                width="auto"
                onClick={() => removeMetaData(i, setMetaData, metaDataArr)}
              >
                -
              </InlineLabel>
            )}
          </InlineFieldRow>
        ))}
      </div>
    </div>
  );
};
@@ -0,0 +1,32 @@
import React from 'react';

import { Input } from '@grafana/ui';

import { InfluxQuery } from '../../../../types';

type Props = {
  onChange: (query: InfluxQuery) => void;
  onRunQuery: () => void;
  query: InfluxQuery;
};

// Flight SQL Editor
export const FSQLEditor = (props: Props) => {
  const onSQLQueryChange = (query?: string) => {
    if (query) {
      props.onChange({ ...props.query, query, resultFormat: 'table' });
    }
    props.onRunQuery();
  };
  return (
    <div>
      <Input
        value={props.query.query}
        onBlur={(e) => onSQLQueryChange(e.currentTarget.value)}
        onChange={(e) => onSQLQueryChange(e.currentTarget.value)}
      />
      <br />
      <button onClick={() => onSQLQueryChange()}>run query</button>
    </div>
  );
};