@@ -102,11 +102,6 @@ func (s *QueryData) Execute(ctx context.Context, req *backend.QueryDataRequest)
 	var (
 		hasPromQLScopeFeatureFlag = s.featureToggles.IsEnabled("promQLScope")
-		hasPrometheusRunQueriesInParallel = s.featureToggles.IsEnabled("prometheusRunQueriesInParallel")
-	)
-	if hasPrometheusRunQueriesInParallel {
-		var (
 		m sync.Mutex
 	)
@@ -118,7 +113,7 @@ func (s *QueryData) Execute(ctx context.Context, req *backend.QueryDataRequest)
 	_ = concurrency.ForEachJob(ctx, len(req.Queries), concurrentQueryCount, func(ctx context.Context, idx int) error {
 		query := req.Queries[idx]
-		r := s.handleQuery(ctx, query, fromAlert, hasPromQLScopeFeatureFlag, true)
+		r := s.handleQuery(ctx, query, fromAlert, hasPromQLScopeFeatureFlag)
 		if r != nil {
 			m.Lock()
 			result.Responses[query.RefID] = *r
@@ -126,20 +121,12 @@ func (s *QueryData) Execute(ctx context.Context, req *backend.QueryDataRequest)
 		}
 		return nil
 	})
-	} else {
-		for _, q := range req.Queries {
-			r := s.handleQuery(ctx, q, fromAlert, hasPromQLScopeFeatureFlag, false)
-			if r != nil {
-				result.Responses[q.RefID] = *r
-			}
-		}
-	}

 	return &result, nil
 }

 func (s *QueryData) handleQuery(ctx context.Context, bq backend.DataQuery, fromAlert,
-	hasPromQLScopeFeatureFlag, hasPrometheusRunQueriesInParallel bool) *backend.DataResponse {
+	hasPromQLScopeFeatureFlag bool) *backend.DataResponse {
 	traceCtx, span := s.tracer.Start(ctx, "datasource.prometheus")
 	defer span.End()
 	query, err := models.Parse(span, bq, s.TimeInterval, s.intervalCalculator, fromAlert, hasPromQLScopeFeatureFlag)
@@ -149,14 +136,14 @@ func (s *QueryData) handleQuery(ctx context.Context, bq backend.DataQuery, fromA
 		}
 	}

-	r := s.fetch(traceCtx, s.client, query, hasPrometheusRunQueriesInParallel)
+	r := s.fetch(traceCtx, s.client, query)
 	if r == nil {
 		s.log.FromContext(ctx).Debug("Received nil response from runQuery", "query", query.Expr)
 	}

 	return r
 }

-func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *models.Query, hasPrometheusRunQueriesInParallel bool) *backend.DataResponse {
+func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *models.Query) *backend.DataResponse {
 	logger := s.log.FromContext(traceCtx)
 	logger.Debug("Sending query", "start", q.Start, "end", q.End, "step", q.Step, "query", q.Expr /*, "queryTimeout", s.QueryTimeout*/)
@ -171,7 +158,6 @@ func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *mo
)
)
if q . InstantQuery {
if q . InstantQuery {
if hasPrometheusRunQueriesInParallel {
wg . Add ( 1 )
wg . Add ( 1 )
go func ( ) {
go func ( ) {
defer wg . Done ( )
defer wg . Done ( )
@@ -180,14 +166,9 @@ func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *mo
 			addDataResponse(&res, dr)
 			m.Unlock()
 		}()
-		} else {
-			res := s.instantQuery(traceCtx, client, q)
-			addDataResponse(&res, dr)
-		}
 	}

 	if q.RangeQuery {
-		if hasPrometheusRunQueriesInParallel {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -196,14 +177,9 @@ func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *mo
 			addDataResponse(&res, dr)
 			m.Unlock()
 		}()
-		} else {
-			res := s.rangeQuery(traceCtx, client, q)
-			addDataResponse(&res, dr)
-		}
 	}

 	if q.ExemplarQuery {
-		if hasPrometheusRunQueriesInParallel {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -217,15 +193,6 @@ func (s *QueryData) fetch(traceCtx context.Context, client *client.Client, q *mo
 			dr.Frames = append(dr.Frames, res.Frames...)
 			m.Unlock()
 		}()
-		} else {
-			res := s.exemplarQuery(traceCtx, client, q)
-			if res.Error != nil {
-				// If exemplar query returns error, we want to only log it and
-				// continue with other results processing
-				logger.Error("Exemplar query failed", "query", q.Expr, "err", res.Error)
-			}
-			dr.Frames = append(dr.Frames, res.Frames...)
-		}
 	}

 	wg.Wait()
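With the sequential fallback gone, every request takes the parallel path: dskit's concurrency.ForEachJob fans the queries out across a bounded worker pool and a sync.Mutex serializes writes to the shared response map, exactly as result.Responses is guarded in Execute above. The following is a minimal, self-contained sketch of that pattern only, not the Grafana source: the dataResponse type, the handleQuery stand-in, the refIDs slice, and the hard-coded concurrentQueryCount are hypothetical placeholders for the backend types and GrafanaConfig.ConcurrentQueryCount().

// Sketch only: mirrors the parallel fan-out kept by this diff, with
// placeholder types standing in for the Grafana backend types.
package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/grafana/dskit/concurrency"
)

// dataResponse stands in for backend.DataResponse.
type dataResponse struct{ frames int }

// handleQuery stands in for (*QueryData).handleQuery, which after this change
// no longer receives a hasPrometheusRunQueriesInParallel argument.
func handleQuery(_ context.Context, refID string) *dataResponse {
	return &dataResponse{frames: len(refID)}
}

func main() {
	refIDs := []string{"A", "B", "C"}
	responses := make(map[string]dataResponse, len(refIDs))

	var m sync.Mutex
	concurrentQueryCount := 10 // stand-in for GrafanaConfig.ConcurrentQueryCount()

	// ForEachJob runs the job function on up to concurrentQueryCount goroutines,
	// so writes to the shared map must be serialized with the mutex.
	_ = concurrency.ForEachJob(context.Background(), len(refIDs), concurrentQueryCount,
		func(ctx context.Context, idx int) error {
			if r := handleQuery(ctx, refIDs[idx]); r != nil {
				m.Lock()
				responses[refIDs[idx]] = *r
				m.Unlock()
			}
			return nil
		})

	fmt.Println(responses)
}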