@@ -300,29 +300,6 @@ func (e *StackdriverExecutor) parseResponse(queryRes *tsdb.QueryResult, data Sta
 	for _, series := range data.TimeSeries {
 		points := make([]tsdb.TimePoint, 0)
 
-		// reverse the order to be ascending
-		for i := len(series.Points) - 1; i >= 0; i-- {
-			point := series.Points[i]
-			value := point.Value.DoubleValue
-
-			if series.ValueType == "INT64" {
-				parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
-				if err == nil {
-					value = parsedValue
-				}
-			}
-
-			if series.ValueType == "BOOL" {
-				if point.Value.BoolValue {
-					value = 1
-				} else {
-					value = 0
-				}
-			}
-
-			points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
-		}
-
 		defaultMetricName := series.Metric.Type
 
 		for key, value := range series.Metric.Labels {
@@ -338,18 +315,87 @@ func (e *StackdriverExecutor) parseResponse(queryRes *tsdb.QueryResult, data Sta
 			if !containsLabel(resourceLabels[key], value) {
 				resourceLabels[key] = append(resourceLabels[key], value)
 			}
 
 			if containsLabel(query.GroupBys, "resource.label."+key) {
 				defaultMetricName += " " + value
 			}
 		}
 
-		metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, query)
-
-		queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
-			Name:   metricName,
-			Points: points,
-		})
+		// reverse the order to be ascending
+		if series.ValueType != "DISTRIBUTION" {
+			for i := len(series.Points) - 1; i >= 0; i-- {
+				point := series.Points[i]
+				value := point.Value.DoubleValue
+
+				if series.ValueType == "INT64" {
+					parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
+					if err == nil {
+						value = parsedValue
+					}
+				}
+
+				if series.ValueType == "BOOL" {
+					if point.Value.BoolValue {
+						value = 1
+					} else {
+						value = 0
+					}
+				}
+
+				points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+			}
+
+			metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, make(map[string]string), query)
+
+			queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
+				Name:   metricName,
+				Points: points,
+			})
+		} else {
+			buckets := make(map[int]*tsdb.TimeSeries)
+
+			for i := len(series.Points) - 1; i >= 0; i-- {
+				point := series.Points[i]
+				if len(point.Value.DistributionValue.BucketCounts) == 0 {
+					continue
+				}
+				maxKey := 0
+				for i := 0; i < len(point.Value.DistributionValue.BucketCounts); i++ {
+					value, err := strconv.ParseFloat(point.Value.DistributionValue.BucketCounts[i], 64)
+					if err != nil {
+						continue
+					}
+					if _, ok := buckets[i]; !ok {
+						// set lower bounds
+						// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries#Distribution
+						bucketBound := calcBucketBound(point.Value.DistributionValue.BucketOptions, i)
+						additionalLabels := map[string]string{"bucket": bucketBound}
+						buckets[i] = &tsdb.TimeSeries{
+							Name:   formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, additionalLabels, query),
+							Points: make([]tsdb.TimePoint, 0),
+						}
+						if maxKey < i {
+							maxKey = i
+						}
+					}
+					buckets[i].Points = append(buckets[i].Points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+				}
+
+				// fill empty bucket
+				for i := 0; i < maxKey; i++ {
+					if _, ok := buckets[i]; !ok {
+						bucketBound := calcBucketBound(point.Value.DistributionValue.BucketOptions, i)
+						additionalLabels := map[string]string{"bucket": bucketBound}
+						buckets[i] = &tsdb.TimeSeries{
+							Name:   formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, additionalLabels, query),
+							Points: make([]tsdb.TimePoint, 0),
+						}
+					}
+				}
+			}
+			for i := 0; i < len(buckets); i++ {
+				queryRes.Series = append(queryRes.Series, buckets[i])
+			}
+		}
 	}
 
 	queryRes.Meta.Set("resourceLabels", resourceLabels)
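The new DISTRIBUTION branch fans a single Stackdriver series out into one time series per bucket index: every point carries a list of bucket counts, and each count becomes a point in the series for that bucket, named by the bucket's lower bound so the frontend can draw it as a heatmap row. A minimal, self-contained sketch of that fan-out, using plain placeholder types rather than the tsdb/Stackdriver structs in the diff:

```go
package main

import "fmt"

// distributionPoint is a stand-in for one point of a DISTRIBUTION-valued
// series: a timestamp plus one count per bucket.
type distributionPoint struct {
	endTimeMs    float64
	bucketCounts []float64
}

// fanOutBuckets groups counts by bucket index, producing one series per
// bucket keyed by its (precomputed) lower bound, mirroring the buckets map
// built in parseResponse above.
func fanOutBuckets(points []distributionPoint, bounds []string) map[string][][2]float64 {
	series := make(map[string][][2]float64)
	for _, p := range points {
		for i, count := range p.bucketCounts {
			bound := bounds[i]
			series[bound] = append(series[bound], [2]float64{p.endTimeMs, count})
		}
	}
	return series
}

func main() {
	points := []distributionPoint{
		{endTimeMs: 1000, bucketCounts: []float64{4, 2, 1}},
		{endTimeMs: 2000, bucketCounts: []float64{3, 5, 0}},
	}
	// Hypothetical lower bounds, e.g. what calcBucketBound would return for
	// linear buckets with offset 0 and width 10.
	bounds := []string{"0", "10", "20"}
	for bound, pts := range fanOutBuckets(points, bounds) {
		fmt.Printf("bucket %s: %v\n", bound, pts)
	}
}
```

Running it prints one series per lower bound, which is the shape parseResponse appends to queryRes.Series for distribution metrics.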
@@ -368,7 +414,7 @@ func containsLabel(labels []string, newLabel string) bool {
 	return false
 }
 
-func formatLegendKeys(metricType string, defaultMetricName string, metricLabels map[string]string, resourceLabels map[string]string, query *StackdriverQuery) string {
+func formatLegendKeys(metricType string, defaultMetricName string, metricLabels map[string]string, resourceLabels map[string]string, additionalLabels map[string]string, query *StackdriverQuery) string {
 	if query.AliasBy == "" {
 		return defaultMetricName
 	}
@@ -400,6 +446,10 @@ func formatLegendKeys(metricType string, defaultMetricName string, metricLabels
 			return []byte(val)
 		}
 
+		if val, exists := additionalLabels[metaPartName]; exists {
+			return []byte(val)
+		}
+
 		return in
 	})
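The additionalLabels lookup added here is what lets an alias reference the synthetic "bucket" label attached to distribution series. The surrounding regexp and alias syntax are outside this excerpt, so the following is only a rough, hypothetical sketch of the substitution mechanism, assuming a {{name}}-style placeholder format:

```go
package main

import (
	"fmt"
	"regexp"
)

// legendKeyFormat is an assumed pattern for {{ name }} placeholders in an
// alias; the real pattern lives outside this diff.
var legendKeyFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)

// formatAlias replaces each placeholder with a value from metricLabels,
// resourceLabels, or additionalLabels, in that order, and leaves unknown
// placeholders untouched.
func formatAlias(alias string, metricLabels, resourceLabels, additionalLabels map[string]string) string {
	return legendKeyFormat.ReplaceAllStringFunc(alias, func(in string) string {
		name := legendKeyFormat.FindStringSubmatch(in)[1]
		if v, ok := metricLabels[name]; ok {
			return v
		}
		if v, ok := resourceLabels[name]; ok {
			return v
		}
		if v, ok := additionalLabels[name]; ok {
			return v
		}
		return in
	})
}

func main() {
	additional := map[string]string{"bucket": "10"}
	fmt.Println(formatAlias("latency {{bucket}}", nil, nil, additional))
	// Output: latency 10
}
```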
@@ -425,6 +475,22 @@ func replaceWithMetricPart(metaPartName string, metricType string) []byte {
 	return nil
 }
 
+func calcBucketBound(bucketOptions StackdriverBucketOptions, n int) string {
+	bucketBound := "0"
+	if n == 0 {
+		return bucketBound
+	}
+
+	if bucketOptions.LinearBuckets != nil {
+		bucketBound = strconv.FormatInt(bucketOptions.LinearBuckets.Offset+(bucketOptions.LinearBuckets.Width*int64(n-1)), 10)
+	} else if bucketOptions.ExponentialBuckets != nil {
+		bucketBound = strconv.FormatInt(int64(bucketOptions.ExponentialBuckets.Scale*math.Pow(bucketOptions.ExponentialBuckets.GrowthFactor, float64(n-1))), 10)
+	} else if bucketOptions.ExplicitBuckets != nil {
+		bucketBound = strconv.FormatInt(bucketOptions.ExplicitBuckets.Bounds[(n-1)], 10)
+	}
+	return bucketBound
+}
+
 func (e *StackdriverExecutor) createRequest(ctx context.Context, dsInfo *models.DataSource) (*http.Request, error) {
 	u, _ := url.Parse(dsInfo.Url)
 	u.Path = path.Join(u.Path, "render")
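calcBucketBound returns the lower bound of bucket n: bucket 0 is the underflow bucket, linear buckets grow as offset + width*(n-1), exponential buckets as scale*growthFactor^(n-1), and explicit buckets read the bound straight from the supplied list. A small standalone illustration of the first two formulas, with made-up option values rather than the StackdriverBucketOptions struct itself:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// linearBound mirrors the linear-bucket case: offset + width*(n-1).
func linearBound(offset, width int64, n int) string {
	if n == 0 {
		return "0"
	}
	return strconv.FormatInt(offset+width*int64(n-1), 10)
}

// exponentialBound mirrors the exponential-bucket case: scale * growthFactor^(n-1).
func exponentialBound(scale, growthFactor float64, n int) string {
	if n == 0 {
		return "0"
	}
	return strconv.FormatInt(int64(scale*math.Pow(growthFactor, float64(n-1))), 10)
}

func main() {
	// Linear buckets, offset 0, width 10: prints 0 0 10 20 30
	for n := 0; n < 5; n++ {
		fmt.Print(linearBound(0, 10, n), " ")
	}
	fmt.Println()

	// Exponential buckets, scale 1, growth factor 2: prints 0 1 2 4 8
	for n := 0; n < 5; n++ {
		fmt.Print(exponentialBound(1, 2, n), " ")
	}
	fmt.Println()
}
```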