Prometheus: Incremental querying option for `to: now` dashboards (#62932)

Adds a new feature to Prometheus dashboards that caches queried time series data and modifies subsequent requests to exclude previously cached ranges, improving dashboard refresh performance.

Co-authored-by: Galen <galen.kistler@grafana.com>
Co-authored-by: Leon <leon.sorokin@grafana.com>
Branch: samu6851-patch-4
Authored by Leon Sorokin, committed via GitHub
parent c5172247a9
commit 2b2a4e13e5
Files changed:
1. docs/sources/administration/provisioning/index.md (104 lines changed)
2. docs/sources/datasources/prometheus/_index.md (10 lines changed)
3. public/app/features/live/data/amendTimeSeries.ts (93 lines changed)
4. public/app/plugins/datasource/prometheus/configuration/PromSettings.tsx (46 lines changed)
5. public/app/plugins/datasource/prometheus/datasource.tsx (32 lines changed)
6. public/app/plugins/datasource/prometheus/querycache/QueryCache.test.ts (490 lines changed)
7. public/app/plugins/datasource/prometheus/querycache/QueryCache.ts (258 lines changed)
8. public/app/plugins/datasource/prometheus/querycache/QueryCacheTestData.ts (864 lines changed)
9. public/app/plugins/datasource/prometheus/types.ts (2 lines changed)

@ -169,57 +169,59 @@ Common settings in the [built-in core data sources]({{< relref "../../datasource
> **Note:** Data sources tagged with _HTTP\*_ communicate using the HTTP protocol, which includes all core data source plugins except MySQL, PostgreSQL, and MSSQL.
| Name | Type | Data source | Description |
| ----------------------------- | ------- | ---------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| tlsAuth | boolean | _HTTP\*_, MySQL | Enable TLS authentication using client cert configured in secure json data |
| tlsAuthWithCACert | boolean | _HTTP\*_, MySQL, PostgreSQL | Enable TLS authentication using CA cert |
| tlsSkipVerify | boolean | _HTTP\*_, MySQL, PostgreSQL, MSSQL | Controls whether a client verifies the server's certificate chain and host name. |
| serverName | string | _HTTP\*_, MSSQL | Optional. Controls the server name used for certificate common name/subject alternative name verification. Defaults to using the data source URL. |
| timeout | string | _HTTP\*_ | Request timeout in seconds. Overrides dataproxy.timeout option |
| graphiteVersion | string | Graphite | Graphite version |
| timeInterval | string | Prometheus, Elasticsearch, InfluxDB, MySQL, PostgreSQL and MSSQL | Lowest interval/step value that should be used for this data source. |
| httpMode | string | Influxdb | HTTP Method. 'GET', 'POST', defaults to GET |
| maxSeries | number | Influxdb | Max number of series/tables that Grafana processes |
| httpMethod | string | Prometheus | HTTP Method. 'GET', 'POST', defaults to POST |
| customQueryParameters | string | Prometheus | Query parameters to add, as a URL-encoded string. |
| manageAlerts | boolean | Prometheus and Loki | Manage alerts via Alerting UI |
| alertmanagerUid | string | Prometheus and Loki | UID of the Alertmanager that manages alerts for this data source. |
| timeField | string | Elasticsearch | Which field should be used as the timestamp |
| interval | string | Elasticsearch | Index date time format. nil(No Pattern), 'Hourly', 'Daily', 'Weekly', 'Monthly' or 'Yearly' |
| logMessageField | string | Elasticsearch | Which field should be used as the log message |
| logLevelField | string | Elasticsearch | Which field should be used to indicate the priority of the log message |
| maxConcurrentShardRequests | number | Elasticsearch | Maximum number of concurrent shard requests that each sub-search request executes per node |
| sigV4Auth | boolean | Elasticsearch and Prometheus | Enable usage of SigV4 |
| sigV4AuthType | string | Elasticsearch and Prometheus | SigV4 auth provider. default/credentials/keys |
| sigV4ExternalId | string | Elasticsearch and Prometheus | Optional SigV4 External ID |
| sigV4AssumeRoleArn | string | Elasticsearch and Prometheus | Optional SigV4 ARN role to assume |
| sigV4Region | string | Elasticsearch and Prometheus | SigV4 AWS region |
| sigV4Profile | string | Elasticsearch and Prometheus | Optional SigV4 credentials profile |
| authType | string | Cloudwatch | Auth provider. default/credentials/keys |
| externalId | string | Cloudwatch | Optional External ID |
| assumeRoleArn | string | Cloudwatch | Optional ARN role to assume |
| defaultRegion | string | Cloudwatch | Optional default AWS region |
| customMetricsNamespaces | string | Cloudwatch | Namespaces of Custom Metrics |
| profile | string | Cloudwatch | Optional credentials profile |
| tsdbVersion | string | OpenTSDB | Version |
| tsdbResolution | string | OpenTSDB | Resolution |
| sslmode | string | PostgreSQL | SSLmode. 'disable', 'require', 'verify-ca' or 'verify-full' |
| tlsConfigurationMethod | string | PostgreSQL | SSL Certificate configuration, either by 'file-path' or 'file-content' |
| sslRootCertFile | string | PostgreSQL, MSSQL | SSL server root certificate file, must be readable by the Grafana user |
| sslCertFile | string | PostgreSQL | SSL client certificate file, must be readable by the Grafana user |
| sslKeyFile | string | PostgreSQL | SSL client key file, must be readable by _only_ the Grafana user |
| encrypt | string | MSSQL | Connection SSL encryption handling. 'disable', 'false' or 'true' |
| postgresVersion | number | PostgreSQL | Postgres version as a number (903/904/905/906/1000) meaning v9.3, v9.4, ..., v10 |
| timescaledb | boolean | PostgreSQL | Enable usage of TimescaleDB extension |
| maxOpenConns | number | MySQL, PostgreSQL and MSSQL | Maximum number of open connections to the database (Grafana v5.4+) |
| maxIdleConns | number | MySQL, PostgreSQL and MSSQL | Maximum number of connections in the idle connection pool (Grafana v5.4+) |
| connMaxLifetime | number | MySQL, PostgreSQL and MSSQL | Maximum amount of time in seconds a connection may be reused (Grafana v5.4+) |
| keepCookies | array | _HTTP\*_ | Cookies that need to be passed along while communicating with data sources |
| prometheusVersion | string | Prometheus | The version of the Prometheus data source, such as `2.37.0`, `2.24.0` |
| prometheusType | string | Prometheus | The type of the Prometheus data source, such as `Prometheus`, `Cortex`, `Thanos`, `Mimir` |
| cacheLevel | string | Prometheus | This determines the duration of the browser cache. Valid values include: `Low`, `Medium`, `High`, and `None`. This field is configurable when you enable the `prometheusResourceBrowserCache` feature flag. |
| incrementalQuerying | boolean | Prometheus | Experimental: Turn on incremental querying to enhance dashboard reload performance with slow data sources |
| incrementalQueryOverlapWindow | string | Prometheus | Experimental: Configure the incremental query overlap window. Requires a valid duration string, e.g. `180s` or `15m`. Default value is `10m` (10 minutes). |
| implementation | string | AlertManager | The implementation of the AlertManager data source, such as `prometheus`, `cortex` or `mimir` |
| handleGrafanaManagedAlerts | boolean | AlertManager | When enabled, Grafana-managed alerts are sent to this Alertmanager |
For examples of specific data sources' JSON data, refer to that [data source's documentation]({{< relref "../../datasources" >}}).

@ -89,6 +89,8 @@ datasources:
manageAlerts: true
prometheusType: Prometheus
prometheusVersion: 2.37.0
incrementalQuerying: true
incrementalQueryOverlapWindow: 10m
cacheLevel: 'High'
exemplarTraceIdDestinations:
# Field with internal link pointing to data source in Grafana.
@ -165,3 +167,11 @@ Grafana lists these variables in dropdown select boxes at the top of the dashboa
Grafana refers to such variables as template variables.
For details, see the [template variables documentation]({{< relref "./template-variables/" >}}).
## Incremental Dashboard Queries (beta)
As of Grafana 10, the Prometheus data source can be configured to query live dashboards incrementally, instead of re-querying the entire time range on each dashboard refresh.
This can be toggled on or off in the data source configuration or provisioning file (under `incrementalQuerying` in jsonData).
Additionally, the amount of overlap between incremental queries can be configured using the `incrementalQueryOverlapWindow` jsonData field; the default value is `10m` (10 minutes).
Increasing the duration of the `incrementalQueryOverlapWindow` increases the size of every incremental query, but can help with instances that return inconsistent results for recent data. For example, with the default `10m` overlap window, a dashboard showing the last 6 hours re-queries only roughly the most recent 10 minutes on each refresh instead of the full 6 hours.

@ -0,0 +1,93 @@
import { closestIdx } from "./StreamingDataFrame";
export type Table = [times: number[], ...values: any[][]];
// prevTable and nextTable are assumed sorted ASC on reference [0] arrays
// nextTable is assumed to be contiguous, only edges are checked for overlap
// ...so prev: [1,2,5] + next: [3,4,6] -> [1,2,3,4,6]
export function amendTable(prevTable: Table, nextTable: Table): Table {
let [prevTimes] = prevTable;
let [nextTimes] = nextTable;
let pLen = prevTimes.length;
let pStart = prevTimes[0];
let pEnd = prevTimes[pLen - 1];
let nLen = nextTimes.length;
let nStart = nextTimes[0];
let nEnd = nextTimes[nLen - 1];
let outTable: Table;
if (pLen) {
if (nLen) {
// append, no overlap
if (nStart > pEnd) {
outTable = prevTable.map((_, i) => prevTable[i].concat(nextTable[i])) as Table;
}
// prepend, no overlap
else if (nEnd < pStart) {
outTable = nextTable.map((_, i) => nextTable[i].concat(prevTable[i])) as Table;
}
// full replace
else if (nStart <= pStart && nEnd >= pEnd) {
outTable = nextTable;
}
// partial replace
else if (nStart > pStart && nEnd < pEnd) {
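// next lies entirely inside prev; this case is not handled here, so outTable would remain undefined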
}
// append, with overlap
else if (nStart >= pStart) {
let idx = closestIdx(nStart, prevTimes);
idx = prevTimes[idx] < nStart ? idx + 1 : idx;
outTable = prevTable.map((_, i) => prevTable[i].slice(0, idx).concat(nextTable[i])) as Table;
}
// prepend, with overlap
else if (nEnd >= pStart) {
let idx = closestIdx(nEnd, prevTimes);
idx = prevTimes[idx] > nEnd ? idx : idx + 1;
outTable = nextTable.map((_, i) => nextTable[i].concat(prevTable[i].slice(idx))) as Table;
}
} else {
outTable = prevTable;
}
} else {
if (nLen) {
outTable = nextTable;
} else {
outTable = [[]];
}
}
return outTable!;
}
export function trimTable(table: Table, fromTime: number, toTime: number): Table {
let [times, ...vals] = table;
let fromIdx: number | undefined;
let toIdx: number | undefined;
// trim to bounds
if (times[0] < fromTime) {
fromIdx = closestIdx(fromTime, times);
if (times[fromIdx] < fromTime) {
fromIdx++;
}
}
if (times[times.length - 1] > toTime) {
toIdx = closestIdx(toTime, times);
if (times[toIdx] > toTime) {
toIdx--;
}
}
if (fromIdx != null || toIdx != null) {
times = times.slice(fromIdx ?? 0, toIdx);
vals = vals.map(vals2 => vals2.slice(fromIdx ?? 0, toIdx));
}
return [times, ...vals];
}
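For illustration, a minimal sketch of how these helpers behave on plain arrays; the `Table` values below are made-up sample data, not taken from the change:

```ts
import { amendTable, trimTable, Table } from 'app/features/live/data/amendTimeSeries';

// prev covers t=1..5; next overlaps prev's tail (t>=3) and extends past it
const prev: Table = [
  [1, 2, 5],
  [10, 20, 50],
];
const next: Table = [
  [3, 4, 6],
  [30, 40, 60],
];

// "append, with overlap": prev samples at/after t=3 are replaced by next
const merged = amendTable(prev, next);
// merged -> [[1, 2, 3, 4, 6], [10, 20, 30, 40, 60]]

// trim the merged table to an in-view range of [2, 4]
const trimmed = trimTable(merged, 2, 4);
// trimmed keeps only samples whose timestamps fall within [2, 4]
```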

@ -4,6 +4,7 @@ import semver from 'semver/preload';
import {
DataSourcePluginOptionsEditorProps,
DataSourceSettings as DataSourceSettingsType,
isValidDuration,
onUpdateDatasourceJsonDataOptionChecked,
SelectableValue,
updateDatasourcePluginJsonDataOption,
@ -23,6 +24,7 @@ import config from '../../../../core/config';
import { useUpdateDatasource } from '../../../../features/datasources/state';
import { PromApplication, PromBuildInfoResponse } from '../../../../types/unified-alerting-dto';
import { QueryEditorMode } from '../querybuilder/shared/types';
import { defaultPrometheusQueryOverlapWindow } from '../querycache/QueryCache';
import { PrometheusCacheLevel, PromOptions } from '../types';
import { ExemplarsSettings } from './ExemplarsSettings';
@ -362,6 +364,50 @@ export const PromSettings = (props: Props) => {
</div>
</div>
)}
<div className="gf-form-inline">
<div className="gf-form max-width-30">
<FormField
label="Incremental querying (beta)"
labelWidth={14}
tooltip="This feature will change the default behavior of relative queries to always request fresh data from the prometheus instance, instead query results will be cached, and only new records are requested. Turn this on to decrease database and network load."
inputEl={
<InlineSwitch
value={options.jsonData.incrementalQuerying ?? false}
onChange={onUpdateDatasourceJsonDataOptionChecked(props, 'incrementalQuerying')}
disabled={options.readOnly}
/>
}
/>
</div>
</div>
<div className="gf-form-inline">
{options.jsonData.incrementalQuerying && (
<FormField
label="Query overlap window"
labelWidth={14}
tooltip="Set a duration like 10m or 120s or 0s. Default of 10 minutes. This duration will be added to the duration of each incremental request."
inputEl={
<Input
validationEvents={{
onBlur: [
{
rule: (value) => isValidDuration(value),
errorMessage: 'Invalid duration. Example values: 100s, 10m',
},
],
}}
className="width-25"
value={options.jsonData.incrementalQueryOverlapWindow ?? defaultPrometheusQueryOverlapWindow}
onChange={onChangeHandler('incrementalQueryOverlapWindow', options, onOptionsChange)}
spellCheck={false}
disabled={options.readOnly}
/>
}
/>
)}
</div>
</div>
<ExemplarsSettings
options={options.jsonData.exemplarTraceIdDestinations}

@ -56,6 +56,7 @@ import { renderLegendFormat } from './legend';
import PrometheusMetricFindQuery from './metric_find_query';
import { getInitHints, getQueryHints } from './query_hints';
import { QueryEditorMode } from './querybuilder/shared/types';
import { CacheRequestInfo, defaultPrometheusQueryOverlapWindow, QueryCache } from './querycache/QueryCache';
import { getOriginalMetricName, transform, transformV2 } from './result_transformer';
import { trackQuery } from './tracking';
import {
@ -84,6 +85,7 @@ export class PrometheusDatasource
{
type: string;
ruleMappings: { [index: string]: string };
hasIncrementalQuery: boolean;
url: string;
id: number;
directUrl: string;
@ -105,6 +107,7 @@ export class PrometheusDatasource
subType: PromApplication;
rulerEnabled: boolean;
cacheLevel: PrometheusCacheLevel;
cache: QueryCache;
constructor(
instanceSettings: DataSourceInstanceSettings<PromOptions>,
@ -129,6 +132,7 @@ export class PrometheusDatasource
// here we "fall back" to this.url to make typescript happy, but it should never happen
this.directUrl = instanceSettings.jsonData.directUrl ?? this.url;
this.exemplarTraceIdDestinations = instanceSettings.jsonData.exemplarTraceIdDestinations;
this.hasIncrementalQuery = instanceSettings.jsonData.incrementalQuerying ?? false;
this.ruleMappings = {};
this.languageProvider = languageProvider ?? new PrometheusLanguageProvider(this);
this.lookupsDisabled = instanceSettings.jsonData.disableMetricsLookup ?? false;
@ -139,6 +143,9 @@ export class PrometheusDatasource
this.variables = new PrometheusVariableSupport(this, this.templateSrv, this.timeSrv);
this.exemplarsAvailable = true;
this.cacheLevel = instanceSettings.jsonData.cacheLevel ?? PrometheusCacheLevel.Low;
this.cache = new QueryCache(
instanceSettings.jsonData.incrementalQueryOverlapWindow ?? defaultPrometheusQueryOverlapWindow
);
// This needs to be here and cannot be static because of how annotations typing affects casting of data source
// objects to DataSourceApi types.
@ -447,12 +454,27 @@ export class PrometheusDatasource
query(request: DataQueryRequest<PromQuery>): Observable<DataQueryResponse> {
if (this.access === 'proxy') {
let fullOrPartialRequest: DataQueryRequest<PromQuery>;
let requestInfo: CacheRequestInfo | undefined = undefined;
if (this.hasIncrementalQuery) {
requestInfo = this.cache.requestInfo(request, this.interpolateString.bind(this));
fullOrPartialRequest = requestInfo.requests[0];
} else {
fullOrPartialRequest = request;
}
const targets = fullOrPartialRequest.targets.map((target) => this.processTargetV2(target, fullOrPartialRequest));
const startTime = new Date();
return super.query({ ...fullOrPartialRequest, targets: targets.flat() }).pipe(
map((response) => {
const amendedResponse = {
...response,
data: this.cache.procFrames(request, requestInfo, response.data),
};
return transformV2(amendedResponse, request, {
exemplarTraceIdDestinations: this.exemplarTraceIdDestinations,
});
}),
tap((response: DataQueryResponse) => {
trackQuery(response, request, startTime);
})

@ -0,0 +1,490 @@
import moment from 'moment';
import { DataFrame, DataQueryRequest, DateTime, dateTime, TimeRange } from '@grafana/data/src';
import { QueryEditorMode } from '../querybuilder/shared/types';
import { PromQuery } from '../types';
import { getTargSig, QueryCache } from './QueryCache';
import { IncrementalStorageDataFrameScenarios } from './QueryCacheTestData';
const mockRequest = (request?: Partial<DataQueryRequest<PromQuery>>): DataQueryRequest<PromQuery> => {
// Histogram
const defaultRequest: DataQueryRequest<PromQuery> = {
app: 'undefined',
requestId: '',
timezone: '',
range: {
from: moment('2023-01-30T19:33:01.332Z') as DateTime,
to: moment('2023-01-30T20:33:01.332Z') as DateTime,
raw: { from: 'now-1h', to: 'now' },
},
interval: '15s',
intervalMs: 15000,
targets: [
{
datasource: { type: 'prometheus', uid: 'OPQv8Kc4z' },
editorMode: QueryEditorMode.Code,
exemplar: false,
expr: 'sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[$__rate_interval]))',
format: 'heatmap',
legendFormat: '{{le}}',
range: true,
refId: 'A',
utcOffsetSec: -21600,
},
],
maxDataPoints: 871,
scopedVars: {
__interval: { text: '15s', value: '15s' },
__interval_ms: { text: '15000', value: 15000 },
},
startTime: 1675110781332,
rangeRaw: { from: 'now-1h', to: 'now' },
};
return {
...defaultRequest,
...request,
};
};
describe('QueryCache', function () {
it('instantiates', () => {
const storage = new QueryCache();
expect(storage).toBeInstanceOf(QueryCache);
});
it('will not modify or crash with empty response', () => {
const storage = new QueryCache();
const firstFrames: DataFrame[] = [];
const secondFrames: DataFrame[] = [];
const cache = new Map<string, string>();
// start time of scenario
const firstFrom = dateTime(new Date(1675262550000));
// End time of scenario
const firstTo = dateTime(new Date(1675262550000)).add(6, 'hours');
const firstRange: TimeRange = {
from: firstFrom,
to: firstTo,
raw: {
from: 'now-6h',
to: 'now',
},
};
// Same query 2 minutes later
const numberOfSamplesLater = 4;
const interval = 30000;
const secondFrom = dateTime(new Date(1675262550000 + interval * numberOfSamplesLater));
const secondTo = dateTime(new Date(1675262550000 + interval * numberOfSamplesLater)).add(6, 'hours');
const secondRange: TimeRange = {
from: secondFrom,
to: secondTo,
raw: {
from: 'now-6h',
to: 'now',
},
};
const targetSignature = `'1=1'|${interval}|${JSON.stringify(secondRange.raw)}`;
const dashboardId = `dashid`;
const panelId = 2;
const targetIdentity = `${dashboardId}|${panelId}|A`;
cache.set(targetIdentity, targetSignature);
const firstStoredFrames = storage.procFrames(
mockRequest({
range: firstRange,
dashboardUID: dashboardId,
panelId: panelId,
}),
{
requests: [], // unused
targSigs: cache,
shouldCache: true,
},
firstFrames
);
const cached = storage.cache.get(targetIdentity);
expect(cached?.frames[0].fields[0].values.length).toEqual(firstFrames[0]?.fields[0]?.values?.length);
expect(firstStoredFrames[0]?.fields[0].values.length).toEqual(firstFrames[0]?.fields[0]?.values?.length);
// Should return the request frames unaltered
expect(firstStoredFrames).toEqual(firstFrames);
const secondRequest = mockRequest({
range: secondRange,
dashboardUID: dashboardId,
panelId: panelId,
});
const secondStoredFrames = storage.procFrames(
secondRequest,
{
requests: [], // unused
targSigs: cache,
shouldCache: true,
},
secondFrames
);
const storageLengthAfterSubsequentQuery = storage.cache.get(targetIdentity);
expect(secondStoredFrames).toEqual([]);
storageLengthAfterSubsequentQuery?.frames.forEach((dataFrame, index) => {
const secondFramesLength = secondFrames[index].fields[0].values.length;
const firstFramesLength = firstFrames[index].fields[0].values.length;
const cacheLength = dataFrame.fields[0].values.length;
// Cache can contain more, but never less
expect(cacheLength).toBeGreaterThanOrEqual(secondFramesLength + firstFramesLength - (20 + numberOfSamplesLater));
// Fewer results are sent in incremental result
expect(firstFramesLength).toBeGreaterThan(secondFramesLength);
});
});
it('Merges incremental queries in storage', () => {
const scenarios = [
IncrementalStorageDataFrameScenarios.histogram.getSeriesWithGapAtEnd(),
IncrementalStorageDataFrameScenarios.histogram.getSeriesWithGapInMiddle(),
IncrementalStorageDataFrameScenarios.histogram.getSeriesWithGapAtStart(),
];
scenarios.forEach((scenario, index) => {
const storage = new QueryCache();
const firstFrames = scenario.first.dataFrames as unknown as DataFrame[];
const secondFrames = scenario.second.dataFrames as unknown as DataFrame[];
const targetSignatures = new Map<string, string>();
// start time of scenario
const firstFrom = dateTime(new Date(1675262550000));
// End time of scenario
const firstTo = dateTime(new Date(1675262550000)).add(6, 'hours');
const firstRange: TimeRange = {
from: firstFrom,
to: firstTo,
raw: {
from: 'now-6h',
to: 'now',
},
};
// Same query 2 minutes later
const numberOfSamplesLater = 4;
const interval = 30000;
const secondFrom = dateTime(new Date(1675262550000 + interval * numberOfSamplesLater));
const secondTo = dateTime(new Date(1675262550000 + interval * numberOfSamplesLater)).add(6, 'hours');
const secondRange: TimeRange = {
from: secondFrom,
to: secondTo,
raw: {
from: 'now-6h',
to: 'now',
},
};
const dashboardId = `dashid--${index}`;
const panelId = 2 + index;
// This can't change
const targetIdentity = `${dashboardId}|${panelId}|A`;
const request = mockRequest({
range: firstRange,
dashboardUID: dashboardId,
panelId: panelId,
});
// But the signature can, and we should clean up any non-matching signatures
const targetSignature = getTargSig(request.targets[0].expr, request, request.targets[0]);
targetSignatures.set(targetIdentity, targetSignature);
const firstStoredFrames = storage.procFrames(
request,
{
requests: [], // unused
targSigs: targetSignatures,
shouldCache: true,
},
firstFrames
);
const cached = storage.cache.get(targetIdentity);
// I would expect that the number of values received from the API should be the same as the cached values?
expect(cached?.frames[0].fields[0].values.length).toEqual(firstFrames[0].fields[0].values.length);
// Should return the request frames unaltered
expect(firstStoredFrames).toEqual(firstFrames);
const secondRequest = mockRequest({
range: secondRange,
dashboardUID: dashboardId,
panelId: panelId,
});
const secondStoredFrames = storage.procFrames(
secondRequest,
{
requests: [], // unused
targSigs: targetSignatures,
shouldCache: true,
},
secondFrames
);
const storageLengthAfterSubsequentQuery = storage.cache.get(targetIdentity);
storageLengthAfterSubsequentQuery?.frames.forEach((dataFrame, index) => {
const secondFramesLength = secondFrames[index].fields[0].values.length;
const firstFramesLength = firstFrames[index].fields[0].values.length;
const cacheLength = dataFrame.fields[0].values.length;
// Cache can contain more, but never less
expect(cacheLength).toBeGreaterThanOrEqual(
secondFramesLength + firstFramesLength - (20 + numberOfSamplesLater)
);
// Fewer results are sent in incremental result
expect(firstFramesLength).toBeGreaterThan(secondFramesLength);
});
// All of the new values should be the ones that were stored, this is overkill
secondFrames.forEach((frame, frameIdx) => {
frame.fields.forEach((field, fieldIdx) => {
secondFrames[frameIdx].fields[fieldIdx].values.toArray().forEach((value) => {
expect(secondStoredFrames[frameIdx].fields[fieldIdx].values).toContain(value);
});
});
});
const interpolateString = (s: string) => {
return s;
};
const secondRequestModified = {
...secondRequest,
range: {
...secondRequest.range,
to: dateTime(secondRequest.range.to.valueOf() + 30000),
},
};
const cacheRequest = storage.requestInfo(secondRequestModified, interpolateString);
expect(cacheRequest.requests[0].targets).toEqual(secondRequestModified.targets);
expect(cacheRequest.requests[0].range.to).toEqual(secondRequestModified.range.to);
expect(cacheRequest.requests[0].range.raw).toEqual(secondRequestModified.range.raw);
expect(cacheRequest.requests[0].range.from.valueOf() - 21000000).toEqual(
secondRequestModified.range.from.valueOf()
);
expect(cacheRequest.shouldCache).toBe(true);
});
});
it('Will evict old dataframes, and use stored data when user shortens query window', () => {
const storage = new QueryCache();
// Initial request with all data for time range
const firstFrames = IncrementalStorageDataFrameScenarios.histogram.evictionRequests.first
.dataFrames as unknown as DataFrame[];
// Same request, 30 seconds later
const secondFrames = IncrementalStorageDataFrameScenarios.histogram.evictionRequests.second
.dataFrames as unknown as DataFrame[];
// Now the user waits a minute and changes the query duration to just the last 5 minutes. Luckily the interval hasn't changed, so we can still use the data in storage, except for the latest minute
const thirdFrames = IncrementalStorageDataFrameScenarios.histogram.evictionRequests.second
.dataFrames as unknown as DataFrame[];
const cache = new Map<string, string>();
const interval = 15000;
// start time of scenario
const firstFrom = dateTime(new Date(1675107180000));
const firstTo = dateTime(new Date(1675107180000)).add(1, 'hours');
const firstRange: TimeRange = {
from: firstFrom,
to: firstTo,
raw: {
from: 'now-1h',
to: 'now',
},
};
// 30 seconds later
const secondNumberOfSamplesLater = 2;
const secondFrom = dateTime(new Date(1675107180000 + interval * secondNumberOfSamplesLater));
const secondTo = dateTime(new Date(1675107180000 + interval * secondNumberOfSamplesLater)).add(1, 'hours');
const secondRange: TimeRange = {
from: secondFrom,
to: secondTo,
raw: {
from: 'now-1h',
to: 'now',
},
};
// 1 minute + 30 seconds later, but 5 minute viewing window
const thirdNumberOfSamplesLater = 6;
const thirdFrom = dateTime(new Date(1675107180000 + interval * thirdNumberOfSamplesLater));
const thirdTo = dateTime(new Date(1675107180000 + interval * thirdNumberOfSamplesLater)).add(5, 'minutes');
const thirdRange: TimeRange = {
from: thirdFrom,
to: thirdTo,
raw: {
from: 'now-5m',
to: 'now',
},
};
// Signifier definition
const dashboardId = `dashid`;
const panelId = 200;
const targetIdentity = `${dashboardId}|${panelId}|A`;
const request = mockRequest({
range: firstRange,
dashboardUID: dashboardId,
panelId: panelId,
});
const requestInfo = {
requests: [], // unused
targSigs: cache,
shouldCache: true,
};
const targetSignature = `1=1|${interval}|${JSON.stringify(request.rangeRaw ?? '')}`;
cache.set(targetIdentity, targetSignature);
const firstQueryResult = storage.procFrames(request, requestInfo, firstFrames);
const firstMergedLength = firstQueryResult[0].fields[0].values.length;
const secondQueryResult = storage.procFrames(
mockRequest({
range: secondRange,
dashboardUID: dashboardId,
panelId: panelId,
}),
{
requests: [], // unused
targSigs: cache,
shouldCache: true,
},
secondFrames
);
const secondMergedLength = secondQueryResult[0].fields[0].values.length;
// Since the step is 15s, and the request was 30 seconds later, we should have 2 extra frames, but we should evict the first two, so we should get the same length
expect(firstMergedLength).toEqual(secondMergedLength);
expect(firstQueryResult[0].fields[0].values.toArray()[2]).toEqual(
secondQueryResult[0].fields[0].values.toArray()[0]
);
expect(firstQueryResult[0].fields[0].values.toArray()[0] + 30000).toEqual(
secondQueryResult[0].fields[0].values.toArray()[0]
);
cache.set(targetIdentity, `'1=1'|${interval}|${JSON.stringify(thirdRange.raw)}`);
storage.procFrames(
mockRequest({
range: thirdRange,
dashboardUID: dashboardId,
panelId: panelId,
}),
{
requests: [], // unused
targSigs: cache,
shouldCache: true,
},
thirdFrames
);
const cachedAfterThird = storage.cache.get(targetIdentity);
const storageLengthAfterThirdQuery = cachedAfterThird?.frames[0].fields[0].values.toArray().length;
expect(storageLengthAfterThirdQuery).toEqual(20);
});
it('Will build signature using target overrides', () => {
const targetInterval = '30s';
const requestInterval = '15s';
const target: PromQuery = {
datasource: { type: 'prometheus', uid: 'OPQv8Kc4z' },
editorMode: QueryEditorMode.Code,
exemplar: false,
expr: 'sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[$__rate_interval]))',
format: 'heatmap',
interval: targetInterval,
legendFormat: '{{le}}',
range: true,
refId: 'A',
utcOffsetSec: -21600,
};
const request = mockRequest({
interval: requestInterval,
targets: [target],
});
const targSig = getTargSig('__EXPR__', request, target);
expect(targSig).toContain(targetInterval);
expect(targSig.includes(requestInterval)).toBeFalsy();
});
it('will not modify request with absolute duration', () => {
const request = mockRequest({
range: {
from: moment('2023-01-30T19:33:01.332Z') as DateTime,
to: moment('2023-01-30T20:33:01.332Z') as DateTime,
raw: { from: '2023-01-30T19:33:01.332Z', to: '2023-01-30T20:33:01.332Z' },
},
rangeRaw: { from: '2023-01-30T19:33:01.332Z', to: '2023-01-30T20:33:01.332Z' },
});
const storage = new QueryCache();
const interpolateString = (s: string) => {
return s;
};
const cacheRequest = storage.requestInfo(request, interpolateString);
expect(cacheRequest.requests[0]).toBe(request);
expect(cacheRequest.shouldCache).toBe(false);
});
it('mark request as shouldCache', () => {
const request = mockRequest();
const storage = new QueryCache();
const interpolateString = (s: string) => {
return s;
};
const cacheRequest = storage.requestInfo(request, interpolateString);
expect(cacheRequest.requests[0]).toBe(request);
expect(cacheRequest.shouldCache).toBe(true);
});
it('Should modify request', () => {
const request = mockRequest();
const storage = new QueryCache();
const interpolateString = (s: string) => {
return s;
};
const cacheRequest = storage.requestInfo(request, interpolateString);
expect(cacheRequest.requests[0]).toBe(request);
expect(cacheRequest.shouldCache).toBe(true);
});
});

@ -0,0 +1,258 @@
import {
ArrayVector,
DataFrame,
DataQueryRequest,
dateTime,
durationToMilliseconds,
Field,
isValidDuration,
parseDuration,
} from '@grafana/data/src';
import { amendTable, Table, trimTable } from 'app/features/live/data/amendTimeSeries';
import { PromQuery } from '../types';
// dashboardUID + panelId + refId
// (must be stable across query changes, time range changes / interval changes / panel resizes / template variable changes)
type TargetIdent = string;
// query + template variables + interval + raw time range
// used for full target cache busting -> full range re-query
type TargetSig = string;
type TimestampMs = number;
type StringInterpolator = (expr: string) => string;
// string matching requirements defined in durationutil.ts
export const defaultPrometheusQueryOverlapWindow = '10m';
interface TargetCache {
sig: TargetSig;
prevTo: TimestampMs;
frames: DataFrame[];
}
export interface CacheRequestInfo {
requests: Array<DataQueryRequest<PromQuery>>;
targSigs: Map<TargetIdent, TargetSig>;
shouldCache: boolean;
}
/**
* Get field identity
* This is the string used to uniquely identify a field within a "target"
* @param field
*/
export const getFieldIdent = (field: Field) => `${field.type}|${field.name}|${JSON.stringify(field.labels ?? '')}`;
/**
* Get target signature
* @param targExpr
* @param request
* @param targ
*/
export function getTargSig(targExpr: string, request: DataQueryRequest<PromQuery>, targ: PromQuery) {
return `${targExpr}|${targ.interval ?? request.interval}|${JSON.stringify(request.rangeRaw ?? '')}|${targ.exemplar}`;
}
/**
* NOMENCLATURE
* Target: The request target (DataQueryRequest), i.e. a specific query reference within a panel
* Ident: Identity: the string that is not expected to change
* Sig: Signature: the string that is expected to change, upon which we wipe the cache fields
*/
export class QueryCache {
private overlapWindowMs: number;
constructor(overlapString?: string) {
const unverifiedOverlap = overlapString ?? defaultPrometheusQueryOverlapWindow;
if (isValidDuration(unverifiedOverlap)) {
const duration = parseDuration(unverifiedOverlap);
this.overlapWindowMs = durationToMilliseconds(duration);
} else {
const duration = parseDuration(defaultPrometheusQueryOverlapWindow);
this.overlapWindowMs = durationToMilliseconds(duration);
}
}
cache = new Map<TargetIdent, TargetCache>();
// can be used to change full range request to partial, split into multiple requests
requestInfo(request: DataQueryRequest<PromQuery>, interpolateString: StringInterpolator): CacheRequestInfo {
// TODO: align from/to to interval to increase probability of hitting backend cache
const newFrom = request.range.from.valueOf();
const newTo = request.range.to.valueOf();
// only cache 'now'-relative queries (that can benefit from a backfill cache)
const shouldCache = request.rangeRaw?.to?.toString() === 'now';
// all targets are queried together, so we check for any that causes group cache invalidation & full re-query
let doPartialQuery = shouldCache;
let prevTo: TimestampMs;
// pre-compute reqTargSigs
const reqTargSigs = new Map<TargetIdent, TargetSig>();
request.targets.forEach((targ) => {
let targIdent = `${request.dashboardUID}|${request.panelId}|${targ.refId}`;
// @todo refactor getTargSig into datasource class and remove targExpr. See #65952 for a potential implementation
let targExpr = interpolateString(targ.expr);
let targSig = getTargSig(targExpr, request, targ);
reqTargSigs.set(targIdent, targSig);
});
// figure out if new query range or new target props trigger full cache invalidation & re-query
for (const [targIdent, targSig] of reqTargSigs) {
let cached = this.cache.get(targIdent);
let cachedSig = cached?.sig;
if (cachedSig !== targSig) {
doPartialQuery = false;
} else {
// only do partial queries when new request range follows prior request range (possibly with overlap)
// e.g. now-6h with refresh <= 6h
prevTo = cached?.prevTo ?? Infinity;
doPartialQuery = newTo > prevTo && newFrom <= prevTo;
}
if (!doPartialQuery) {
break;
}
}
if (doPartialQuery) {
// 10m re-query overlap
// clamp to make sure we don't re-query previous 10m when newFrom is ahead of it (e.g. 5min range, 30s refresh)
let newFromPartial = Math.max(prevTo! - this.overlapWindowMs, newFrom);
// modify to partial query
request = {
...request,
range: {
...request.range,
from: dateTime(newFromPartial),
to: dateTime(newTo),
},
};
} else {
reqTargSigs.forEach((targSig, targIdent) => {
this.cache.delete(targIdent);
});
}
return {
requests: [request],
targSigs: reqTargSigs,
shouldCache,
};
}
// should amend existing cache with new frames and return full response
procFrames(
request: DataQueryRequest<PromQuery>,
requestInfo: CacheRequestInfo | undefined,
respFrames: DataFrame[]
): DataFrame[] {
if (requestInfo?.shouldCache) {
const newFrom = request.range.from.valueOf();
const newTo = request.range.to.valueOf();
// group frames by targets
const respByTarget = new Map<TargetIdent, DataFrame[]>();
respFrames.forEach((frame: DataFrame) => {
let targIdent = `${request.dashboardUID}|${request.panelId}|${frame.refId}`;
let frames = respByTarget.get(targIdent);
if (!frames) {
frames = [];
respByTarget.set(targIdent, frames);
}
frames.push(frame);
});
let outFrames: DataFrame[] = [];
respByTarget.forEach((respFrames, targIdent) => {
let cachedFrames = (targIdent ? this.cache.get(targIdent)?.frames : null) ?? [];
respFrames.forEach((respFrame: DataFrame) => {
// skip empty frames
if (respFrame.length === 0 || respFrame.fields.length === 0) {
return;
}
// frames are identified by their second (non-time) field's name + labels
// TODO: maybe also frame.meta.type?
let respFrameIdent = getFieldIdent(respFrame.fields[1]);
let cachedFrame = cachedFrames.find((cached) => getFieldIdent(cached.fields[1]) === respFrameIdent);
if (!cachedFrame) {
// append new unknown frames
cachedFrames.push(respFrame);
} else {
// we assume that fields cannot appear/disappear and will all exist in same order
// amend & re-cache
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
let prevTable: Table = cachedFrame.fields.map((field) => field.values.toArray()) as Table;
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
let nextTable: Table = respFrame.fields.map((field) => field.values.toArray()) as Table;
let amendedTable = amendTable(prevTable, nextTable);
for (let i = 0; i < amendedTable.length; i++) {
cachedFrame.fields[i].values = new ArrayVector(amendedTable[i]);
}
cachedFrame.length = cachedFrame.fields[0].values.length;
}
});
// trim all frames to in-view range, evict those that end up with 0 length
let nonEmptyCachedFrames: DataFrame[] = [];
cachedFrames.forEach((frame) => {
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
let table: Table = frame.fields.map((field) => field.values.toArray()) as Table;
let trimmed = trimTable(table, newFrom, newTo);
if (trimmed[0].length > 0) {
for (let i = 0; i < trimmed.length; i++) {
frame.fields[i].values = new ArrayVector(trimmed[i]);
}
nonEmptyCachedFrames.push(frame);
}
});
this.cache.set(targIdent, {
sig: requestInfo.targSigs.get(targIdent)!,
frames: nonEmptyCachedFrames,
prevTo: newTo,
});
outFrames.push(...nonEmptyCachedFrames);
});
// transformV2 mutates field values for heatmap de-accum, and modifies field order, so we gotta clone here, for now :(
respFrames = outFrames.map((frame) => ({
...frame,
fields: frame.fields.map((field) => ({
...field,
config: {
...field.config, // prevents mutative exemplar links (re)enrichment
},
values: new ArrayVector(field.values.toArray().slice()),
})),
}));
}
return respFrames;
}
}
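A rough usage sketch of the cache over two dashboard refreshes, mirroring how `datasource.tsx` wires it above; `buildRequest` and `runQuery` are hypothetical stand-ins for request construction and the actual (Observable-based) query execution:

```ts
import { DataFrame, DataQueryRequest } from '@grafana/data/src';
import { PromQuery } from '../types';
import { QueryCache } from './QueryCache';

// hypothetical helpers for this sketch only
declare function buildRequest(fromMs: number, toMs: number): DataQueryRequest<PromQuery>; // rangeRaw.to === 'now'
declare function runQuery(req: DataQueryRequest<PromQuery>): DataFrame[];

const cache = new QueryCache('10m');
const interpolate = (expr: string) => expr; // no template variables in this sketch

// First refresh: nothing is cached yet, so the full range is queried and stored.
const first = buildRequest(Date.now() - 6 * 3_600_000, Date.now());
const firstInfo = cache.requestInfo(first, interpolate);
const firstFrames = cache.procFrames(first, firstInfo, runQuery(firstInfo.requests[0]));

// Second refresh 30s later: the target signature still matches, so requestInfo
// shrinks the range to roughly the overlap window (the last ~10m), and procFrames
// merges the partial response into the cached frames and returns the full set.
const second = buildRequest(Date.now() - 6 * 3_600_000 + 30_000, Date.now() + 30_000);
const secondInfo = cache.requestInfo(second, interpolate);
const secondFrames = cache.procFrames(second, secondInfo, runQuery(secondInfo.requests[0]));
```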

@ -0,0 +1,864 @@
import { clone } from 'lodash';
import { ArrayVector } from '@grafana/data/src';
/**
*
* @param length - Number of values to add
* @param start - First timestamp (ms)
* @param step - step duration (ms)
*/
export const getMockTimeFrameArray = (length: number, start: number, step: number): ArrayVector => {
let timeValues = [];
for (let i = 0; i < length; i++) {
timeValues.push(start + i * step);
}
return new ArrayVector(timeValues);
};
/**
* @param length - number of "Values" to add
* @param values
* @param high
*/
export const getMockValueFrameArray = (length: number, values = 0): ArrayVector => {
return new ArrayVector(Array(length).fill(values));
};
const timeFrameWithMissingValuesInMiddle = getMockTimeFrameArray(721, 1675262550000, 30000);
const timeFrameWithMissingValuesAtStart = getMockTimeFrameArray(721, 1675262550000, 30000);
const timeFrameWithMissingValuesAtEnd = getMockTimeFrameArray(721, 1675262550000, 30000);
// Remove a chunk of values to simulate gaps in the time arrays
timeFrameWithMissingValuesInMiddle.toArray().splice(360, 721 - 684);
timeFrameWithMissingValuesAtStart.toArray().splice(0, 721 - 684);
timeFrameWithMissingValuesAtEnd.toArray().splice(721 - 684, 721 - 684);
const mockLabels = {
__name__: 'cortex_request_duration_seconds_bucket',
cluster: 'dev-us-central-0',
container: 'aggregator',
instance: 'aggregator-7:aggregator:http-metrics',
job: 'mimir-dev-11/aggregator',
le: '0.5',
method: 'GET',
namespace: 'mimir-dev-11',
pod: 'aggregator-7',
route: 'metrics',
status_code: '200',
ws: 'false',
};
const twoRequestsOneCachedMissingData = {
first: {
request: {
app: 'panel-viewer',
requestId: 'Q100',
panelId: 19,
dashboardId: 884,
dashboardUID: 'dtngicc4z',
range: {
from: '2023-02-01T14:42:54.929Z',
to: '2023-02-01T20:42:54.929Z',
raw: { from: 'now-6h', to: 'now' },
},
interval: '30s',
intervalMs: 30000,
targets: [
{
datasource: { type: 'prometheus', uid: 'OPQv8Kc4z' },
editorMode: 'code',
expr: '',
legendFormat: '',
range: true,
refId: 'A',
exemplar: false,
requestId: '19A',
utcOffsetSec: -21600,
},
],
startTime: 1675284174929,
rangeRaw: { from: 'now-6h', to: 'now' },
},
dataFrames: [
{
name: '+Inf',
refId: 'A',
fields: [
{
name: 'Time',
type: 'time',
typeInfo: { frame: 'time.Time' },
config: { interval: 30000 },
// Delete values from the middle
values: timeFrameWithMissingValuesInMiddle,
entities: {},
},
{
name: 'Value',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { ...mockLabels, le: '+Inf' },
config: { displayNameFromDS: '+Inf' },
values: getMockValueFrameArray(684, 1),
entities: {},
},
],
length: 684,
},
{
name: '0.5',
refId: 'A',
meta: {
type: 'timeseries-multi',
custom: { resultType: 'matrix' },
executedQueryString:
'Expr: {__name__="cortex_request_duration_seconds_bucket", cluster="dev-us-central-0", container="aggregator", instance=~"aggregator-7:aggregator:http-metrics|aggregator-6:aggregator:http-metrics", job="mimir-dev-11/aggregator", le=~"\\\\+Inf|0.5", method="GET", namespace="mimir-dev-11", pod="aggregator-7"}\nStep: 30s',
preferredVisualisationType: 'graph',
},
fields: [
{
name: 'Time',
type: 'time',
typeInfo: { frame: 'time.Time' },
config: { interval: 30000 },
values: timeFrameWithMissingValuesInMiddle,
entities: {},
},
{
name: 'Value',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { ...mockLabels, le: '0.5' },
config: { displayNameFromDS: '0.5' },
values: getMockValueFrameArray(684, 25349),
entities: {},
},
],
length: 684,
},
],
originalRange: undefined,
timeSrv: { from: 'now-6h', to: 'now' },
},
second: {
request: {
app: 'panel-viewer',
requestId: 'Q101',
timezone: 'browser',
panelId: 19,
dashboardId: 884,
dashboardUID: 'dtngicc4z',
publicDashboardAccessToken: '',
range: {
from: '2023-02-01T14:44:01.928Z',
to: '2023-02-01T20:44:01.928Z',
raw: { from: 'now-6h', to: 'now' },
},
timeInfo: '',
interval: '30s',
intervalMs: 30000,
targets: [
{
datasource: { type: 'prometheus', uid: 'OPQv8Kc4z' },
editorMode: 'code',
expr: '{__name__="cortex_request_duration_seconds_bucket", cluster="dev-us-central-0", container="aggregator", instance=~"aggregator-7:aggregator:http-metrics|aggregator-6:aggregator:http-metrics", job="mimir-dev-11/aggregator", le=~"\\\\+Inf|0.5", method="GET", namespace="mimir-dev-11", pod="aggregator-7"}',
legendFormat: '{{le}}',
range: true,
refId: 'A',
exemplar: false,
requestId: '19A',
utcOffsetSec: -21600,
},
],
maxDataPoints: 775,
scopedVars: { __interval: { text: '30s', value: '30s' }, __interval_ms: { text: '30000', value: 30000 } },
startTime: 1675284241929,
rangeRaw: { from: 'now-6h', to: 'now' },
},
dataFrames: [
{
name: '+Inf',
refId: 'A',
meta: {
type: 'timeseries-multi',
custom: { resultType: 'matrix' },
executedQueryString:
'Expr: {__name__="cortex_request_duration_seconds_bucket", cluster="dev-us-central-0", container="aggregator", instance=~"aggregator-7:aggregator:http-metrics|aggregator-6:aggregator:http-metrics", job="mimir-dev-11/aggregator", le=~"\\\\+Inf|0.5", method="GET", namespace="mimir-dev-11", pod="aggregator-7"}\nStep: 30s',
preferredVisualisationType: 'graph',
},
fields: [
{
name: 'Time',
type: 'time',
typeInfo: { frame: 'time.Time' },
config: { interval: 30000 },
values: getMockTimeFrameArray(24, 1675283550000, 30000),
entities: {},
},
{
name: 'Value',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { ...mockLabels, le: '+Inf' },
config: { displayNameFromDS: '+Inf' },
values: getMockValueFrameArray(24, 1),
entities: {},
},
],
length: 24,
},
{
name: '0.5',
refId: 'A',
meta: {
type: 'timeseries-multi',
custom: { resultType: 'matrix' },
executedQueryString:
'Expr: {__name__="cortex_request_duration_seconds_bucket", cluster="dev-us-central-0", container="aggregator", instance=~"aggregator-7:aggregator:http-metrics|aggregator-6:aggregator:http-metrics", job="mimir-dev-11/aggregator", le=~"\\\\+Inf|0.5", method="GET", namespace="mimir-dev-11", pod="aggregator-7"}\nStep: 30s',
preferredVisualisationType: 'graph',
},
fields: [
{
name: 'Time',
type: 'time',
typeInfo: { frame: 'time.Time' },
config: { interval: 30000 },
values: getMockTimeFrameArray(21, 1675283550000, 30000),
entities: {},
},
{
name: 'Value',
type: 'number',
typeInfo: { frame: 'float64' },
labels: {
__name__: 'cortex_request_duration_seconds_bucket',
cluster: 'dev-us-central-0',
container: 'aggregator',
instance: 'aggregator-7:aggregator:http-metrics',
job: 'mimir-dev-11/aggregator',
le: '0.5',
method: 'GET',
namespace: 'mimir-dev-11',
pod: 'aggregator-7',
route: 'metrics',
status_code: '200',
ws: 'false',
},
config: { displayNameFromDS: '0.5' },
values: getMockValueFrameArray(21, 2),
entities: {},
},
],
length: 21,
},
],
originalRange: { end: 1675284241920, start: 1675262641920 },
timeSrv: { from: 'now-6h', to: 'now' },
},
};
export const IncrementalStorageDataFrameScenarios = {
histogram: {
// 3 requests, one 30 seconds after the first, and then the user waits a minute and shortens to a 5 minute query window from 1 hour to force frames to get evicted
evictionRequests: {
first: {
request: {
range: {
from: '2023-01-30T19:33:01.332Z',
to: '2023-01-30T20:33:01.332Z',
raw: { from: 'now-1h', to: 'now' },
},
interval: '15s',
intervalMs: 15000,
targets: [
{
datasource: { type: 'prometheus', uid: 'OPQv8Kc4z' },
editorMode: 'code',
exemplar: false,
expr: 'sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[$__rate_interval]))',
format: 'heatmap',
legendFormat: '{{le}}',
range: true,
refId: 'A',
requestId: '2A',
utcOffsetSec: -21600,
},
],
maxDataPoints: 871,
scopedVars: {
__interval: { text: '15s', value: '15s' },
__interval_ms: { text: '15000', value: 15000 },
},
startTime: 1675110781332,
rangeRaw: { from: 'now-1h', to: 'now' },
},
dataFrames: [
{
name: '0.005',
refId: 'A',
meta: {
type: 'heatmap-rows',
custom: { resultType: 'matrix' },
executedQueryString:
'Expr: sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[1m0s]))\nStep: 15s',
},
fields: [
{
name: 'Time',
type: 'time',
typeInfo: { frame: 'time.Time' },
config: { interval: 15000 },
values: getMockTimeFrameArray(241, 1675107180000, 15000),
entities: {},
},
{
name: '0.005',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.005' },
config: { displayNameFromDS: '0.005' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '0.01',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.01' },
config: { displayNameFromDS: '0.01' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '0.025',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.025' },
config: { displayNameFromDS: '0.025' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '0.05',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.05' },
config: { displayNameFromDS: '0.05' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '0.1',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.1' },
config: { displayNameFromDS: '0.1' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '0.25',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.25' },
config: { displayNameFromDS: '0.25' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '0.5',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.5' },
config: { displayNameFromDS: '0.5' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '1.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '1.0' },
config: { displayNameFromDS: '1.0' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '2.5',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '2.5' },
config: { displayNameFromDS: '2.5' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '5.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '5.0' },
config: { displayNameFromDS: '5.0' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '10.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '10.0' },
config: { displayNameFromDS: '10.0' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '25.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '25.0' },
config: { displayNameFromDS: '25.0' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '50.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '50.0' },
config: { displayNameFromDS: '50.0' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '100.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '100.0' },
config: { displayNameFromDS: '100.0' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
{
name: '+Inf',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '+Inf' },
config: { displayNameFromDS: '+Inf' },
values: getMockValueFrameArray(241, 2.8),
entities: {},
},
],
length: 241,
},
],
},
second: {
request: {
range: {
from: '2023-01-30T19:33:31.357Z',
to: '2023-01-30T20:33:31.357Z',
raw: { from: 'now-1h', to: 'now' },
},
interval: '15s',
intervalMs: 15000,
targets: [
{
datasource: { type: 'prometheus' },
editorMode: 'code',
exemplar: false,
expr: 'sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[$__rate_interval]))',
format: 'heatmap',
legendFormat: '{{le}}',
range: true,
refId: 'A',
requestId: '2A',
utcOffsetSec: -21600,
},
],
maxDataPoints: 871,
scopedVars: {
__interval: { text: '15s', value: '15s' },
__interval_ms: { text: '15000', value: 15000 },
},
startTime: 1675110811357,
rangeRaw: { from: 'now-1h', to: 'now' },
},
dataFrames: [
{
name: '0.005',
refId: 'A',
meta: {
type: 'heatmap-rows',
custom: { resultType: 'matrix' },
executedQueryString:
'Expr: sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[1m0s]))\nStep: 15s',
},
fields: [
{
name: 'Time',
type: 'time',
typeInfo: { frame: 'time.Time' },
config: { interval: 15000 },
values: getMockTimeFrameArray(43, 1675110180000, 15000),
entities: {},
},
{
name: '0.005',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.005' },
config: { displayNameFromDS: '0.005' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '0.01',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.01' },
config: { displayNameFromDS: '0.01' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '0.025',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.025' },
config: { displayNameFromDS: '0.025' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '0.05',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.05' },
config: { displayNameFromDS: '0.05' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '0.1',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.1' },
config: { displayNameFromDS: '0.1' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '0.25',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.25' },
config: { displayNameFromDS: '0.25' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '0.5',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.5' },
config: { displayNameFromDS: '0.5' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '1.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '1.0' },
config: { displayNameFromDS: '1.0' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '2.5',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '2.5' },
config: { displayNameFromDS: '2.5' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '5.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '5.0' },
config: { displayNameFromDS: '5.0' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '10.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '10.0' },
config: { displayNameFromDS: '10.0' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '25.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '25.0' },
config: { displayNameFromDS: '25.0' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '50.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '50.0' },
config: { displayNameFromDS: '50.0' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '100.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '100.0' },
config: { displayNameFromDS: '100.0' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
{
name: '+Inf',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '+Inf' },
config: { displayNameFromDS: '+Inf' },
values: getMockValueFrameArray(43, 2.8),
entities: {},
},
],
length: 43,
},
],
},
third: {
request: {
range: {
from: '2023-01-30T20:33:31.357Z',
to: '2023-01-30T20:34:31.357Z',
raw: { from: 'now-5m', to: 'now' },
},
interval: '15s',
intervalMs: 15000,
targets: [
{
datasource: { type: 'prometheus' },
editorMode: 'code',
exemplar: false,
expr: 'sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[$__rate_interval]))',
format: 'heatmap',
legendFormat: '{{le}}',
range: true,
refId: 'A',
requestId: '2A',
utcOffsetSec: -21600,
},
],
maxDataPoints: 871,
scopedVars: {
__interval: { text: '15s', value: '15s' },
__interval_ms: { text: '15000', value: 15000 },
},
startTime: 1675110811357,
rangeRaw: { from: 'now-1h', to: 'now' },
},
dataFrames: [
{
name: '0.005',
refId: 'A',
meta: {
type: 'heatmap-rows',
custom: { resultType: 'matrix' },
executedQueryString:
'Expr: sum by(le) (rate(cortex_request_duration_seconds_bucket{cluster="dev-us-central-0", job="cortex-dev-01/cortex-gw-internal", namespace="cortex-dev-01"}[1m0s]))\nStep: 15s',
},
fields: [
{
name: 'Time',
type: 'time',
typeInfo: { frame: 'time.Time' },
config: { interval: 15000 },
values: getMockTimeFrameArray(20, 1675110810000, 15000),
entities: {},
},
{
name: '0.005',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.005' },
config: { displayNameFromDS: '0.005' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '0.01',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.01' },
config: { displayNameFromDS: '0.01' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '0.025',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.025' },
config: { displayNameFromDS: '0.025' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '0.05',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.05' },
config: { displayNameFromDS: '0.05' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '0.1',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.1' },
config: { displayNameFromDS: '0.1' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '0.25',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.25' },
config: { displayNameFromDS: '0.25' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
          // Sometimes we don't get new values for every bucket; the preprocessing will need to back-fill any missing values (a minimal back-fill sketch follows this fixture)
{
name: '0.5',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '0.5' },
config: { displayNameFromDS: '0.5' },
values: getMockValueFrameArray(10, 4.3),
entities: {},
},
{
name: '1.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '1.0' },
config: { displayNameFromDS: '1.0' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '2.5',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '2.5' },
config: { displayNameFromDS: '2.5' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '5.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '5.0' },
config: { displayNameFromDS: '5.0' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '10.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '10.0' },
config: { displayNameFromDS: '10.0' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '25.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '25.0' },
config: { displayNameFromDS: '25.0' },
values: getMockValueFrameArray(10, 4.3),
entities: {},
},
{
name: '50.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '50.0' },
config: { displayNameFromDS: '50.0' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '100.0',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '100.0' },
config: { displayNameFromDS: '100.0' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
{
name: '+Inf',
type: 'number',
typeInfo: { frame: 'float64' },
labels: { le: '+Inf' },
config: { displayNameFromDS: '+Inf' },
values: getMockValueFrameArray(20, 4.3),
entities: {},
},
],
        length: 20,
},
],
},
},
getSeriesWithGapAtEnd: (countOfSeries = 2) => {
const templateClone = clone(twoRequestsOneCachedMissingData);
for (let i = 0; i < countOfSeries - 1; i++) {
templateClone.first.dataFrames[i].fields[0].values = timeFrameWithMissingValuesAtEnd;
}
return templateClone;
},
getSeriesWithGapAtStart: (countOfSeries = 2) => {
const templateClone = clone(twoRequestsOneCachedMissingData);
for (let i = 0; i < countOfSeries - 1; i++) {
templateClone.first.dataFrames[i].fields[0].values = timeFrameWithMissingValuesAtStart;
}
return templateClone;
},
getSeriesWithGapInMiddle: (countOfSeries = 2) => {
const templateClone = clone(twoRequestsOneCachedMissingData);
for (let i = 0; i < countOfSeries - 1; i++) {
templateClone.first.dataFrames[i].fields[0].values = timeFrameWithMissingValuesInMiddle;
}
return templateClone;
},
},
};
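
// Illustrative sketch only (not part of this fixture or of QueryCache.ts): the comment in the
// `third` response above notes that some bucket fields can arrive with fewer samples than the
// Time field, and that preprocessing is expected to back-fill the gaps. One minimal way to do
// that is shown below; `backFillValues` and the zero fill value are assumptions for
// illustration, not the shipped cache logic.
export function backFillValues(values: number[], targetLength: number, fillValue = 0): number[] {
  const missing = targetLength - values.length;
  if (missing <= 0) {
    return values;
  }
  // Pad the front of the array so the newest samples stay aligned with the newest timestamps.
  return new Array(missing).fill(fillValue).concat(values);
}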

@@ -38,6 +38,8 @@ export interface PromOptions extends DataSourceJsonData {
prometheusVersion?: string;
cacheLevel?: PrometheusCacheLevel;
defaultEditor?: QueryEditorMode;
incrementalQuerying?: boolean;
incrementalQueryOverlapWindow?: string;
}
export type ExemplarTraceIdDestination = {

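// Illustrative sketch only (not part of types.ts): one way a consumer could read the two
// options added above to decide whether incremental querying applies and how far back to
// re-query on each refresh. `getOverlapWindowMs`, the '10m' fallback, and the use of
// rangeUtil are assumptions for illustration, not the shipped datasource logic.
import { rangeUtil } from '@grafana/data';

export function getOverlapWindowMs(
  options: Pick<PromOptions, 'incrementalQuerying' | 'incrementalQueryOverlapWindow'>
): number | undefined {
  if (!options.incrementalQuerying) {
    // Feature disabled: keep issuing full-range queries as before.
    return undefined;
  }
  // incrementalQueryOverlapWindow is an interval string such as '10m'; this window is
  // re-queried on every refresh so recently written samples can overwrite cached ones.
  return rangeUtil.intervalToMs(options.incrementalQueryOverlapWindow ?? '10m');
}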