Prometheus: Remove query assistant and related components (#100669)

* remove query assistant related components

* remove export statement

* remove grafana/llm from prometheus packages

* remove extra package

* revert unintended change

* fix incorrect handling of managedPluginsInstall merge deletion

* update yarn.lock

* linting fix

* linting fix
eleijonmarck/datasource-permissions/query-only-for-query-path
Edward Qian 3 months ago committed by GitHub
parent f0f8bb890c
commit 6eca5c09df
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 1
      docs/sources/setup-grafana/configure-grafana/feature-toggles/index.md
  2. 13
      e2e/old-arch/various-suite/prometheus-editor.spec.ts
  3. 13
      e2e/various-suite/prometheus-editor.spec.ts
  4. 1
      packages/grafana-data/src/types/featureToggles.gen.ts
  5. 1
      packages/grafana-prometheus/package.json
  6. 1
      packages/grafana-prometheus/src/index.ts
  7. 24
      packages/grafana-prometheus/src/querybuilder/components/PromQueryBuilder.test.tsx
  8. 40
      packages/grafana-prometheus/src/querybuilder/components/PromQueryBuilder.tsx
  9. 148
      packages/grafana-prometheus/src/querybuilder/components/promQail/PromQail.test.tsx
  10. 616
      packages/grafana-prometheus/src/querybuilder/components/promQail/PromQail.tsx
  11. 51
      packages/grafana-prometheus/src/querybuilder/components/promQail/QueryAssistantButton.test.tsx
  12. 86
      packages/grafana-prometheus/src/querybuilder/components/promQail/QueryAssistantButton.tsx
  13. 102
      packages/grafana-prometheus/src/querybuilder/components/promQail/QuerySuggestionContainer.tsx
  14. 322
      packages/grafana-prometheus/src/querybuilder/components/promQail/QuerySuggestionItem.tsx
  15. 1
      packages/grafana-prometheus/src/querybuilder/components/promQail/index.ts
  16. 115
      packages/grafana-prometheus/src/querybuilder/components/promQail/prompts.ts
  17. 4
      packages/grafana-prometheus/src/querybuilder/components/promQail/resources/AI_Logo_bw.svg
  18. 11
      packages/grafana-prometheus/src/querybuilder/components/promQail/resources/AI_Logo_color.svg
  19. 73
      packages/grafana-prometheus/src/querybuilder/components/promQail/state/helpers.test.ts
  20. 415
      packages/grafana-prometheus/src/querybuilder/components/promQail/state/helpers.ts
  21. 44
      packages/grafana-prometheus/src/querybuilder/components/promQail/state/state.ts
  22. 342
      packages/grafana-prometheus/src/querybuilder/components/promQail/state/templates.ts
  23. 18
      packages/grafana-prometheus/src/querybuilder/components/promQail/types.ts
  24. 7
      pkg/services/featuremgmt/registry.go
  25. 1
      pkg/services/featuremgmt/toggles-gitlog.csv
  26. 1
      pkg/services/featuremgmt/toggles_gen.csv
  27. 4
      pkg/services/featuremgmt/toggles_gen.go
  28. 16
      pkg/services/featuremgmt/toggles_gen.json
  29. 1
      yarn.lock

@ -174,7 +174,6 @@ Experimental features might be changed or removed without prior notice.
| `queryServiceRewrite` | Rewrite requests targeting /ds/query to the query service |
| `queryServiceFromUI` | Routes requests to the new query service |
| `cachingOptimizeSerializationMemoryUsage` | If enabled, the caching backend gradually serializes query responses for the cache, comparing against the configured `[caching]max_value_mb` value as it goes. This can help prevent Grafana from running out of memory while attempting to cache very large query responses. |
| `prometheusPromQAIL` | Prometheus and AI/ML to assist users in creating a query |
| `prometheusCodeModeMetricNamesSearch` | Enables search for metric names in Code Mode, to improve performance when working with an enormous number of metric names |
| `alertmanagerRemoteSecondary` | Enable Grafana to sync configuration and state with a remote Alertmanager. |
| `alertmanagerRemotePrimary` | Enable Grafana to have a remote Alertmanager instance as the primary Alertmanager. |

@ -159,19 +159,6 @@ describe('Prometheus query editor', () => {
e2e.components.DataSource.Prometheus.queryEditor.builder.metricsExplorer().should('exist');
});
// NEED TO COMPLETE QUERY ADVISOR WORK OR FIGURE OUT HOW TO ENABLE EXPERIMENTAL FEATURE TOGGLES
// it('should have a query advisor when enabled with feature toggle', () => {
// cy.window().then((win) => {
// win.localStorage.setItem('grafana.featureToggles', 'prometheusPromQAIL=0');
// navigateToEditor('Builder', 'prometheusBuilder');
// getResources();
// e2e.components.DataSource.Prometheus.queryEditor.builder.queryAdvisor().should('exist');
// });
// });
});
});

@ -159,19 +159,6 @@ describe.skip('Prometheus query editor', () => {
e2e.components.DataSource.Prometheus.queryEditor.builder.metricsExplorer().should('exist');
});
// NEED TO COMPLETE QUERY ADVISOR WORK OR FIGURE OUT HOW TO ENABLE EXPERIMENTAL FEATURE TOGGLES
// it('should have a query advisor when enabled with feature toggle', () => {
// cy.window().then((win) => {
// win.localStorage.setItem('grafana.featureToggles', 'prometheusPromQAIL=0');
// navigateToEditor('Builder', 'prometheusBuilder');
// getResources();
// e2e.components.DataSource.Prometheus.queryEditor.builder.queryAdvisor().should('exist');
// });
// });
});
});

@ -118,7 +118,6 @@ export interface FeatureToggles {
recoveryThreshold?: boolean;
lokiStructuredMetadata?: boolean;
cachingOptimizeSerializationMemoryUsage?: boolean;
prometheusPromQAIL?: boolean;
prometheusCodeModeMetricNamesSearch?: boolean;
addFieldFromCalculationStatFunctions?: boolean;
alertmanagerRemoteSecondary?: boolean;

@ -40,7 +40,6 @@
"@floating-ui/react": "0.27.3",
"@grafana/data": "11.6.0-pre",
"@grafana/e2e-selectors": "11.6.0-pre",
"@grafana/llm": "0.12.0",
"@grafana/plugin-ui": "0.10.1",
"@grafana/runtime": "11.6.0-pre",
"@grafana/schema": "11.6.0-pre",

@ -55,7 +55,6 @@ export { PromQueryEditorSelector } from './querybuilder/components/PromQueryEdit
export { PromQueryLegendEditor } from './querybuilder/components/PromQueryLegendEditor';
export { QueryPreview } from './querybuilder/components/QueryPreview';
export { MetricsModal } from './querybuilder/components/metrics-modal/MetricsModal';
export { PromQail } from './querybuilder/components/promQail/PromQail';
// SRC/
// Main export

@ -11,7 +11,7 @@ import {
QueryHint,
TimeRange,
} from '@grafana/data';
import { config, TemplateSrv } from '@grafana/runtime';
import { TemplateSrv } from '@grafana/runtime';
import { PrometheusDatasource } from '../../datasource';
import PromQlLanguageProvider from '../../language_provider';
@ -108,28 +108,6 @@ describe('PromQueryBuilder', () => {
await waitFor(() => expect(datasource.getVariables).toBeCalled());
});
it('checks if the LLM plugin is enabled when the `prometheusPromQAIL` feature is enabled', async () => {
jest.replaceProperty(config, 'featureToggles', {
prometheusPromQAIL: true,
});
const mockIsLLMPluginEnabled = jest.fn();
mockIsLLMPluginEnabled.mockResolvedValue(true);
jest.spyOn(require('./promQail/state/helpers'), 'isLLMPluginEnabled').mockImplementation(mockIsLLMPluginEnabled);
setup();
await waitFor(() => expect(mockIsLLMPluginEnabled).toHaveBeenCalledTimes(1));
});
it('does not check if the LLM plugin is enabled when the `prometheusPromQAIL` feature is disabled', async () => {
jest.replaceProperty(config, 'featureToggles', {
prometheusPromQAIL: false,
});
const mockIsLLMPluginEnabled = jest.fn();
mockIsLLMPluginEnabled.mockResolvedValue(true);
jest.spyOn(require('./promQail/state/helpers'), 'isLLMPluginEnabled').mockImplementation(mockIsLLMPluginEnabled);
setup();
await waitFor(() => expect(mockIsLLMPluginEnabled).toHaveBeenCalledTimes(0));
});
// <LegacyPrometheus>
it('tries to load labels when metric selected', async () => {
const { languageProvider } = setup();

@ -1,12 +1,10 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/PromQueryBuilder.tsx
import { css } from '@emotion/css';
import { memo, useEffect, useState } from 'react';
import { memo, useState } from 'react';
import { DataSourceApi, PanelData } from '@grafana/data';
import { selectors } from '@grafana/e2e-selectors';
import { EditorRow } from '@grafana/plugin-ui';
import { config } from '@grafana/runtime';
import { Drawer } from '@grafana/ui';
import { PrometheusDatasource } from '../../datasource';
import promqlGrammar from '../../promql';
@ -24,9 +22,6 @@ import { PromVisualQuery } from '../types';
import { MetricsLabelsSection } from './MetricsLabelsSection';
import { NestedQueryList } from './NestedQueryList';
import { EXPLAIN_LABEL_FILTER_CONTENT } from './PromQueryBuilderExplained';
import { PromQail } from './promQail/PromQail';
import { QueryAssistantButton } from './promQail/QueryAssistantButton';
import { isLLMPluginEnabled } from './promQail/state/helpers';
export interface PromQueryBuilderProps {
query: PromVisualQuery;
@ -40,37 +35,13 @@ export interface PromQueryBuilderProps {
export const PromQueryBuilder = memo<PromQueryBuilderProps>((props) => {
const { datasource, query, onChange, onRunQuery, data, showExplain } = props;
const [highlightedOp, setHighlightedOp] = useState<QueryBuilderOperation | undefined>();
const [showDrawer, setShowDrawer] = useState<boolean>(false);
const [llmAppEnabled, updateLlmAppEnabled] = useState<boolean>(false);
const { prometheusPromQAIL } = config.featureToggles; // AI/ML + Prometheus
const lang = { grammar: promqlGrammar, name: 'promql' };
const initHints = datasource.getInitHints();
useEffect(() => {
async function checkLlms() {
const check = await isLLMPluginEnabled();
updateLlmAppEnabled(check);
}
if (prometheusPromQAIL) {
checkLlms();
}
}, [prometheusPromQAIL]);
return (
<>
{prometheusPromQAIL && showDrawer && (
<Drawer closeOnMaskClick={false} onClose={() => setShowDrawer(false)}>
<PromQail
query={query}
closeDrawer={() => setShowDrawer(false)}
onChange={onChange}
datasource={datasource}
/>
</Drawer>
)}
<EditorRow>
<MetricsLabelsSection query={query} onChange={onChange} datasource={datasource} />
</EditorRow>
@ -108,15 +79,6 @@ export const PromQueryBuilder = memo<PromQueryBuilderProps>((props) => {
onRunQuery={onRunQuery}
highlightedOp={highlightedOp}
/>
{prometheusPromQAIL && (
<div
className={css({
padding: '0 0 0 6px',
})}
>
<QueryAssistantButton llmAppEnabled={llmAppEnabled} metric={query.metric} setShowDrawer={setShowDrawer} />
</div>
)}
<div data-testid={selectors.components.DataSource.Prometheus.queryEditor.builder.hints}>
<QueryBuilderHints<PromVisualQuery>
datasource={datasource}

@ -1,148 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/PromQail.test.tsx
import { render, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { DataSourceInstanceSettings, DataSourcePluginMeta } from '@grafana/data';
import { PrometheusDatasource } from '../../../datasource';
import PromQlLanguageProvider from '../../../language_provider';
import { EmptyLanguageProviderMock } from '../../../language_provider.mock';
import { PromOptions } from '../../../types';
import { PromVisualQuery } from '../../types';
import { PromQail, queryAssistanttestIds } from './PromQail';
// don't care about interaction tracking in our unit tests
jest.mock('@grafana/runtime', () => ({
...jest.requireActual('@grafana/runtime'),
reportInteraction: jest.fn(),
}));
window.HTMLElement.prototype.scrollIntoView = jest.fn();
describe('PromQail', () => {
it('renders the drawer', async () => {
setup(defaultQuery);
await waitFor(() => {
expect(screen.getByText('Query advisor')).toBeInTheDocument();
});
});
it('shows an option to not show security warning', async () => {
setup(defaultQuery);
await waitFor(() => {
expect(screen.getByText("Don't show this message again")).toBeInTheDocument();
});
});
it('shows selected metric and asks for a prompt', async () => {
setup(defaultQuery);
await clickSecurityButton();
await waitFor(() => {
expect(screen.getByText('random_metric')).toBeInTheDocument();
expect(screen.getByText('Do you know what you want to query?')).toBeInTheDocument();
});
});
it('displays a prompt when the user knows what they want to query', async () => {
setup(defaultQuery);
await clickSecurityButton();
await waitFor(() => {
expect(screen.getByText('random_metric')).toBeInTheDocument();
expect(screen.getByText('Do you know what you want to query?')).toBeInTheDocument();
});
const aiPrompt = screen.getByTestId(queryAssistanttestIds.clickForAi);
await userEvent.click(aiPrompt);
await waitFor(() => {
expect(screen.getByText('What kind of data do you want to see with your metric?')).toBeInTheDocument();
});
});
it('does not display a prompt when choosing historical', async () => {
setup(defaultQuery);
await clickSecurityButton();
await waitFor(() => {
expect(screen.getByText('random_metric')).toBeInTheDocument();
expect(screen.getByText('Do you know what you want to query?')).toBeInTheDocument();
});
const historicalPrompt = screen.getByTestId(queryAssistanttestIds.clickForHistorical);
await userEvent.click(historicalPrompt);
await waitFor(() => {
expect(screen.queryByText('What kind of data do you want to see with your metric?')).toBeNull();
});
});
});
const defaultQuery: PromVisualQuery = {
metric: 'random_metric',
labels: [],
operations: [],
};
function createDatasource(withLabels?: boolean) {
const languageProvider = new EmptyLanguageProviderMock() as unknown as PromQlLanguageProvider;
languageProvider.metricsMetadata = {
'all-metrics': {
type: 'all-metrics-type',
help: 'all-metrics-help',
},
a: {
type: 'counter',
help: 'a-metric-help',
},
a_bucket: {
type: 'counter',
help: 'for functions',
},
};
const datasource = new PrometheusDatasource(
{
url: '',
jsonData: {},
meta: {} as DataSourcePluginMeta,
} as DataSourceInstanceSettings<PromOptions>,
undefined,
languageProvider
);
return datasource;
}
function createProps(query: PromVisualQuery, datasource: PrometheusDatasource) {
return {
datasource,
onChange: jest.fn(),
closeDrawer: jest.fn(),
query: query,
};
}
function setup(query: PromVisualQuery) {
const withLabels: boolean = query.labels.length > 0;
const datasource = createDatasource(withLabels);
const props = createProps(query, datasource);
// render the drawer only
const { container } = render(<PromQail {...props} />);
return container;
}
async function clickSecurityButton() {
const securityInfoButton = screen.getByTestId(queryAssistanttestIds.securityInfoButton);
await userEvent.click(securityInfoButton);
}

@ -1,616 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/PromQail.tsx
import { css, cx } from '@emotion/css';
import { PayloadAction, createSlice } from '@reduxjs/toolkit';
import { useEffect, useReducer, useRef, useState } from 'react';
import { GrafanaTheme2, store } from '@grafana/data';
import { reportInteraction } from '@grafana/runtime';
import { Alert, Button, Checkbox, Input, Spinner, useTheme2 } from '@grafana/ui';
import { PrometheusDatasource } from '../../../datasource';
import { PromVisualQuery } from '../../types';
import { QuerySuggestionContainer } from './QuerySuggestionContainer';
// @ts-ignore until we can get these added for icons
import AI_Logo_color from './resources/AI_Logo_color.svg';
import { promQailExplain, promQailSuggest } from './state/helpers';
import { createInteraction, initialState } from './state/state';
import { Interaction, SuggestionType } from './types';
export type PromQailProps = {
query: PromVisualQuery;
closeDrawer: () => void;
onChange: (query: PromVisualQuery) => void;
datasource: PrometheusDatasource;
};
const SKIP_STARTING_MESSAGE = 'SKIP_STARTING_MESSAGE';
export const PromQail = (props: PromQailProps) => {
const { query, closeDrawer, onChange, datasource } = props;
const skipStartingMessage = store.getBool(SKIP_STARTING_MESSAGE, false);
const [state, dispatch] = useReducer(stateSlice.reducer, initialState(query, !skipStartingMessage));
const [labelNames, setLabelNames] = useState<string[]>([]);
const suggestions = state.interactions.reduce((acc, int) => acc + int.suggestions.length, 0);
const responsesEndRef = useRef(null);
const scrollToBottom = () => {
if (responsesEndRef) {
// @ts-ignore for React.MutableRefObject
responsesEndRef?.current?.scrollIntoView({ behavior: 'smooth' });
}
};
useEffect(() => {
// only scroll when an interaction has been added or the suggestions have been updated
scrollToBottom();
}, [state.interactions.length, suggestions]);
useEffect(() => {
const fetchLabels = async () => {
let labelsIndex: Record<string, string[]> = await datasource.languageProvider.fetchLabelsWithMatch(query.metric);
setLabelNames(Object.keys(labelsIndex));
};
fetchLabels();
}, [query, datasource]);
const theme = useTheme2();
const styles = getStyles(theme);
return (
<div className={styles.containerPadding}>
{/* Query Advisor */}
{/* header */}
<div className={styles.header}>
<h3>Query advisor</h3>
<Button icon="times" fill="text" variant="secondary" onClick={closeDrawer} />
</div>
{/* Starting message */}
<div>
<div className={styles.iconSection}>
<img src={AI_Logo_color} alt="AI logo color" /> Assistant
</div>
{state.showStartingMessage ? (
<>
<div className={styles.dataList}>
<ol>
<li className={styles.textPadding}>
Query Advisor suggests queries based on a metric and requests you type in.
</li>
<li className={styles.textPadding}>
Query Advisor sends Prometheus metrics, labels and metadata to the LLM provider you&#39;ve configured.
Be sure to align its usage with your company&#39;s internal policies.
</li>
<li className={styles.textPadding}>
An AI-suggested query may not fully answer your question. Always take a moment to understand a query
before you use it.
</li>
</ol>
</div>
<Alert
title={''}
severity={'info'}
key={'promqail-llm-app'}
className={cx(styles.textPadding, styles.noMargin)}
>
Query Advisor is currently in Private Preview. Feedback is appreciated and can be provided on explanations
and suggestions.
</Alert>
{/* don't show this message again, store in localstorage */}
<div className={styles.textPadding}>
<Checkbox
checked={state.indicateCheckbox}
value={state.indicateCheckbox}
onChange={() => {
const val = store.getBool(SKIP_STARTING_MESSAGE, false);
store.set(SKIP_STARTING_MESSAGE, !val);
dispatch(indicateCheckbox(!val));
}}
label="Don't show this message again"
/>
</div>
<div className={styles.rightButtonsWrapper}>
<div className={styles.rightButtons}>
<Button className={styles.leftButton} fill="outline" variant="secondary" onClick={closeDrawer}>
Cancel
</Button>
<Button
fill="solid"
variant="primary"
onClick={() => dispatch(showStartingMessage(false))}
data-testid={queryAssistanttestIds.securityInfoButton}
>
Continue
</Button>
</div>
</div>
</>
) : (
<div className={styles.bodySmall}>
{/* MAKE THIS TABLE RESPONSIVE */}
{/* FIT SUPER LONG METRICS AND LABELS IN HERE */}
<div className={styles.textPadding}>Here is the metric you have selected:</div>
<div className={styles.infoContainerWrapper}>
<div className={styles.infoContainer}>
<table className={styles.metricTable}>
<tbody>
<tr>
<td className={styles.metricTableName}>metric</td>
<td className={styles.metricTableValue}>{state.query.metric}</td>
<td>
<Button
fill="outline"
variant="secondary"
onClick={closeDrawer}
className={styles.metricTableButton}
size={'sm'}
>
Choose new metric
</Button>
</td>
</tr>
{state.query.labels.map((label, idx) => {
const text = idx === 0 ? 'labels' : '';
return (
<tr key={`${label.label}-${idx}`}>
<td>{text}</td>
<td className={styles.metricTableValue}>{`${label.label}${label.op}${label.value}`}</td>
<td> </td>
</tr>
);
})}
</tbody>
</table>
</div>
</div>
{/* Ask if you know what you want to query? */}
{!state.askForQueryHelp && state.interactions.length === 0 && (
<>
<div className={styles.queryQuestion}>Do you know what you want to query?</div>
<div className={styles.rightButtonsWrapper}>
<div className={styles.rightButtons}>
<Button
className={styles.leftButton}
fill="solid"
variant="secondary"
data-testid={queryAssistanttestIds.clickForHistorical}
onClick={() => {
const isLoading = true;
const suggestionType = SuggestionType.Historical;
dispatch(addInteraction({ suggestionType, isLoading }));
reportInteraction('grafana_prometheus_promqail_know_what_you_want_to_query', {
promVisualQuery: query,
doYouKnow: 'no',
});
promQailSuggest(dispatch, 0, query, labelNames, datasource);
}}
>
No
</Button>
<Button
fill="solid"
variant="primary"
data-testid={queryAssistanttestIds.clickForAi}
onClick={() => {
reportInteraction('grafana_prometheus_promqail_know_what_you_want_to_query', {
promVisualQuery: query,
doYouKnow: 'yes',
});
const isLoading = false;
const suggestionType = SuggestionType.AI;
dispatch(addInteraction({ suggestionType, isLoading }));
}}
>
Yes
</Button>
</div>
</div>
</>
)}
{state.interactions.map((interaction: Interaction, idx: number) => {
return (
<div key={idx}>
{interaction.suggestionType === SuggestionType.AI ? (
<>
<div className={styles.textPadding}>What kind of data do you want to see with your metric?</div>
<div className={cx(styles.secondaryText, styles.bottomMargin)}>
<div>You do not need to enter in a metric or a label again in the prompt.</div>
<div>Example: I want to monitor request latency, not errors.</div>
</div>
<div className={styles.inputPadding}>
<Input
value={interaction.prompt}
spellCheck={false}
placeholder="Enter prompt"
disabled={interaction.suggestions.length > 0}
onChange={(e) => {
const prompt = e.currentTarget.value;
const payload = {
idx: idx,
interaction: { ...interaction, prompt },
};
dispatch(updateInteraction(payload));
}}
/>
</div>
{interaction.suggestions.length === 0 ? (
interaction.isLoading ? (
<>
<div className={styles.loadingMessageContainer}>
Waiting for OpenAI <Spinner className={styles.floatRight} />
</div>
</>
) : (
<>
<div className={styles.rightButtonsWrapper}>
<div className={styles.rightButtons}>
<Button
className={styles.leftButton}
fill="outline"
variant="secondary"
onClick={closeDrawer}
>
Cancel
</Button>
<Button
className={styles.leftButton}
fill="outline"
variant="secondary"
onClick={() => {
// JUST SUGGEST QUERIES AND SHOW THE LIST
const newInteraction: Interaction = {
...interaction,
suggestionType: SuggestionType.Historical,
isLoading: true,
};
const payload = {
idx: idx,
interaction: newInteraction,
};
reportInteraction('grafana_prometheus_promqail_suggest_query_instead', {
promVisualQuery: query,
});
dispatch(updateInteraction(payload));
promQailSuggest(dispatch, idx, query, labelNames, datasource, newInteraction);
}}
>
Suggest queries instead
</Button>
<Button
fill="solid"
variant="primary"
data-testid={queryAssistanttestIds.submitPrompt + idx}
onClick={() => {
const newInteraction: Interaction = {
...interaction,
isLoading: true,
};
const payload = {
idx: idx,
interaction: newInteraction,
};
reportInteraction('grafana_prometheus_promqail_prompt_submitted', {
promVisualQuery: query,
prompt: interaction.prompt,
});
dispatch(updateInteraction(payload));
// add the suggestions in the API call
promQailSuggest(dispatch, idx, query, labelNames, datasource, interaction);
}}
>
Submit
</Button>
</div>
</div>
</>
)
) : (
// LIST OF SUGGESTED QUERIES FROM AI
<QuerySuggestionContainer
suggestionType={SuggestionType.AI}
querySuggestions={interaction.suggestions}
closeDrawer={closeDrawer}
nextInteraction={() => {
const isLoading = false;
const suggestionType = SuggestionType.AI;
dispatch(addInteraction({ suggestionType, isLoading }));
}}
queryExplain={(suggIdx: number) =>
interaction.suggestions[suggIdx].explanation === ''
? promQailExplain(dispatch, idx, query, interaction, suggIdx, datasource)
: interaction.suggestions[suggIdx].explanation
}
onChange={onChange}
prompt={interaction.prompt ?? ''}
/>
)}
</>
) : // HISTORICAL SUGGESTIONS
interaction.isLoading ? (
<>
<div className={styles.loadingMessageContainer}>
Waiting for OpenAI <Spinner className={styles.floatRight} />
</div>
</>
) : (
// LIST OF SUGGESTED QUERIES FROM HISTORICAL DATA
<QuerySuggestionContainer
suggestionType={SuggestionType.Historical}
querySuggestions={interaction.suggestions}
closeDrawer={closeDrawer}
nextInteraction={() => {
const isLoading = false;
const suggestionType = SuggestionType.AI;
dispatch(addInteraction({ suggestionType, isLoading }));
}}
queryExplain={(suggIdx: number) =>
interaction.suggestions[suggIdx].explanation === ''
? promQailExplain(dispatch, idx, query, interaction, suggIdx, datasource)
: interaction.suggestions[suggIdx].explanation
}
onChange={onChange}
prompt={interaction.prompt ?? ''}
/>
)}
</div>
);
})}
</div>
)}
</div>
<div ref={responsesEndRef} />
</div>
);
};
export const getStyles = (theme: GrafanaTheme2) => {
return {
sectionPadding: css({
padding: '20px',
}),
header: css({
display: 'flex',
button: {
marginLeft: 'auto',
},
}),
iconSection: css({
padding: '0 0 10px 0',
color: `${theme.colors.text.secondary}`,
img: {
paddingRight: '4px',
},
}),
rightButtonsWrapper: css({
display: 'flex',
}),
rightButtons: css({
marginLeft: 'auto',
}),
leftButton: css({
marginRight: '10px',
}),
dataList: css({
padding: '0px 28px 0px 28px',
}),
textPadding: css({
paddingBottom: '12px',
}),
containerPadding: css({
padding: '28px',
}),
infoContainer: css({
border: `${theme.colors.border.strong}`,
padding: '16px',
backgroundColor: `${theme.colors.background.secondary}`,
borderRadius: `8px`,
borderBottomLeftRadius: 0,
}),
infoContainerWrapper: css({
paddingBottom: '24px',
}),
metricTable: css({
width: '100%',
}),
metricTableName: css({
width: '15%',
}),
metricTableValue: css({
fontFamily: `${theme.typography.fontFamilyMonospace}`,
fontSize: `${theme.typography.bodySmall.fontSize}`,
overflow: 'scroll',
textWrap: 'nowrap',
maxWidth: '150px',
width: '60%',
maskImage: `linear-gradient(to right, rgba(0, 0, 0, 1) 90%, rgba(0, 0, 0, 0))`,
}),
metricTableButton: css({
float: 'right',
}),
queryQuestion: css({
textAlign: 'end',
padding: '8px 0',
}),
secondaryText: css({
color: `${theme.colors.text.secondary}`,
}),
loadingMessageContainer: css({
border: `${theme.colors.border.strong}`,
padding: `16px`,
backgroundColor: `${theme.colors.background.secondary}`,
marginBottom: `20px`,
borderRadius: `8px`,
color: `${theme.colors.text.secondary}`,
fontStyle: 'italic',
}),
floatRight: css({
float: 'right',
}),
codeText: css({
fontFamily: `${theme.typography.fontFamilyMonospace}`,
fontSize: `${theme.typography.bodySmall.fontSize}`,
}),
bodySmall: css({
fontSize: `${theme.typography.bodySmall.fontSize}`,
}),
explainPadding: css({
paddingLeft: '26px',
}),
bottomMargin: css({
marginBottom: '20px',
}),
topPadding: css({
paddingTop: '22px',
}),
doc: css({
textDecoration: 'underline',
}),
afterButtons: css({
display: 'flex',
justifyContent: 'flex-end',
}),
feedbackStyle: css({
margin: 0,
textAlign: 'right',
paddingTop: '22px',
paddingBottom: '22px',
}),
nextInteractionHeight: css({
height: '88px',
}),
center: css({
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
}),
inputPadding: css({
paddingBottom: '24px',
}),
querySuggestion: css({
display: 'flex',
flexWrap: 'nowrap',
}),
longCode: css({
width: '90%',
textWrap: 'nowrap',
overflow: 'scroll',
maskImage: `linear-gradient(to right, rgba(0, 0, 0, 1) 90%, rgba(0, 0, 0, 0))`,
div: {
display: 'inline-block',
},
}),
useButton: css({
marginLeft: 'auto',
}),
suggestionFeedback: css({
textAlign: 'left',
}),
feedbackQuestion: css({
display: 'flex',
padding: '8px 0px',
h6: { marginBottom: 0 },
i: {
marginTop: '1px',
},
}),
explationTextInput: css({
paddingLeft: '24px',
}),
submitFeedback: css({
padding: '16px 0',
}),
noMargin: css({
margin: 0,
}),
enableButtonTooltip: css({
padding: 8,
}),
enableButtonTooltipText: css({
color: `${theme.colors.text.secondary}`,
ul: {
marginLeft: 16,
},
}),
link: css({
color: `${theme.colors.text.link} !important`,
}),
};
};
export const queryAssistanttestIds = {
promQail: 'prom-qail',
securityInfoButton: 'security-info-button',
clickForHistorical: 'click-for-historical',
clickForAi: 'click-for-ai',
submitPrompt: 'submit-prompt',
refinePrompt: 'refine-prompt',
};
const stateSlice = createSlice({
name: 'metrics-modal-state',
initialState: initialState(),
reducers: {
showExplainer: (state, action: PayloadAction<boolean>) => {
state.showExplainer = action.payload;
},
showStartingMessage: (state, action: PayloadAction<boolean>) => {
state.showStartingMessage = action.payload;
},
indicateCheckbox: (state, action: PayloadAction<boolean>) => {
state.indicateCheckbox = action.payload;
},
askForQueryHelp: (state, action: PayloadAction<boolean>) => {
state.askForQueryHelp = action.payload;
},
/*
* start working on a collection of interactions
* {
* askForhelp y n
* prompt question
* queries querySuggestions
* }
*
*/
addInteraction: (state, action: PayloadAction<{ suggestionType: SuggestionType; isLoading: boolean }>) => {
// AI or Historical?
const interaction = createInteraction(action.payload.suggestionType, action.payload.isLoading);
const interactions = state.interactions;
state.interactions = interactions.concat([interaction]);
},
updateInteraction: (state, action: PayloadAction<{ idx: number; interaction: Interaction }>) => {
// update the interaction by index
// will most likely be the last interaction but we might update previous by giving them cues of helpful or not
const index = action.payload.idx;
const updInteraction = action.payload.interaction;
state.interactions = state.interactions.map((interaction: Interaction, idx: number) => {
if (idx === index) {
return updInteraction;
}
return interaction;
});
},
},
});
// actions to update the state
export const { showStartingMessage, indicateCheckbox, addInteraction, updateInteraction } = stateSlice.actions;

@ -1,51 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/QueryAssistantButton.test.tsx
import { fireEvent, render, screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { QueryAssistantButton } from './QueryAssistantButton';
const setShowDrawer = jest.fn(() => {});
describe('QueryAssistantButton', () => {
it('renders the button', async () => {
const props = createProps(true, 'metric', setShowDrawer);
render(<QueryAssistantButton {...props} />);
expect(screen.getByText('Get query suggestions')).toBeInTheDocument();
});
it('shows the LLM app disabled message when LLM app is not set up with vector DB', async () => {
const props = createProps(false, 'metric', setShowDrawer);
render(<QueryAssistantButton {...props} />);
const button = screen.getByText('Get query suggestions');
await userEvent.hover(button);
await waitFor(() => {
expect(screen.getByText('Install and enable the LLM plugin')).toBeInTheDocument();
});
});
it('shows the message to select a metric when LLM is enabled and no metric is selected', async () => {
const props = createProps(true, '', setShowDrawer);
render(<QueryAssistantButton {...props} />);
const button = screen.getByText('Get query suggestions');
await userEvent.hover(button);
await waitFor(() => {
expect(screen.getByText('First, select a metric.')).toBeInTheDocument();
});
});
it('calls setShowDrawer when button is clicked', async () => {
const props = createProps(true, 'metric', setShowDrawer);
render(<QueryAssistantButton {...props} />);
const button = screen.getByText('Get query suggestions');
fireEvent.click(button);
expect(setShowDrawer).toHaveBeenCalled();
});
});
function createProps(llmAppEnabled: boolean, metric: string, setShowDrawer: () => void) {
return {
llmAppEnabled,
metric,
setShowDrawer,
};
}

@ -1,86 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/QueryAssistantButton.tsx
import { selectors } from '@grafana/e2e-selectors';
import { reportInteraction } from '@grafana/runtime';
import { Button, Tooltip, useTheme2 } from '@grafana/ui';
import { getStyles } from './PromQail';
import AI_Logo_color from './resources/AI_Logo_color.svg';
// Props for QueryAssistantButton.
export type Props = {
  llmAppEnabled: boolean; // whether the LLM plugin health checks passed
  metric: string; // currently selected metric; empty string when none selected
  setShowDrawer: (show: boolean) => void; // opens/closes the query assistant drawer
};
// Button that opens the Query Advisor drawer. Rendered disabled — wrapped in
// an explanatory tooltip — until both the LLM plugin is enabled and a metric
// has been selected.
export function QueryAssistantButton(props: Props) {
  const { llmAppEnabled, metric, setShowDrawer } = props;

  const llmAppDisabled = !llmAppEnabled;
  const noMetricSelected = !metric;

  const theme = useTheme2();
  const styles = getStyles(theme);

  // The button itself; reports a rudderstack interaction before opening the drawer.
  const button = () => {
    return (
      <Button
        variant={'secondary'}
        onClick={() => {
          reportInteraction('grafana_prometheus_promqail_ai_button_clicked', {
            metric: metric,
          });
          setShowDrawer(true);
        }}
        disabled={!metric || !llmAppEnabled}
        data-testid={selectors.components.DataSource.Prometheus.queryEditor.builder.queryAdvisor}
      >
        <img height={16} src={AI_Logo_color} alt="AI logo black and white" />
        {'\u00A0'}Get query suggestions
      </Button>
    );
  };

  // Tooltip shown when the LLM app is enabled but no metric is chosen yet.
  const selectMetricMessage = (
    <Tooltip content={'First, select a metric.'} placement={'bottom-end'}>
      {button()}
    </Tooltip>
  );

  // Tooltip shown when the LLM plugin is not installed/enabled; links to the
  // setup documentation.
  const llmAppMessage = (
    <Tooltip
      interactive={true}
      placement={'auto-end'}
      content={
        <div className={styles.enableButtonTooltip}>
          <h6>Query Advisor is disabled</h6>
          <div className={styles.enableButtonTooltipText}>To enable Query Advisor you must:</div>
          <div className={styles.enableButtonTooltipText}>
            <ul>
              <li>
                <a
                  href={'https://grafana.com/docs/grafana-cloud/alerting-and-irm/machine-learning/llm-plugin/'}
                  target="_blank"
                  rel="noreferrer noopener"
                  className={styles.link}
                >
                  Install and enable the LLM plugin
                </a>
              </li>
              <li>Select a metric</li>
            </ul>
          </div>
        </div>
      }
    >
      {button()}
    </Tooltip>
  );

  // The LLM-disabled state takes precedence over the missing-metric state.
  if (llmAppDisabled) {
    return llmAppMessage;
  } else if (noMetricSelected) {
    return selectMetricMessage;
  } else {
    return button();
  }
}

@ -1,102 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/QuerySuggestionContainer.tsx
import { cx } from '@emotion/css';
import { useState } from 'react';
import { Button, useTheme2 } from '@grafana/ui';
import { PromVisualQuery } from '../../types';
import { getStyles, queryAssistanttestIds } from './PromQail';
import { QuerySuggestionItem } from './QuerySuggestionItem';
import { QuerySuggestion, SuggestionType } from './types';
// Props for QuerySuggestionContainer.
export type Props = {
  querySuggestions: QuerySuggestion[]; // suggestions produced for the current interaction
  suggestionType: SuggestionType; // Historical (template-based) or AI (prompt-based)
  closeDrawer: () => void; // closes the assistant drawer
  nextInteraction: () => void; // starts a new interaction (e.g. refine the prompt)
  queryExplain: (idx: number) => void; // requests an explanation for suggestion idx
  onChange: (query: PromVisualQuery) => void; // applies a suggestion to the editor
  prompt: string; // the user prompt that produced these suggestions (may be empty)
};
/**
 * Renders the list of query suggestions for one interaction, plus a footer
 * with "refine"/"cancel" actions that is hidden once the user moves on to the
 * next interaction.
 */
export function QuerySuggestionContainer(props: Props) {
  const { suggestionType, querySuggestions, closeDrawer, nextInteraction, queryExplain, onChange, prompt } = props;

  // Once the user starts the next interaction, hide the refine/cancel footer.
  const [hasNextInteraction, updateHasNextInteraction] = useState<boolean>(false);

  const theme = useTheme2();
  const styles = getStyles(theme);

  // Header copy depends on whether the suggestions come from historical
  // templates or from an AI prompt.
  let text, secondaryText, refineText;
  if (suggestionType === SuggestionType.Historical) {
    text = `Here are ${querySuggestions.length} query suggestions:`;
    refineText = 'I want to write a prompt';
  } else if (suggestionType === SuggestionType.AI) {
    // fixed: was a duplicated assignment (`text = text = '...'`)
    text = 'Here is your query suggestion:';
    secondaryText =
      'This query is based off of natural language descriptions of the most commonly used PromQL queries.';
    refineText = 'Refine prompt';
  }

  return (
    <>
      {suggestionType === SuggestionType.Historical ? (
        <div className={styles.bottomMargin}>{text}</div>
      ) : (
        <>
          <div className={styles.textPadding}>{text}</div>
          <div className={cx(styles.secondaryText, styles.bottomMargin)}>{secondaryText}</div>
        </>
      )}
      <div className={styles.infoContainerWrapper}>
        <div className={styles.infoContainer}>
          {querySuggestions.map((qs: QuerySuggestion, idx: number) => {
            return (
              <QuerySuggestionItem
                historical={suggestionType === SuggestionType.Historical}
                querySuggestion={qs}
                key={idx}
                order={idx + 1}
                queryExplain={queryExplain}
                onChange={onChange}
                closeDrawer={closeDrawer}
                last={idx === querySuggestions.length - 1}
                // for feedback rudderstack events
                allSuggestions={querySuggestions.reduce((acc: string, qs: QuerySuggestion) => {
                  return acc + '$$' + qs.query;
                }, '')}
                prompt={prompt ?? ''}
              />
            );
          })}
        </div>
      </div>
      {!hasNextInteraction && (
        <div className={styles.nextInteractionHeight}>
          <div className={cx(styles.afterButtons, styles.textPadding)}>
            <Button
              onClick={() => {
                updateHasNextInteraction(true);
                nextInteraction();
              }}
              data-testid={queryAssistanttestIds.refinePrompt}
              fill="outline"
              variant="secondary"
              size="md"
            >
              {refineText}
            </Button>
          </div>
          <div className={cx(styles.textPadding, styles.floatRight)}>
            <Button fill="outline" variant="secondary" size="md" onClick={closeDrawer}>
              Cancel
            </Button>
          </div>
        </div>
      )}
    </>
  );
}

@ -1,322 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/QuerySuggestionItem.tsx
import { cx } from '@emotion/css';
import { FormEvent, useState } from 'react';
import { SelectableValue } from '@grafana/data';
import { reportInteraction } from '@grafana/runtime';
import { Button, RadioButtonList, Spinner, TextArea, Toggletip, useTheme2 } from '@grafana/ui';
import { buildVisualQueryFromString } from '../../parsing';
import { PromVisualQuery } from '../../types';
import { getStyles } from './PromQail';
import { QuerySuggestion } from './types';
// Props for QuerySuggestionItem.
export type Props = {
  querySuggestion: QuerySuggestion; // the query string plus (lazily loaded) explanation
  order: number; // 1-based position in the suggestion list
  queryExplain: (idx: number) => void; // requests an explanation for suggestion idx
  historical: boolean; // true for template-based suggestions, false for AI
  onChange: (query: PromVisualQuery) => void; // applies the suggestion to the editor
  closeDrawer: () => void;
  last: boolean; // true for the final item; it carries the list-level feedback UI
  prompt: string; // user prompt, forwarded into feedback analytics events
  allSuggestions: string | undefined; // '$$'-joined suggestion queries for analytics
};

// Radio options for the suggestion feedback form.
const suggestionOptions: SelectableValue[] = [
  { label: 'Yes', value: 'yes' },
  { label: 'No', value: 'no' },
];
// Radio options for the explanation feedback form.
// (Identifier keeps the original 'explation' spelling used throughout this file.)
const explationOptions: SelectableValue[] = [
  { label: 'Too vague', value: 'too vague' },
  { label: 'Too technical', value: 'too technical' },
  { label: 'Inaccurate', value: 'inaccurate' },
  { label: 'Other', value: 'other' },
];
// One suggested-query row: the query text, a "Use" action, a collapsible
// LLM explanation, and feedback toggletips for both the explanation and the
// overall suggestion list (the latter only on the last item).
export function QuerySuggestionItem(props: Props) {
  const { querySuggestion, order, queryExplain, historical, onChange, closeDrawer, last, allSuggestions, prompt } =
    props;
  // Whether the explanation section is expanded.
  const [showExp, updShowExp] = useState<boolean>(false);

  // Each feedback form is submittable once; afterwards a thank-you is shown.
  const [gaveExplanationFeedback, updateGaveExplanationFeedback] = useState<boolean>(false);
  const [gaveSuggestionFeedback, updateGaveSuggestionFeedback] = useState<boolean>(false);

  const [suggestionFeedback, setSuggestionFeedback] = useState({
    radioInput: '',
    text: '',
  });

  const [explanationFeedback, setExplanationFeedback] = useState({
    radioInput: '',
    text: '',
  });

  const theme = useTheme2();
  const styles = getStyles(theme);

  const { query, explanation } = querySuggestion;

  // Shared feedback form rendered inside both toggletips; `type` selects
  // which piece of feedback state ('explanation' vs suggestion) it edits.
  const feedbackToggleTip = (type: string) => {
    const updateRadioFeedback = (value: string) => {
      if (type === 'explanation') {
        setExplanationFeedback({
          ...explanationFeedback,
          radioInput: value,
        });
      } else {
        setSuggestionFeedback({
          ...suggestionFeedback,
          radioInput: value,
        });
      }
    };

    const updateTextFeedback = (e: FormEvent<HTMLTextAreaElement>) => {
      if (type === 'explanation') {
        setExplanationFeedback({
          ...explanationFeedback,
          text: e.currentTarget.value,
        });
      } else {
        setSuggestionFeedback({
          ...suggestionFeedback,
          text: e.currentTarget.value,
        });
      }
    };

    // Submit stays disabled until the required radio option is chosen.
    const disabledButton = () =>
      type === 'explanation' ? !explanationFeedback.radioInput : !suggestionFeedback.radioInput;

    const questionOne =
      type === 'explanation' ? 'Why was the explanation not helpful?' : 'Were the query suggestions helpful?';

    return (
      <div className={styles.suggestionFeedback}>
        <div>
          <div className={styles.feedbackQuestion}>
            <h6>{questionOne}</h6>
            <i>(Required)</i>
          </div>
          <RadioButtonList
            name="default"
            options={type === 'explanation' ? explationOptions : suggestionOptions}
            value={type === 'explanation' ? explanationFeedback.radioInput : suggestionFeedback.radioInput}
            onChange={updateRadioFeedback}
          />
        </div>
        <div className={cx(type === 'explanation' && styles.explationTextInput)}>
          {type !== 'explanation' && (
            <div className={styles.feedbackQuestion}>
              <h6>How can we improve the query suggestions?</h6>
            </div>
          )}
          <TextArea
            type="text"
            aria-label="Promqail suggestion text"
            placeholder="Enter your feedback"
            value={type === 'explanation' ? explanationFeedback.text : suggestionFeedback.text}
            onChange={updateTextFeedback}
            cols={100}
          />
        </div>
        <div className={styles.submitFeedback}>
          <Button
            variant="primary"
            size="sm"
            disabled={disabledButton()}
            onClick={() => {
              // submit the rudderstack event
              if (type === 'explanation') {
                explanationFeedbackEvent(
                  explanationFeedback.radioInput,
                  explanationFeedback.text,
                  querySuggestion,
                  historical,
                  prompt
                );
                updateGaveExplanationFeedback(true);
              } else {
                suggestionFeedbackEvent(
                  suggestionFeedback.radioInput,
                  suggestionFeedback.text,
                  allSuggestions ?? '',
                  historical,
                  prompt
                );
                updateGaveSuggestionFeedback(true);
              }
            }}
          >
            Submit
          </Button>
        </div>
      </div>
    );
  };

  return (
    <>
      <div className={styles.querySuggestion}>
        <div title={query} className={cx(styles.codeText, styles.longCode)}>
          {`${order}. ${query}`}
        </div>
        <div className={styles.useButton}>
          <Button
            variant="primary"
            size="sm"
            onClick={() => {
              reportInteraction('grafana_prometheus_promqail_use_query_button_clicked', {
                query: querySuggestion.query,
              });
              const pvq = buildVisualQueryFromString(querySuggestion.query);
              // check for errors!
              onChange(pvq.query);
              closeDrawer();
            }}
          >
            Use
          </Button>
        </div>
      </div>
      <div>
        <Button
          fill="text"
          variant="secondary"
          icon={showExp ? 'angle-up' : 'angle-down'}
          onClick={() => {
            updShowExp(!showExp);
            // explanation is fetched lazily on first expand (0-based index)
            queryExplain(order - 1);
          }}
          className={cx(styles.bodySmall)}
          size="sm"
        >
          Explainer
        </Button>
        {!showExp && order !== 5 && <div className={styles.textPadding}></div>}
        {/* Spinner while the streamed explanation has not arrived yet. */}
        {showExp && !querySuggestion.explanation && (
          <div className={styles.center}>
            <Spinner />
          </div>
        )}
        {showExp && querySuggestion.explanation && (
          <>
            <div className={cx(styles.bodySmall, styles.explainPadding)}>
              <div className={styles.textPadding}>This query is trying to answer the question:</div>
              <div className={styles.textPadding}>{explanation}</div>
              <div className={styles.textPadding}>
                Learn more with this{' '}
                <a
                  className={styles.doc}
                  href={'https://prometheus.io/docs/prometheus/latest/querying/examples/#query-examples'}
                  target="_blank"
                  rel="noopener noreferrer"
                >
                  Prometheus doc
                </a>
              </div>
              <div className={cx(styles.rightButtons, styles.secondaryText)}>
                Was this explanation helpful?
                <div className={styles.floatRight}>
                  {!gaveExplanationFeedback ? (
                    <>
                      <Button
                        fill="outline"
                        variant="secondary"
                        size="sm"
                        className={styles.leftButton}
                        onClick={() => {
                          explanationFeedbackEvent('Yes', '', querySuggestion, historical, prompt);
                          updateGaveExplanationFeedback(true);
                        }}
                      >
                        Yes
                      </Button>
                      <Toggletip
                        aria-label="Suggestion feedback"
                        content={feedbackToggleTip('explanation')}
                        placement="bottom-end"
                        closeButton={true}
                      >
                        <Button fill="outline" variant="secondary" size="sm">
                          No
                        </Button>
                      </Toggletip>
                    </>
                  ) : (
                    'Thank you for your feedback!'
                  )}
                </div>
              </div>
            </div>
            {!last && <hr />}
          </>
        )}
        {last && (
          <div className={cx(styles.feedbackStyle)}>
            {!gaveSuggestionFeedback ? (
              <Toggletip
                aria-label="Suggestion feedback"
                content={feedbackToggleTip('suggestion')}
                placement="bottom-end"
                closeButton={true}
              >
                <Button fill="outline" variant="secondary" size="sm">
                  Give feedback on suggestions
                </Button>
              </Toggletip>
            ) : (
              // do this weird thing because the toggle tip doesn't allow an extra close function
              <Button fill="outline" variant="secondary" size="sm" disabled={true}>
                Thank you for your feedback!
              </Button>
            )}
          </div>
        )}
      </div>
    </>
  );
}
/**
 * Reports user feedback on a query explanation to analytics (rudderstack).
 *
 * @param radioInputFeedback selected radio option (e.g. 'Yes', 'too vague')
 * @param textFeedback free-form feedback text, may be empty
 * @param querySuggestion the suggestion whose explanation was rated
 * @param historical true for template-based suggestions, false for AI
 * @param prompt the user prompt that produced the suggestion
 */
function explanationFeedbackEvent(
  radioInputFeedback: string,
  textFeedback: string,
  querySuggestion: QuerySuggestion,
  historical: boolean,
  prompt: string
) {
  const payload = {
    helpful: radioInputFeedback,
    textFeedback: textFeedback,
    suggestionType: historical ? 'historical' : 'AI',
    query: querySuggestion.query,
    explanation: querySuggestion.explanation,
    prompt: prompt,
  };
  reportInteraction('grafana_prometheus_promqail_explanation_feedback', payload);
}
/**
 * Reports user feedback on the whole suggestion list to analytics (rudderstack).
 *
 * @param radioInputFeedback selected radio option ('yes'/'no')
 * @param textFeedback free-form feedback text, may be empty
 * @param allSuggestions '$$'-joined suggestion queries
 * @param historical true for template-based suggestions, false for AI
 * @param prompt the user prompt that produced the suggestions
 */
function suggestionFeedbackEvent(
  radioInputFeedback: string,
  textFeedback: string,
  allSuggestions: string,
  historical: boolean,
  prompt: string
) {
  const payload = {
    helpful: radioInputFeedback,
    textFeedback: textFeedback,
    suggestionType: historical ? 'historical' : 'AI',
    allSuggestions: allSuggestions,
    prompt: prompt,
  };
  reportInteraction('grafana_prometheus_promqail_suggestion_feedback', payload);
}

@ -1,115 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/prompts.ts
// System prompt for the "explain this query" LLM call: the model receives
// PromQL documentation, metric metadata and a query, and must answer with a
// single concise sentence.
export const ExplainSystemPrompt = `You are an expert in Prometheus, the event monitoring and alerting application.
You are given relevant PromQL documentation, a type and description for a Prometheus metric, and a PromQL query on that metric. Using the provided information for reference, please explain what the output of a given query is in 1 sentences. Do not walk through what the functions do separately, make your answer concise.
Input will be in the form:
PromQL Documentation:
<PromQL documentation>
PromQL Metrics Metadata:
<metric_name>(<metric type of the metric queried>): <description of what the metric means>
PromQL Expression:
<PromQL query>
Examples of input and output
----------
PromQL Documentation:
A counter is a cumulative metric that represents a single monotonically increasing counter whose value can only increase or be reset to zero on restart. For example, you can use a counter to represent the number of requests served, tasks completed, or errors.
topk (largest k elements by sample value)
sum (calculate sum over dimensions)
rate(v range-vector) calculates the per-second average rate of increase of the time series in the range vector. Breaks in monotonicity (such as counter resets due to target restarts) are automatically adjusted for.
PromQL Metrics Metadata:
traces_exporter_sent_spans(counter): Number of spans successfully sent to destination.
PromQL Expression:
topk(3, sum by(cluster) (rate(traces_exporter_sent_spans{exporter="otlp"}[5m])))
This query is trying to answer the question:
What is the top 3 clusters that have successfully sent the most number of spans to the destination?
`;

// Inputs for rendering the user prompt of the explain call.
export type ExplainUserPromptParams = {
  documentation: string; // concatenated docs for the operations used in the query
  metricName: string;
  metricType: string;
  metricMetadata: string; // metric help text; may be empty
  query: string; // the PromQL expression to explain
};
/**
 * Renders the user-role message for the explain call, substituting sentinel
 * text when documentation or metadata is unavailable.
 */
export function GetExplainUserPrompt({
  documentation,
  metricName,
  metricType,
  metricMetadata,
  query,
}: ExplainUserPromptParams): string {
  const docs = documentation === '' ? 'No documentation provided.' : documentation;
  const metadata = metricMetadata === '' ? 'No description provided.' : metricMetadata;
  return `
PromQL Documentation:
${docs}
PromQL Metrics Metadata:
${metricName}(${metricType}): ${metadata}
PromQL Expression:
${query}
This query is trying to answer the question:
`;
}
// System prompt for the "suggest a query" LLM call; constrains the model to
// reply with a single valid PromQL query and nothing else.
export const SuggestSystemPrompt = `You are a Prometheus Query Language (PromQL) expert assistant inside Grafana.
When the user asks a question, respond with a valid PromQL query and only the query.
To help you answer the question, you will receive:
- List of potentially relevant PromQL templates with descriptions, ranked by semantic search score
- Prometheus metric
- Metric type
- Available Prometheus metric labels
- User question
Policy:
- Do not invent labels names, you can only use the available labels
- For rate queries, use the $__rate_interval variable`;

// Inputs for rendering the user prompt of the suggest call.
export type SuggestUserPromptParams = {
  promql: string; // the selected metric name
  question: string; // the user's natural-language prompt
  metricType: string;
  labels: string; // available label names, comma separated
  templates: string; // newline-separated vector-search template results
};
/**
 * Renders the user-role message for the suggest call. Template results get a
 * one-space hanging indent; a sentinel is used when none were found. The
 * trailing fenced-code opener nudges the model to emit raw PromQL.
 */
export function GetSuggestUserPrompt({
  promql,
  question,
  metricType,
  labels,
  templates,
}: SuggestUserPromptParams): string {
  const templateText = templates === '' ? 'No templates provided.' : templates.replace(/\n/g, '\n ');
  return `Relevant PromQL templates:
${templateText}
Prometheus metric: ${promql}
Metric type: ${metricType}
Available Prometheus metric labels: ${labels}
User question: ${question}
\`\`\`promql`;
}

@ -1,4 +0,0 @@
<svg width="17" height="18" viewBox="0 0 17 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6.03027 12.6328C4.88965 12.6328 4.00293 11.9844 4.00293 10.8047C4.00293 9.44141 5.16699 9.14453 6.30371 9.01172C7.38184 8.88672 7.81934 8.89453 7.81934 8.46875V8.44141C7.81934 7.76172 7.43262 7.36719 6.67871 7.36719C5.89355 7.36719 5.45605 7.77734 5.28418 8.20312L4.18652 7.95312C4.57715 6.85937 5.57715 6.42188 6.66309 6.42188C7.61621 6.42188 8.99121 6.76953 8.99121 8.51563V12.5H7.85059V11.6797H7.80371C7.58105 12.1289 7.02246 12.6328 6.03027 12.6328ZM6.28418 11.6953C7.25684 11.6953 7.82324 11.0469 7.82324 10.3359V9.5625C7.65527 9.73047 6.75684 9.83203 6.37793 9.88281C5.70215 9.97266 5.14746 10.1953 5.14746 10.8203C5.14746 11.3984 5.62402 11.6953 6.28418 11.6953ZM10.5469 12.5V6.5H11.7148V12.5H10.5469ZM11.1367 5.57422C10.7305 5.57422 10.3984 5.26172 10.3984 4.87891C10.3984 4.49609 10.7305 4.17969 11.1367 4.17969C11.5391 4.17969 11.875 4.49609 11.875 4.87891C11.875 5.26172 11.5391 5.57422 11.1367 5.57422Z" fill="white"/>
<path d="M5 0.875H12C14.5543 0.875 16.625 2.94568 16.625 5.5V12.5C16.625 15.0543 14.5543 17.125 12 17.125H0.375V5.5C0.375 2.94568 2.44568 0.875 5 0.875Z" stroke="white" stroke-width="0.75"/>
</svg>

Before

Width:  |  Height:  |  Size: 1.2 KiB

@ -1,11 +0,0 @@
<svg width="26" height="27" viewBox="0 0 26 27" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M0 9.5C0 4.52944 4.02944 0.5 9 0.5H17C21.9706 0.5 26 4.52944 26 9.5V15.5C26 20.4706 21.9706 24.5 17 24.5H0V9.5Z" fill="url(#paint0_linear_68_17626)"/>
<path d="M8.91193 18.7053C7.14915 18.7053 5.77876 17.7031 5.77876 15.88C5.77876 13.7731 7.57777 13.3143 9.33452 13.109C11.0007 12.9158 11.6768 12.9279 11.6768 12.2699V12.2276C11.6768 11.1772 11.0792 10.5675 9.91406 10.5675C8.70064 10.5675 8.0245 11.2013 7.75888 11.8594L6.0625 11.473C6.66619 9.78267 8.21165 9.10653 9.88992 9.10653C11.3629 9.10653 13.4879 9.64382 13.4879 12.3423V18.5H11.7251V17.2322H11.6527C11.3086 17.9265 10.4453 18.7053 8.91193 18.7053ZM9.30433 17.2564C10.8075 17.2564 11.6829 16.2543 11.6829 15.1555V13.9602C11.4233 14.2198 10.0348 14.3768 9.44922 14.4553C8.40483 14.5941 7.54759 14.9382 7.54759 15.9041C7.54759 16.7976 8.28409 17.2564 9.30433 17.2564ZM15.8921 18.5V9.22727H17.6972V18.5H15.8921ZM16.8037 7.79652C16.1759 7.79652 15.6627 7.31357 15.6627 6.72195C15.6627 6.13033 16.1759 5.64133 16.8037 5.64133C17.4255 5.64133 17.9447 6.13033 17.9447 6.72195C17.9447 7.31357 17.4255 7.79652 16.8037 7.79652Z" fill="white"/>
<path d="M0 24.5H3L0 26.5V24.5Z" fill="#5B5CC2"/>
<defs>
<linearGradient id="paint0_linear_68_17626" x1="4.76666" y1="-5.1" x2="24.472" y2="5.4613" gradientUnits="userSpaceOnUse">
<stop offset="0.0333246" stop-color="#965AFB"/>
<stop offset="1" stop-color="#096174"/>
</linearGradient>
</defs>
</svg>

Before

Width:  |  Height:  |  Size: 1.5 KiB

@ -1,73 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/state/helpers.test.ts
import { openai, vector } from '@grafana/llm';
import { guessMetricType, isLLMPluginEnabled } from './helpers';
// Mock the grafana llm module so both health checks can be scripted per test.
jest.mock('@grafana/llm', () => ({
  openai: {
    health: jest.fn(),
  },
  vector: {
    health: jest.fn(),
  },
}));

// isLLMPluginEnabled requires BOTH the openai and the vector health checks
// to report ok.
describe('isLLMPluginEnabled', () => {
  it('should return true if LLM plugin is enabled', async () => {
    jest.mocked(openai.health).mockResolvedValue({ ok: true, configured: true });
    jest.mocked(vector.health).mockResolvedValue({ ok: true, enabled: true });

    const enabled = await isLLMPluginEnabled();

    expect(enabled).toBe(true);
  });

  it('should return false if LLM plugin is not enabled', async () => {
    jest.mocked(openai.health).mockResolvedValue({ ok: false, configured: false });
    jest.mocked(vector.health).mockResolvedValue({ ok: false, enabled: false });

    const enabled = await isLLMPluginEnabled();

    expect(enabled).toBe(false);
  });

  // Only `ok` matters for the check; configured/enabled flags alone are not enough.
  it('should return false if LLM plugin is enabled but health check fails', async () => {
    jest.mocked(openai.health).mockResolvedValue({ ok: false, configured: true });
    jest.mocked(vector.health).mockResolvedValue({ ok: false, enabled: true });

    const enabled = await isLLMPluginEnabled();

    expect(enabled).toBe(false);
  });
});
// Fixture: metric names paired with the type guessMetricType is expected to infer.
const metricListWithType = [
  // below is summary metric family
  ['go_gc_duration_seconds', 'summary'],
  ['go_gc_duration_seconds_count', 'summary'],
  ['go_gc_duration_seconds_sum', 'summary'],
  // below is histogram metric family
  ['go_gc_heap_allocs_by_size_bytes_total_bucket', 'histogram'],
  ['go_gc_heap_allocs_by_size_bytes_total_count', 'histogram'],
  ['go_gc_heap_allocs_by_size_bytes_total_sum', 'histogram'],
  // below are counters
  ['go_gc_heap_allocs_bytes_total', 'counter'],
  ['scrape_samples_post_metric_relabeling', 'counter'],
  // below are gauges
  ['go_gc_heap_goal_bytes', 'gauge'],
  ['nounderscorename', 'gauge'],
  // below is both a histogram & summary
  ['alertmanager_http_response_size_bytes', 'histogram,summary'],
  ['alertmanager_http_response_size_bytes_bucket', 'histogram,summary'],
  ['alertmanager_http_response_size_bytes_count', 'histogram,summary'],
  ['alertmanager_http_response_size_bytes_sum', 'histogram,summary'],
];

// The metric universe passed as guessMetricType's second argument (names only).
const metricList = metricListWithType.map((item) => item[0]);

describe('guessMetricType', () => {
  it.each(metricListWithType)("where input is '%s'", (metric: string, metricType: string) => {
    expect(guessMetricType(metric, metricList)).toBe(metricType);
  });
});

@ -1,415 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/state/helpers.ts
import { AnyAction } from 'redux';
import { openai, vector } from '@grafana/llm';
import { reportInteraction } from '@grafana/runtime';
import { PrometheusDatasource } from '../../../../datasource';
import { getMetadataHelp, getMetadataType } from '../../../../language_provider';
import { promQueryModeller } from '../../../PromQueryModeller';
import { buildVisualQueryFromString } from '../../../parsing';
import { PromVisualQuery } from '../../../types';
import { updateInteraction } from '../PromQail';
import {
ExplainSystemPrompt,
GetExplainUserPrompt,
SuggestSystemPrompt,
GetSuggestUserPrompt,
SuggestUserPromptParams,
} from '../prompts';
import { Interaction, QuerySuggestion, SuggestionType } from '../types';
import { createInteraction } from './state';
import { getTemplateSuggestions } from './templates';
// OpenAI model used for both the explain and suggest chat completions.
const OPENAI_MODEL_NAME = 'gpt-3.5-turbo-1106';
// Vector DB collection holding the curated PromQL templates with descriptions.
const promQLTemplatesCollection = 'grafana.promql.templates';

// Shape of one template document returned by the vector search.
interface TemplateSearchResult {
  description: string | null;
  metric_type: string | null;
  promql: string | null;
}
// Builds the system+user chat messages for the "explain this query" call:
// looks up the metric's type and help text, and assembles documentation for
// every operation present in the parsed query.
export function getExplainMessage(query: string, metric: string, datasource: PrometheusDatasource): openai.Message[] {
  let metricMetadata = '';
  let metricType = '';

  const pvq = buildVisualQueryFromString(query);

  if (datasource.languageProvider.metricsMetadata) {
    metricType = getMetadataType(metric, datasource.languageProvider.metricsMetadata) ?? '';
    metricMetadata = getMetadataHelp(metric, datasource.languageProvider.metricsMetadata) ?? '';
  }

  // One "### <rendered op>:\n<docs>" section per operation; operations with no
  // definition or no documentation are dropped.
  const documentationBody = pvq.query.operations
    .map((op) => {
      const def = promQueryModeller.getOperationDef(op.id);
      if (!def) {
        return '';
      }
      const title = def.renderer(op, def, '<expr>');
      const body = def.explainHandler ? def.explainHandler(op, def) : def.documentation;

      if (!body) {
        return '';
      }
      return `### ${title}:\n${body}`;
    })
    .filter((item) => item !== '')
    .join('\n');

  return [
    { role: 'system', content: ExplainSystemPrompt },
    {
      role: 'user',
      content: GetExplainUserPrompt({
        documentation: documentationBody,
        metricName: metric,
        metricType: metricType,
        metricMetadata: metricMetadata,
        query: query,
      }),
    },
  ];
}
/**
 * Builds the system+user chat message pair for a query-suggestion request.
 */
function getSuggestMessages(params: SuggestUserPromptParams): openai.Message[] {
  const systemMessage: openai.Message = { role: 'system', content: SuggestSystemPrompt };
  const userMessage: openai.Message = { role: 'user', content: GetSuggestUserPrompt(params) };
  return [systemMessage, userMessage];
}
/**
 * Streams an LLM explanation for one suggestion and writes it into the
 * interaction's suggestion list as the streamed content accumulates.
 *
 * @param dispatch reducer dispatch for the PromQail state
 * @param idx index of the interaction to update
 * @param query current visual query (supplies the metric name)
 * @param interaction interaction containing the suggestion to explain
 * @param suggIdx index of the suggestion within the interaction
 * @param datasource used to look up metric metadata for the prompt
 * @returns the subscription driving the streamed completion
 */
export async function promQailExplain(
  dispatch: React.Dispatch<AnyAction>,
  idx: number,
  query: PromVisualQuery,
  interaction: Interaction,
  suggIdx: number,
  datasource: PrometheusDatasource
) {
  const suggestedQuery = interaction.suggestions[suggIdx].query;

  const promptMessages = getExplainMessage(suggestedQuery, query.metric, datasource);
  const interactionToUpdate = interaction;

  return openai
    .streamChatCompletions({
      model: OPENAI_MODEL_NAME,
      messages: promptMessages,
      temperature: 0,
    })
    .pipe(openai.accumulateContent())
    .subscribe((response) => {
      // Replace only the suggestion being explained; `response` is the
      // accumulated explanation text so far.
      const updatedSuggestions = interactionToUpdate.suggestions.map((sg: QuerySuggestion, sidx: number) => {
        if (suggIdx === sidx) {
          return {
            query: interactionToUpdate.suggestions[suggIdx].query,
            explanation: response,
          };
        }
        return sg;
      });

      const payload = {
        idx,
        interaction: {
          ...interactionToUpdate,
          suggestions: updatedSuggestions,
          explanationIsLoading: false,
        },
      };
      dispatch(updateInteraction(payload));
    });
}
/**
 * Check if sublist is fully contained in the superlist.
 *
 * @param sublist candidate subset
 * @param superlist list to test membership against
 * @returns true when every element of sublist appears in superlist
 */
function isContainedIn(sublist: string[], superlist: string[]): boolean {
  return sublist.every((item) => superlist.includes(item));
}
/**
 * Guess the type of a metric, based on its name and its relation to other
 * metrics available.
 *
 * @param metric - name of metric whose type to guess
 * @param allMetrics - list of all available metrics
 * @returns the guess: 'counter' | 'gauge' | 'summary' | 'histogram' | 'histogram,summary'
 */
export function guessMetricType(metric: string, allMetrics: string[]): string {
  // A family guess only holds when every member metric actually exists.
  const hasAll = (family: string[]) => family.every((name) => allMetrics.includes(name));

  // Synthetic series generated by Prometheus itself; all known to be counters.
  const syntheticCounters = new Set<string>([
    'up',
    'scrape_duration_seconds',
    'scrape_samples_post_metric_relabeling',
    'scrape_series_added',
    'scrape_samples_scraped',
    'ALERTS',
    'ALERTS_FOR_STATE',
  ]);
  if (syntheticCounters.has(metric)) {
    return 'counter';
  }

  if (metric.startsWith(':')) {
    // probably a recording rule
    return 'gauge';
  }
  if (metric.endsWith('_info')) {
    // typically series of 1s only, the labels are the useful part. TODO: add 'info' type
    return 'counter';
  }
  if (metric.endsWith('_created') || metric.endsWith('_total')) {
    // prometheus naming style recommends counters to have these suffixes.
    return 'counter';
  }

  const underscoreIndex = metric.lastIndexOf('_');
  if (underscoreIndex < 0) {
    // No underscores in the name at all, very little info to go on. Guess.
    return 'gauge';
  }

  // See if the suffix is histogram-y or summary-y.
  const root = metric.slice(0, underscoreIndex);
  const suffix = metric.slice(underscoreIndex + 1);
  if (suffix === 'bucket' || suffix === 'count' || suffix === 'sum') {
    // Histogram AND summary: all three suffixed series plus the bare family name.
    if (hasAll([`${root}_bucket`, `${root}_count`, `${root}_sum`, root])) {
      return 'histogram,summary';
    }
    // Histogram: bucket/count/sum but no bare family series.
    if (hasAll([`${root}_bucket`, `${root}_count`, `${root}_sum`])) {
      return 'histogram';
    }
    // Summary: sum/count plus the bare family series.
    if (hasAll([`${root}_sum`, `${root}_count`, root])) {
      return 'summary';
    }
    // Otherwise it's probably just a counter!
    return 'counter';
  }

  // One case above doesn't catch: summary or histogram,summary where the
  // non-suffixed metric itself was chosen.
  if (hasAll([`${metric}_sum`, `${metric}_count`, metric])) {
    return allMetrics.includes(`${metric}_bucket`) ? 'histogram,summary' : 'summary';
  }

  // All else fails, guess gauge.
  return 'gauge';
}
/**
 * Generate a suitable filter structure for the VectorDB call.
 *
 * @param types list of metric types to include in the result
 * @returns one `$eq` clause per type, suitable for a `$or` filter
 */
function generateMetricTypeFilters(types: string[]) {
  const filters: Array<{ metric_type: { $eq: string } }> = [];
  for (const metricType of types) {
    filters.push({ metric_type: { $eq: metricType } });
  }
  return filters;
}
/**
 * Taking in a metric name, try to guess its corresponding metric _family_ name
 * by stripping a histogram/summary suffix if present.
 *
 * @param metric metric name
 * @returns metric family name (unchanged when no known suffix matches)
 */
function guessMetricFamily(metric: string): string {
  for (const suffix of ['_bucket', '_count', '_sum']) {
    if (metric.endsWith(suffix)) {
      return metric.slice(0, metric.length - suffix.length);
    }
  }
  return metric;
}
/**
* Check if the LLM plugin is enabled.
* Used in the PromQueryBuilder to enable/disable the button based on openai and vector db checks
* @returns true if the LLM plugin is enabled.
*/
export async function isLLMPluginEnabled(): Promise<boolean> {
// Check if the LLM plugin is enabled.
// If not, we won't be able to make requests, so return early.
const openaiEnabled = openai.health().then((response) => response.ok);
const vectorEnabled = vector.health().then((response) => response.ok);
// combine 2 promises
return Promise.all([openaiEnabled, vectorEnabled]).then((results) => {
return results.every((result) => result);
});
}
/**
* Calls the API and adds suggestions to the interaction
*
* @param dispatch
* @param idx
* @param interaction
* @returns
*/
export async function promQailSuggest(
dispatch: React.Dispatch<AnyAction>,
idx: number,
query: PromVisualQuery,
labelNames: string[],
datasource: PrometheusDatasource,
interaction?: Interaction
) {
const interactionToUpdate = interaction ? interaction : createInteraction(SuggestionType.Historical);
// Decide metric type
let metricType = '';
// Makes sure we loaded the metadata for metrics. Usually this is done in the start() method of the
// provider but we only need the metadata here.
if (!datasource.languageProvider.metricsMetadata) {
await datasource.languageProvider.loadMetricsMetadata();
}
if (datasource.languageProvider.metricsMetadata) {
// `datasource.languageProvider.metricsMetadata` is a list of metric family names (with desired type)
// from the datasource metadata endoint, but unfortunately the expanded _sum, _count, _bucket raw
// metric names are also generated and populating this list (all of type counter). We want the metric
// family type, so need to guess the metric family name from the chosen metric name, and test if that
// metric family has a type specified.
const metricFamilyGuess = guessMetricFamily(query.metric);
metricType = getMetadataType(metricFamilyGuess, datasource.languageProvider.metricsMetadata) ?? '';
}
if (metricType === '') {
// fallback to heuristic guess
metricType = guessMetricType(query.metric, datasource.languageProvider.metrics);
}
if (interactionToUpdate.suggestionType === SuggestionType.Historical) {
return new Promise<void>((resolve) => {
return setTimeout(() => {
const suggestions = getTemplateSuggestions(
query.metric,
metricType,
promQueryModeller.renderLabels(query.labels)
);
const payload = {
idx,
interaction: { ...interactionToUpdate, suggestions: suggestions, isLoading: false },
};
dispatch(updateInteraction(payload));
resolve();
}, 1000);
});
} else {
type SuggestionBody = {
metric: string;
labels: string;
prompt?: string;
};
// get all available labels
const metricLabels = await datasource.languageProvider.fetchLabelsWithMatch(query.metric);
let feedTheAI: SuggestionBody = {
metric: query.metric,
// drop __name__ label because it's not useful
labels: Object.keys(metricLabels)
.filter((label) => label !== '__name__')
.join(','),
};
// @ts-ignore llms types issue
let results: Array<llms.vector.SearchResult<TemplateSearchResult>> = [];
if (interaction?.suggestionType === SuggestionType.AI) {
feedTheAI = { ...feedTheAI, prompt: interaction.prompt };
// @ts-ignore llms types issue
results = await llms.vector.search<TemplateSearchResult>({
query: interaction.prompt,
collection: promQLTemplatesCollection,
topK: 5,
filter: {
$or: generateMetricTypeFilters(metricType.split(',').concat(['*'])),
},
});
reportInteraction('grafana_prometheus_promqail_vector_results', {
metric: query.metric,
prompt: interaction.prompt,
results: results,
});
// TODO: handle errors from vector search
}
const resultsString = results
.map((r) => {
return `${r.payload.promql} | ${r.payload.description} (score=${(r.score * 100).toFixed(1)})`;
})
.join('\n');
const promptMessages = getSuggestMessages({
promql: query.metric,
question: interaction ? interaction.prompt : '',
metricType: metricType,
labels: labelNames.join(', '),
templates: resultsString,
});
return openai
.streamChatCompletions({
model: OPENAI_MODEL_NAME,
messages: promptMessages,
temperature: 0.5,
})
.pipe(openai.accumulateContent())
.subscribe((response) => {
const payload = {
idx,
interaction: {
...interactionToUpdate,
suggestions: [
{
query: response,
explanation: '',
},
],
isLoading: false,
},
};
dispatch(updateInteraction(payload));
});
}
}

@ -1,44 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/state/state.ts
import { PromVisualQuery } from '../../../types';
import { Interaction, SuggestionType } from '../types';
/**
 * Build the initial PromQAIL assistant state.
 *
 * @param query - optional Prometheus visual query (metric plus any labels);
 *   an empty query is used when omitted
 * @param showStartingMessage - whether the intro message is shown (defaults to true)
 * @returns a fresh PromQailState with no recorded interactions
 */
export function initialState(query?: PromVisualQuery, showStartingMessage?: boolean): PromQailState {
  const emptyQuery: PromVisualQuery = { metric: '', labels: [], operations: [] };
  return {
    query: query ?? emptyQuery,
    showExplainer: false,
    showStartingMessage: showStartingMessage ?? true,
    indicateCheckbox: false,
    askForQueryHelp: false,
    interactions: [],
  };
}
/**
 * UI state for the PromQAIL query assistant.
 * Created by {@link initialState}; defaults noted below come from there.
 */
export interface PromQailState {
  // The Prometheus visual query (metric, labels, operations) the assistant works from.
  query: PromVisualQuery;
  // Whether the explainer UI is shown (initialized to false).
  showExplainer: boolean;
  // Whether the introductory/starting message is shown (initialized to true).
  showStartingMessage: boolean;
  // NOTE(review): checkbox-highlight flag — exact semantics live in UI code not visible here.
  indicateCheckbox: boolean;
  // NOTE(review): presumably set when the user asks for query help — confirm against the UI component.
  askForQueryHelp: boolean;
  // Past and in-flight assistant interactions (initialized empty).
  interactions: Interaction[];
}
/**
 * Build a fresh, empty interaction of the given suggestion type.
 *
 * @param suggestionType - how suggestions will be produced (historical templates or AI)
 * @param isLoading - initial loading flag; defaults to false when omitted
 * @returns an Interaction with no prompt and no suggestions yet
 */
export function createInteraction(suggestionType: SuggestionType, isLoading?: boolean): Interaction {
  const interaction: Interaction = {
    suggestionType,
    prompt: '',
    suggestions: [],
    isLoading: isLoading ?? false,
    explanationIsLoading: false,
  };
  return interaction;
}

@ -1,342 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/state/templates.ts
import { QuerySuggestion } from '../types';
// Shape of one suggestion template: a PromQL expression carrying the
// 'metric_a' / '{}' placeholders, plus a human-readable description that
// becomes the suggestion's explanation (see processTemplate).
interface TemplateData {
  // PromQL expression with 'metric_a' (metric name) and '{}' (label matchers) placeholders.
  template: string;
  // Plain-English explanation of what the template computes; may also mention 'metric_a'.
  description: string;
}
// Metric-type-agnostic PromQL templates. In every entry, 'metric_a' stands in
// for the metric name and '{}' for the rendered label matchers; both are
// substituted by processTemplate().
export const generalTemplates: TemplateData[] = [
  {
    template: 'metric_a{}',
    description: 'Get the data for "metric_a"',
  },
  {
    template: 'avg by(c) (metric_a{})',
    description: 'Average of all series in "metric_a" grouped by the label "c"',
  },
  {
    template: 'count by(d) (metric_a{})',
    description: 'Number of series in the metric "metric_a" grouped by the label "d"',
  },
  {
    template: 'sum by(g) (sum_over_time(metric_a{}[1h]))',
    description:
      'For each series in the metric "metric_a", sum all values over 1 hour, then group those series by label "g" and sum.',
  },
  {
    template: 'count(metric_a{})',
    description: 'Count of series in the metric "metric_a"',
  },
  {
    template: '(metric_a{})',
    description: 'Get the data for "metric_a"',
  },
  {
    template: 'count_over_time(metric_a{}[1h])',
    description: 'Number of series of metric_a in a 1 hour interval',
  },
  {
    template: 'changes(metric_a{}[1m])',
    description: 'Number of times the values of each series in metric_a have changed in 1 minute periods',
  },
  {
    template: 'count(count by(g) (metric_a{}))',
    description: 'Total number of series in metric_a',
  },
  {
    template: 'last_over_time(metric_a{}[1h])',
    description: 'For each series in metric_a, get the last value in the 1 hour period.',
  },
  {
    template: 'sum by(g) (count_over_time(metric_a{}[1h]))',
    description: 'Grouped sum over the label "g" of the number of series of metric_a in a 1 hour period',
  },
  {
    template: 'count(metric_a{} == 99)',
    description: 'Number of series of metric_a that have value 99',
  },
  {
    template: 'min(metric_a{})',
    description: 'At each timestamp, find the minimum of all series of the metric "metric_a"',
  },
  {
    template: 'metric_a{} != 99',
    description: 'Series of metric_a which do not have the value 99',
  },
  {
    template: 'metric_a{} - 99',
    description: 'metric_a minus 99',
  },
  {
    template: 'quantile_over_time(0.99,metric_a{}[1h])',
    description: 'The 99th quantile of values of metric_a in 1 hour',
  },
  {
    template: 'count_values("aaaa",metric_a{})',
    description: 'Count number of label values for a label named "aaaa"',
  },
  {
    // Added the '{}' placeholder (was 'metric_a' alone), so label matchers are
    // substituted here like in every other template.
    template: 'quantile by(l) (1,metric_a{})',
    description: 'Quantile of series in the metric "metric_a" grouped by the label "l"',
  },
];
// PromQL templates suited to counter metrics ('metric_a' and '{}' are
// placeholders filled in by processTemplate()).
export const counterTemplates: TemplateData[] = [
  {
    template: 'sum by(d) (rate(metric_a{}[1h]))',
    description:
      'Sum of the rate of increase or decrease of the metric "metric_a" per 1 hour period, grouped by the label "d"',
  },
  {
    template: 'rate(metric_a{}[1m])',
    description: 'Rate of change of the metric "metric_a" over 1 minute',
  },
  {
    template: 'sum by(a) (increase(metric_a{}[5m]))',
    description:
      'Taking the metric "metric_a" find the increase in 5 minute periods of each series and aggregate sum over the label "a"',
  },
  {
    template: 'sum(rate(metric_a{}[1m]))',
    description: 'Total rate of change of all series of metric "metric_a" in 1 minute intervals',
  },
  {
    template: 'sum(increase(metric_a{}[10m]))',
    // sum() aggregates across series, so this is a total over all series, not per series.
    description: 'Total increase of all series of metric "metric_a" in 10 minute intervals',
  },
  {
    template: 'increase(metric_a{}[1h])',
    description: 'Increase in all series of "metric_a" in 1 hour period',
  },
  {
    template: 'sum by(d) (irate(metric_a{}[1h]))',
    description: 'Sum of detailed rate of change of the metric "metric_a" over 1 hour grouped by label "d"',
  },
  {
    template: 'irate(metric_a{}[1h])',
    description: 'Detailed rate of change of the metric "metric_a" over 1 hour',
  },
  {
    template: 'avg by(d) (rate(metric_a{}[1h]))',
    description:
      'Taking the rate of change of the metric "metric_a" in a 1 hour period, group by the label "d" and find the average of each group',
  },
  {
    template: 'topk(5,sum by(g) (rate(metric_a{}[1h])))',
    description: 'Top 5 of the summed groups "g" of the rate of change of metric_a',
  },
  {
    template: 'sum(rate(metric_a{}[1h])) / sum(rate(metric_a{}[1h]))',
    description: 'Relative sums of metric_a with different labels',
  },
  {
    // histogram_quantile takes a quantile in [0, 1], so the 99th percentile is 0.99 (was 99).
    template: 'histogram_quantile(0.99,rate(metric_a{}[1h]))',
    description: '99th percentile of the rate of change of metric_a in 1 hour periods',
  },
  {
    template: 'avg(rate(metric_a{}[1m]))',
    description: 'Average of the rate of all series of metric_a in 1 minute periods',
  },
  {
    template: 'rate(metric_a{}[5m]) > 99',
    description: 'Show series of metric_a only if their rate over 5 minutes is greater than 99',
  },
  {
    template: 'count by(g) (rate(metric_a{}[1h]))',
    description: 'Count of series of metric_a over all labels "g"',
  },
];
// PromQL templates suited to histogram metrics ('metric_a' and '{}' are
// placeholders filled in by processTemplate()). histogram_quantile expects a
// quantile in [0, 1], so the 99th percentile is expressed as 0.99 — the
// previous literal 99 was out of range and would never yield a valid result.
export const histogramTemplates: TemplateData[] = [
  {
    template: 'histogram_quantile(0.99,sum by(le) (rate(metric_a{}[1h])))',
    description:
      'Calculate the rate at which the metric "metric_a" is increasing or decreasing, summed over each bucket label "le", and then calculates the 99th percentile of those rates.',
  },
  {
    template: 'histogram_quantile(0.99,sum by(g) (metric_a{}))',
    description: '99th percentile of the sum of metric_a grouped by label "g"',
  },
  {
    template: 'histogram_quantile(0.99,sum by(g) (irate(metric_a{}[1h])))',
    description: '99th percentile of the grouped by "g" sum of the rate of each series in metric_a in an hour',
  },
  {
    template: 'histogram_quantile(0.99,metric_a{})',
    description: '99th percentile of metric_a',
  },
];
// PromQL templates suited to gauge metrics ('metric_a' and '{}' are
// placeholders filled in by processTemplate()).
export const gaugeTemplates: TemplateData[] = [
  {
    template: 'sum by(c) (metric_a{})',
    description: 'Sum the metric "metric_a" by each value in label "c"',
  },
  {
    template: 'sum(metric_a{})',
    description: 'Total sum of all the series of the metric named "metric_a"',
  },
  {
    template: 'max by(dd) (metric_a{})',
    description: 'Grouping the series the metric "metric_a" by the label "dd", get the maximum value of each group',
  },
  {
    template: 'max(metric_a{})',
    description: 'Maximum value of all series of the metric "metric_a" ',
  },
  {
    template: 'avg(metric_a{})',
    description: 'Average value of all the series of metric "metric_a"',
  },
  {
    template: 'metric_a{} > 99',
    description: 'Show only the series of metric "metric_a" which currently have value greater than 99',
  },
  {
    template: 'metric_a{} / 99',
    description: 'Values for "metric_a" all divided by 99',
  },
  {
    template: 'metric_a{} == 99',
    description: 'Show series of metric_a that have value 99',
  },
  {
    template: 'sum_over_time(metric_a{}[1h])',
    description: 'Sum each series of metric_a over 1 hour',
  },
  {
    template: 'avg_over_time(metric_a{}[1h])',
    description: 'Average of each series of metric_a in a 1 hour period',
  },
  {
    template: 'sum(sum_over_time(metric_a{}[1h]))',
    description: 'Sum of all values in all series in a 1 hour period',
  },
  {
    template: 'delta(metric_a{}[1m])',
    // delta() is the first-to-last difference over the range, not max - min.
    description:
      'Difference between the first and last value of each series of the metric "metric_a" in a 1 minute period.',
  },
  {
    template: 'avg by(g) (avg_over_time(metric_a{}[1h]))',
    description:
      'For 1 hour, take each series and find the average, then group by label "g" and find the average of each group',
  },
  {
    template: 'max_over_time(metric_a{}[1h])',
    description: 'Maximum values of each series in metric "metric_a" in a 1 hour period',
  },
  {
    template: 'metric_a{} * 99',
    description: 'Values of metric_a multiplied by 99',
  },
  {
    template: 'metric_a{} < 99',
    description: 'Series of metric_a that have values less than 99',
  },
  {
    template: 'max by() (max_over_time(metric_a{}[1h]))',
    description: 'Find maximum value of all series in 1 hour periods',
  },
  {
    template: 'topk(99,metric_a{})',
    // Description now matches the template's k of 99 (previously said "First 5 series").
    description: 'Top 99 series of metric_a that have the highest values',
  },
  {
    template: 'min by(g) (metric_a{})',
    description: 'Minimum values of the series of metric_a grouped by label "g"',
  },
  {
    template: 'topk(10,sum by(g) (metric_a{}))',
    description: "Top 10 of the series of metric_a grouped and summed by the label 'g'",
  },
  {
    template: 'avg(avg_over_time(metric_a{}[1h]))',
    description: 'Average of all values inside a 1 hour period',
  },
  {
    template: 'quantile by(h) (0.95,metric_a{})',
    description: 'Calculate 95th percentile of metric_a when aggregated by the label "h"',
  },
  {
    template: 'avg by(g) (metric_a{} > 99)',
    description:
      'Taking all series of metric_a with value greater than 99, group by label "g" and find the average of each group',
  },
  {
    template: 'sum(metric_a{}) / 99',
    description: 'Sum of all series of metric_a divided by 99',
  },
  {
    template: 'count(sum by(g) (metric_a{}))',
    description: 'Number of series of metric_a grouped by the label "g"',
  },
  {
    template: 'max(max_over_time(metric_a{}[1h]))',
    description: 'Find the max value of all series of metric_a in a 1 hour period',
  },
];
/**
 * Instantiate a suggestion template for a concrete metric.
 *
 * Replaces EVERY occurrence of the 'metric_a' placeholder with the metric name
 * and every '{}' placeholder with the rendered label matchers. The previous
 * single-string String.replace only substituted the first occurrence, leaving
 * templates such as 'sum(rate(metric_a{}[1h])) / sum(rate(metric_a{}[1h]))'
 * half-filled.
 *
 * @param templateData - the template expression and its description
 * @param metric - metric name substituted for 'metric_a'
 * @param labels - rendered label matchers (e.g. '{job="x"}') substituted for '{}'
 * @returns the filled-in query plus its explanation
 */
function processTemplate(templateData: TemplateData, metric: string, labels: string): QuerySuggestion {
  return {
    // Global regexes: placeholders can appear more than once per template/description.
    query: templateData.template.replace(/metric_a/g, metric).replace(/\{\}/g, labels),
    explanation: templateData.description.replace(/metric_a/g, metric),
  };
}
/**
 * Build up to five template-based query suggestions for a metric.
 *
 * Known metric types get two random suggestions from their type-specific pool
 * plus three general ones; unknown types get five general suggestions.
 *
 * @param metricName - metric name substituted into the templates
 * @param metricType - Prometheus metric type ('counter' | 'gauge' | 'histogram' | other)
 * @param labels - rendered label matchers substituted for the '{}' placeholder
 * @returns randomly sampled, filled-in query suggestions
 */
export function getTemplateSuggestions(metricName: string, metricType: string, labels: string): QuerySuggestion[] {
  // Draw `count` distinct random templates from `pool` and fill them in.
  // Uses an unbiased Fisher–Yates partial shuffle instead of the biased
  // sort(() => Math.random() - 0.5) idiom the four switch arms duplicated.
  const sample = (
    pool: ReadonlyArray<{ template: string; description: string }>,
    count: number
  ): QuerySuggestion[] => {
    const deck = [...pool];
    for (let i = deck.length - 1; i > 0; i--) {
      const j = Math.floor(Math.random() * (i + 1));
      [deck[i], deck[j]] = [deck[j], deck[i]];
    }
    return deck.slice(0, count).map((t) => processTemplate(t, metricName, labels));
  };

  switch (metricType) {
    case 'counter':
      return [...sample(counterTemplates, 2), ...sample(generalTemplates, 3)];
    case 'gauge':
      return [...sample(gaugeTemplates, 2), ...sample(generalTemplates, 3)];
    case 'histogram':
      return [...sample(histogramTemplates, 2), ...sample(generalTemplates, 3)];
    default:
      return sample(generalTemplates, 5);
  }
}

@ -1,18 +0,0 @@
// Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/querybuilder/components/promQail/types.ts
// A single generated suggestion: the PromQL query text plus a plain-English
// explanation of what it computes.
export type QuerySuggestion = {
  query: string;
  explanation: string;
};
// How suggestions are produced: Historical draws from the built-in template
// pool; AI asks the LLM / vector-search flow.
export enum SuggestionType {
  Historical = 'historical',
  AI = 'AI',
}
// One round of the query-assistant conversation.
export type Interaction = {
  // Natural-language prompt entered by the user (empty string for template-only flows).
  prompt: string;
  suggestionType: SuggestionType;
  // Suggestions produced for this interaction.
  suggestions: QuerySuggestion[];
  // True while suggestions are being fetched or generated.
  isLoading: boolean;
  // NOTE(review): presumably true while a per-suggestion explanation is being
  // generated — set by UI code not visible here.
  explanationIsLoading: boolean;
};

@ -750,13 +750,6 @@ var (
Owner: grafanaOperatorExperienceSquad,
FrontendOnly: false,
},
{
Name: "prometheusPromQAIL",
Description: "Prometheus and AI/ML to assist users in creating a query",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaOSSBigTent,
},
{
Name: "prometheusCodeModeMetricNamesSearch",
Description: "Enables search for metric names in Code Mode, to improve performance when working with an enormous number of metric names",

@ -228,7 +228,6 @@ pluginsInstrumentationStatusSource,2023-10-17T08:27:45Z,2024-02-21T11:57:40Z,f50
teamHttpHeaders,2023-10-17T10:23:54Z,,be5ba6813209b5b24e955e0f761032cb5826b578,Eric Leijonmarck
costManagementUi,2023-10-17T16:15:51Z,2024-01-08T14:25:11Z,de1ed216f4bbf6f341aa22b144fa66d583a63981,Adam Bannach
managedPluginsInstall,2023-10-18T13:17:03Z,,43add83d1a84fe0945860a9cc700df92c7e8341f,Hugo Kiyodi Oshiro
prometheusPromQAIL,2023-10-19T15:45:32Z,,5580d061019bee46ea2e69c94041f3da14585ceb,Brendan O'Handley
cloudWatchBatchQueries,2023-10-20T19:09:41Z,,ecbc52f51529e1f35e26895db1a10f8a1c2f4244,Isabella Siu
alertingContactPointsV2,2023-10-25T13:57:53Z,2023-11-30T12:37:14Z,e12e40fc2493160338237b0b94e72fa530a78ef4,Gilles De Mey
alertmanagerRemoteOnly,2023-10-30T16:27:08Z,,363830883cb1f5de30f7015df5cba419df47468e,Santiago

1 #name created deleted hash author
228 teamHttpHeaders 2023-10-17T10:23:54Z be5ba6813209b5b24e955e0f761032cb5826b578 Eric Leijonmarck
229 costManagementUi 2023-10-17T16:15:51Z 2024-01-08T14:25:11Z de1ed216f4bbf6f341aa22b144fa66d583a63981 Adam Bannach
230 managedPluginsInstall 2023-10-18T13:17:03Z 43add83d1a84fe0945860a9cc700df92c7e8341f Hugo Kiyodi Oshiro
prometheusPromQAIL 2023-10-19T15:45:32Z 5580d061019bee46ea2e69c94041f3da14585ceb Brendan O'Handley
231 cloudWatchBatchQueries 2023-10-20T19:09:41Z ecbc52f51529e1f35e26895db1a10f8a1c2f4244 Isabella Siu
232 alertingContactPointsV2 2023-10-25T13:57:53Z 2023-11-30T12:37:14Z e12e40fc2493160338237b0b94e72fa530a78ef4 Gilles De Mey
233 alertmanagerRemoteOnly 2023-10-30T16:27:08Z 363830883cb1f5de30f7015df5cba419df47468e Santiago

@ -99,7 +99,6 @@ cloudWatchBatchQueries,preview,@grafana/aws-datasources,false,false,false
recoveryThreshold,GA,@grafana/alerting-squad,false,true,false
lokiStructuredMetadata,GA,@grafana/observability-logs,false,false,false
cachingOptimizeSerializationMemoryUsage,experimental,@grafana/grafana-operator-experience-squad,false,false,false
prometheusPromQAIL,experimental,@grafana/oss-big-tent,false,false,true
prometheusCodeModeMetricNamesSearch,experimental,@grafana/oss-big-tent,false,false,true
addFieldFromCalculationStatFunctions,GA,@grafana/dataviz-squad,false,false,true
alertmanagerRemoteSecondary,experimental,@grafana/alerting-squad,false,false,false

1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
99 recoveryThreshold GA @grafana/alerting-squad false true false
100 lokiStructuredMetadata GA @grafana/observability-logs false false false
101 cachingOptimizeSerializationMemoryUsage experimental @grafana/grafana-operator-experience-squad false false false
prometheusPromQAIL experimental @grafana/oss-big-tent false false true
102 prometheusCodeModeMetricNamesSearch experimental @grafana/oss-big-tent false false true
103 addFieldFromCalculationStatFunctions GA @grafana/dataviz-squad false false true
104 alertmanagerRemoteSecondary experimental @grafana/alerting-squad false false false

@ -407,10 +407,6 @@ const (
// If enabled, the caching backend gradually serializes query responses for the cache, comparing against the configured `[caching]max_value_mb` value as it goes. This can can help prevent Grafana from running out of memory while attempting to cache very large query responses.
FlagCachingOptimizeSerializationMemoryUsage = "cachingOptimizeSerializationMemoryUsage"
// FlagPrometheusPromQAIL
// Prometheus and AI/ML to assist users in creating a query
FlagPrometheusPromQAIL = "prometheusPromQAIL"
// FlagPrometheusCodeModeMetricNamesSearch
// Enables search for metric names in Code Mode, to improve performance when working with an enormous number of metric names
FlagPrometheusCodeModeMetricNamesSearch = "prometheusCodeModeMetricNamesSearch"

@ -3310,22 +3310,6 @@
"expression": "true"
}
},
{
"metadata": {
"name": "prometheusPromQAIL",
"resourceVersion": "1735845919509",
"creationTimestamp": "2023-10-19T15:45:32Z",
"annotations": {
"grafana.app/updatedTimestamp": "2025-01-02 19:25:19.509884 +0000 UTC"
}
},
"spec": {
"description": "Prometheus and AI/ML to assist users in creating a query",
"stage": "experimental",
"codeowner": "@grafana/oss-big-tent",
"frontend": true
}
},
{
"metadata": {
"name": "prometheusRunQueriesInParallel",

@ -3614,7 +3614,6 @@ __metadata:
"@floating-ui/react": "npm:0.27.3"
"@grafana/data": "npm:11.6.0-pre"
"@grafana/e2e-selectors": "npm:11.6.0-pre"
"@grafana/llm": "npm:0.12.0"
"@grafana/plugin-ui": "npm:0.10.1"
"@grafana/runtime": "npm:11.6.0-pre"
"@grafana/schema": "npm:11.6.0-pre"

Loading…
Cancel
Save