Dashboards: update `@grafana/llm` to v0.13.2 and update usage (#101814)

This version of the package deprecates the `openai` object in
favour of the vendor-agnostic `llm` object, so this PR also
updates the usage of the package to use the new object and
take advantage of the vendor-agnostic APIs.
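
For reference, the before/after shape of the API change, as a minimal sketch that uses only the calls exercised in this diff (`llm.health`, `llm.Model`, `llm.streamChatCompletions`, `llm.accumulateContent`); the `summarizeText` helper and its prompt are illustrative, not part of this PR:

```ts
import { llm } from '@grafana/llm';
import { lastValueFrom } from 'rxjs';

// Previously: `import { openai } from '@grafana/llm'` plus a hard-coded OpenAI
// model name such as 'gpt-4'. The vendor-agnostic `llm` object replaces both.
export async function summarizeText(text: string): Promise<string | undefined> {
  // Check that the LLM app is installed and configured before making requests.
  const health = await llm.health();
  if (!health.ok) {
    return undefined;
  }

  const stream = llm
    .streamChatCompletions({
      // Abstract model names ('base'/'large') instead of provider-specific IDs.
      model: llm.Model.LARGE,
      temperature: 0,
      messages: [{ role: 'user', content: `Summarize this text:\n\n${text}` }],
    })
    // Turn the chunk stream into a stream of progressively accumulated replies.
    .pipe(llm.accumulateContent());

  // The stream is a regular rxjs Observable; take its final (complete) value.
  return lastValueFrom(stream);
}
```

The same reasoning drives the change in GenAIDashboardChangesButton from `'gpt-3.5-turbo-16k'` to `llm.Model.BASE`: the LLM app abstracts the actual model name since it depends on the configured provider.
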
Ben Sully committed via GitHub
parent e6f682bc14
commit 8988274912
Changed files (lines changed):

1. package.json (2)
2. public/app/features/dashboard/components/GenAI/GenAIButton.test.tsx (20)
3. public/app/features/dashboard/components/GenAI/GenAIButton.tsx (13)
4. public/app/features/dashboard/components/GenAI/GenAIDashboardChangesButton.tsx (4)
5. public/app/features/dashboard/components/GenAI/GenAIHistory.tsx (8)
6. public/app/features/dashboard/components/GenAI/hooks.ts (20)
7. public/app/features/dashboard/components/GenAI/utils.test.ts (15)
8. public/app/features/dashboard/components/GenAI/utils.ts (15)
9. yarn.lock (865)

package.json

@@ -270,7 +270,7 @@
 "@grafana/flamegraph": "workspace:*",
 "@grafana/google-sdk": "0.1.2",
 "@grafana/lezer-logql": "0.2.7",
-"@grafana/llm": "0.12.0",
+"@grafana/llm": "0.13.2",
 "@grafana/monaco-logql": "^0.0.8",
 "@grafana/o11y-ds-frontend": "workspace:*",
 "@grafana/plugin-ui": "0.10.1",

public/app/features/dashboard/components/GenAI/GenAIButton.test.tsx

@@ -6,11 +6,11 @@ import { render } from 'test/test-utils';
 import { selectors } from '@grafana/e2e-selectors';
 import { GenAIButton, GenAIButtonProps } from './GenAIButton';
-import { StreamStatus, useOpenAIStream } from './hooks';
+import { StreamStatus, useLLMStream } from './hooks';
 import { EventTrackingSrc } from './tracking';
 import { Role } from './utils';
-const mockedUseOpenAiStreamState = {
+const mockedUseLLMStreamState = {
 messages: [],
 setMessages: jest.fn(),
 reply: 'I am a robot',
@@ -20,7 +20,7 @@ const mockedUseOpenAiStreamState = {
 };
 jest.mock('./hooks', () => ({
-useOpenAIStream: jest.fn(() => mockedUseOpenAiStreamState),
+useLLMStream: jest.fn(() => mockedUseLLMStreamState),
 StreamStatus: {
 IDLE: 'idle',
 GENERATING: 'generating',
@@ -37,7 +37,7 @@ describe('GenAIButton', () => {
 describe('when LLM plugin is not configured', () => {
 beforeAll(() => {
-jest.mocked(useOpenAIStream).mockReturnValue({
+jest.mocked(useLLMStream).mockReturnValue({
 messages: [],
 error: undefined,
 streamStatus: StreamStatus.IDLE,
@@ -65,7 +65,7 @@
 setMessagesMock.mockClear();
 setShouldStopMock.mockClear();
-jest.mocked(useOpenAIStream).mockReturnValue({
+jest.mocked(useLLMStream).mockReturnValue({
 messages: [],
 error: undefined,
 streamStatus: StreamStatus.IDLE,
@@ -151,7 +151,7 @@
 const setShouldStopMock = jest.fn();
 beforeEach(() => {
-jest.mocked(useOpenAIStream).mockReturnValue({
+jest.mocked(useLLMStream).mockReturnValue({
 messages: [],
 error: undefined,
 streamStatus: StreamStatus.GENERATING,
@@ -222,7 +222,7 @@
 };
 jest
-.mocked(useOpenAIStream)
+.mocked(useLLMStream)
 .mockImplementationOnce((options) => {
 options?.onResponse?.(reply);
 return returnValue;
@@ -257,7 +257,7 @@
 setMessagesMock.mockClear();
 setShouldStopMock.mockClear();
-jest.mocked(useOpenAIStream).mockReturnValue({
+jest.mocked(useLLMStream).mockReturnValue({
 messages: [],
 error: new Error('Something went wrong'),
 streamStatus: StreamStatus.IDLE,
@@ -308,7 +308,7 @@
 await userEvent.hover(tooltip);
 expect(tooltip).toBeVisible();
 expect(tooltip).toHaveTextContent(
-'Failed to generate content using OpenAI. Please try again or if the problem persists, contact your organization admin.'
+'Failed to generate content using LLM. Please try again or if the problem persists, contact your organization admin.'
 );
 });
@@ -331,7 +331,7 @@
 await userEvent.hover(tooltip);
 expect(tooltip).toBeVisible();
 expect(tooltip).toHaveTextContent(
-'Failed to generate content using OpenAI. Please try again or if the problem persists, contact your organization admin.'
+'Failed to generate content using LLM. Please try again or if the problem persists, contact your organization admin.'
 );
 });

public/app/features/dashboard/components/GenAI/GenAIButton.tsx

@@ -3,12 +3,13 @@ import { useCallback, useState } from 'react';
 import * as React from 'react';
 import { GrafanaTheme2 } from '@grafana/data';
+import { llm } from '@grafana/llm';
 import { Button, Spinner, useStyles2, Tooltip, Toggletip, Text } from '@grafana/ui';
 import { GenAIHistory } from './GenAIHistory';
-import { StreamStatus, useOpenAIStream } from './hooks';
+import { StreamStatus, useLLMStream } from './hooks';
 import { AutoGenerateItem, EventTrackingSrc, reportAutoGenerateInteraction } from './tracking';
-import { OAI_MODEL, DEFAULT_OAI_MODEL, Message, sanitizeReply } from './utils';
+import { DEFAULT_LLM_MODEL, Message, sanitizeReply } from './utils';
 export interface GenAIButtonProps {
 // Button label text
@@ -23,7 +24,7 @@ export interface GenAIButtonProps {
 // Temperature for the LLM plugin. Default is 1.
 // Closer to 0 means more conservative, closer to 1 means more creative.
 temperature?: number;
-model?: OAI_MODEL;
+model?: llm.Model;
 // Event tracking source. Send as `src` to Rudderstack event
 eventTrackingSrc: EventTrackingSrc;
 // Whether the button should be disabled
@@ -42,7 +43,7 @@ export const GenAIButton = ({
 text = 'Auto-generate',
 toggleTipTitle = '',
 onClick: onClickProp,
-model = DEFAULT_OAI_MODEL,
+model = DEFAULT_LLM_MODEL,
 messages,
 onGenerate,
 temperature = 1,
@@ -66,7 +67,7 @@ export const GenAIButton = ({
 [onGenerate, unshiftHistoryEntry]
 );
-const { setMessages, stopGeneration, value, error, streamStatus } = useOpenAIStream({
+const { setMessages, stopGeneration, value, error, streamStatus } = useLLMStream({
 model,
 temperature,
 onResponse,
@@ -85,7 +86,7 @@
 const showTooltip = error || tooltip ? undefined : false;
 const tooltipContent = error
-? 'Failed to generate content using OpenAI. Please try again or if the problem persists, contact your organization admin.'
+? 'Failed to generate content using LLM. Please try again or if the problem persists, contact your organization admin.'
 : tooltip || '';
 const onClick = (e: React.MouseEvent<HTMLButtonElement>) => {

public/app/features/dashboard/components/GenAI/GenAIDashboardChangesButton.tsx

@@ -1,5 +1,7 @@
 import { useCallback } from 'react';
+import { llm } from '@grafana/llm';
 import { DashboardModel } from '../../state/DashboardModel';
 import { GenAIButton } from './GenAIButton';
@@ -42,7 +44,7 @@ export const GenAIDashboardChangesButton = ({ dashboard, onGenerate, disabled }:
 messages={messages}
 onGenerate={onGenerate}
 temperature={0}
-model={'gpt-3.5-turbo-16k'}
+model={llm.Model.BASE}
 eventTrackingSrc={EventTrackingSrc.dashboardChanges}
 toggleTipTitle={'Improve your dashboard changes summary'}
 disabled={disabled}

public/app/features/dashboard/components/GenAI/GenAIHistory.tsx

@@ -8,9 +8,9 @@ import { Trans } from 'app/core/internationalization';
 import { STOP_GENERATION_TEXT } from './GenAIButton';
 import { GenerationHistoryCarousel } from './GenerationHistoryCarousel';
 import { QuickFeedback } from './QuickFeedback';
-import { StreamStatus, useOpenAIStream } from './hooks';
+import { StreamStatus, useLLMStream } from './hooks';
 import { AutoGenerateItem, EventTrackingSrc, reportAutoGenerateInteraction } from './tracking';
-import { getFeedbackMessage, Message, DEFAULT_OAI_MODEL, QuickFeedbackType, sanitizeReply } from './utils';
+import { getFeedbackMessage, Message, DEFAULT_LLM_MODEL, QuickFeedbackType, sanitizeReply } from './utils';
 export interface GenAIHistoryProps {
 history: string[];
@@ -41,8 +41,8 @@ export const GenAIHistory = ({
 [updateHistory]
 );
-const { setMessages, stopGeneration, reply, streamStatus, error } = useOpenAIStream({
-model: DEFAULT_OAI_MODEL,
+const { setMessages, stopGeneration, reply, streamStatus, error } = useLLMStream({
+model: DEFAULT_LLM_MODEL,
 temperature,
 onResponse,
 });

public/app/features/dashboard/components/GenAI/hooks.ts

@@ -2,15 +2,15 @@ import { Dispatch, SetStateAction, useCallback, useEffect, useState } from 'reac
 import { useAsync } from 'react-use';
 import { Subscription } from 'rxjs';
-import { openai } from '@grafana/llm';
+import { llm } from '@grafana/llm';
 import { createMonitoringLogger } from '@grafana/runtime';
 import { useAppNotification } from 'app/core/copy/appNotification';
-import { isLLMPluginEnabled, DEFAULT_OAI_MODEL } from './utils';
+import { isLLMPluginEnabled, DEFAULT_LLM_MODEL } from './utils';
 // Declared instead of imported from utils to make this hook modular
 // Ideally we will want to move the hook itself to a different scope later.
-type Message = openai.Message;
+type Message = llm.Message;
 const genAILogger = createMonitoringLogger('features.dashboards.genai');
@@ -29,11 +29,11 @@ interface Options {
 }
 const defaultOptions = {
-model: DEFAULT_OAI_MODEL,
+model: DEFAULT_LLM_MODEL,
 temperature: 1,
 };
-interface UseOpenAIStreamResponse {
+interface UseLLMStreamResponse {
 setMessages: Dispatch<SetStateAction<Message[]>>;
 stopGeneration: () => void;
 messages: Message[];
@@ -47,7 +47,7 @@ interface UseOpenAIStreamResponse {
 }
 // TODO: Add tests
-export function useOpenAIStream({ model, temperature, onResponse }: Options = defaultOptions): UseOpenAIStreamResponse {
+export function useLLMStream({ model, temperature, onResponse }: Options = defaultOptions): UseLLMStreamResponse {
 // The messages array to send to the LLM, updated when the button is clicked.
 const [messages, setMessages] = useState<Message[]>([]);
@@ -65,7 +65,7 @@ export function useOpenAIStream({ model, temperature, onResponse }: Options = de
 setMessages([]);
 setError(e);
 notifyError(
-'Failed to generate content using OpenAI',
+'Failed to generate content using LLM',
 'Please try again or if the problem persists, contact your organization admin.'
 );
 console.error(e);
@@ -93,7 +93,7 @@ export function useOpenAIStream({ model, temperature, onResponse }: Options = de
 setStreamStatus(StreamStatus.GENERATING);
 setError(undefined);
 // Stream the completions. Each element is the next stream chunk.
-const stream = openai
+const stream = llm
 .streamChatCompletions({
 model,
 temperature,
@@ -102,7 +102,7 @@ export function useOpenAIStream({ model, temperature, onResponse }: Options = de
 .pipe(
 // Accumulate the stream content into a stream of strings, where each
 // element contains the accumulated message so far.
-openai.accumulateContent()
+llm.accumulateContent()
 // The stream is just a regular Observable, so we can use standard rxjs
 // functionality to update state, e.g. recording when the stream
 // has completed.
@@ -148,7 +148,7 @@ export function useOpenAIStream({ model, temperature, onResponse }: Options = de
 let timeout: NodeJS.Timeout | undefined;
 if (streamStatus === StreamStatus.GENERATING && reply === '') {
 timeout = setTimeout(() => {
-onError(new Error(`OpenAI stream timed out after ${TIMEOUT}ms`));
+onError(new Error(`LLM stream timed out after ${TIMEOUT}ms`));
 }, TIMEOUT);
 }

public/app/features/dashboard/components/GenAI/utils.test.ts

@@ -1,4 +1,4 @@
-import { openai } from '@grafana/llm';
+import { llm } from '@grafana/llm';
 import { DASHBOARD_SCHEMA_VERSION } from '../../state/DashboardMigrator';
 import { createDashboardModelFixture, createPanelSaveModel } from '../../state/__fixtures__/dashboardFixtures';
@@ -6,13 +6,14 @@ import { NEW_PANEL_TITLE } from '../../utils/dashboard';
 import { getDashboardChanges, getPanelStrings, isLLMPluginEnabled, sanitizeReply } from './utils';
-// Mock the openai module
+// Mock the llm module
 jest.mock('@grafana/llm', () => ({
 ...jest.requireActual('@grafana/llm'),
-openai: {
+llm: {
 streamChatCompletions: jest.fn(),
 accumulateContent: jest.fn(),
 health: jest.fn(),
+Model: { LARGE: 'large' },
 },
 }));
@@ -99,8 +100,8 @@ describe('getDashboardChanges', () => {
 describe('isLLMPluginEnabled', () => {
 it('should return false if LLM plugin is not enabled', async () => {
-// Mock openai.health to return false
-jest.mocked(openai.health).mockResolvedValue({ ok: false, configured: false });
+// Mock llm.health to return false
+jest.mocked(llm.health).mockResolvedValue({ ok: false, configured: false });
 const enabled = await isLLMPluginEnabled();
@@ -108,8 +109,8 @@ describe('isLLMPluginEnabled', () => {
 });
 it('should return true if LLM plugin is enabled', async () => {
-// Mock openai.health to return true
-jest.mocked(openai.health).mockResolvedValue({ ok: true, configured: false });
+// Mock llm.health to return true
+jest.mocked(llm.health).mockResolvedValue({ ok: true, configured: false });
 const enabled = await isLLMPluginEnabled();

public/app/features/dashboard/components/GenAI/utils.ts

@@ -1,6 +1,6 @@
 import { pick } from 'lodash';
-import { openai } from '@grafana/llm';
+import { llm } from '@grafana/llm';
 import { config } from '@grafana/runtime';
 import { Panel } from '@grafana/schema';
@@ -18,7 +18,7 @@ export enum Role {
 'user' = 'user',
 }
-export type Message = openai.Message;
+export type Message = llm.Message;
 export enum QuickFeedbackType {
 Shorter = 'Even shorter',
@@ -27,11 +27,12 @@ export enum QuickFeedbackType {
 }
 /**
-* The OpenAI model to be used.
+* The LLM model to be used.
 *
+* The LLM app abstracts the actual model name since it depends on the provider.
+* We want to default to whatever the 'large' model is.
 */
-export const DEFAULT_OAI_MODEL = 'gpt-4';
-export type OAI_MODEL = 'gpt-4' | 'gpt-4-32k' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k';
+export const DEFAULT_LLM_MODEL: llm.Model = llm.Model.LARGE;
 /**
 * Sanitize the reply from OpenAI by removing the leading and trailing quotes.
@@ -80,7 +81,7 @@ export async function isLLMPluginEnabled(): Promise<boolean> {
 // Check if the LLM plugin is enabled.
 // If not, we won't be able to make requests, so return early.
 llmHealthCheck = new Promise((resolve) => {
-openai.health().then((response) => {
+llm.health().then((response) => {
 if (!response.ok) {
 // Health check fail clear cached promise so we can try again later
 llmHealthCheck = undefined;

yarn.lock: diff suppressed because it is too large.