OpenAI: Avoid overlapping streams when making multiple requests (#75504)

Co-authored-by: Ivan Ortega <ivanortegaalba@gmail.com>
Co-authored-by: nmarrs <nathanielmarrs@gmail.com>
Aaron Sanders 2 years ago committed by GitHub
parent 4551de148c
commit 9e78907233
11 changed files (changed lines shown in parentheses):

  1. .betterer.results (4)
  2. public/app/features/dashboard/components/GenAI/GenAIDashDescriptionButton.tsx (28)
  3. public/app/features/dashboard/components/GenAI/GenAIDashTitleButton.tsx (28)
  4. public/app/features/dashboard/components/GenAI/GenAIPanelDescriptionButton.tsx (11)
  5. public/app/features/dashboard/components/GenAI/hooks.ts (13)
  6. public/app/features/dashboard/components/GenAI/llms/constants.ts (19)
  7. public/app/features/dashboard/components/GenAI/llms/index.ts (2)
  8. public/app/features/dashboard/components/GenAI/llms/openai.ts (394)
  9. public/app/features/dashboard/components/GenAI/llms/types.ts (7)
  10. public/app/features/dashboard/components/GenAI/utils.test.ts (19)
  11. public/app/features/dashboard/components/GenAI/utils.ts (52)

@@ -3132,6 +3132,10 @@ exports[`better eslint`] = {
"public/app/features/dashboard/components/GenAI/GenAIButton.tsx:5381": [
[0, 0, 0, "Styles should be written using objects.", "0"]
],
"public/app/features/dashboard/components/GenAI/llms/openai.ts:5381": [
[0, 0, 0, "Do not use any type assertions.", "0"],
[0, 0, 0, "Do not use any type assertions.", "1"]
],
"public/app/features/dashboard/components/HelpWizard/HelpWizard.tsx:5381": [
[0, 0, 0, "Styles should be written using objects.", "0"],
[0, 0, 0, "Styles should be written using objects.", "1"],

@@ -4,7 +4,7 @@ import { DashboardModel } from '../../state';
import { GenAIButton } from './GenAIButton';
import { EventSource, reportGenerateAIButtonClicked } from './tracking';
import { Message, Role } from './utils';
import { getDashboardPanelPrompt, Message, Role } from './utils';
interface GenAIDashDescriptionButtonProps {
onGenerate: (description: string) => void;
@@ -12,9 +12,15 @@ interface GenAIDashDescriptionButtonProps {
}
const DESCRIPTION_GENERATION_STANDARD_PROMPT =
'You are an expert in Grafana dashboards.' +
'Your goal is to write short, descriptive, and concise dashboards description using the dashboard panels title and descriptions. ' +
'The description should be shorter than 140 characters.';
'You are an expert in creating Grafana Dashboards.\n' +
'Your goal is to write a descriptive and concise dashboard description.\n' +
"You will be given the title and description of the dashboard's panels.\n" +
'The dashboard description is meant to explain the purpose of the dashboard and what its panels show.\n' +
'If the dashboard has no panels, the description should be "Empty dashboard"\n' +
'There should be no numbers in the description except where they are important.\n' +
'The dashboard description should not have the dashboard title or any quotation marks in it.\n' +
'The description should be, at most, 140 characters.\n' +
'Respond with only the description of the dashboard.';
export const GenAIDashDescriptionButton = ({ onGenerate, dashboard }: GenAIDashDescriptionButtonProps) => {
const messages = React.useMemo(() => getMessages(dashboard), [dashboard]);
@@ -26,23 +32,15 @@ export const GenAIDashDescriptionButton = ({ onGenerate, dashboard }: GenAIDashD
};
function getMessages(dashboard: DashboardModel): Message[] {
const panelPrompt = getDashboardPanelPrompt(dashboard);
return [
{
content: DESCRIPTION_GENERATION_STANDARD_PROMPT,
role: Role.system,
},
{
content: `The title of the dashboard is "${
dashboard.title
}" and the the panels in the dashboard are: ${dashboard.panels
.map(
(panel, idx) => `
- Panel ${idx}
- Title: ${panel.title}
${panel.description ? `- Description: ${panel.description}` : ''}
`
)
.join('\n')}`,
content: `The title of the dashboard is "${dashboard.title}"\n` + `${panelPrompt}`,
role: Role.system,
},
];

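As a point of reference, here is a minimal sketch of the message pair getMessages now produces, assuming a hypothetical dashboard titled "Service overview" with two panels (the exact panel formatting comes from getDashboardPanelPrompt and is approximated here):

// Sketch only: the shape of the messages sent for description generation.
const exampleMessages: Message[] = [
  { role: Role.system, content: DESCRIPTION_GENERATION_STANDARD_PROMPT },
  {
    role: Role.system,
    content:
      'The title of the dashboard is "Service overview"\n' +
      'The panels in the dashboard are:\n' +
      '- Panel 0\n- Title: Requests per second\n' +
      '- Panel 1\n- Title: Error rate\n- Description: 5xx responses as a share of all responses',
  },
];
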
@@ -4,17 +4,23 @@ import { DashboardModel } from '../../state';
import { GenAIButton } from './GenAIButton';
import { EventSource, reportGenerateAIButtonClicked } from './tracking';
import { Message, Role } from './utils';
import { getDashboardPanelPrompt, Message, Role } from './utils';
interface GenAIDashTitleButtonProps {
dashboard: DashboardModel;
onGenerate: (description: string) => void;
}
const DESCRIPTION_GENERATION_STANDARD_PROMPT =
'You are an expert in Grafana dashboards.' +
'Your goal is to write the dashboard title inspired by the title and descriptions for the dashboard panels. ' +
'The title must be shorter than 50 characters.';
const TITLE_GENERATION_STANDARD_PROMPT =
'You are an expert in creating Grafana Dashboards.\n' +
'Your goal is to write a concise dashboard title.\n' +
"You will be given the title and description of the dashboard's panels.\n" +
'The dashboard title is meant to say what it shows on one line for users to navigate to it.\n' +
'If the dashboard has no panels, the title should be "Empty dashboard"\n' +
'There should be no numbers in the title.\n' +
'The dashboard title should not have quotation marks in it.\n' +
'The title should be, at most, 50 characters.\n' +
'Respond with only the title of the dashboard.';
export const GenAIDashTitleButton = ({ onGenerate, dashboard }: GenAIDashTitleButtonProps) => {
const messages = React.useMemo(() => getMessages(dashboard), [dashboard]);
@@ -26,19 +32,11 @@ export const GenAIDashTitleButton = ({ onGenerate, dashboard }: GenAIDashTitleBu
function getMessages(dashboard: DashboardModel): Message[] {
return [
{
content: DESCRIPTION_GENERATION_STANDARD_PROMPT,
content: TITLE_GENERATION_STANDARD_PROMPT,
role: Role.system,
},
{
content: `The panels in the dashboard are: ${dashboard.panels
.map(
(panel, idx) => `
- Panel ${idx}
- Title: ${panel.title}
${panel.description ? `- Description: ${panel.description}` : ''}
`
)
.join('\n')}`,
content: `The panels in the dashboard are: ${getDashboardPanelPrompt(dashboard)}`,
role: Role.system,
},
];

@@ -13,9 +13,12 @@ interface GenAIPanelDescriptionButtonProps {
}
const DESCRIPTION_GENERATION_STANDARD_PROMPT =
'You are an expert in creating Grafana Panels.' +
'Your goal is to write short, descriptive, and concise panel description using a JSON object with the panel declaration ' +
'The description should be shorter than 140 characters.';
'You are an expert in creating Grafana Panels.\n' +
'You will be given the title and description of the dashboard the panel is in as well as the JSON for the panel.\n' +
'Your goal is to write a descriptive and concise panel description.\n' +
'The panel description is meant to explain the purpose of the panel, not just its attributes.\n' +
'There should be no numbers in the description except for thresholds.\n' +
'The description should be, at most, 140 characters.';
export const GenAIPanelDescriptionButton = ({ onGenerate, panel }: GenAIPanelDescriptionButtonProps) => {
const messages = React.useMemo(() => getMessages(panel), [panel]);
@@ -43,7 +46,7 @@ function getMessages(panel: PanelModel): Message[] {
role: Role.system,
},
{
content: `Use this JSON object which defines the panel: ${JSON.stringify(panel.getSaveModel())}`,
content: `This is the JSON which defines the panel: ${JSON.stringify(panel.getSaveModel())}`,
role: Role.user,
},
];

@@ -2,13 +2,12 @@ import { useState } from 'react';
import { useAsync } from 'react-use';
import { Subscription } from 'rxjs';
import { llms } from '@grafana/experimental';
import { openai } from './llms';
import { isLLMPluginEnabled, OPEN_AI_MODEL } from './utils';
// Declared instead of imported from utils to make this hook modular
// Ideally we will want to move the hook itself to a different scope later.
type Message = llms.openai.Message;
type Message = openai.Message;
// TODO: Add tests
export function useOpenAIStream(
@@ -50,7 +49,7 @@
setIsGenerating(true);
// Stream the completions. Each element is the next stream chunk.
const stream = llms.openai
const stream = openai
.streamChatCompletions({
model,
temperature,
@@ -59,7 +58,7 @@
.pipe(
// Accumulate the stream content into a stream of strings, where each
// element contains the accumulated message so far.
llms.openai.accumulateContent()
openai.accumulateContent()
// The stream is just a regular Observable, so we can use standard rxjs
// functionality to update state, e.g. recording when the stream
// has completed.
@@ -71,6 +70,10 @@
enabled,
stream: stream.subscribe({
next: setReply,
error: (e) => {
console.log('The backend for the stream returned an error and nobody has implemented error handling yet!');
console.log(e);
},
complete: () => {
setIsGenerating(false);
setMessages([]);

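Condensed, the pipeline the hook builds now looks roughly like this (a sketch assuming the setReply/setIsGenerating/setMessages state setters defined earlier in the hook; the error handler mirrors the placeholder logging added above):

// Sketch: stream accumulated completions into component state and reset when done.
const subscription: Subscription = openai
  .streamChatCompletions({ model, temperature, messages })
  .pipe(openai.accumulateContent()) // each emission is the full reply so far
  .subscribe({
    next: setReply,
    error: (e) => console.log('stream error (real handling still TODO):', e),
    complete: () => {
      setIsGenerating(false);
      setMessages([]);
    },
  });
// Holding on to the Subscription lets the hook unsubscribe before starting a new
// request, which is one way to keep an old stream from overlapping a new one.
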
@@ -0,0 +1,19 @@
import { SemVer } from 'semver';
import { logWarning } from '@grafana/runtime';
export const LLM_PLUGIN_ID = 'grafana-llm-app';
export const LLM_PLUGIN_ROUTE = `/api/plugins/${LLM_PLUGIN_ID}`;
// The LLM app was at version 0.2.0 before we added the health check.
// If the health check fails, or the details don't exist on the response,
// we should assume it's this older version.
export let LLM_PLUGIN_VERSION = new SemVer('0.2.0');
export function setLLMPluginVersion(version: string) {
try {
LLM_PLUGIN_VERSION = new SemVer(version);
} catch (e) {
logWarning('Failed to parse version of grafana-llm-app; assuming old version is present.');
}
}

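A small sketch of the fallback described in the comment above: an unparsable version string logs a warning and keeps the previous value (initially the assumed 0.2.0 default):

// Sketch: setLLMPluginVersion only updates LLM_PLUGIN_VERSION when the string parses.
setLLMPluginVersion('0.5.1');        // LLM_PLUGIN_VERSION becomes 0.5.1
setLLMPluginVersion('not-a-semver'); // new SemVer(...) throws; a warning is logged and 0.5.1 is kept
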
@@ -0,0 +1,2 @@
// This is here to mimic the structure of imports from @grafana/experimental while switching over
export * as openai from './openai';

@@ -0,0 +1,394 @@
/**
* OpenAI API client.
*
* This module contains functions used to make requests to the OpenAI API via
* the Grafana LLM app plugin. That plugin must be installed, enabled and configured
* in order for these functions to work.
*
* The {@link enabled} function can be used to check if the plugin is enabled and configured.
*/
import { pipe, Observable, UnaryFunction } from 'rxjs';
import { filter, map, scan, takeWhile, tap } from 'rxjs/operators';
import {
isLiveChannelMessageEvent,
LiveChannelAddress,
LiveChannelMessageEvent,
LiveChannelScope,
} from '@grafana/data';
import { getBackendSrv, getGrafanaLiveSrv, logDebug } from '@grafana/runtime';
import { LLM_PLUGIN_ID, LLM_PLUGIN_ROUTE, setLLMPluginVersion } from './constants';
import { LLMAppHealthCheck } from './types';
const OPENAI_CHAT_COMPLETIONS_PATH = 'openai/v1/chat/completions';
/** The role of a message's author. */
export type Role = 'system' | 'user' | 'assistant' | 'function';
/** A message in a conversation. */
export interface Message {
/** The role of the message's author. */
role: Role;
/** The contents of the message. content is required for all messages, and may be null for assistant messages with function calls. */
content: string;
/**
* The name of the author of this message.
*
* This is required if role is 'function', and it should be the name of the function whose response is in the content.
*
* May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
*/
name?: string;
/**
* The name and arguments of a function that should be called, as generated by the model.
*/
function_call?: Object;
}
/** A function the model may generate JSON inputs for. */
export interface Function {
/**
* The name of the function to be called.
*
* Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
*/
name: string;
/**
* A description of what the function does, used by the model to choose when and how to call the function.
*/
description?: string;
/*
* The parameters the function accepts, described as a JSON Schema object. See the OpenAI guide for examples, and the JSON Schema reference for documentation about the format.
*
* To describe a function that accepts no parameters, provide the value {"type": "object", "properties": {}}.
*/
parameters: Object;
}
export interface ChatCompletionsRequest {
/**
* ID of the model to use.
*
* See the model endpoint compatibility table for details on which models work with the Chat Completions API.
*/
model: string;
/** A list of messages comprising the conversation so far. */
messages: Message[];
/** A list of functions the model may generate JSON inputs for. */
functions?: Function[];
/**
* Controls how the model responds to function calls.
*
* "none" means the model does not call a function, and responds to the end-user.
* "auto" means the model can pick between an end-user or calling a function.
* Specifying a particular function via {"name": "my_function"} forces the model to call that function.
*
* "none" is the default when no functions are present. "auto" is the default if functions are present.
*/
function_call?: 'none' | 'auto' | { name: string };
/**
* What sampling temperature to use, between 0 and 2.
* Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
*
* We generally recommend altering this or top_p but not both.
*/
temperature?: number;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
* So 0.1 means only the tokens comprising the top 10% probability mass are considered.
*
* We generally recommend altering this or temperature but not both.
*/
top_p?: number;
/**
* How many chat completion choices to generate for each input message.
*/
n?: number;
/**
* Up to 4 sequences where the API will stop generating further tokens.
*/
stop?: string | string[];
/**
* The maximum number of tokens to generate in the chat completion.
*
* The total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens.
*/
max_tokens?: number;
/**
* Number between -2.0 and 2.0.
*
* Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
*/
presence_penalty?: number;
/**
* Number between -2.0 and 2.0.
*
* Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
*/
frequency_penalty?: number;
/**
* Modify the likelihood of specified tokens appearing in the completion.
*
* Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
* Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
* but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban
* or exclusive selection of the relevant token.
*/
logit_bias?: { [key: string]: number };
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
*/
user?: string;
}
/** A completion object from an OpenAI model. */
export interface Choice {
/** The message object generated by the model. */
message: Message;
/**
* The reason the model stopped generating text.
*
* This may be one of:
* - stop: API returned complete message, or a message terminated by one of the stop sequences provided via the stop parameter
* - length: incomplete model output due to max_tokens parameter or token limit
* - function_call: the model decided to call a function
* - content_filter: omitted content due to a flag from our content filters
* - null: API response still in progress or incomplete
*/
finish_reason: string;
/** The index of the completion in the list of choices. */
index: number;
}
/** The usage statistics for a request to OpenAI. */
export interface Usage {
/** The number of tokens in the prompt. */
prompt_tokens: number;
/** The number of tokens in the completion. */
completion_tokens: number;
/** The total number of tokens. */
total_tokens: number;
}
/** The error response from the Grafana LLM app when trying to call the chat completions API. */
interface ChatCompletionsErrorResponse {
/** The error message. */
error: string;
}
/** A response from the OpenAI Chat Completions API. */
export interface ChatCompletionsResponse<T = Choice> {
/** The ID of the request. */
id: string;
/** The type of object returned (e.g. 'chat.completion'). */
object: string;
/** The timestamp of the request, as a UNIX timestamp. */
created: number;
/** The name of the model used to generate the response. */
model: string;
/** A list of completion objects (only one, unless `n > 1` in the request). */
choices: T[];
/** The number of tokens used to generate the replies, counting prompt, completion, and total. */
usage: Usage;
}
/** A content message returned from the model. */
export interface ContentMessage {
/** The content of the message. */
content: string;
}
/** A message returned from the model indicating that it is done. */
export interface DoneMessage {
done: boolean;
}
/** A function call message returned from the model. */
export interface FunctionCallMessage {
/** The name of the function to call. */
name: string;
/** JSON string for the arguments to the function call. */
arguments: string;
}
/**
* A delta returned from a stream of chat completion responses.
*
* In practice this will be either a content message or a function call;
* done messages are filtered out by the `streamChatCompletions` function.
*/
export type ChatCompletionsDelta = ContentMessage | FunctionCallMessage | DoneMessage;
/** A chunk included in a chat completion response. */
export interface ChatCompletionsChunk {
/** The delta since the previous chunk. */
delta: ChatCompletionsDelta;
}
/** Return true if the message is a 'content' message. */
export function isContentMessage(message: unknown): message is ContentMessage {
return typeof message === 'object' && message !== null && 'content' in message;
}
/** Return true if the message is a 'done' message. */
export function isDoneMessage(message: unknown): message is DoneMessage {
return typeof message === 'object' && message !== null && 'done' in message;
}
/** Return true if the response is an error response. */
export function isErrorResponse(response: unknown): response is ChatCompletionsErrorResponse {
return typeof response === 'object' && response !== null && 'error' in response;
}
/**
* An rxjs operator that extracts the content messages from a stream of chat completion responses.
*
* @returns An observable that emits the content messages. Each emission will be a string containing the
* token emitted by the model.
* @example <caption>Example of reading all tokens in a stream.</caption>
* const stream = streamChatCompletions({ model: 'gpt-3.5-turbo', messages: [
* { role: 'system', content: 'You are a great bot.' },
* { role: 'user', content: 'Hello, bot.' },
* ]}).pipe(extractContent());
* stream.subscribe({ next: console.log, error: console.error });
* // Output:
* // ['Hello', '! ', 'How ', 'are ', 'you', '?']
*/
export function extractContent(): UnaryFunction<
Observable<ChatCompletionsResponse<ChatCompletionsChunk>>,
Observable<string>
> {
return pipe(
filter((response: ChatCompletionsResponse<ChatCompletionsChunk>) => isContentMessage(response.choices[0].delta)),
// The type assertion is needed here because the type predicate above doesn't seem to propagate.
map(
(response: ChatCompletionsResponse<ChatCompletionsChunk>) => (response.choices[0].delta as ContentMessage).content
)
);
}
/**
* An rxjs operator that accumulates the content messages from a stream of chat completion responses.
*
* @returns An observable that emits the accumulated content messages. Each emission will be a string containing the
* content of all messages received so far.
* @example
* const stream = streamChatCompletions({ model: 'gpt-3.5-turbo', messages: [
* { role: 'system', content: 'You are a great bot.' },
* { role: 'user', content: 'Hello, bot.' },
* ]}).pipe(accumulateContent());
* stream.subscribe({ next: console.log, error: console.error });
* // Output:
* // ['Hello', 'Hello! ', 'Hello! How ', 'Hello! How are ', 'Hello! How are you', 'Hello! How are you?']
*/
export function accumulateContent(): UnaryFunction<
Observable<ChatCompletionsResponse<ChatCompletionsChunk>>,
Observable<string>
> {
return pipe(
extractContent(),
scan((acc, curr) => acc + curr, '')
);
}
/**
* Make a request to OpenAI's chat-completions API via the Grafana LLM plugin proxy.
*/
export async function chatCompletions(request: ChatCompletionsRequest): Promise<ChatCompletionsResponse> {
const response = await getBackendSrv().post<ChatCompletionsResponse>(
'/api/plugins/grafana-llm-app/resources/openai/v1/chat/completions',
request,
{
headers: { 'Content-Type': 'application/json' },
}
);
return response;
}
/**
* Make a streaming request to OpenAI's chat-completions API via the Grafana LLM plugin proxy.
*
* A stream of tokens will be returned as an `Observable<string>`. Use the `extractContent` operator to
* filter the stream to only content messages, or the `accumulateContent` operator to obtain a stream of
* accumulated content messages.
*
* The 'done' message will not be emitted; the stream will simply end when this message is encountered.
*
* @example <caption>Example of reading all tokens in a stream.</caption>
* const stream = streamChatCompletions({ model: 'gpt-3.5-turbo', messages: [
* { role: 'system', content: 'You are a great bot.' },
* { role: 'user', content: 'Hello, bot.' },
* ]}).pipe(extractContent());
* stream.subscribe({ next: console.log, error: console.error });
* // Output:
* // ['Hello', '! ', 'How ', 'are ', 'you', '?']
*
* @example <caption>Example of accumulating tokens in a stream.</caption>
* const stream = streamChatCompletions({ model: 'gpt-3.5-turbo', messages: [
* { role: 'system', content: 'You are a great bot.' },
* { role: 'user', content: 'Hello, bot.' },
* ]}).pipe(accumulateContent());
* stream.subscribe({ next: console.log, error: console.error });
* // Output:
* // ['Hello', 'Hello! ', 'Hello! How ', 'Hello! How are ', 'Hello! How are you', 'Hello! How are you?']
*/
export function streamChatCompletions(
request: ChatCompletionsRequest
): Observable<ChatCompletionsResponse<ChatCompletionsChunk>> {
const channel: LiveChannelAddress = {
scope: LiveChannelScope.Plugin,
namespace: LLM_PLUGIN_ID,
path: OPENAI_CHAT_COMPLETIONS_PATH + '/' + self.crypto.randomUUID(),
data: request,
};
const messages = getGrafanaLiveSrv()
.getStream(channel)
.pipe(filter((event) => isLiveChannelMessageEvent(event))) as Observable<
LiveChannelMessageEvent<ChatCompletionsResponse<ChatCompletionsChunk>>
>;
return messages.pipe(
tap((event) => {
if (isErrorResponse(event.message)) {
throw new Error(event.message.error);
}
}),
takeWhile((event) => isErrorResponse(event.message) || !isDoneMessage(event.message.choices[0].delta)),
map((event) => event.message)
);
}
let loggedWarning = false;
/** Check if the OpenAI API is enabled via the LLM plugin. */
export const enabled = async () => {
// Run a health check to see if the plugin is installed.
let response: LLMAppHealthCheck;
try {
response = await getBackendSrv().get(`${LLM_PLUGIN_ROUTE}/health`, undefined, undefined, {
showSuccessAlert: false,
showErrorAlert: false,
});
} catch (e) {
if (!loggedWarning) {
logDebug(String(e));
logDebug(
'Failed to check if OpenAI is enabled. This is expected if the Grafana LLM plugin is not installed, and the above error can be ignored.'
);
loggedWarning = true;
}
return false;
}
const { details } = response;
// Update the version if it's present on the response.
if (details.version !== undefined) {
setLLMPluginVersion(details.version);
}
// If the plugin is installed then check if it is configured.
return details?.openAI ?? false;
};

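The core of this change lives in streamChatCompletions: the Live channel path now ends in crypto.randomUUID(), so every request streams over its own channel and chunks from concurrent requests can no longer interleave. A minimal sketch of two concurrent streams (the prompts are illustrative):

// Sketch: each call gets a distinct channel path such as
//   openai/v1/chat/completions/<uuid-1> and openai/v1/chat/completions/<uuid-2>,
// so the two subscribers only ever receive their own chunks.
const titleStream = streamChatCompletions({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'system', content: 'Suggest a dashboard title.' }],
}).pipe(accumulateContent());

const descriptionStream = streamChatCompletions({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'system', content: 'Suggest a dashboard description.' }],
}).pipe(accumulateContent());

titleStream.subscribe({ next: (title) => console.log('title so far:', title) });
descriptionStream.subscribe({ next: (description) => console.log('description so far:', description) });
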
@@ -0,0 +1,7 @@
export type LLMAppHealthCheck = {
details: {
openAI?: boolean;
vector?: boolean;
version?: string;
};
};

@@ -1,17 +1,14 @@
import { llms } from '@grafana/experimental';
import { createDashboardModelFixture, createPanelJSONFixture } from '../../state/__fixtures__/dashboardFixtures';
import { openai } from './llms';
import { getDashboardChanges, isLLMPluginEnabled } from './utils';
// Mock the llms.openai module
jest.mock('@grafana/experimental', () => ({
llms: {
openai: {
streamChatCompletions: jest.fn(),
accumulateContent: jest.fn(),
enabled: jest.fn(),
},
jest.mock('./llms', () => ({
openai: {
streamChatCompletions: jest.fn(),
accumulateContent: jest.fn(),
enabled: jest.fn(),
},
}));
@@ -69,7 +66,7 @@ describe('getDashboardChanges', () => {
describe('isLLMPluginEnabled', () => {
it('should return true if LLM plugin is enabled', async () => {
// Mock llms.openai.enabled to return true
jest.mocked(llms.openai.enabled).mockResolvedValue(true);
jest.mocked(openai.enabled).mockResolvedValue(true);
const enabled = await isLLMPluginEnabled();
@@ -78,7 +75,7 @@
it('should return false if LLM plugin is not enabled', async () => {
// Mock llms.openai.enabled to return false
jest.mocked(llms.openai.enabled).mockResolvedValue(false);
jest.mocked(openai.enabled).mockResolvedValue(false);
const enabled = await isLLMPluginEnabled();

@@ -1,8 +1,8 @@
import { llms } from '@grafana/experimental';
import { DashboardModel } from '../../state';
import { DashboardModel, PanelModel } from '../../state';
import { Diffs, jsonDiff } from '../VersionHistory/utils';
import { openai } from './llms';
export enum Role {
// System content cannot be overwritten by user prompts.
'system' = 'system',
@@ -11,7 +11,7 @@ export enum Role {
'user' = 'user',
}
export type Message = llms.openai.Message;
export type Message = openai.Message;
/**
* The OpenAI model to be used.
@@ -49,5 +49,47 @@ export function getDashboardChanges(dashboard: DashboardModel): {
export async function isLLMPluginEnabled() {
// Check if the LLM plugin is enabled and configured.
// If not, we won't be able to make requests, so return early.
return await llms.openai.enabled();
return await openai.enabled();
}
/**
*
* @param dashboard Dashboard to generate a title or description for
* @returns String for inclusion in prompts stating what the dashboard's panels are
*/
export function getDashboardPanelPrompt(dashboard: DashboardModel): string {
const getPanelString = (panel: PanelModel, idx: number) => `
- Panel ${idx}\n
- Title: ${panel.title}\n
${panel.description ? `- Description: ${panel.description}` : ''}
`;
const panelStrings: string[] = dashboard.panels.map(getPanelString);
let panelPrompt: string;
if (panelStrings.length <= 10) {
panelPrompt = `The panels in the dashboard are:\n${panelStrings.join('\n')}`;
} else {
const withDescriptions = dashboard.panels.filter((panel) => panel.description);
const withoutDescriptions = dashboard.panels.filter((panel) => !panel.description);
let concatenatedPanelStrings;
if (withDescriptions.length >= 10) {
concatenatedPanelStrings = withDescriptions.slice(0, 10).map(getPanelString).join('\n');
} else {
concatenatedPanelStrings = withDescriptions.map(getPanelString).join('\n');
concatenatedPanelStrings += '\n';
concatenatedPanelStrings += withoutDescriptions
.slice(0, 10 - withDescriptions.length)
.map(getPanelString)
.join('\n');
}
panelPrompt =
`There are ${panelStrings.length} panels.\n` +
'Due to space constraints, only the information for ten of them is presented.\n' +
'These ten are not necessarily the first ten, but prioritized to those with descriptions.\n' +
`The panels in the dashboard are:\n${concatenatedPanelStrings}`;
} // This truncation should prevent exceeding the allowed size for GPT calls.
// Additionally, context windows that are too long degrade performance,
// so it is possible that condensing it further would improve results.
return panelPrompt;
}

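For illustration, a rough sketch of the prompt getDashboardPanelPrompt returns for a small dashboard (`dashboard` is a hypothetical two-panel DashboardModel, and the whitespace produced by getPanelString is approximated); dashboards with more than ten panels instead get the truncation preamble shown above, with panels that have descriptions prioritized:

// Sketch: expected shape of the prompt for a two-panel dashboard.
const panelPrompt = getDashboardPanelPrompt(dashboard);
// The panels in the dashboard are:
// - Panel 0
// - Title: CPU usage
// - Description: Per-node CPU utilization
// - Panel 1
// - Title: Memory usage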