+      {loading && <Spinner />}
+      <Tooltip
+        interactive
+        content={
+          <span>
+            The LLM plugin is not correctly configured. See your <a href="/plugins/grafana-llm-app">settings</a>{' '}
+            and enable your plugin.
+          </span>
+        }
+      >
+        <Button onClick={onClick} disabled={loading}>
+          {loading ? loadingText : text}
+        </Button>
+      </Tooltip>
+    </div>
+  );
+};
+
+const getStyles = (theme: GrafanaTheme2) => ({
+ wrapper: css`
+ display: flex;
+ `,
+});
diff --git a/public/app/features/dashboard/components/GenAI/GenAIPanelDescriptionButton.tsx b/public/app/features/dashboard/components/GenAI/GenAIPanelDescriptionButton.tsx
new file mode 100644
index 00000000000..c835eaa3683
--- /dev/null
+++ b/public/app/features/dashboard/components/GenAI/GenAIPanelDescriptionButton.tsx
@@ -0,0 +1,44 @@
+import React from 'react';
+
+import { getDashboardSrv } from '../../services/DashboardSrv';
+import { PanelModel } from '../../state';
+
+import { GenAIButton } from './GenAIButton';
+import { Message, Role } from './utils';
+
+interface GenAIPanelDescriptionButtonProps {
+ onGenerate: (description: string, isDone: boolean) => void;
+ panel: PanelModel;
+}
+
+const DESCRIPTION_GENERATION_STANDARD_PROMPT =
+  'You are an expert in creating Grafana Panels. ' +
+  'Your goal is to write a short, descriptive, and concise panel description using a JSON object with the panel declaration. ' +
+  'The description should be shorter than 140 characters.';
+
+export const GenAIPanelDescriptionButton = ({ onGenerate, panel }: GenAIPanelDescriptionButtonProps) => {
+ function getMessages(): Message[] {
+ const dashboard = getDashboardSrv().getCurrent()!;
+
+ return [
+ {
+ content: DESCRIPTION_GENERATION_STANDARD_PROMPT,
+ role: Role.system,
+ },
+ {
+ content: `The panel is part of a dashboard with the title: ${dashboard.title}`,
+ role: Role.system,
+ },
+ {
+        content: `The panel is part of a dashboard with the description: ${dashboard.description}`,
+ role: Role.system,
+ },
+ {
+ content: `Use this JSON object which defines the panel: ${JSON.stringify(panel.getSaveModel())}`,
+ role: Role.user,
+ },
+ ];
+ }
+
+  return <GenAIButton messages={getMessages()} onGenerate={onGenerate} />;
+};
diff --git a/public/app/features/dashboard/components/GenAI/GenAIPanelTitleButton.tsx b/public/app/features/dashboard/components/GenAI/GenAIPanelTitleButton.tsx
new file mode 100644
index 00000000000..16f3cd22b60
--- /dev/null
+++ b/public/app/features/dashboard/components/GenAI/GenAIPanelTitleButton.tsx
@@ -0,0 +1,44 @@
+import React from 'react';
+
+import { getDashboardSrv } from '../../services/DashboardSrv';
+import { PanelModel } from '../../state';
+
+import { GenAIButton } from './GenAIButton';
+import { Message, Role } from './utils';
+
+interface GenAIPanelTitleButtonProps {
+ onGenerate: (title: string, isDone: boolean) => void;
+ panel: PanelModel;
+}
+
+const TITLE_GENERATION_STANDARD_PROMPT =
+  'You are an expert in creating Grafana Panels. ' +
+  'Your goal is to write a short, descriptive, and concise title for the panel. ' +
+  'The title should be shorter than 50 characters.';
+
+export const GenAIPanelTitleButton = ({ onGenerate, panel }: GenAIPanelTitleButtonProps) => {
+ function getMessages(): Message[] {
+ const dashboard = getDashboardSrv().getCurrent()!;
+
+ return [
+ {
+ content: TITLE_GENERATION_STANDARD_PROMPT,
+ role: Role.system,
+ },
+ {
+ content: `The panel is part of a dashboard with the title: ${dashboard.title}`,
+ role: Role.system,
+ },
+ {
+        content: `The panel is part of a dashboard with the description: ${dashboard.description}`,
+ role: Role.system,
+ },
+ {
+ content: `Use this JSON object which defines the panel: ${JSON.stringify(panel.getSaveModel())}`,
+ role: Role.user,
+ },
+ ];
+ }
+
+  return <GenAIButton messages={getMessages()} onGenerate={onGenerate} />;
+};
diff --git a/public/app/features/dashboard/components/GenAI/utils.test.ts b/public/app/features/dashboard/components/GenAI/utils.test.ts
new file mode 100644
index 00000000000..00ddb8861ae
--- /dev/null
+++ b/public/app/features/dashboard/components/GenAI/utils.test.ts
@@ -0,0 +1,105 @@
+import { llms } from '@grafana/experimental';
+
+import {
+ generateTextWithLLM,
+ isLLMPluginEnabled,
+ isResponseCompleted,
+ cleanupResponse,
+ Role,
+ DONE_MESSAGE,
+ OPEN_AI_MODEL,
+} from './utils';
+
+// Mock the llms.openai module
+jest.mock('@grafana/experimental', () => ({
+ llms: {
+ openai: {
+ streamChatCompletions: jest.fn(),
+ accumulateContent: jest.fn(),
+ enabled: jest.fn(),
+ },
+ },
+}));
+
+describe('generateTextWithLLM', () => {
+ it('should throw an error if LLM plugin is not enabled', async () => {
+ jest.mocked(llms.openai.enabled).mockResolvedValue(false);
+
+ await expect(generateTextWithLLM([{ role: Role.user, content: 'Hello' }], jest.fn())).rejects.toThrow(
+ 'LLM plugin is not enabled'
+ );
+ });
+
+ it('should call llms.openai.streamChatCompletions with the correct parameters', async () => {
+ // Mock llms.openai.enabled to return true
+ jest.mocked(llms.openai.enabled).mockResolvedValue(true);
+
+ // Mock llms.openai.streamChatCompletions to return a mock observable (types not exported from library)
+ const mockObservable = { pipe: jest.fn().mockReturnValue({ subscribe: jest.fn() }) } as unknown as ReturnType<
+ typeof llms.openai.streamChatCompletions
+ >;
+ jest.mocked(llms.openai.streamChatCompletions).mockReturnValue(mockObservable);
+
+ const messages = [{ role: Role.user, content: 'Hello' }];
+ const onReply = jest.fn();
+
+ await generateTextWithLLM(messages, onReply);
+
+ expect(llms.openai.streamChatCompletions).toHaveBeenCalledWith({
+ model: OPEN_AI_MODEL,
+ messages: [
+        // The DONE_MESSAGE is always included by default as the first message
+ DONE_MESSAGE,
+ ...messages,
+ ],
+ });
+ });
+});
+
+describe('isLLMPluginEnabled', () => {
+ it('should return true if LLM plugin is enabled', async () => {
+ // Mock llms.openai.enabled to return true
+ jest.mocked(llms.openai.enabled).mockResolvedValue(true);
+
+ const enabled = await isLLMPluginEnabled();
+
+ expect(enabled).toBe(true);
+ });
+
+ it('should return false if LLM plugin is not enabled', async () => {
+ // Mock llms.openai.enabled to return false
+ jest.mocked(llms.openai.enabled).mockResolvedValue(false);
+
+ const enabled = await isLLMPluginEnabled();
+
+ expect(enabled).toBe(false);
+ });
+});
+
+describe('isResponseCompleted', () => {
+ it('should return true if response ends with the special done token', () => {
+ const response = 'This is a response¬';
+
+ const completed = isResponseCompleted(response);
+
+ expect(completed).toBe(true);
+ });
+
+ it('should return false if response does not end with the special done token', () => {
+ const response = 'This is a response';
+
+ const completed = isResponseCompleted(response);
+
+ expect(completed).toBe(false);
+ });
+});
+
+describe('cleanupResponse', () => {
+ it('should remove the special done token and quotes from the response', () => {
+ const response = 'This is a "response¬"';
+
+ const cleanedResponse = cleanupResponse(response);
+
+ expect(cleanedResponse).toBe('This is a response');
+ });
+});
diff --git a/public/app/features/dashboard/components/GenAI/utils.ts b/public/app/features/dashboard/components/GenAI/utils.ts
new file mode 100644
index 00000000000..7d9ff97580f
--- /dev/null
+++ b/public/app/features/dashboard/components/GenAI/utils.ts
@@ -0,0 +1,111 @@
+import { llms } from '@grafana/experimental';
+
+export interface Message {
+ role: Role;
+ content: string;
+}
+
+export enum Role {
+  // System content cannot be overwritten by user prompts.
+  'system' = 'system',
+  // User content is the content that the user has entered.
+  // This content can be overwritten by subsequent prompts.
+ 'user' = 'user',
+}
+
+// TODO: Replace this approach with a more stable one
+export const SPECIAL_DONE_TOKEN = '¬';
+
+/**
+ * The llm library doesn't indicate when the stream is done, so we ask the LLM to append a special token at the end of the message to signal that the response is complete.
+ */
+export const DONE_MESSAGE = {
+ role: Role.system,
+  content: `When you are done with the response, always write "${SPECIAL_DONE_TOKEN}" at the end of the response.`,
+};
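+
+// For example, a completed reply streamed back by the model might look like:
+//   'Network traffic by region¬'
+// isResponseCompleted() detects the trailing token and cleanupResponse() strips it.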
+
+/**
+ * The OpenAI model to be used.
+ */
+export const OPEN_AI_MODEL = 'gpt-4';
+
+/**
+ * Generate text by sending the given messages to the LLM.
+ * The messages are sent in order; each message is composed of a content string and a role.
+ *
+ * The role can be system or user.
+ * - System messages cannot be overridden by user input. They instruct the LLM on how to behave or how to format the response.
+ * - User messages carry user-provided input and can be overridden by subsequent prompts.
+ *
+ * @param messages messages to send to the LLM
+ * @param onReply callback invoked as the reply streams in; it is called for each chunk received, with the accumulated response so far and a flag indicating whether the response is complete.
+ * @returns The subscription to the stream.
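+ *
+ * @example
+ * // A minimal usage sketch; the message content is illustrative
+ * const subscription = await generateTextWithLLM(
+ *   [{ role: Role.user, content: 'Suggest a panel title' }],
+ *   (reply, isDone) => {
+ *     if (isDone) {
+ *       console.log(reply);
+ *     }
+ *   }
+ * );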
+ */
+export const generateTextWithLLM = async (
+ messages: Message[],
+ onReply: (response: string, isDone: boolean) => void
+) => {
+ const enabled = await isLLMPluginEnabled();
+
+ if (!enabled) {
+ throw Error('LLM plugin is not enabled');
+ }
+
+ return llms.openai
+ .streamChatCompletions({
+ model: OPEN_AI_MODEL,
+ messages: [DONE_MESSAGE, ...messages],
+ })
+ .pipe(
+ // Accumulate the stream content into a stream of strings, where each
+ // element contains the accumulated message so far.
+ llms.openai.accumulateContent()
+ )
+ .subscribe((response) => {
+ return onReply(cleanupResponse(response), isResponseCompleted(response));
+ });
+};
+
+/**
+ * Check if the LLM plugin is enabled and configured.
+ * @returns true if the LLM plugin is enabled and configured.
+ */
+export async function isLLMPluginEnabled() {
+ // Check if the LLM plugin is enabled and configured.
+ // If not, we won't be able to make requests, so return early.
+ return await llms.openai.enabled();
+}
+
+/**
+ * Check if the response is completed using the special done token.
+ * @param response The response to check.
+ * @returns true if the response is completed.
+ */
+export function isResponseCompleted(response: string) {
+ return response.endsWith(SPECIAL_DONE_TOKEN);
+}
+
+/**
+ * Remove the special done token and quotes from the response.
+ * @param response The response to clean up.
+ * @returns The cleaned up response.
+ */
+export function cleanupResponse(response: string) {
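+  // The done token is only appended once, at the end of the response, so a single (non-global) replace suffices.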
+ return response.replace(SPECIAL_DONE_TOKEN, '').replace(/"/g, '');
+}
diff --git a/public/app/features/dashboard/components/PanelEditor/OptionsPaneItemDescriptor.tsx b/public/app/features/dashboard/components/PanelEditor/OptionsPaneItemDescriptor.tsx
index aa0c4fb63ae..125bb6fc8d9 100644
--- a/public/app/features/dashboard/components/PanelEditor/OptionsPaneItemDescriptor.tsx
+++ b/public/app/features/dashboard/components/PanelEditor/OptionsPaneItemDescriptor.tsx
@@ -1,8 +1,10 @@
+import { css } from '@emotion/css';
import React, { ReactNode } from 'react';
import Highlighter from 'react-highlight-words';
+import { GrafanaTheme2 } from '@grafana/data';
import { selectors } from '@grafana/e2e-selectors';
-import { Field, Label } from '@grafana/ui';
+import { Field, Label, useStyles2 } from '@grafana/ui';
import { OptionsPaneCategoryDescriptor } from './OptionsPaneCategoryDescriptor';
import { OptionsPaneItemOverrides } from './OptionsPaneItemOverrides';
@@ -17,6 +19,8 @@ export interface OptionsPaneItemProps {
skipField?: boolean;
showIf?: () => boolean;
overrides?: OptionPaneItemOverrideInfo[];
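+  // Optional node rendered next to the option label, e.g. a GenAI auto-generate button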
+ addon?: ReactNode;
}
/**
@@ -28,7 +31,7 @@ export class OptionsPaneItemDescriptor {
constructor(public props: OptionsPaneItemProps) {}
getLabel(searchQuery?: string): ReactNode {
- const { title, description, overrides } = this.props;
+ const { title, description, overrides, addon } = this.props;
if (!searchQuery) {
// Do not render label for categories with only one child
@@ -36,12 +39,7 @@ export class OptionsPaneItemDescriptor {
return null;
}
-      return (
-        <Label description={description}>
-          {title}
-          {overrides && overrides.length > 0 && <OptionsPaneItemOverrides overrides={overrides} />}
-        </Label>
-      );
+      return <OptionPaneLabel title={title} description={description} overrides={overrides} addon={addon} />;
}
const categories: React.ReactNode[] = [];
@@ -99,3 +97,32 @@ export class OptionsPaneItemDescriptor {
);
}
}
+
+interface OptionPanelLabelProps {
+ title: string;
+ description?: string;
+ overrides?: OptionPaneItemOverrideInfo[];
+ addon: ReactNode;
+}
+
+function OptionPaneLabel({ title, description, overrides, addon }: OptionPanelLabelProps) {
+ const styles = useStyles2(getLabelStyles);
+ return (
+