GenAI: Fixes multiple calls to settings and health (#87623)

* GenAI: Fixes multiple calls to settings and health

* swap order of tests given new caching

---------

Co-authored-by: nmarrs <nathanielmarrs@gmail.com>
This commit is contained in:
Torkel Ödegaard 2024-05-10 20:52:59 +02:00 committed by GitHub
parent ef51a64b57
commit f83366fcdd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 28 additions and 11 deletions

View File

@ -98,15 +98,6 @@ describe('getDashboardChanges', () => {
});
describe('isLLMPluginEnabled', () => {
it('should return true if LLM plugin is enabled', async () => {
// Mock llms.openai.health to return true
jest.mocked(llms.openai.health).mockResolvedValue({ ok: true, configured: false });
const enabled = await isLLMPluginEnabled();
expect(enabled).toBe(true);
});
it('should return false if LLM plugin is not enabled', async () => {
// Mock llms.openai.health to return false
jest.mocked(llms.openai.health).mockResolvedValue({ ok: false, configured: false });
@ -115,6 +106,15 @@ describe('isLLMPluginEnabled', () => {
expect(enabled).toBe(false);
});
it('should return true if LLM plugin is enabled', async () => {
// Mock llms.openai.health to return true
jest.mocked(llms.openai.health).mockResolvedValue({ ok: true, configured: false });
const enabled = await isLLMPluginEnabled();
expect(enabled).toBe(true);
});
});
describe('sanitizeReply', () => {

View File

@ -59,18 +59,35 @@ export function getDashboardChanges(dashboard: DashboardModel): {
};
}
// Shared health-check promise, cached so repeated calls to isLLMPluginEnabled
// don't trigger multiple requests to the LLM app settings / health-check APIs.
let llmHealthCheck: Promise<boolean> | undefined;
/**
* Check if the LLM plugin is enabled.
* @returns true if the LLM plugin is enabled.
*/
export async function isLLMPluginEnabled() {
export async function isLLMPluginEnabled(): Promise<boolean> {
if (!config.apps['grafana-llm-app']) {
return false;
}
if (llmHealthCheck) {
return llmHealthCheck;
}
// Check if the LLM plugin is enabled.
// If not, we won't be able to make requests, so return early.
return llms.openai.health().then((response) => response.ok);
llmHealthCheck = new Promise((resolve) => {
llms.openai.health().then((response) => {
if (!response.ok) {
// Health check fail clear cached promise so we can try again later
llmHealthCheck = undefined;
}
resolve(response.ok);
});
});
return llmHealthCheck;
}
/**