Mirror of https://github.com/grafana/grafana.git
Prometheus: Incremental query cache keep extra datapoint before new 'from' (#93186)

* fix: keep extra datapoint before new 'from' when evicting datapoints in incremental query cache
* remove redundant comments
* Update todo
* move prometheus specific code in a new function
* add unit test

---------

Co-authored-by: ismail simsek <ismailsimsek09@gmail.com>
This commit is contained in: parent 5a2f22c60d, commit 26b039bb81
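Before the diff, a minimal sketch of the rule this commit introduces, as a standalone TypeScript function (`evictKeepingOneBefore` is a hypothetical helper for illustration, not code from the commit): when the incremental cache evicts datapoints older than the new `from`, it now keeps one point just before the window so the first visible point still has a neighbor to draw a connecting line segment to.

```ts
// Hypothetical helper illustrating the eviction rule (not the actual QueryCache code).
function evictKeepingOneBefore(times: number[], values: number[], from: number) {
  // index of the first datapoint inside the new window
  const firstInside = times.findIndex((t) => t >= from);
  if (firstInside === -1) {
    return { times: [], values: [] }; // nothing inside the window
  }
  // keep one extra datapoint before the window, if one exists
  const start = Math.max(firstInside - 1, 0);
  return { times: times.slice(start), values: values.slice(start) };
}

// With a 15s step and `from` moved forward by 30s, only one of the
// two stale points is evicted:
evictKeepingOneBefore([0, 15000, 30000, 45000], [1, 2, 3, 4], 30000);
// => { times: [15000, 30000, 45000], values: [2, 3, 4] }
```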
@@ -6,8 +6,8 @@ import { DataFrame, DataQueryRequest, DateTime, dateTime, TimeRange } from '@gra
 import { QueryEditorMode } from '../querybuilder/shared/types';
 import { PromQuery } from '../types';
 
-import { QueryCache } from './QueryCache';
-import { IncrementalStorageDataFrameScenarios } from './QueryCacheTestData';
+import { CacheRequestInfo, QueryCache } from './QueryCache';
+import { IncrementalStorageDataFrameScenarios, trimmedFirstPointInPromFrames } from './QueryCacheTestData';
 
 // Will not interpolate vars!
 const interpolateStringTest = (query: PromQuery) => {
@@ -415,10 +415,10 @@ describe('QueryCache: Prometheus', function () {
 
     const secondMergedLength = secondQueryResult[0].fields[0].values.length;
 
-    // Since the step is 15s, and the request was 30 seconds later, we should have 2 extra frames, but we should evict the first two, so we should get the same length
-    expect(firstMergedLength).toEqual(secondMergedLength);
-    expect(firstQueryResult[0].fields[0].values[2]).toEqual(secondQueryResult[0].fields[0].values[0]);
-    expect(firstQueryResult[0].fields[0].values[0] + 30000).toEqual(secondQueryResult[0].fields[0].values[0]);
+    // Since the step is 15s, and the request was 30 seconds later, we should have 2 extra frames, but we should evict the first one so we keep one datapoint before the new from so the first datapoint in view connects to the y-axis
+    expect(firstMergedLength + 1).toEqual(secondMergedLength);
+    expect(firstQueryResult[0].fields[0].values[1]).toEqual(secondQueryResult[0].fields[0].values[0]);
+    expect(firstQueryResult[0].fields[0].values[0] + 30000).toEqual(secondQueryResult[0].fields[0].values[1]);
 
     cache.set(targetIdentity, `'1=1'|${interval}|${JSON.stringify(thirdRange.raw)}`);
 
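The arithmetic behind the updated assertions, spelled out as a worked example (the step and the 30-second shift are the values from the test above; this is not test code):

```ts
const stepMs = 15000; // query step from the test setup
const shiftMs = 30000; // the second request fires 30 seconds later

const newPoints = shiftMs / stepMs; // 2 datapoints arrive
const evicted = newPoints - 1; // only 1 is evicted; 1 stays as the connecting point

// net growth of the merged series, matching `firstMergedLength + 1 === secondMergedLength`
const growth = newPoints - evicted; // 1

// and the second result now starts one step into the first result, matching
// `firstQueryResult[0].fields[0].values[1] === secondQueryResult[0].fields[0].values[0]`
```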
@@ -438,7 +438,9 @@ describe('QueryCache: Prometheus', function () {
 
     const cachedAfterThird = storage.cache.get(targetIdentity);
     const storageLengthAfterThirdQuery = cachedAfterThird?.frames[0].fields[0].values.length;
-    expect(storageLengthAfterThirdQuery).toEqual(20);
+
+    // Should have the 20 data points in the viz, plus one extra
+    expect(storageLengthAfterThirdQuery).toEqual(21);
   });
 
   it('Will build signature using target overrides', () => {
@@ -496,6 +498,56 @@ describe('QueryCache: Prometheus', function () {
     expect(cacheRequest.shouldCache).toBe(true);
   });
 
+  it('should not modify the initial request', () => {
+    const storage = new QueryCache<PromQuery>({
+      getTargetSignature: getPrometheusTargetSignature,
+      overlapString: '10m',
+    });
+
+    const firstFrames = trimmedFirstPointInPromFrames as unknown as DataFrame[];
+    // There are 6 values
+    expect(firstFrames[0].fields[1].values.length).toBe(6);
+    const expectedValueLength = firstFrames[0].fields[1].values.length;
+
+    const cache = new Map<string, string>();
+    const interval = 15000;
+    // start time of scenario
+    const firstFrom = dateTime(new Date(1726835104488));
+    const firstTo = dateTime(new Date(1726836004488));
+    const firstRange: TimeRange = {
+      from: firstFrom,
+      to: firstTo,
+      raw: {
+        from: 'now-15m',
+        to: 'now',
+      },
+    };
+    // Signifier definition
+    const dashboardId = `dashid`;
+    const panelId = 200;
+    const targetIdentity = `${dashboardId}|${panelId}|A`;
+
+    const request = mockPromRequest({
+      range: firstRange,
+      dashboardUID: dashboardId,
+      panelId: panelId,
+    });
+
+    // we set a bigger interval than query interval
+    request.targets[0].interval = '1m';
+    const requestInfo: CacheRequestInfo<PromQuery> = {
+      requests: [], // unused
+      targSigs: cache,
+      shouldCache: true,
+    };
+    const targetSignature = `1=1|${interval}|${JSON.stringify(request.rangeRaw ?? '')}`;
+    cache.set(targetIdentity, targetSignature);
+
+    const firstQueryResult = storage.procFrames(request, requestInfo, firstFrames);
+
+    expect(firstQueryResult[0].fields[1].values.length).toBe(expectedValueLength);
+  });
+
   it('Should modify request', () => {
     const request = mockPromRequest();
     const storage = new QueryCache<PromQuery>({
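For readers skimming the new test: the cache is keyed by two plain strings, a target identity and a target signature. A minimal illustration of the two formats used above, reusing the literal values from the test (illustrative only):

```ts
// identity: which dashboard/panel/refId the cached frames belong to
const targetIdentity = `${'dashid'}|${200}|${'A'}`; // 'dashid|200|A'

// signature: if the query text, interval, or raw range changes,
// cached frames for that identity are considered stale
const targetSignature = `1=1|${15000}|${JSON.stringify({ from: 'now-15m', to: 'now' })}`;
```

The `interval = '1m'` override is the point of the test: it forces the trim offset to be derived from the per-target min step (60000 ms) rather than the request's `intervalMs`, and the final assertion checks that `procFrames` still hands back frames with all 6 values intact.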
@@ -9,6 +9,7 @@ import {
   incrRoundDn,
   isValidDuration,
   parseDuration,
+  rangeUtil,
   Table,
   trimTable,
 } from '@grafana/data';
@@ -220,7 +221,10 @@ export class QueryCache<T extends SupportedQueryTypes> {
       // eslint-disable-next-line @typescript-eslint/consistent-type-assertions
       let table: Table = frame.fields.map((field) => field.values) as Table;
 
-      let trimmed = trimTable(table, newFrom, newTo);
+      const dataPointStep = findDatapointStep(request, respFrames);
+
+      // query interval is greater than request.intervalMs, use query interval to make sure we've always got one datapoint outside the panel viewport
+      let trimmed = trimTable(table, newFrom - dataPointStep, newTo);
 
       if (trimmed[0].length > 0) {
         for (let i = 0; i < trimmed.length; i++) {
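The effect of widening the trim window by one step, shown on a plain time column (a sketch of what `trimTable` does to the `Table`'s time column; the real function trims all columns together):

```ts
const times = [0, 15000, 30000, 45000, 60000];
const newFrom = 30000;
const dataPointStep = 15000;

// old behavior: everything before newFrom is dropped
const oldTrim = times.filter((t) => t >= newFrom); // [30000, 45000, 60000]

// new behavior: one step of headroom is kept before newFrom,
// so the leftmost visible point still connects to a neighbor
const newTrim = times.filter((t) => t >= newFrom - dataPointStep); // [15000, 30000, 45000, 60000]
```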
@@ -255,3 +259,21 @@ export class QueryCache<T extends SupportedQueryTypes> {
     return respFrames;
   }
 }
+
+function findDatapointStep(request: DataQueryRequest<PromQuery>, respFrames: DataFrame[]): number {
+  // Prometheus specific logic below
+  if (request.targets[0].datasource?.type !== 'prometheus') {
+    return 0;
+  }
+
+  const target = request.targets.find((t) => t.refId === respFrames[0].refId);
+
+  let dataPointStep = request.intervalMs;
+  if (target?.interval) {
+    const minStepMs = rangeUtil.intervalToMs(target.interval);
+    if (minStepMs > request.intervalMs) {
+      dataPointStep = minStepMs;
+    }
+  }
+  return dataPointStep;
+}
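How the new function resolves in practice, restated as a self-contained sketch (the `datapointStepFor` helper below is hypothetical and just mirrors the branches of `findDatapointStep`):

```ts
import { rangeUtil } from '@grafana/data';

// Hypothetical standalone version of the same decision, for illustration.
function datapointStepFor(intervalMs: number, targetInterval?: string, dsType = 'prometheus'): number {
  if (dsType !== 'prometheus') {
    return 0; // non-Prometheus targets get no headroom
  }
  if (targetInterval) {
    const minStepMs = rangeUtil.intervalToMs(targetInterval);
    if (minStepMs > intervalMs) {
      return minStepMs; // the coarser per-target min step wins
    }
  }
  return intervalMs;
}

datapointStepFor(15000); // 15000 — falls back to request.intervalMs
datapointStepFor(15000, '1m'); // 60000 — the '1m' override is coarser
datapointStepFor(15000, '1m', 'loki'); // 0 — not a Prometheus target
```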
@@ -258,6 +258,151 @@ const twoRequestsOneCachedMissingData = {
     },
   },
 };
 
+export const trimmedFirstPointInPromFrames = [
+  {
+    refId: 'A',
+    meta: {
+      type: 'timeseries-multi',
+      typeVersion: [0, 1],
+      custom: {
+        resultType: 'matrix',
+      },
+      executedQueryString: 'Expr: ecs_cpu_seconds_total\nStep: 1m0s',
+    },
+    fields: [
+      {
+        name: 'Time',
+        type: 'time',
+        typeInfo: {
+          frame: 'time.Time',
+        },
+        config: {
+          interval: 60000,
+        },
+        values: [1726835100000, 1726835160000, 1726835220000, 1726835280000, 1726835340000, 1726835400000],
+        entities: {},
+      },
+      {
+        name: 'ecs_cpu_seconds_total',
+        type: 'number',
+        typeInfo: {
+          frame: 'float64',
+        },
+        labels: {
+          __name__: 'ecs_cpu_seconds_total',
+          container: 'browser-container',
+          cpu: '0',
+          environment: 'staging',
+          instance: 'localhost:9779',
+          job: 'node',
+          task_id: '7eaae23357564449bbde3d6b8aa3d171',
+          test_run_id: '178196',
+        },
+        config: {},
+        values: [148.528672986, 148.535654343, 148.535654343, 148.535654343, 148.535654343, 148.535654343],
+        entities: {},
+      },
+    ],
+    length: 6,
+  },
+  {
+    refId: 'A',
+    meta: {
+      type: 'timeseries-multi',
+      typeVersion: [0, 1],
+      custom: {
+        resultType: 'matrix',
+      },
+    },
+    fields: [
+      {
+        name: 'Time',
+        type: 'time',
+        typeInfo: {
+          frame: 'time.Time',
+        },
+        config: {
+          interval: 60000,
+        },
+        values: [
+          1726835100000, 1726835160000, 1726835220000, 1726835280000, 1726835340000, 1726835400000, 1726835460000,
+          1726835520000, 1726835580000, 1726835640000, 1726835700000, 1726835760000, 1726835820000, 1726835880000,
+          1726835940000, 1726836000000,
+        ],
+        entities: {},
+      },
+      {
+        name: 'ecs_cpu_seconds_total',
+        type: 'number',
+        typeInfo: {
+          frame: 'float64',
+        },
+        labels: {
+          __name__: 'ecs_cpu_seconds_total',
+          container: 'browser-container',
+          cpu: '0',
+          environment: 'staging',
+          instance: 'localhost:9779',
+          job: 'node',
+          task_id: '800bedb7d7434ee69251e1d72aa24ee4',
+        },
+        config: {},
+        values: [
+          18.273081476, 18.27823287, 18.28002373, 18.281447944, 18.282133248, 18.283555666, 18.28503474, 18.287278624,
+          18.290889095, 18.295363816, 18.29912598, 18.301647198, 18.305721365, 18.313378915, 18.31617255, 18.32104371,
+        ],
+        entities: {},
+      },
+    ],
+    length: 16,
+  },
+  {
+    refId: 'A',
+    meta: {
+      type: 'timeseries-multi',
+      typeVersion: [0, 1],
+      custom: {
+        resultType: 'matrix',
+      },
+    },
+    fields: [
+      {
+        name: 'Time',
+        type: 'time',
+        typeInfo: {
+          frame: 'time.Time',
+        },
+        config: {
+          interval: 60000,
+        },
+        values: [1726835100000, 1726835160000, 1726835220000, 1726835280000, 1726835340000, 1726835400000],
+        entities: {},
+      },
+      {
+        name: 'ecs_cpu_seconds_total',
+        type: 'number',
+        typeInfo: {
+          frame: 'float64',
+        },
+        labels: {
+          __name__: 'ecs_cpu_seconds_total',
+          container: 'browser-container',
+          cpu: '1',
+          environment: 'staging',
+          instance: 'localhost:9779',
+          job: 'node',
+          task_id: '7eaae23357564449bbde3d6b8aa3d171',
+          test_run_id: '178196',
+        },
+        config: {},
+        values: [147.884430886, 147.893771728, 147.893771728, 147.893771728, 147.893771728, 147.893771728],
+        entities: {},
+      },
+    ],
+    length: 6,
+  },
+];
+
 export const IncrementalStorageDataFrameScenarios = {
   histogram: {
     // 3 requests, one 30 seconds after the first, and then the user waits a minute and shortens to a 5 minute query window from 1 hour to force frames to get evicted