Elasticsearch: Added support for calendar_interval in ES date histogram queries (#75459)
* Introduce support for calendar intervals in ES date histogram queries
* Add missing undef type check for ES calendar interval query support
parent 07266aa983
commit ce462e8cd7
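For context on the change below: Elasticsearch's date_histogram aggregation accepts either a fixed_interval (fixed-length units such as 30s or 1d) or a calendar_interval (calendar-aware units such as 1w, 1M, 1q, 1y), and only one of the two may appear in a request. A minimal sketch of the two aggregation shapes, with an illustrative field name rather than anything taken from this commit:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Two alternative date_histogram bodies; "@timestamp" is just an example field.
	fixed := map[string]any{
		"date_histogram": map[string]any{"field": "@timestamp", "fixed_interval": "1d"},
	}
	calendar := map[string]any{
		"date_histogram": map[string]any{"field": "@timestamp", "calendar_interval": "1M"},
	}
	for _, agg := range []map[string]any{fixed, calendar} {
		b, _ := json.Marshal(agg)
		fmt.Println(string(b))
	}
}
```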
@@ -212,14 +212,20 @@ type HistogramAgg struct {
 // DateHistogramAgg represents a date histogram aggregation
 type DateHistogramAgg struct {
-    Field          string          `json:"field"`
-    FixedInterval  string          `json:"fixed_interval,omitempty"`
-    MinDocCount    int             `json:"min_doc_count"`
-    Missing        *string         `json:"missing,omitempty"`
-    ExtendedBounds *ExtendedBounds `json:"extended_bounds"`
-    Format         string          `json:"format"`
-    Offset         string          `json:"offset,omitempty"`
-    TimeZone       string          `json:"time_zone,omitempty"`
+    Field            string          `json:"field"`
+    FixedInterval    string          `json:"fixed_interval,omitempty"`
+    CalendarInterval string          `json:"calendar_interval,omitempty"`
+    MinDocCount      int             `json:"min_doc_count"`
+    Missing          *string         `json:"missing,omitempty"`
+    ExtendedBounds   *ExtendedBounds `json:"extended_bounds"`
+    Format           string          `json:"format"`
+    Offset           string          `json:"offset,omitempty"`
+    TimeZone         string          `json:"time_zone,omitempty"`
 }
+
+// GetCalendarIntervals provides the list of intervals used for building calendar bucketAgg
+func GetCalendarIntervals() []string {
+    return []string{"1w", "1M", "1q", "1y"}
+}

 // FiltersAggregation represents a filters aggregation
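Because both interval fields carry omitempty, only the interval style that is actually set is serialized into the search request. A minimal standalone sketch of that behaviour; the struct here is a trimmed re-declaration for illustration, not the full DateHistogramAgg:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of DateHistogramAgg, kept only to show the omitempty behaviour.
type dateHistogramAgg struct {
	Field            string `json:"field"`
	FixedInterval    string `json:"fixed_interval,omitempty"`
	CalendarInterval string `json:"calendar_interval,omitempty"`
	MinDocCount      int    `json:"min_doc_count"`
}

func main() {
	// Calendar interval set: fixed_interval is left out of the JSON.
	monthly, _ := json.Marshal(dateHistogramAgg{Field: "@timestamp", CalendarInterval: "1M"})
	fmt.Println(string(monthly)) // {"field":"@timestamp","calendar_interval":"1M","min_doc_count":0}

	// Fixed interval set: calendar_interval is left out instead.
	daily, _ := json.Marshal(dateHistogramAgg{Field: "@timestamp", FixedInterval: "1d"})
	fmt.Println(string(daily)) // {"field":"@timestamp","fixed_interval":"1d","min_doc_count":0}
}
```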
@@ -9,6 +9,7 @@ import (
     "time"

     "github.com/grafana/grafana-plugin-sdk-go/backend"
+    "golang.org/x/exp/slices"

     "github.com/grafana/grafana/pkg/components/simplejson"
     "github.com/grafana/grafana/pkg/infra/log"
@@ -53,6 +54,7 @@ func (e *elasticsearchDataQuery) execute() (*backend.QueryDataResponse, error) {
     from := e.dataQueries[0].TimeRange.From.UnixNano() / int64(time.Millisecond)
     to := e.dataQueries[0].TimeRange.To.UnixNano() / int64(time.Millisecond)
     for _, q := range queries {
+        fmt.Printf("Query = %v", q)
         if err := e.processQuery(q, ms, from, to); err != nil {
             mq, _ := json.Marshal(q)
             e.logger.Error("Failed to process query to multisearch request builder", "error", err, "query", string(mq), "queriesLength", len(queries), "duration", time.Since(start), "stage", es.StagePrepareRequest)
@@ -160,22 +162,27 @@ func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFro
         field = timeField
     }
     aggBuilder.DateHistogram(bucketAgg.ID, field, func(a *es.DateHistogramAgg, b es.AggBuilder) {
-        a.FixedInterval = bucketAgg.Settings.Get("interval").MustString("auto")
+        var interval = bucketAgg.Settings.Get("interval").MustString("auto")
+        if slices.Contains(es.GetCalendarIntervals(), interval) {
+            a.CalendarInterval = interval
+        } else {
+            if interval == "auto" {
+                // note this is not really a valid grafana-variable-handling,
+                // because normally this would not match `$__interval_ms`,
+                // but because how we apply these in the go-code, this will work
+                // correctly, and becomes something like `500ms`.
+                // a nicer way would be to use `${__interval_ms}ms`, but
+                // that format is not recognized where we apply these variables
+                // in the elasticsearch datasource
+                a.FixedInterval = "$__interval_msms"
+            } else {
+                a.FixedInterval = interval
+            }
+        }
         a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
         a.ExtendedBounds = &es.ExtendedBounds{Min: timeFrom, Max: timeTo}
         a.Format = bucketAgg.Settings.Get("format").MustString(es.DateFormatEpochMS)

-        if a.FixedInterval == "auto" {
-            // note this is not really a valid grafana-variable-handling,
-            // because normally this would not match `$__interval_ms`,
-            // but because how we apply these in the go-code, this will work
-            // correctly, and becomes something like `500ms`.
-            // a nicer way would be to use `${__interval_ms}ms`, but
-            // that format is not recognized where we apply these variables
-            // in the elasticsearch datasource
-            a.FixedInterval = "$__interval_msms"
-        }
-
         if offset, err := bucketAgg.Settings.Get("offset").String(); err == nil {
             a.Offset = offset
         }
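The branch above routes the configured interval as follows: values returned by GetCalendarIntervals() become a calendar_interval, the "auto" placeholder becomes the "$__interval_msms" fixed interval, and anything else is passed through as a fixed_interval. A standalone sketch of that routing; routeInterval and the local calendarIntervals slice are hypothetical stand-ins for illustration, not part of the datasource code:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// calendarIntervals mirrors es.GetCalendarIntervals() for this sketch.
var calendarIntervals = []string{"1w", "1M", "1q", "1y"}

// routeInterval shows the same decision as addDateHistogramAgg: calendar units
// go to calendar_interval, everything else stays a fixed_interval.
func routeInterval(interval string) (calendar, fixed string) {
	if slices.Contains(calendarIntervals, interval) {
		return interval, ""
	}
	if interval == "auto" {
		return "", "$__interval_msms"
	}
	return "", interval
}

func main() {
	for _, in := range []string{"1M", "30s", "auto"} {
		cal, fix := routeInterval(in)
		fmt.Printf("%-5s -> calendar_interval=%q fixed_interval=%q\n", in, cal, fix)
	}
}
```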
@@ -1669,6 +1669,31 @@ func TestSettingsCasting(t *testing.T) {
         assert.NotZero(t, dateHistogramAgg.FixedInterval)
     })
+
+    t.Run("Uses calendar_interval", func(t *testing.T) {
+        c := newFakeClient()
+        _, err := executeElasticsearchDataQuery(c, `{
+            "bucketAggs": [
+                {
+                    "type": "date_histogram",
+                    "field": "@timestamp",
+                    "id": "2",
+                    "settings": {
+                        "interval": "1M"
+                    }
+                }
+            ],
+            "metrics": [
+                { "id": "1", "type": "average", "field": "@value" }
+            ]
+        }`, from, to)
+        assert.Nil(t, err)
+        sr := c.multisearchRequests[0].Requests[0]
+
+        dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
+
+        assert.NotZero(t, dateHistogramAgg.CalendarInterval)
+    })
 })
 })
@@ -1755,6 +1780,21 @@ func TestSettingsCasting(t *testing.T) {
         dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
         assert.Equal(t, dateHistogramAgg.FixedInterval, "1d")
     })
+
+    t.Run("Should use calendar_interval", func(t *testing.T) {
+        c := newFakeClient()
+        _, err := executeElasticsearchDataQuery(c, `{
+            "metrics": [{ "type": "count", "id": "1" }],
+            "bucketAggs": [
+                { "type": "date_histogram", "id": "2", "field": "@time", "settings": { "min_doc_count": "1", "interval": "1w" } }
+            ]
+        }`, from, to)
+
+        assert.Nil(t, err)
+        sr := c.multisearchRequests[0].Requests[0]
+        dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
+        assert.Equal(t, dateHistogramAgg.CalendarInterval, "1w")
+    })
 })
 }
@@ -899,6 +899,25 @@ describe('ElasticQueryBuilder', () => {
       expect(query.aggs['2'].date_histogram.interval).toBeUndefined();
       expect(query.aggs['2'].date_histogram.fixed_interval).toBe('1d');
     });
+
+    it('should use calendar_interval', () => {
+      const query = builder.build({
+        refId: 'A',
+        metrics: [{ type: 'count', id: '1' }],
+        timeField: '@timestamp',
+        bucketAggs: [
+          {
+            type: 'date_histogram',
+            id: '2',
+            field: '@time',
+            settings: { min_doc_count: '1', interval: '1w' },
+          },
+        ],
+      });
+
+      expect(query.aggs['2'].date_histogram.interval).toBeUndefined();
+      expect(query.aggs['2'].date_histogram.calendar_interval).toBe('1w');
+    });
   });
 });
 });
@@ -100,6 +100,7 @@ export class ElasticQueryBuilder {
   getDateHistogramAgg(aggDef: DateHistogram) {
     const esAgg: any = {};
     const settings = aggDef.settings || {};
+    const calendarIntervals: string[] = ['1w', '1M', '1q', '1y'];

     esAgg.field = aggDef.field || this.timeField;
     esAgg.min_doc_count = settings.min_doc_count || 0;
@@ -115,7 +116,11 @@

     const interval = settings.interval === 'auto' ? '${__interval_ms}ms' : settings.interval;

-    esAgg.fixed_interval = interval;
+    if (interval !== undefined && calendarIntervals.includes(interval)) {
+      esAgg.calendar_interval = interval;
+    } else {
+      esAgg.fixed_interval = interval;
+    }

     return esAgg;
   }