package elasticsearch
import (
	"context"
	"encoding/json"
	"fmt"
	"regexp"
	"slices"
	"strconv"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/backend"
	"github.com/grafana/grafana-plugin-sdk-go/experimental/errorsource"

	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/infra/log"
	"github.com/grafana/grafana/pkg/infra/tracing"
	es "github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
)

const (
	defaultSize = 500
)

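// elasticsearchDataQuery executes a set of data queries against an
// Elasticsearch datasource and parses the results into data frames.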
type elasticsearchDataQuery struct {
	client               es.Client
	dataQueries          []backend.DataQuery
	logger               log.Logger
	ctx                  context.Context
	tracer               tracing.Tracer
	keepLabelsInResponse bool
}

var newElasticsearchDataQuery = func(ctx context.Context, client es.Client, req *backend.QueryDataRequest, logger log.Logger, tracer tracing.Tracer) *elasticsearchDataQuery {
	_, fromAlert := req.Headers[headerFromAlert]
	fromExpression := req.GetHTTPHeader(headerFromExpression) != ""

	return &elasticsearchDataQuery{
		client:      client,
		dataQueries: req.Queries,
		logger:      logger,
		ctx:         ctx,
		tracer:      tracer,
		// To maintain backward compatibility, it is necessary to keep labels in responses for alerting and expression queries.
		// Historically, these labels have been used in alerting rules and transformations.
		keepLabelsInResponse: fromAlert || fromExpression,
	}
}
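
// execute parses the configured queries, bundles them into a single
// multisearch request, runs it against the datasource, and parses the
// responses. Errors are attached to the response rather than returned.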
func (e *elasticsearchDataQuery) execute() (*backend.QueryDataResponse, error) {
	start := time.Now()
	response := backend.NewQueryDataResponse()
	e.logger.Debug("Parsing queries", "queriesLength", len(e.dataQueries))
	queries, err := parseQuery(e.dataQueries, e.logger)
	if err != nil {
		mq, _ := json.Marshal(e.dataQueries)
		e.logger.Error("Failed to parse queries", "error", err, "queries", string(mq), "queriesLength", len(queries), "duration", time.Since(start), "stage", es.StagePrepareRequest)
		return errorsource.AddPluginErrorToResponse(e.dataQueries[0].RefID, response, err), nil
	}

	ms := e.client.MultiSearch()
	for _, q := range queries {
		from := q.TimeRange.From.UnixNano() / int64(time.Millisecond)
		to := q.TimeRange.To.UnixNano() / int64(time.Millisecond)
		if err := e.processQuery(q, ms, from, to); err != nil {
			mq, _ := json.Marshal(q)
			e.logger.Error("Failed to process query to multisearch request builder", "error", err, "query", string(mq), "queriesLength", len(queries), "duration", time.Since(start), "stage", es.StagePrepareRequest)
			return errorsource.AddPluginErrorToResponse(q.RefID, response, err), nil
		}
	}

	req, err := ms.Build()
	if err != nil {
		mqs, _ := json.Marshal(e.dataQueries)
		e.logger.Error("Failed to build multisearch request", "error", err, "queriesLength", len(queries), "queries", string(mqs), "duration", time.Since(start), "stage", es.StagePrepareRequest)
		return errorsource.AddPluginErrorToResponse(e.dataQueries[0].RefID, response, err), nil
	}

	e.logger.Info("Prepared request", "queriesLength", len(queries), "duration", time.Since(start), "stage", es.StagePrepareRequest)
	res, err := e.client.ExecuteMultisearch(req)
	if err != nil {
		// We return an error containing the source that was added through errorsource.Middleware
		return errorsource.AddErrorToResponse(e.dataQueries[0].RefID, response, err), nil
	}

	return parseResponse(e.ctx, res.Responses, queries, e.client.GetConfiguredFields(), e.keepLabelsInResponse, e.logger, e.tracer)
}
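
// processQuery validates a single query, applies the time range and raw
// query string filters, and adds it to the multisearch request builder as a
// logs, document, or time series search.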
func (e *elasticsearchDataQuery) processQuery(q *Query, ms *es.MultiSearchRequestBuilder, from, to int64) error {
	err := isQueryWithError(q)
	if err != nil {
		err = fmt.Errorf("received invalid query. %w", err)
		return err
	}

	defaultTimeField := e.client.GetConfiguredFields().TimeField
	b := ms.Search(q.Interval, q.TimeRange)
	b.Size(0)
	filters := b.Query().Bool().Filter()
	filters.AddDateRangeFilter(defaultTimeField, to, from, es.DateFormatEpochMS)
	filters.AddQueryStringFilter(q.RawQuery, true)

	if isLogsQuery(q) {
		processLogsQuery(q, b, from, to, defaultTimeField)
	} else if isDocumentQuery(q) {
		processDocumentQuery(q, b, from, to, defaultTimeField)
	} else {
		// Otherwise, it is a time series query, which we process here
		processTimeSeriesQuery(q, b, from, to, defaultTimeField)
	}

	return nil
}
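
// setFloatPath and setIntPath coerce settings that arrive as strings into
// the numeric types Elasticsearch's query DSL expects, so that, for example,
// {"window": "5"} is sent as {"window": 5}.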
func setFloatPath(settings *simplejson.Json, path ...string) {
	if stringValue, err := settings.GetPath(path...).String(); err == nil {
		if value, err := strconv.ParseFloat(stringValue, 64); err == nil {
			settings.SetPath(path, value)
		}
	}
}

func setIntPath(settings *simplejson.Json, path ...string) {
	if stringValue, err := settings.GetPath(path...).String(); err == nil {
		if value, err := strconv.ParseInt(stringValue, 10, 64); err == nil {
			settings.SetPath(path, value)
		}
	}
}

// Casts values to float when required by Elastic's query DSL
func (metricAggregation MetricAgg) generateSettingsForDSL() map[string]any {
	switch metricAggregation.Type {
	case "moving_avg":
		setFloatPath(metricAggregation.Settings, "window")
		setFloatPath(metricAggregation.Settings, "predict")
		setFloatPath(metricAggregation.Settings, "settings", "alpha")
		setFloatPath(metricAggregation.Settings, "settings", "beta")
		setFloatPath(metricAggregation.Settings, "settings", "gamma")
		setFloatPath(metricAggregation.Settings, "settings", "period")
	case "serial_diff":
		setFloatPath(metricAggregation.Settings, "lag")
	}

	if isMetricAggregationWithInlineScriptSupport(metricAggregation.Type) {
		scriptValue, err := metricAggregation.Settings.GetPath("script").String()
		if err != nil {
			// the script is stored using the old format (`script:{inline: "value"}`) or is not set
			scriptValue, err = metricAggregation.Settings.GetPath("script", "inline").String()
		}

		if err == nil {
			metricAggregation.Settings.SetPath([]string{"script"}, scriptValue)
		}
	}

	return metricAggregation.Settings.MustMap()
}

func (bucketAgg BucketAgg) generateSettingsForDSL() map[string]any {
	setIntPath(bucketAgg.Settings, "min_doc_count")

	return bucketAgg.Settings.MustMap()
}
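
// addDateHistogramAgg adds a date histogram bucket to the aggregation
// builder. For a typical query it renders to DSL roughly like the following
// (illustrative values, not an exact dump):
//
//	"date_histogram": {
//	  "field": "@timestamp",
//	  "fixed_interval": "10s",
//	  "min_doc_count": 0,
//	  "extended_bounds": { "min": 1577836800000, "max": 1577840400000 },
//	  "format": "epoch_millis"
//	}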
func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo int64, timeField string) es.AggBuilder {
	// If no field is specified, use the time field
	field := bucketAgg.Field
	if field == "" {
		field = timeField
	}
	aggBuilder.DateHistogram(bucketAgg.ID, field, func(a *es.DateHistogramAgg, b es.AggBuilder) {
		var interval = bucketAgg.Settings.Get("interval").MustString("auto")
		if slices.Contains(es.GetCalendarIntervals(), interval) {
			a.CalendarInterval = interval
		} else {
			if interval == "auto" {
				// Note: `$__interval_msms` is not really valid Grafana variable handling,
				// because normally it would not match `$__interval_ms`. But because of how
				// we apply these variables in the Go code, it works correctly and becomes
				// something like `500ms`. A nicer way would be `${__interval_ms}ms`, but
				// that format is not recognized where we apply these variables in the
				// elasticsearch datasource.
				a.FixedInterval = "$__interval_msms"
			} else {
				a.FixedInterval = interval
			}
		}

		a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
		a.ExtendedBounds = &es.ExtendedBounds{Min: timeFrom, Max: timeTo}
		a.Format = bucketAgg.Settings.Get("format").MustString(es.DateFormatEpochMS)

		if offset, err := bucketAgg.Settings.Get("offset").String(); err == nil {
			a.Offset = offset
		}

		if missing, err := bucketAgg.Settings.Get("missing").String(); err == nil {
			a.Missing = &missing
		}

		if timezone, err := bucketAgg.Settings.Get("timeZone").String(); err == nil {
			if timezone != "utc" {
				a.TimeZone = timezone
			}
		}

		aggBuilder = b
	})
	return aggBuilder
}

func addHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
	aggBuilder.Histogram(bucketAgg.ID, bucketAgg.Field, func(a *es.HistogramAgg, b es.AggBuilder) {
		a.Interval = stringToIntWithDefaultValue(bucketAgg.Settings.Get("interval").MustString(), 1000)
		a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
		if missing, err := bucketAgg.Settings.Get("missing").Int(); err == nil {
			a.Missing = &missing
		}
		aggBuilder = b
	})
	return aggBuilder
}
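
// addTermsAgg adds a terms bucket. When orderBy references a sibling metric
// by its numeric ID, that metric is attached to the bucket so Elasticsearch
// can order the terms by its value; count metrics map to the built-in _count.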
func addTermsAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, metrics []*MetricAgg) es.AggBuilder {
	aggBuilder.Terms(bucketAgg.ID, bucketAgg.Field, func(a *es.TermsAggregation, b es.AggBuilder) {
		if size, err := bucketAgg.Settings.Get("size").Int(); err == nil {
			a.Size = size
		} else {
			a.Size = stringToIntWithDefaultValue(bucketAgg.Settings.Get("size").MustString(), defaultSize)
		}

		if minDocCount, err := bucketAgg.Settings.Get("min_doc_count").Int(); err == nil {
			a.MinDocCount = &minDocCount
		}
		if missing, err := bucketAgg.Settings.Get("missing").String(); err == nil {
			a.Missing = &missing
		}
		if orderBy, err := bucketAgg.Settings.Get("orderBy").String(); err == nil {
			/*
				The format for extended stats and percentiles is {metricId}[bucket_path];
				for everything else it's just {metricId}, _count, _term, or _key.
			*/
			metricIdRegex := regexp.MustCompile(`^(\d+)`)
			metricId := metricIdRegex.FindString(orderBy)
			if len(metricId) > 0 {
				for _, m := range metrics {
					if m.ID == metricId {
						if m.Type == "count" {
							a.Order["_count"] = bucketAgg.Settings.Get("order").MustString("desc")
						} else {
							a.Order[orderBy] = bucketAgg.Settings.Get("order").MustString("desc")
							b.Metric(m.ID, m.Type, m.Field, nil)
						}
						break
					}
				}
			} else {
				a.Order[orderBy] = bucketAgg.Settings.Get("order").MustString("desc")
			}
		}
		aggBuilder = b
	})
	return aggBuilder
}

func addNestedAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
	aggBuilder.Nested(bucketAgg.ID, bucketAgg.Field, func(a *es.NestedAggregation, b es.AggBuilder) {
		aggBuilder = b
	})
	return aggBuilder
}

func addFiltersAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
	filters := make(map[string]any)
	for _, filter := range bucketAgg.Settings.Get("filters").MustArray() {
		json := simplejson.NewFromAny(filter)
		query := json.Get("query").MustString()
		label := json.Get("label").MustString()
		if label == "" {
			label = query
		}
		filters[label] = &es.QueryStringFilter{Query: query, AnalyzeWildcard: true}
	}

	if len(filters) > 0 {
		aggBuilder.Filters(bucketAgg.ID, func(a *es.FiltersAggregation, b es.AggBuilder) {
			a.Filters = filters
			aggBuilder = b
		})
	}
	return aggBuilder
}

func addGeoHashGridAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
	aggBuilder.GeoHashGrid(bucketAgg.ID, bucketAgg.Field, func(a *es.GeoHashGridAggregation, b es.AggBuilder) {
		a.Precision = stringToIntWithDefaultValue(bucketAgg.Settings.Get("precision").MustString(), es.DefaultGeoHashPrecision)
		aggBuilder = b
	})
	return aggBuilder
}

func getPipelineAggField(m *MetricAgg) string {
	// In the frontend we use Field as the pipeline agg field.
	// There might be a historical reason why the backend used PipelineAggregate instead,
	// so for now we check Field first and then PipelineAggregate to ensure we don't break anything.
	// TODO: Investigate whether the check for PipelineAggregate can be removed.
	pipelineAggField := m.Field
	if pipelineAggField == "" {
		pipelineAggField = m.PipelineAggregate
	}
	return pipelineAggField
}
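
// isQueryWithError returns an error for queries that cannot be executed: a
// query without bucket aggregations is only valid as a logs or document
// query with at least one metric.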
func isQueryWithError(query *Query) error {
	if len(query.BucketAggs) == 0 {
		// If there are no aggregations, only document and logs queries are valid
		if len(query.Metrics) == 0 || !(isLogsQuery(query) || isDocumentQuery(query)) {
			return fmt.Errorf("invalid query, missing metrics and aggregations")
		}
	}
	return nil
}

func isLogsQuery(query *Query) bool {
	return query.Metrics[0].Type == logsType
}

func isDocumentQuery(query *Query) bool {
	return isRawDataQuery(query) || isRawDocumentQuery(query)
}

func isRawDataQuery(query *Query) bool {
	return query.Metrics[0].Type == rawDataType
}

func isRawDocumentQuery(query *Query) bool {
	return query.Metrics[0].Type == rawDocumentType
}
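
// processLogsQuery configures the search request for a logs query: sorted by
// time, with doc value and highlight fields, an optional search_after cursor
// for log context, and a date histogram aggregation over the time field.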
func processLogsQuery(q *Query, b *es.SearchRequestBuilder, from, to int64, defaultTimeField string) {
	metric := q.Metrics[0]
	sort := es.SortOrderDesc
	if metric.Settings.Get("sortDirection").MustString() == "asc" {
		// This is currently used only for log context queries
		sort = es.SortOrderAsc
	}
	b.Sort(sort, defaultTimeField, "boolean")
	b.Sort(sort, "_doc", "")
	b.AddDocValueField(defaultTimeField)

	// We add timeField as a field with a standardized time format so we don't receive
	// formats that Elasticsearch can parse but our frontend can't (e.g. yyyy_MM_dd_HH_mm_ss)
	b.AddTimeFieldWithStandardizedFormat(defaultTimeField)
	b.Size(stringToIntWithDefaultValue(metric.Settings.Get("limit").MustString(), defaultSize))
	b.AddHighlight()

	// This is currently used only for log context queries, to get
	// log lines before and after the selected log line
	searchAfter := metric.Settings.Get("searchAfter").MustArray()
	for _, value := range searchAfter {
		b.AddSearchAfter(value)
	}

	// For log queries, we add a date histogram aggregation
	aggBuilder := b.Agg()
	q.BucketAggs = append(q.BucketAggs, &BucketAgg{
		Type:  dateHistType,
		Field: defaultTimeField,
		ID:    "1",
		Settings: simplejson.NewFromAny(map[string]any{
			"interval": "auto",
		}),
	})
	bucketAgg := q.BucketAggs[0]
	bucketAgg.Settings = simplejson.NewFromAny(
		bucketAgg.generateSettingsForDSL(),
	)
	_ = addDateHistogramAgg(aggBuilder, bucketAgg, from, to, defaultTimeField)
}
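
// processDocumentQuery configures the search request for raw data and raw
// document queries: newest documents first, up to the configured size.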
func processDocumentQuery(q *Query, b *es.SearchRequestBuilder, from, to int64, defaultTimeField string) {
	metric := q.Metrics[0]
	b.Sort(es.SortOrderDesc, defaultTimeField, "boolean")
	b.Sort(es.SortOrderDesc, "_doc", "")
	b.AddDocValueField(defaultTimeField)

	if isRawDataQuery(q) {
		// For raw_data queries, we add timeField as a field with a standardized time format so we don't receive
		// formats that Elasticsearch can parse but our frontend can't (e.g. yyyy_MM_dd_HH_mm_ss)
		b.AddTimeFieldWithStandardizedFormat(defaultTimeField)
	}
	b.Size(stringToIntWithDefaultValue(metric.Settings.Get("size").MustString(), defaultSize))
}
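
// processTimeSeriesQuery nests the query's bucket aggregations in order and
// attaches its metrics, resolving pipeline aggregations' bucket paths
// against sibling metric IDs (count metrics resolve to _count).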
func processTimeSeriesQuery(q *Query, b *es.SearchRequestBuilder, from, to int64, defaultTimeField string) {
	aggBuilder := b.Agg()
	// Process buckets: each aggregation is nested inside the previous one
	for _, bucketAgg := range q.BucketAggs {
		bucketAgg.Settings = simplejson.NewFromAny(
			bucketAgg.generateSettingsForDSL(),
		)
		switch bucketAgg.Type {
		case dateHistType:
			aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to, defaultTimeField)
		case histogramType:
			aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
		case filtersType:
			aggBuilder = addFiltersAgg(aggBuilder, bucketAgg)
		case termsType:
			aggBuilder = addTermsAgg(aggBuilder, bucketAgg, q.Metrics)
		case geohashGridType:
			aggBuilder = addGeoHashGridAgg(aggBuilder, bucketAgg)
		case nestedType:
			aggBuilder = addNestedAgg(aggBuilder, bucketAgg)
		}
	}

	// Process metrics
	for _, m := range q.Metrics {
		m := m
		if m.Type == countType {
			continue
		}

		if isPipelineAgg(m.Type) {
			if isPipelineAggWithMultipleBucketPaths(m.Type) {
				if len(m.PipelineVariables) > 0 {
					bucketPaths := map[string]any{}
					for name, pipelineAgg := range m.PipelineVariables {
						if _, err := strconv.Atoi(pipelineAgg); err == nil {
							var appliedAgg *MetricAgg
							for _, pipelineMetric := range q.Metrics {
								if pipelineMetric.ID == pipelineAgg {
									appliedAgg = pipelineMetric
									break
								}
							}
							if appliedAgg != nil {
								if appliedAgg.Type == countType {
									bucketPaths[name] = "_count"
								} else {
									bucketPaths[name] = pipelineAgg
								}
							}
						}
					}

					aggBuilder.Pipeline(m.ID, m.Type, bucketPaths, func(a *es.PipelineAggregation) {
						a.Settings = m.generateSettingsForDSL()
					})
				} else {
					continue
				}
			} else {
				pipelineAggField := getPipelineAggField(m)
				if _, err := strconv.Atoi(pipelineAggField); err == nil {
					var appliedAgg *MetricAgg
					for _, pipelineMetric := range q.Metrics {
						if pipelineMetric.ID == pipelineAggField {
							appliedAgg = pipelineMetric
							break
						}
					}
					if appliedAgg != nil {
						bucketPath := pipelineAggField
						if appliedAgg.Type == countType {
							bucketPath = "_count"
						}

						aggBuilder.Pipeline(m.ID, m.Type, bucketPath, func(a *es.PipelineAggregation) {
							a.Settings = m.generateSettingsForDSL()
						})
					}
				} else {
					continue
				}
			}
		} else {
			aggBuilder.Metric(m.ID, m.Type, m.Field, func(a *es.MetricAggregation) {
				a.Settings = m.generateSettingsForDSL()
			})
		}
	}
}
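
// stringToIntWithDefaultValue parses valueStr as an int, falling back to
// defaultValue when parsing fails or the parsed value is 0.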
func stringToIntWithDefaultValue(valueStr string, defaultValue int) int {
	value, err := strconv.Atoi(valueStr)
	if err != nil {
		value = defaultValue
	}
	// In our case, 0 is not a valid value, so we also fall back to defaultValue
	if value == 0 {
		value = defaultValue
	}
	return value
}