Merge pull request #11380 from WPH95/feature/add_es_alerting

elasticsearch: alerting support
Marcus Efraimsson 2018-06-01 15:17:28 +02:00 committed by GitHub
commit b6afe5f2e8
17 changed files with 4874 additions and 1 deletion

View File

@@ -27,7 +27,9 @@ and the conditions that need to be met for the alert to change state and trigger
## Execution
The alert rules are evaluated in the Grafana backend in a scheduler and query execution engine that is part
-of core Grafana. Only some data sources are supported right now. They include `Graphite`, `Prometheus`, `InfluxDB`, `OpenTSDB`, `MySQL`, `Postgres` and `Cloudwatch`.
+of core Grafana. Only some data sources are supported right now. They include `Graphite`, `Prometheus`, `Elasticsearch`, `InfluxDB`, `OpenTSDB`, `MySQL`, `Postgres` and `Cloudwatch`.
+
+> Alerting support for Elasticsearch is only available in Grafana v5.2 and above.
### Clustering
@@ -152,6 +154,8 @@ filters = alerting.scheduler:debug \
tsdb.prometheus:debug \
tsdb.opentsdb:debug \
tsdb.influxdb:debug \
+tsdb.elasticsearch:debug \
+tsdb.elasticsearch.client:debug \
```
If you want to log the raw query sent to your TSDB and the raw response, you also have to set the grafana.ini option `app_mode` to `development`.
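
For reference, a minimal sketch of what that configuration could look like (assuming the standard `conf/custom.ini` layout, where `app_mode` sits at the top of the file and `filters` under the `[log]` section):

```ini
app_mode = development

[log]
filters = alerting.scheduler:debug \
          tsdb.elasticsearch:debug \
          tsdb.elasticsearch.client:debug
```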

View File

@@ -22,6 +22,7 @@ import (
_ "github.com/grafana/grafana/pkg/services/alerting/conditions"
_ "github.com/grafana/grafana/pkg/services/alerting/notifiers"
_ "github.com/grafana/grafana/pkg/tsdb/cloudwatch"
_ "github.com/grafana/grafana/pkg/tsdb/elasticsearch"
_ "github.com/grafana/grafana/pkg/tsdb/graphite"
_ "github.com/grafana/grafana/pkg/tsdb/influxdb"
_ "github.com/grafana/grafana/pkg/tsdb/mysql"

View File

@@ -0,0 +1,257 @@
package es
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana/pkg/models"
"golang.org/x/net/context/ctxhttp"
)
const loggerName = "tsdb.elasticsearch.client"
var (
clientLog = log.New(loggerName)
)
var newDatasourceHttpClient = func(ds *models.DataSource) (*http.Client, error) {
return ds.GetHttpClient()
}
// Client represents a client which can interact with elasticsearch api
type Client interface {
GetVersion() int
GetTimeField() string
GetMinInterval(queryInterval string) (time.Duration, error)
ExecuteMultisearch(r *MultiSearchRequest) (*MultiSearchResponse, error)
MultiSearch() *MultiSearchRequestBuilder
}
// NewClient creates a new elasticsearch client
var NewClient = func(ctx context.Context, ds *models.DataSource, timeRange *tsdb.TimeRange) (Client, error) {
version, err := ds.JsonData.Get("esVersion").Int()
if err != nil {
return nil, fmt.Errorf("eleasticsearch version is required, err=%v", err)
}
timeField, err := ds.JsonData.Get("timeField").String()
if err != nil {
return nil, fmt.Errorf("eleasticsearch time field name is required, err=%v", err)
}
indexInterval := ds.JsonData.Get("interval").MustString()
ip, err := newIndexPattern(indexInterval, ds.Database)
if err != nil {
return nil, err
}
indices, err := ip.GetIndices(timeRange)
if err != nil {
return nil, err
}
clientLog.Debug("Creating new client", "version", version, "timeField", timeField, "indices", strings.Join(indices, ", "))
switch version {
case 2, 5, 56:
return &baseClientImpl{
ctx: ctx,
ds: ds,
version: version,
timeField: timeField,
indices: indices,
timeRange: timeRange,
}, nil
}
return nil, fmt.Errorf("elasticsearch version=%d is not supported", version)
}
type baseClientImpl struct {
ctx context.Context
ds *models.DataSource
version int
timeField string
indices []string
timeRange *tsdb.TimeRange
}
func (c *baseClientImpl) GetVersion() int {
return c.version
}
func (c *baseClientImpl) GetTimeField() string {
return c.timeField
}
func (c *baseClientImpl) GetMinInterval(queryInterval string) (time.Duration, error) {
return tsdb.GetIntervalFrom(c.ds, simplejson.NewFromAny(map[string]interface{}{
"interval": queryInterval,
}), 5*time.Second)
}
func (c *baseClientImpl) getSettings() *simplejson.Json {
return c.ds.JsonData
}
type multiRequest struct {
header map[string]interface{}
body interface{}
interval tsdb.Interval
}
func (c *baseClientImpl) executeBatchRequest(uriPath string, requests []*multiRequest) (*http.Response, error) {
bytes, err := c.encodeBatchRequests(requests)
if err != nil {
return nil, err
}
return c.executeRequest(http.MethodPost, uriPath, bytes)
}
func (c *baseClientImpl) encodeBatchRequests(requests []*multiRequest) ([]byte, error) {
clientLog.Debug("Encoding batch requests to json", "batch requests", len(requests))
start := time.Now()
payload := bytes.Buffer{}
for _, r := range requests {
reqHeader, err := json.Marshal(r.header)
if err != nil {
return nil, err
}
payload.WriteString(string(reqHeader) + "\n")
reqBody, err := json.Marshal(r.body)
if err != nil {
return nil, err
}
body := string(reqBody)
body = strings.Replace(body, "$__interval_ms", strconv.FormatInt(r.interval.Value.Nanoseconds()/int64(time.Millisecond), 10), -1)
body = strings.Replace(body, "$__interval", r.interval.Text, -1)
payload.WriteString(body + "\n")
}
elapsed := time.Since(start)
clientLog.Debug("Encoded batch requests to json", "took", elapsed)
return payload.Bytes(), nil
}
func (c *baseClientImpl) executeRequest(method, uriPath string, body []byte) (*http.Response, error) {
u, err := url.Parse(c.ds.Url)
if err != nil {
return nil, err
}
u.Path = path.Join(u.Path, uriPath)
var req *http.Request
if method == http.MethodPost {
req, err = http.NewRequest(http.MethodPost, u.String(), bytes.NewBuffer(body))
} else {
req, err = http.NewRequest(http.MethodGet, u.String(), nil)
}
if err != nil {
return nil, err
}
clientLog.Debug("Executing request", "url", req.URL.String(), "method", method)
req.Header.Set("User-Agent", "Grafana")
req.Header.Set("Content-Type", "application/json")
if c.ds.BasicAuth {
clientLog.Debug("Request configured to use basic authentication")
req.SetBasicAuth(c.ds.BasicAuthUser, c.ds.BasicAuthPassword)
}
if !c.ds.BasicAuth && c.ds.User != "" {
clientLog.Debug("Request configured to use basic authentication")
req.SetBasicAuth(c.ds.User, c.ds.Password)
}
httpClient, err := newDatasourceHttpClient(c.ds)
if err != nil {
return nil, err
}
start := time.Now()
defer func() {
elapsed := time.Since(start)
clientLog.Debug("Executed request", "took", elapsed)
}()
return ctxhttp.Do(c.ctx, httpClient, req)
}
func (c *baseClientImpl) ExecuteMultisearch(r *MultiSearchRequest) (*MultiSearchResponse, error) {
clientLog.Debug("Executing multisearch", "search requests", len(r.Requests))
multiRequests := c.createMultiSearchRequests(r.Requests)
res, err := c.executeBatchRequest("_msearch", multiRequests)
if err != nil {
return nil, err
}
clientLog.Debug("Received multisearch response", "code", res.StatusCode, "status", res.Status, "content-length", res.ContentLength)
start := time.Now()
clientLog.Debug("Decoding multisearch json response")
var msr MultiSearchResponse
defer res.Body.Close()
dec := json.NewDecoder(res.Body)
err = dec.Decode(&msr)
if err != nil {
return nil, err
}
elapsed := time.Since(start)
clientLog.Debug("Decoded multisearch json response", "took", elapsed)
msr.status = res.StatusCode
return &msr, nil
}
func (c *baseClientImpl) createMultiSearchRequests(searchRequests []*SearchRequest) []*multiRequest {
multiRequests := []*multiRequest{}
for _, searchReq := range searchRequests {
mr := multiRequest{
header: map[string]interface{}{
"search_type": "query_then_fetch",
"ignore_unavailable": true,
"index": strings.Join(c.indices, ","),
},
body: searchReq,
interval: searchReq.Interval,
}
if c.version == 2 {
mr.header["search_type"] = "count"
}
if c.version >= 56 {
maxConcurrentShardRequests := c.getSettings().Get("maxConcurrentShardRequests").MustInt(256)
mr.header["max_concurrent_shard_requests"] = maxConcurrentShardRequests
}
multiRequests = append(multiRequests, &mr)
}
return multiRequests
}
func (c *baseClientImpl) MultiSearch() *MultiSearchRequestBuilder {
return NewMultiSearchRequestBuilder(c.GetVersion())
}

View File

@@ -0,0 +1,304 @@
package es
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana/pkg/models"
. "github.com/smartystreets/goconvey/convey"
)
func TestClient(t *testing.T) {
Convey("Test elasticsearch client", t, func() {
Convey("NewClient", func() {
Convey("When no version set should return error", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(make(map[string]interface{})),
}
_, err := NewClient(nil, ds, nil)
So(err, ShouldNotBeNil)
})
Convey("When no time field name set should return error", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 5,
}),
}
_, err := NewClient(nil, ds, nil)
So(err, ShouldNotBeNil)
})
Convey("When unspported version set should return error", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 6,
"timeField": "@timestamp",
}),
}
_, err := NewClient(nil, ds, nil)
So(err, ShouldNotBeNil)
})
Convey("When version 2 should return v2 client", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 2,
"timeField": "@timestamp",
}),
}
c, err := NewClient(nil, ds, nil)
So(err, ShouldBeNil)
So(c.GetVersion(), ShouldEqual, 2)
})
Convey("When version 5 should return v5 client", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 5,
"timeField": "@timestamp",
}),
}
c, err := NewClient(nil, ds, nil)
So(err, ShouldBeNil)
So(c.GetVersion(), ShouldEqual, 5)
})
Convey("When version 56 should return v5.6 client", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 56,
"timeField": "@timestamp",
}),
}
c, err := NewClient(nil, ds, nil)
So(err, ShouldBeNil)
So(c.GetVersion(), ShouldEqual, 56)
})
})
Convey("Given a fake http client", func() {
var responseBuffer *bytes.Buffer
var req *http.Request
ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
req = r
buf, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("Failed to read response body, err=%v", err)
}
responseBuffer = bytes.NewBuffer(buf)
}))
currentNewDatasourceHttpClient := newDatasourceHttpClient
newDatasourceHttpClient = func(ds *models.DataSource) (*http.Client, error) {
return ts.Client(), nil
}
from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)
fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
timeRange := tsdb.NewTimeRange(fromStr, toStr)
Convey("and a v2.x client", func() {
ds := models.DataSource{
Database: "[metrics-]YYYY.MM.DD",
Url: ts.URL,
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 2,
"timeField": "@timestamp",
"interval": "Daily",
}),
}
c, err := NewClient(context.Background(), &ds, timeRange)
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("When executing multi search", func() {
ms, err := createMultisearchForTest(c)
So(err, ShouldBeNil)
c.ExecuteMultisearch(ms)
Convey("Should send correct request and payload", func() {
So(req, ShouldNotBeNil)
So(req.Method, ShouldEqual, http.MethodPost)
So(req.URL.Path, ShouldEqual, "/_msearch")
So(responseBuffer, ShouldNotBeNil)
headerBytes, err := responseBuffer.ReadBytes('\n')
So(err, ShouldBeNil)
bodyBytes := responseBuffer.Bytes()
jHeader, err := simplejson.NewJson(headerBytes)
So(err, ShouldBeNil)
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)
fmt.Println("body", string(headerBytes))
So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "count")
So(jHeader.Get("max_concurrent_shard_requests").MustInt(10), ShouldEqual, 10)
Convey("and replace $__interval variable", func() {
So(jBody.GetPath("aggs", "2", "aggs", "1", "avg", "script").MustString(), ShouldEqual, "15000*@hostname")
})
Convey("and replace $__interval_ms variable", func() {
So(jBody.GetPath("aggs", "2", "date_histogram", "interval").MustString(), ShouldEqual, "15s")
})
})
})
})
Convey("and a v5.x client", func() {
ds := models.DataSource{
Database: "[metrics-]YYYY.MM.DD",
Url: ts.URL,
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 5,
"maxConcurrentShardRequests": 100,
"timeField": "@timestamp",
"interval": "Daily",
}),
}
c, err := NewClient(context.Background(), &ds, timeRange)
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("When executing multi search", func() {
ms, err := createMultisearchForTest(c)
So(err, ShouldBeNil)
c.ExecuteMultisearch(ms)
Convey("Should send correct request and payload", func() {
So(req, ShouldNotBeNil)
So(req.Method, ShouldEqual, http.MethodPost)
So(req.URL.Path, ShouldEqual, "/_msearch")
So(responseBuffer, ShouldNotBeNil)
headerBytes, err := responseBuffer.ReadBytes('\n')
So(err, ShouldBeNil)
bodyBytes := responseBuffer.Bytes()
jHeader, err := simplejson.NewJson(headerBytes)
So(err, ShouldBeNil)
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)
fmt.Println("body", string(headerBytes))
So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "query_then_fetch")
So(jHeader.Get("max_concurrent_shard_requests").MustInt(10), ShouldEqual, 10)
Convey("and replace $__interval variable", func() {
So(jBody.GetPath("aggs", "2", "aggs", "1", "avg", "script").MustString(), ShouldEqual, "15000*@hostname")
})
Convey("and replace $__interval_ms variable", func() {
So(jBody.GetPath("aggs", "2", "date_histogram", "interval").MustString(), ShouldEqual, "15s")
})
})
})
})
Convey("and a v5.6 client", func() {
ds := models.DataSource{
Database: "[metrics-]YYYY.MM.DD",
Url: ts.URL,
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 56,
"maxConcurrentShardRequests": 100,
"timeField": "@timestamp",
"interval": "Daily",
}),
}
c, err := NewClient(context.Background(), &ds, timeRange)
So(err, ShouldBeNil)
So(c, ShouldNotBeNil)
Convey("When executing multi search", func() {
ms, err := createMultisearchForTest(c)
So(err, ShouldBeNil)
c.ExecuteMultisearch(ms)
Convey("Should send correct request and payload", func() {
So(req, ShouldNotBeNil)
So(req.Method, ShouldEqual, http.MethodPost)
So(req.URL.Path, ShouldEqual, "/_msearch")
So(responseBuffer, ShouldNotBeNil)
headerBytes, err := responseBuffer.ReadBytes('\n')
So(err, ShouldBeNil)
bodyBytes := responseBuffer.Bytes()
jHeader, err := simplejson.NewJson(headerBytes)
So(err, ShouldBeNil)
jBody, err := simplejson.NewJson(bodyBytes)
So(err, ShouldBeNil)
fmt.Println("body", string(headerBytes))
So(jHeader.Get("index").MustString(), ShouldEqual, "metrics-2018.05.15")
So(jHeader.Get("ignore_unavailable").MustBool(false), ShouldEqual, true)
So(jHeader.Get("search_type").MustString(), ShouldEqual, "query_then_fetch")
So(jHeader.Get("max_concurrent_shard_requests").MustInt(), ShouldEqual, 100)
Convey("and replace $__interval variable", func() {
So(jBody.GetPath("aggs", "2", "aggs", "1", "avg", "script").MustString(), ShouldEqual, "15000*@hostname")
})
Convey("and replace $__interval_ms variable", func() {
So(jBody.GetPath("aggs", "2", "date_histogram", "interval").MustString(), ShouldEqual, "15s")
})
})
})
})
Reset(func() {
newDatasourceHttpClient = currentNewDatasourceHttpClient
})
})
})
}
func createMultisearchForTest(c Client) (*MultiSearchRequest, error) {
msb := c.MultiSearch()
s := msb.Search(tsdb.Interval{Value: 15 * time.Second, Text: "15s"})
s.Agg().DateHistogram("2", "@timestamp", func(a *DateHistogramAgg, ab AggBuilder) {
a.Interval = "$__interval"
ab.Metric("1", "avg", "@hostname", func(a *MetricAggregation) {
a.Settings["script"] = "$__interval_ms*@hostname"
})
})
return msb.Build()
}

View File

@@ -0,0 +1,312 @@
package es
import (
"fmt"
"regexp"
"strings"
"time"
"github.com/grafana/grafana/pkg/tsdb"
)
const (
noInterval = ""
intervalHourly = "hourly"
intervalDaily = "daily"
intervalWeekly = "weekly"
intervalMonthly = "monthly"
intervalYearly = "yearly"
)
type indexPattern interface {
GetIndices(timeRange *tsdb.TimeRange) ([]string, error)
}
var newIndexPattern = func(interval string, pattern string) (indexPattern, error) {
if interval == noInterval {
return &staticIndexPattern{indexName: pattern}, nil
}
return newDynamicIndexPattern(interval, pattern)
}
type staticIndexPattern struct {
indexName string
}
func (ip *staticIndexPattern) GetIndices(timeRange *tsdb.TimeRange) ([]string, error) {
return []string{ip.indexName}, nil
}
type intervalGenerator interface {
Generate(from, to time.Time) []time.Time
}
type dynamicIndexPattern struct {
interval string
pattern string
intervalGenerator intervalGenerator
}
func newDynamicIndexPattern(interval, pattern string) (*dynamicIndexPattern, error) {
var generator intervalGenerator
switch strings.ToLower(interval) {
case intervalHourly:
generator = &hourlyInterval{}
case intervalDaily:
generator = &dailyInterval{}
case intervalWeekly:
generator = &weeklyInterval{}
case intervalMonthly:
generator = &monthlyInterval{}
case intervalYearly:
generator = &yearlyInterval{}
default:
return nil, fmt.Errorf("unsupported interval '%s'", interval)
}
return &dynamicIndexPattern{
interval: interval,
pattern: pattern,
intervalGenerator: generator,
}, nil
}
func (ip *dynamicIndexPattern) GetIndices(timeRange *tsdb.TimeRange) ([]string, error) {
from := timeRange.GetFromAsTimeUTC()
to := timeRange.GetToAsTimeUTC()
intervals := ip.intervalGenerator.Generate(from, to)
indices := make([]string, 0)
for _, t := range intervals {
indices = append(indices, formatDate(t, ip.pattern))
}
return indices, nil
}
type hourlyInterval struct{}
func (i *hourlyInterval) Generate(from, to time.Time) []time.Time {
intervals := []time.Time{}
start := time.Date(from.Year(), from.Month(), from.Day(), from.Hour(), 0, 0, 0, time.UTC)
end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, time.UTC)
intervals = append(intervals, start)
for start.Before(end) {
start = start.Add(time.Hour)
intervals = append(intervals, start)
}
return intervals
}
type dailyInterval struct{}
func (i *dailyInterval) Generate(from, to time.Time) []time.Time {
intervals := []time.Time{}
start := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, time.UTC)
end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, time.UTC)
intervals = append(intervals, start)
for start.Before(end) {
start = start.Add(24 * time.Hour)
intervals = append(intervals, start)
}
return intervals
}
type weeklyInterval struct{}
func (i *weeklyInterval) Generate(from, to time.Time) []time.Time {
intervals := []time.Time{}
start := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, time.UTC)
end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, time.UTC)
for start.Weekday() != time.Monday {
start = start.Add(-24 * time.Hour)
}
for end.Weekday() != time.Monday {
end = end.Add(-24 * time.Hour)
}
year, week := start.ISOWeek()
intervals = append(intervals, start)
for start.Before(end) {
start = start.Add(24 * time.Hour)
nextYear, nextWeek := start.ISOWeek()
if nextYear != year || nextWeek != week {
intervals = append(intervals, start)
}
year = nextYear
week = nextWeek
}
return intervals
}
type monthlyInterval struct{}
func (i *monthlyInterval) Generate(from, to time.Time) []time.Time {
intervals := []time.Time{}
start := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, time.UTC)
end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, time.UTC)
month := start.Month()
intervals = append(intervals, start)
for start.Before(end) {
start = start.Add(24 * time.Hour)
nextMonth := start.Month()
if nextMonth != month {
intervals = append(intervals, start)
}
month = nextMonth
}
return intervals
}
type yearlyInterval struct{}
func (i *yearlyInterval) Generate(from, to time.Time) []time.Time {
intervals := []time.Time{}
start := time.Date(from.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
end := time.Date(to.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
year := start.Year()
intervals = append(intervals, start)
for start.Before(end) {
start = start.Add(24 * time.Hour)
nextYear := start.Year()
if nextYear != year {
intervals = append(intervals, start)
}
year = nextYear
}
return intervals
}
var datePatternRegex = regexp.MustCompile("(LT|LL?L?L?|l{1,4}|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|mm?|ss?|SS?S?|X|zz?|ZZ?|Q)")
var datePatternReplacements = map[string]string{
"M": "1", // stdNumMonth 1 2 ... 11 12
"MM": "01", // stdZeroMonth 01 02 ... 11 12
"MMM": "Jan", // stdMonth Jan Feb ... Nov Dec
"MMMM": "January", // stdLongMonth January February ... November December
"D": "2", // stdDay 1 2 ... 30 30
"DD": "02", // stdZeroDay 01 02 ... 30 31
"DDD": "<stdDayOfYear>", // Day of the year 1 2 ... 364 365
"DDDD": "<stdDayOfYearZero>", // Day of the year 001 002 ... 364 365 @todo****
"d": "<stdDayOfWeek>", // Numeric representation of day of the week 0 1 ... 5 6
"dd": "Mon", // ***Su Mo ... Fr Sa @todo
"ddd": "Mon", // Sun Mon ... Fri Sat
"dddd": "Monday", // stdLongWeekDay Sunday Monday ... Friday Saturday
"e": "<stdDayOfWeek>", // Numeric representation of day of the week 0 1 ... 5 6 @todo
"E": "<stdDayOfWeekISO>", // ISO-8601 numeric representation of the day of the week (added in PHP 5.1.0) 1 2 ... 6 7 @todo
"w": "<stdWeekOfYear>", // 1 2 ... 52 53
"ww": "<stdWeekOfYear>", // ***01 02 ... 52 53 @todo
"W": "<stdWeekOfYear>", // 1 2 ... 52 53
"WW": "<stdWeekOfYear>", // ***01 02 ... 52 53 @todo
"YY": "06", // stdYear 70 71 ... 29 30
"YYYY": "2006", // stdLongYear 1970 1971 ... 2029 2030
"gg": "<stdIsoYearShort>", // ISO-8601 year number 70 71 ... 29 30
"gggg": "<stdIsoYear>", // ***1970 1971 ... 2029 2030
"GG": "<stdIsoYearShort>", //70 71 ... 29 30
"GGGG": "<stdIsoYear>", // ***1970 1971 ... 2029 2030
"Q": "<stdQuarter>", // 1, 2, 3, 4
"A": "PM", // stdPM AM PM
"a": "pm", // stdpm am pm
"H": "<stdHourNoZero>", // stdHour 0 1 ... 22 23
"HH": "15", // 00 01 ... 22 23
"h": "3", // stdHour12 1 2 ... 11 12
"hh": "03", // stdZeroHour12 01 02 ... 11 12
"m": "4", // stdZeroMinute 0 1 ... 58 59
"mm": "04", // stdZeroMinute 00 01 ... 58 59
"s": "5", // stdSecond 0 1 ... 58 59
"ss": "05", // stdZeroSecond ***00 01 ... 58 59
"z": "MST", //EST CST ... MST PST
"zz": "MST", //EST CST ... MST PST
"Z": "Z07:00", // stdNumColonTZ -07:00 -06:00 ... +06:00 +07:00
"ZZ": "-0700", // stdNumTZ -0700 -0600 ... +0600 +0700
"X": "<stdUnix>", // Seconds since unix epoch 1360013296
"LT": "3:04 PM", // 8:30 PM
"L": "01/02/2006", //09/04/1986
"l": "1/2/2006", //9/4/1986
"ll": "Jan 2 2006", //Sep 4 1986
"lll": "Jan 2 2006 3:04 PM", //Sep 4 1986 8:30 PM
"llll": "Mon, Jan 2 2006 3:04 PM", //Thu, Sep 4 1986 8:30 PM
}
func formatDate(t time.Time, pattern string) string {
var datePattern string
parts := strings.Split(strings.TrimLeft(pattern, "["), "]")
base := parts[0]
if len(parts) == 2 {
datePattern = parts[1]
} else {
datePattern = base
base = ""
}
formatted := t.Format(patternToLayout(datePattern))
if strings.Contains(formatted, "<std") {
isoYear, isoWeek := t.ISOWeek()
isoYearShort := fmt.Sprintf("%d", isoYear)[2:4]
formatted = strings.Replace(formatted, "<stdIsoYear>", fmt.Sprintf("%d", isoYear), -1)
formatted = strings.Replace(formatted, "<stdIsoYearShort>", isoYearShort, -1)
formatted = strings.Replace(formatted, "<stdWeekOfYear>", fmt.Sprintf("%d", isoWeek), -1)
formatted = strings.Replace(formatted, "<stdUnix>", fmt.Sprintf("%d", t.Unix()), -1)
day := t.Weekday()
dayOfWeekIso := int(day)
if day == time.Sunday {
dayOfWeekIso = 7
}
formatted = strings.Replace(formatted, "<stdDayOfWeek>", fmt.Sprintf("%d", day), -1)
formatted = strings.Replace(formatted, "<stdDayOfWeekISO>", fmt.Sprintf("%d", dayOfWeekIso), -1)
formatted = strings.Replace(formatted, "<stdDayOfYear>", fmt.Sprintf("%d", t.YearDay()), -1)
quarter := 4
switch t.Month() {
case time.January, time.February, time.March:
quarter = 1
case time.April, time.May, time.June:
quarter = 2
case time.July, time.August, time.September:
quarter = 3
}
formatted = strings.Replace(formatted, "<stdQuarter>", fmt.Sprintf("%d", quarter), -1)
formatted = strings.Replace(formatted, "<stdHourNoZero>", fmt.Sprintf("%d", t.Hour()), -1)
}
return base + formatted
}
func patternToLayout(pattern string) string {
var match [][]string
if match = datePatternRegex.FindAllStringSubmatch(pattern, -1); match == nil {
return pattern
}
for i := range match {
if replace, ok := datePatternReplacements[match[i][0]]; ok {
pattern = strings.Replace(pattern, match[i][0], replace, 1)
}
}
return pattern
}
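
A small illustration (not part of the commit) of what the pattern handling above produces: `patternToLayout` maps Moment.js-style tokens to a Go reference layout via the replacement table, and `formatDate` prepends the literal `[...]` prefix.

```go
package main

import (
	"fmt"
	"time"
)

// For pattern "[data-]YYYY.MM.DD.HH", formatDate splits off the "data-"
// prefix and converts "YYYY.MM.DD.HH" to the Go layout "2006.01.02.15".
func main() {
	t := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
	fmt.Println("data-" + t.Format("2006.01.02.15")) // data-2018.05.15.17
}
```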

View File

@@ -0,0 +1,244 @@
package es
import (
"fmt"
"testing"
"time"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
)
func TestIndexPattern(t *testing.T) {
Convey("Static index patterns", t, func() {
indexPatternScenario(noInterval, "data-*", nil, func(indices []string) {
So(indices, ShouldHaveLength, 1)
So(indices[0], ShouldEqual, "data-*")
})
indexPatternScenario(noInterval, "es-index-name", nil, func(indices []string) {
So(indices, ShouldHaveLength, 1)
So(indices[0], ShouldEqual, "es-index-name")
})
})
Convey("Dynamic index patterns", t, func() {
from := fmt.Sprintf("%d", time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC).UnixNano()/int64(time.Millisecond))
to := fmt.Sprintf("%d", time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC).UnixNano()/int64(time.Millisecond))
indexPatternScenario(intervalHourly, "[data-]YYYY.MM.DD.HH", tsdb.NewTimeRange(from, to), func(indices []string) {
So(indices, ShouldHaveLength, 1)
So(indices[0], ShouldEqual, "data-2018.05.15.17")
})
indexPatternScenario(intervalDaily, "[data-]YYYY.MM.DD", tsdb.NewTimeRange(from, to), func(indices []string) {
So(indices, ShouldHaveLength, 1)
So(indices[0], ShouldEqual, "data-2018.05.15")
})
indexPatternScenario(intervalWeekly, "[data-]GGGG.WW", tsdb.NewTimeRange(from, to), func(indices []string) {
So(indices, ShouldHaveLength, 1)
So(indices[0], ShouldEqual, "data-2018.20")
})
indexPatternScenario(intervalMonthly, "[data-]YYYY.MM", tsdb.NewTimeRange(from, to), func(indices []string) {
So(indices, ShouldHaveLength, 1)
So(indices[0], ShouldEqual, "data-2018.05")
})
indexPatternScenario(intervalYearly, "[data-]YYYY", tsdb.NewTimeRange(from, to), func(indices []string) {
So(indices, ShouldHaveLength, 1)
So(indices[0], ShouldEqual, "data-2018")
})
})
Convey("Hourly interval", t, func() {
Convey("Should return 1 interval", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 1, 23, 6, 0, 0, time.UTC)
intervals := (&hourlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 1)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 23, 0, 0, 0, time.UTC))
})
Convey("Should return 2 intervals", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 2, 0, 6, 0, 0, time.UTC)
intervals := (&hourlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 2)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 23, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2018, 1, 2, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 10 intervals", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 2, 8, 6, 0, 0, time.UTC)
intervals := (&hourlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 10)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 23, 0, 0, 0, time.UTC))
So(intervals[4], ShouldEqual, time.Date(2018, 1, 2, 3, 0, 0, 0, time.UTC))
So(intervals[9], ShouldEqual, time.Date(2018, 1, 2, 8, 0, 0, 0, time.UTC))
})
})
Convey("Daily interval", t, func() {
Convey("Should return 1 day", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 1, 23, 6, 0, 0, time.UTC)
intervals := (&dailyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 1)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 2 days", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 2, 0, 6, 0, 0, time.UTC)
intervals := (&dailyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 2)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2018, 1, 2, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 32 days", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 2, 1, 8, 6, 0, 0, time.UTC)
intervals := (&dailyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 32)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[30], ShouldEqual, time.Date(2018, 1, 31, 0, 0, 0, 0, time.UTC))
So(intervals[31], ShouldEqual, time.Date(2018, 2, 1, 0, 0, 0, 0, time.UTC))
})
})
Convey("Weekly interval", t, func() {
Convey("Should return 1 week (1)", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 1, 23, 6, 0, 0, time.UTC)
intervals := (&weeklyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 1)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 1 week (2)", func() {
from := time.Date(2017, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2017, 1, 1, 23, 6, 0, 0, time.UTC)
intervals := (&weeklyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 1)
So(intervals[0], ShouldEqual, time.Date(2016, 12, 26, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 2 weeks (1)", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 10, 23, 6, 0, 0, time.UTC)
intervals := (&weeklyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 2)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2018, 1, 8, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 2 weeks (2)", func() {
from := time.Date(2017, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2017, 1, 8, 23, 6, 0, 0, time.UTC)
intervals := (&weeklyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 2)
So(intervals[0], ShouldEqual, time.Date(2016, 12, 26, 0, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2017, 1, 2, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 3 weeks (1)", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 21, 23, 6, 0, 0, time.UTC)
intervals := (&weeklyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 3)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2018, 1, 8, 0, 0, 0, 0, time.UTC))
So(intervals[2], ShouldEqual, time.Date(2018, 1, 15, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 3 weeks (2)", func() {
from := time.Date(2017, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2017, 1, 9, 23, 6, 0, 0, time.UTC)
intervals := (&weeklyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 3)
So(intervals[0], ShouldEqual, time.Date(2016, 12, 26, 0, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2017, 1, 2, 0, 0, 0, 0, time.UTC))
So(intervals[2], ShouldEqual, time.Date(2017, 1, 9, 0, 0, 0, 0, time.UTC))
})
})
Convey("Monthly interval", t, func() {
Convey("Should return 1 month", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 1, 1, 23, 6, 0, 0, time.UTC)
intervals := (&monthlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 1)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 2 months", func() {
from := time.Date(2018, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 2, 2, 0, 6, 0, 0, time.UTC)
intervals := (&monthlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 2)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2018, 2, 1, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 14 months", func() {
from := time.Date(2017, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 2, 1, 8, 6, 0, 0, time.UTC)
intervals := (&monthlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 14)
So(intervals[0], ShouldEqual, time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[13], ShouldEqual, time.Date(2018, 2, 1, 0, 0, 0, 0, time.UTC))
})
})
Convey("Yearly interval", t, func() {
Convey("Should return 1 year (hour diff)", func() {
from := time.Date(2018, 2, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 2, 1, 23, 6, 0, 0, time.UTC)
intervals := (&yearlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 1)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 1 year (month diff)", func() {
from := time.Date(2018, 2, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 12, 31, 23, 59, 59, 0, time.UTC)
intervals := (&yearlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 1)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 2 years", func() {
from := time.Date(2018, 2, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2019, 1, 1, 23, 59, 59, 0, time.UTC)
intervals := (&yearlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 2)
So(intervals[0], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[1], ShouldEqual, time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC))
})
Convey("Should return 5 years", func() {
from := time.Date(2014, 1, 1, 23, 1, 1, 0, time.UTC)
to := time.Date(2018, 11, 1, 23, 59, 59, 0, time.UTC)
intervals := (&yearlyInterval{}).Generate(from, to)
So(intervals, ShouldHaveLength, 5)
So(intervals[0], ShouldEqual, time.Date(2014, 1, 1, 0, 0, 0, 0, time.UTC))
So(intervals[4], ShouldEqual, time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC))
})
})
}
func indexPatternScenario(interval string, pattern string, timeRange *tsdb.TimeRange, fn func(indices []string)) {
Convey(fmt.Sprintf("Index pattern (interval=%s, index=%s", interval, pattern), func() {
ip, err := newIndexPattern(interval, pattern)
So(err, ShouldBeNil)
So(ip, ShouldNotBeNil)
indices, err := ip.GetIndices(timeRange)
So(err, ShouldBeNil)
fn(indices)
})
}

View File

@@ -0,0 +1,311 @@
package es
import (
"encoding/json"
"github.com/grafana/grafana/pkg/tsdb"
)
// SearchRequest represents a search request
type SearchRequest struct {
Index string
Interval tsdb.Interval
Size int
Sort map[string]interface{}
Query *Query
Aggs AggArray
CustomProps map[string]interface{}
}
// MarshalJSON returns the JSON encoding of the request.
func (r *SearchRequest) MarshalJSON() ([]byte, error) {
root := make(map[string]interface{})
root["size"] = r.Size
if len(r.Sort) > 0 {
root["sort"] = r.Sort
}
for key, value := range r.CustomProps {
root[key] = value
}
root["query"] = r.Query
if len(r.Aggs) > 0 {
root["aggs"] = r.Aggs
}
return json.Marshal(root)
}
// SearchResponseHits represents search response hits
type SearchResponseHits struct {
Hits []map[string]interface{}
Total int64
}
// SearchResponse represents a search response
type SearchResponse struct {
Error map[string]interface{} `json:"error"`
Aggregations map[string]interface{} `json:"aggregations"`
Hits *SearchResponseHits `json:"hits"`
}
// MultiSearchRequest represents a multi search request
type MultiSearchRequest struct {
Requests []*SearchRequest
}
// MultiSearchResponse represents a multi search response
type MultiSearchResponse struct {
status int // set from the HTTP response status code; unexported fields are ignored by encoding/json
Responses []*SearchResponse `json:"responses"`
}
// Query represents a query
type Query struct {
Bool *BoolQuery `json:"bool"`
}
// BoolQuery represents a bool query
type BoolQuery struct {
Filters []Filter
}
// NewBoolQuery creates a new bool query
func NewBoolQuery() *BoolQuery {
return &BoolQuery{Filters: make([]Filter, 0)}
}
// MarshalJSON returns the JSON encoding of the boolean query.
func (q *BoolQuery) MarshalJSON() ([]byte, error) {
root := make(map[string]interface{})
if len(q.Filters) > 0 {
if len(q.Filters) == 1 {
root["filter"] = q.Filters[0]
} else {
root["filter"] = q.Filters
}
}
return json.Marshal(root)
}
// Filter represents a search filter
type Filter interface{}
// QueryStringFilter represents a query string search filter
type QueryStringFilter struct {
Filter
Query string
AnalyzeWildcard bool
}
// MarshalJSON returns the JSON encoding of the query string filter.
func (f *QueryStringFilter) MarshalJSON() ([]byte, error) {
root := map[string]interface{}{
"query_string": map[string]interface{}{
"query": f.Query,
"analyze_wildcard": f.AnalyzeWildcard,
},
}
return json.Marshal(root)
}
// RangeFilter represents a range search filter
type RangeFilter struct {
Filter
Key string
Gte string
Lte string
Format string
}
// DateFormatEpochMS represents a date format of epoch milliseconds (epoch_millis)
const DateFormatEpochMS = "epoch_millis"
// MarshalJSON returns the JSON encoding of the range filter.
func (f *RangeFilter) MarshalJSON() ([]byte, error) {
root := map[string]map[string]map[string]interface{}{
"range": {
f.Key: {
"lte": f.Lte,
"gte": f.Gte,
},
},
}
if f.Format != "" {
root["range"][f.Key]["format"] = f.Format
}
return json.Marshal(root)
}
// Aggregation represents an aggregation
type Aggregation interface{}
// Agg represents a key and aggregation
type Agg struct {
Key string
Aggregation *aggContainer
}
// MarshalJSON returns the JSON encoding of the agg
func (a *Agg) MarshalJSON() ([]byte, error) {
root := map[string]interface{}{
a.Key: a.Aggregation,
}
return json.Marshal(root)
}
// AggArray represents a collection of key/aggregation pairs
type AggArray []*Agg
// MarshalJSON returns the JSON encoding of the agg array
func (a AggArray) MarshalJSON() ([]byte, error) {
aggsMap := make(map[string]Aggregation)
for _, subAgg := range a {
aggsMap[subAgg.Key] = subAgg.Aggregation
}
return json.Marshal(aggsMap)
}
type aggContainer struct {
Type string
Aggregation Aggregation
Aggs AggArray
}
// MarshalJSON returns the JSON encoding of the aggregation container
func (a *aggContainer) MarshalJSON() ([]byte, error) {
root := map[string]interface{}{
a.Type: a.Aggregation,
}
if len(a.Aggs) > 0 {
root["aggs"] = a.Aggs
}
return json.Marshal(root)
}
type aggDef struct {
key string
aggregation *aggContainer
builders []AggBuilder
}
func newAggDef(key string, aggregation *aggContainer) *aggDef {
return &aggDef{
key: key,
aggregation: aggregation,
builders: make([]AggBuilder, 0),
}
}
// HistogramAgg represents a histogram aggregation
type HistogramAgg struct {
Interval int `json:"interval,omitempty"`
Field string `json:"field"`
MinDocCount int `json:"min_doc_count"`
Missing *int `json:"missing,omitempty"`
}
// DateHistogramAgg represents a date histogram aggregation
type DateHistogramAgg struct {
Field string `json:"field"`
Interval string `json:"interval,omitempty"`
MinDocCount int `json:"min_doc_count"`
Missing *string `json:"missing,omitempty"`
ExtendedBounds *ExtendedBounds `json:"extended_bounds"`
Format string `json:"format"`
}
// FiltersAggregation represents a filters aggregation
type FiltersAggregation struct {
Filters map[string]interface{} `json:"filters"`
}
// TermsAggregation represents a terms aggregation
type TermsAggregation struct {
Field string `json:"field"`
Size int `json:"size"`
Order map[string]interface{} `json:"order"`
MinDocCount *int `json:"min_doc_count,omitempty"`
Missing *string `json:"missing,omitempty"`
}
// ExtendedBounds represents extended bounds
type ExtendedBounds struct {
Min string `json:"min"`
Max string `json:"max"`
}
// GeoHashGridAggregation represents a geo hash grid aggregation
type GeoHashGridAggregation struct {
Field string `json:"field"`
Precision int `json:"precision"`
}
// MetricAggregation represents a metric aggregation
type MetricAggregation struct {
Field string
Settings map[string]interface{}
}
// MarshalJSON returns the JSON encoding of the metric aggregation
func (a *MetricAggregation) MarshalJSON() ([]byte, error) {
root := map[string]interface{}{
"field": a.Field,
}
for k, v := range a.Settings {
if k != "" && v != nil {
root[k] = v
}
}
return json.Marshal(root)
}
// PipelineAggregation represents a pipeline aggregation
type PipelineAggregation struct {
BucketPath string
Settings map[string]interface{}
}
// MarshalJSON returns the JSON encoding of the pipeline aggregation
func (a *PipelineAggregation) MarshalJSON() ([]byte, error) {
root := map[string]interface{}{
"buckets_path": a.BucketPath,
}
for k, v := range a.Settings {
if k != "" && v != nil {
root[k] = v
}
}
return json.Marshal(root)
}
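}

A hedged sketch (illustration only, not part of the commit) of the wire format these types marshal to, using just the exported types above; `encoding/json` emits map keys in sorted order, which determines the field ordering below.

```go
package es

import (
	"encoding/json"
	"fmt"
)

// ExampleSearchRequestJSON prints the serialized form of a minimal search
// request with a single range filter.
func ExampleSearchRequestJSON() {
	q := &Query{Bool: NewBoolQuery()}
	q.Bool.Filters = append(q.Bool.Filters, &RangeFilter{
		Key:    "@timestamp",
		Gte:    "$timeFrom",
		Lte:    "$timeTo",
		Format: DateFormatEpochMS,
	})
	body, _ := json.Marshal(&SearchRequest{Query: q})
	fmt.Println(string(body))
	// Output:
	// {"query":{"bool":{"filter":{"range":{"@timestamp":{"format":"epoch_millis","gte":"$timeFrom","lte":"$timeTo"}}}}},"size":0}
}
```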

View File

@@ -0,0 +1,451 @@
package es
import (
"strings"
"github.com/grafana/grafana/pkg/tsdb"
)
// SearchRequestBuilder represents a builder which can build a search request
type SearchRequestBuilder struct {
version int
interval tsdb.Interval
index string
size int
sort map[string]interface{}
queryBuilder *QueryBuilder
aggBuilders []AggBuilder
customProps map[string]interface{}
}
// NewSearchRequestBuilder creates a new search request builder
func NewSearchRequestBuilder(version int, interval tsdb.Interval) *SearchRequestBuilder {
builder := &SearchRequestBuilder{
version: version,
interval: interval,
sort: make(map[string]interface{}),
customProps: make(map[string]interface{}),
aggBuilders: make([]AggBuilder, 0),
}
return builder
}
// Build builds and returns a search request
func (b *SearchRequestBuilder) Build() (*SearchRequest, error) {
sr := SearchRequest{
Index: b.index,
Interval: b.interval,
Size: b.size,
Sort: b.sort,
CustomProps: b.customProps,
}
if b.queryBuilder != nil {
q, err := b.queryBuilder.Build()
if err != nil {
return nil, err
}
sr.Query = q
}
if len(b.aggBuilders) > 0 {
sr.Aggs = make(AggArray, 0)
for _, ab := range b.aggBuilders {
aggArray, err := ab.Build()
if err != nil {
return nil, err
}
for _, agg := range aggArray {
sr.Aggs = append(sr.Aggs, agg)
}
}
}
return &sr, nil
}
// Size sets the size of the search request
func (b *SearchRequestBuilder) Size(size int) *SearchRequestBuilder {
b.size = size
return b
}
// SortDesc adds a sort to the search request
func (b *SearchRequestBuilder) SortDesc(field, unmappedType string) *SearchRequestBuilder {
props := map[string]string{
"order": "desc",
}
if unmappedType != "" {
props["unmapped_type"] = unmappedType
}
b.sort[field] = props
return b
}
// AddDocValueField adds a doc value field to the search request
func (b *SearchRequestBuilder) AddDocValueField(field string) *SearchRequestBuilder {
// fields field not supported on version >= 5
if b.version < 5 {
b.customProps["fields"] = []string{"*", "_source"}
}
b.customProps["script_fields"] = make(map[string]interface{})
if b.version < 5 {
b.customProps["fielddata_fields"] = []string{field}
} else {
b.customProps["docvalue_fields"] = []string{field}
}
return b
}
// Query creates and returns a query builder
func (b *SearchRequestBuilder) Query() *QueryBuilder {
if b.queryBuilder == nil {
b.queryBuilder = NewQueryBuilder()
}
return b.queryBuilder
}
// Agg initiates and returns a new aggregation builder
func (b *SearchRequestBuilder) Agg() AggBuilder {
aggBuilder := newAggBuilder()
b.aggBuilders = append(b.aggBuilders, aggBuilder)
return aggBuilder
}
// MultiSearchRequestBuilder represents a builder which can build a multi search request
type MultiSearchRequestBuilder struct {
version int
requestBuilders []*SearchRequestBuilder
}
// NewMultiSearchRequestBuilder creates a new multi search request builder
func NewMultiSearchRequestBuilder(version int) *MultiSearchRequestBuilder {
return &MultiSearchRequestBuilder{
version: version,
}
}
// Search initiates and returns a new search request builder
func (m *MultiSearchRequestBuilder) Search(interval tsdb.Interval) *SearchRequestBuilder {
b := NewSearchRequestBuilder(m.version, interval)
m.requestBuilders = append(m.requestBuilders, b)
return b
}
// Build builds and returns a multi search request
func (m *MultiSearchRequestBuilder) Build() (*MultiSearchRequest, error) {
requests := []*SearchRequest{}
for _, sb := range m.requestBuilders {
searchRequest, err := sb.Build()
if err != nil {
return nil, err
}
requests = append(requests, searchRequest)
}
return &MultiSearchRequest{
Requests: requests,
}, nil
}
// QueryBuilder represents a query builder
type QueryBuilder struct {
boolQueryBuilder *BoolQueryBuilder
}
// NewQueryBuilder creates a new query builder
func NewQueryBuilder() *QueryBuilder {
return &QueryBuilder{}
}
// Build builds and returns a query
func (b *QueryBuilder) Build() (*Query, error) {
q := Query{}
if b.boolQueryBuilder != nil {
b, err := b.boolQueryBuilder.Build()
if err != nil {
return nil, err
}
q.Bool = b
}
return &q, nil
}
// Bool creates and returns a bool query builder
func (b *QueryBuilder) Bool() *BoolQueryBuilder {
if b.boolQueryBuilder == nil {
b.boolQueryBuilder = NewBoolQueryBuilder()
}
return b.boolQueryBuilder
}
// BoolQueryBuilder represents a bool query builder
type BoolQueryBuilder struct {
filterQueryBuilder *FilterQueryBuilder
}
// NewBoolQueryBuilder creates a new bool query builder
func NewBoolQueryBuilder() *BoolQueryBuilder {
return &BoolQueryBuilder{}
}
// Filter creates and returns a filter query builder
func (b *BoolQueryBuilder) Filter() *FilterQueryBuilder {
if b.filterQueryBuilder == nil {
b.filterQueryBuilder = NewFilterQueryBuilder()
}
return b.filterQueryBuilder
}
// Build builds and returns a bool query
func (b *BoolQueryBuilder) Build() (*BoolQuery, error) {
boolQuery := BoolQuery{}
if b.filterQueryBuilder != nil {
filters, err := b.filterQueryBuilder.Build()
if err != nil {
return nil, err
}
boolQuery.Filters = filters
}
return &boolQuery, nil
}
// FilterQueryBuilder represents a filter query builder
type FilterQueryBuilder struct {
filters []Filter
}
// NewFilterQueryBuilder creates a new filter query builder
func NewFilterQueryBuilder() *FilterQueryBuilder {
return &FilterQueryBuilder{
filters: make([]Filter, 0),
}
}
// Build builds and returns the filters
func (b *FilterQueryBuilder) Build() ([]Filter, error) {
return b.filters, nil
}
// AddDateRangeFilter adds a new time range filter
func (b *FilterQueryBuilder) AddDateRangeFilter(timeField, lte, gte, format string) *FilterQueryBuilder {
b.filters = append(b.filters, &RangeFilter{
Key: timeField,
Lte: lte,
Gte: gte,
Format: format,
})
return b
}
// AddQueryStringFilter adds a new query string filter
func (b *FilterQueryBuilder) AddQueryStringFilter(querystring string, analyseWildcard bool) *FilterQueryBuilder {
if len(strings.TrimSpace(querystring)) == 0 {
return b
}
b.filters = append(b.filters, &QueryStringFilter{
Query: querystring,
AnalyzeWildcard: analyseWildcard,
})
return b
}
// AggBuilder represents an aggregation builder
type AggBuilder interface {
Histogram(key, field string, fn func(a *HistogramAgg, b AggBuilder)) AggBuilder
DateHistogram(key, field string, fn func(a *DateHistogramAgg, b AggBuilder)) AggBuilder
Terms(key, field string, fn func(a *TermsAggregation, b AggBuilder)) AggBuilder
Filters(key string, fn func(a *FiltersAggregation, b AggBuilder)) AggBuilder
GeoHashGrid(key, field string, fn func(a *GeoHashGridAggregation, b AggBuilder)) AggBuilder
Metric(key, metricType, field string, fn func(a *MetricAggregation)) AggBuilder
Pipeline(key, pipelineType, bucketPath string, fn func(a *PipelineAggregation)) AggBuilder
Build() (AggArray, error)
}
type aggBuilderImpl struct {
AggBuilder
aggDefs []*aggDef
}
func newAggBuilder() *aggBuilderImpl {
return &aggBuilderImpl{
aggDefs: make([]*aggDef, 0),
}
}
func (b *aggBuilderImpl) Build() (AggArray, error) {
aggs := make(AggArray, 0)
for _, aggDef := range b.aggDefs {
agg := &Agg{
Key: aggDef.key,
Aggregation: aggDef.aggregation,
}
for _, cb := range aggDef.builders {
childAggs, err := cb.Build()
if err != nil {
return nil, err
}
for _, childAgg := range childAggs {
agg.Aggregation.Aggs = append(agg.Aggregation.Aggs, childAgg)
}
}
aggs = append(aggs, agg)
}
return aggs, nil
}
func (b *aggBuilderImpl) Histogram(key, field string, fn func(a *HistogramAgg, b AggBuilder)) AggBuilder {
innerAgg := &HistogramAgg{
Field: field,
}
aggDef := newAggDef(key, &aggContainer{
Type: "histogram",
Aggregation: innerAgg,
})
if fn != nil {
builder := newAggBuilder()
aggDef.builders = append(aggDef.builders, builder)
fn(innerAgg, builder)
}
b.aggDefs = append(b.aggDefs, aggDef)
return b
}
func (b *aggBuilderImpl) DateHistogram(key, field string, fn func(a *DateHistogramAgg, b AggBuilder)) AggBuilder {
innerAgg := &DateHistogramAgg{
Field: field,
}
aggDef := newAggDef(key, &aggContainer{
Type: "date_histogram",
Aggregation: innerAgg,
})
if fn != nil {
builder := newAggBuilder()
aggDef.builders = append(aggDef.builders, builder)
fn(innerAgg, builder)
}
b.aggDefs = append(b.aggDefs, aggDef)
return b
}
func (b *aggBuilderImpl) Terms(key, field string, fn func(a *TermsAggregation, b AggBuilder)) AggBuilder {
innerAgg := &TermsAggregation{
Field: field,
Order: make(map[string]interface{}),
}
aggDef := newAggDef(key, &aggContainer{
Type: "terms",
Aggregation: innerAgg,
})
if fn != nil {
builder := newAggBuilder()
aggDef.builders = append(aggDef.builders, builder)
fn(innerAgg, builder)
}
b.aggDefs = append(b.aggDefs, aggDef)
return b
}
func (b *aggBuilderImpl) Filters(key string, fn func(a *FiltersAggregation, b AggBuilder)) AggBuilder {
innerAgg := &FiltersAggregation{
Filters: make(map[string]interface{}),
}
aggDef := newAggDef(key, &aggContainer{
Type: "filters",
Aggregation: innerAgg,
})
if fn != nil {
builder := newAggBuilder()
aggDef.builders = append(aggDef.builders, builder)
fn(innerAgg, builder)
}
b.aggDefs = append(b.aggDefs, aggDef)
return b
}
func (b *aggBuilderImpl) GeoHashGrid(key, field string, fn func(a *GeoHashGridAggregation, b AggBuilder)) AggBuilder {
innerAgg := &GeoHashGridAggregation{
Field: field,
Precision: 5,
}
aggDef := newAggDef(key, &aggContainer{
Type: "geohash_grid",
Aggregation: innerAgg,
})
if fn != nil {
builder := newAggBuilder()
aggDef.builders = append(aggDef.builders, builder)
fn(innerAgg, builder)
}
b.aggDefs = append(b.aggDefs, aggDef)
return b
}
func (b *aggBuilderImpl) Metric(key, metricType, field string, fn func(a *MetricAggregation)) AggBuilder {
innerAgg := &MetricAggregation{
Field: field,
Settings: make(map[string]interface{}),
}
aggDef := newAggDef(key, &aggContainer{
Type: metricType,
Aggregation: innerAgg,
})
if fn != nil {
fn(innerAgg)
}
b.aggDefs = append(b.aggDefs, aggDef)
return b
}
func (b *aggBuilderImpl) Pipeline(key, pipelineType, bucketPath string, fn func(a *PipelineAggregation)) AggBuilder {
innerAgg := &PipelineAggregation{
BucketPath: bucketPath,
Settings: make(map[string]interface{}),
}
aggDef := newAggDef(key, &aggContainer{
Type: pipelineType,
Aggregation: innerAgg,
})
if fn != nil {
fn(innerAgg)
}
b.aggDefs = append(b.aggDefs, aggDef)
return b
}

View File

@@ -0,0 +1,473 @@
package es
import (
"encoding/json"
"testing"
"time"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
)
func TestSearchRequest(t *testing.T) {
Convey("Test elasticsearch search request", t, func() {
timeField := "@timestamp"
Convey("Given new search request builder for es version 5", func() {
b := NewSearchRequestBuilder(5, tsdb.Interval{Value: 15 * time.Second, Text: "15s"})
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("Should have size of zero", func() {
So(sr.Size, ShouldEqual, 0)
})
Convey("Should have no sorting", func() {
So(sr.Sort, ShouldHaveLength, 0)
})
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
So(json.Get("size").MustInt(500), ShouldEqual, 0)
So(json.Get("sort").Interface(), ShouldBeNil)
So(json.Get("aggs").Interface(), ShouldBeNil)
So(json.Get("query").Interface(), ShouldBeNil)
})
})
Convey("When adding size, sort, filters", func() {
b.Size(200)
b.SortDesc(timeField, "boolean")
filters := b.Query().Bool().Filter()
filters.AddDateRangeFilter(timeField, "$timeTo", "$timeFrom", DateFormatEpochMS)
filters.AddQueryStringFilter("test", true)
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("Should have correct size", func() {
So(sr.Size, ShouldEqual, 200)
})
Convey("Should have correct sorting", func() {
sort, ok := sr.Sort[timeField].(map[string]string)
So(ok, ShouldBeTrue)
So(sort["order"], ShouldEqual, "desc")
So(sort["unmapped_type"], ShouldEqual, "boolean")
})
Convey("Should have range filter", func() {
f, ok := sr.Query.Bool.Filters[0].(*RangeFilter)
So(ok, ShouldBeTrue)
So(f.Gte, ShouldEqual, "$timeFrom")
So(f.Lte, ShouldEqual, "$timeTo")
So(f.Format, ShouldEqual, "epoch_millis")
})
Convey("Should have query string filter", func() {
f, ok := sr.Query.Bool.Filters[1].(*QueryStringFilter)
So(ok, ShouldBeTrue)
So(f.Query, ShouldEqual, "test")
So(f.AnalyzeWildcard, ShouldBeTrue)
})
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
So(json.Get("size").MustInt(0), ShouldEqual, 200)
sort := json.GetPath("sort", timeField)
So(sort.Get("order").MustString(), ShouldEqual, "desc")
So(sort.Get("unmapped_type").MustString(), ShouldEqual, "boolean")
timeRangeFilter := json.GetPath("query", "bool", "filter").GetIndex(0).Get("range").Get(timeField)
So(timeRangeFilter.Get("gte").MustString(""), ShouldEqual, "$timeFrom")
So(timeRangeFilter.Get("lte").MustString(""), ShouldEqual, "$timeTo")
So(timeRangeFilter.Get("format").MustString(""), ShouldEqual, DateFormatEpochMS)
queryStringFilter := json.GetPath("query", "bool", "filter").GetIndex(1).Get("query_string")
So(queryStringFilter.Get("analyze_wildcard").MustBool(false), ShouldEqual, true)
So(queryStringFilter.Get("query").MustString(""), ShouldEqual, "test")
})
})
})
Convey("When adding doc value field", func() {
b.AddDocValueField(timeField)
Convey("should set correct props", func() {
So(b.customProps["fields"], ShouldBeNil)
scriptFields, ok := b.customProps["script_fields"].(map[string]interface{})
So(ok, ShouldBeTrue)
So(scriptFields, ShouldHaveLength, 0)
docValueFields, ok := b.customProps["docvalue_fields"].([]string)
So(ok, ShouldBeTrue)
So(docValueFields, ShouldHaveLength, 1)
So(docValueFields[0], ShouldEqual, timeField)
})
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
scriptFields, err := json.Get("script_fields").Map()
So(err, ShouldBeNil)
So(scriptFields, ShouldHaveLength, 0)
_, err = json.Get("fields").StringArray()
So(err, ShouldNotBeNil)
docValueFields, err := json.Get("docvalue_fields").StringArray()
So(err, ShouldBeNil)
So(docValueFields, ShouldHaveLength, 1)
So(docValueFields[0], ShouldEqual, timeField)
})
})
})
Convey("and adding multiple top level aggs", func() {
aggBuilder := b.Agg()
aggBuilder.Terms("1", "@hostname", nil)
aggBuilder.DateHistogram("2", "@timestamp", nil)
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("Should have 2 top level aggs", func() {
aggs := sr.Aggs
So(aggs, ShouldHaveLength, 2)
So(aggs[0].Key, ShouldEqual, "1")
So(aggs[0].Aggregation.Type, ShouldEqual, "terms")
So(aggs[1].Key, ShouldEqual, "2")
So(aggs[1].Aggregation.Type, ShouldEqual, "date_histogram")
})
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
So(json.Get("aggs").MustMap(), ShouldHaveLength, 2)
So(json.GetPath("aggs", "1", "terms", "field").MustString(), ShouldEqual, "@hostname")
So(json.GetPath("aggs", "2", "date_histogram", "field").MustString(), ShouldEqual, "@timestamp")
})
})
})
Convey("and adding top level agg with child agg", func() {
aggBuilder := b.Agg()
aggBuilder.Terms("1", "@hostname", func(a *TermsAggregation, ib AggBuilder) {
ib.DateHistogram("2", "@timestamp", nil)
})
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("Should have 1 top level agg and one child agg", func() {
aggs := sr.Aggs
So(aggs, ShouldHaveLength, 1)
topAgg := aggs[0]
So(topAgg.Key, ShouldEqual, "1")
So(topAgg.Aggregation.Type, ShouldEqual, "terms")
So(topAgg.Aggregation.Aggs, ShouldHaveLength, 1)
childAgg := aggs[0].Aggregation.Aggs[0]
So(childAgg.Key, ShouldEqual, "2")
So(childAgg.Aggregation.Type, ShouldEqual, "date_histogram")
})
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
So(json.Get("aggs").MustMap(), ShouldHaveLength, 1)
firstLevelAgg := json.GetPath("aggs", "1")
secondLevelAgg := firstLevelAgg.GetPath("aggs", "2")
So(firstLevelAgg.GetPath("terms", "field").MustString(), ShouldEqual, "@hostname")
So(secondLevelAgg.GetPath("date_histogram", "field").MustString(), ShouldEqual, "@timestamp")
})
})
})
Convey("and adding two top level aggs with child agg", func() {
aggBuilder := b.Agg()
aggBuilder.Histogram("1", "@hostname", func(a *HistogramAgg, ib AggBuilder) {
ib.DateHistogram("2", "@timestamp", nil)
})
aggBuilder.Filters("3", func(a *FiltersAggregation, ib AggBuilder) {
ib.Terms("4", "@test", nil)
})
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("Should have 2 top level aggs with one child agg each", func() {
aggs := sr.Aggs
So(aggs, ShouldHaveLength, 2)
topAggOne := aggs[0]
So(topAggOne.Key, ShouldEqual, "1")
So(topAggOne.Aggregation.Type, ShouldEqual, "histogram")
So(topAggOne.Aggregation.Aggs, ShouldHaveLength, 1)
topAggOnechildAgg := topAggOne.Aggregation.Aggs[0]
So(topAggOnechildAgg.Key, ShouldEqual, "2")
So(topAggOnechildAgg.Aggregation.Type, ShouldEqual, "date_histogram")
topAggTwo := aggs[1]
So(topAggTwo.Key, ShouldEqual, "3")
So(topAggTwo.Aggregation.Type, ShouldEqual, "filters")
So(topAggTwo.Aggregation.Aggs, ShouldHaveLength, 1)
topAggTwochildAgg := topAggTwo.Aggregation.Aggs[0]
So(topAggTwochildAgg.Key, ShouldEqual, "4")
So(topAggTwochildAgg.Aggregation.Type, ShouldEqual, "terms")
})
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
topAggOne := json.GetPath("aggs", "1")
So(topAggOne.GetPath("histogram", "field").MustString(), ShouldEqual, "@hostname")
topAggOnechildAgg := topAggOne.GetPath("aggs", "2")
So(topAggOnechildAgg.GetPath("date_histogram", "field").MustString(), ShouldEqual, "@timestamp")
topAggTwo := json.GetPath("aggs", "3")
topAggTwochildAgg := topAggTwo.GetPath("aggs", "4")
So(topAggTwo.GetPath("filters").MustArray(), ShouldHaveLength, 0)
So(topAggTwochildAgg.GetPath("terms", "field").MustString(), ShouldEqual, "@test")
})
})
})
Convey("and adding top level agg with child agg with child agg", func() {
aggBuilder := b.Agg()
aggBuilder.Terms("1", "@hostname", func(a *TermsAggregation, ib AggBuilder) {
ib.Terms("2", "@app", func(a *TermsAggregation, ib AggBuilder) {
ib.DateHistogram("3", "@timestamp", nil)
})
})
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("Should have 1 top level agg with one child having a child", func() {
aggs := sr.Aggs
So(aggs, ShouldHaveLength, 1)
topAgg := aggs[0]
So(topAgg.Key, ShouldEqual, "1")
So(topAgg.Aggregation.Type, ShouldEqual, "terms")
So(topAgg.Aggregation.Aggs, ShouldHaveLength, 1)
childAgg := topAgg.Aggregation.Aggs[0]
So(childAgg.Key, ShouldEqual, "2")
So(childAgg.Aggregation.Type, ShouldEqual, "terms")
childChildAgg := childAgg.Aggregation.Aggs[0]
So(childChildAgg.Key, ShouldEqual, "3")
So(childChildAgg.Aggregation.Type, ShouldEqual, "date_histogram")
})
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
topAgg := json.GetPath("aggs", "1")
So(topAgg.GetPath("terms", "field").MustString(), ShouldEqual, "@hostname")
childAgg := topAgg.GetPath("aggs", "2")
So(childAgg.GetPath("terms", "field").MustString(), ShouldEqual, "@app")
childChildAgg := childAgg.GetPath("aggs", "3")
So(childChildAgg.GetPath("date_histogram", "field").MustString(), ShouldEqual, "@timestamp")
})
})
})
Convey("and adding bucket and metric aggs", func() {
aggBuilder := b.Agg()
aggBuilder.Terms("1", "@hostname", func(a *TermsAggregation, ib AggBuilder) {
ib.Terms("2", "@app", func(a *TermsAggregation, ib AggBuilder) {
ib.Metric("4", "avg", "@value", nil)
ib.DateHistogram("3", "@timestamp", func(a *DateHistogramAgg, ib AggBuilder) {
ib.Metric("4", "avg", "@value", nil)
ib.Metric("5", "max", "@value", nil)
})
})
})
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("Should have 1 top level agg with one child having a child", func() {
aggs := sr.Aggs
So(aggs, ShouldHaveLength, 1)
topAgg := aggs[0]
So(topAgg.Key, ShouldEqual, "1")
So(topAgg.Aggregation.Type, ShouldEqual, "terms")
So(topAgg.Aggregation.Aggs, ShouldHaveLength, 1)
childAgg := topAgg.Aggregation.Aggs[0]
So(childAgg.Key, ShouldEqual, "2")
So(childAgg.Aggregation.Type, ShouldEqual, "terms")
childChildOneAgg := childAgg.Aggregation.Aggs[0]
So(childChildOneAgg.Key, ShouldEqual, "4")
So(childChildOneAgg.Aggregation.Type, ShouldEqual, "avg")
childChildTwoAgg := childAgg.Aggregation.Aggs[1]
So(childChildTwoAgg.Key, ShouldEqual, "3")
So(childChildTwoAgg.Aggregation.Type, ShouldEqual, "date_histogram")
childChildTwoChildOneAgg := childChildTwoAgg.Aggregation.Aggs[0]
So(childChildTwoChildOneAgg.Key, ShouldEqual, "4")
So(childChildTwoChildOneAgg.Aggregation.Type, ShouldEqual, "avg")
childChildTwoChildTwoAgg := childChildTwoAgg.Aggregation.Aggs[1]
So(childChildTwoChildTwoAgg.Key, ShouldEqual, "5")
So(childChildTwoChildTwoAgg.Aggregation.Type, ShouldEqual, "max")
})
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
termsAgg := json.GetPath("aggs", "1")
So(termsAgg.GetPath("terms", "field").MustString(), ShouldEqual, "@hostname")
termsAggTwo := termsAgg.GetPath("aggs", "2")
So(termsAggTwo.GetPath("terms", "field").MustString(), ShouldEqual, "@app")
termsAggTwoAvg := termsAggTwo.GetPath("aggs", "4")
So(termsAggTwoAvg.GetPath("avg", "field").MustString(), ShouldEqual, "@value")
dateHistAgg := termsAggTwo.GetPath("aggs", "3")
So(dateHistAgg.GetPath("date_histogram", "field").MustString(), ShouldEqual, "@timestamp")
avgAgg := dateHistAgg.GetPath("aggs", "4")
So(avgAgg.GetPath("avg", "field").MustString(), ShouldEqual, "@value")
maxAgg := dateHistAgg.GetPath("aggs", "5")
So(maxAgg.GetPath("max", "field").MustString(), ShouldEqual, "@value")
})
})
})
})
Convey("Given new search request builder for es version 2", func() {
b := NewSearchRequestBuilder(2, tsdb.Interval{Value: 15 * time.Second, Text: "15s"})
Convey("When adding doc value field", func() {
b.AddDocValueField(timeField)
Convey("should set correct props", func() {
fields, ok := b.customProps["fields"].([]string)
So(ok, ShouldBeTrue)
So(fields, ShouldHaveLength, 2)
So(fields[0], ShouldEqual, "*")
So(fields[1], ShouldEqual, "_source")
scriptFields, ok := b.customProps["script_fields"].(map[string]interface{})
So(ok, ShouldBeTrue)
So(scriptFields, ShouldHaveLength, 0)
fieldDataFields, ok := b.customProps["fielddata_fields"].([]string)
So(ok, ShouldBeTrue)
So(fieldDataFields, ShouldHaveLength, 1)
So(fieldDataFields[0], ShouldEqual, timeField)
})
Convey("When building search request", func() {
sr, err := b.Build()
So(err, ShouldBeNil)
Convey("When marshal to JSON should generate correct json", func() {
body, err := json.Marshal(sr)
So(err, ShouldBeNil)
json, err := simplejson.NewJson([]byte(body))
So(err, ShouldBeNil)
scriptFields, err := json.Get("script_fields").Map()
So(err, ShouldBeNil)
So(scriptFields, ShouldHaveLength, 0)
fields, err := json.Get("fields").StringArray()
So(err, ShouldBeNil)
So(fields, ShouldHaveLength, 2)
So(fields[0], ShouldEqual, "*")
So(fields[1], ShouldEqual, "_source")
fieldDataFields, err := json.Get("fielddata_fields").StringArray()
So(err, ShouldBeNil)
So(fieldDataFields, ShouldHaveLength, 1)
So(fieldDataFields[0], ShouldEqual, timeField)
})
})
})
})
})
}
func TestMultiSearchRequest(t *testing.T) {
Convey("Test elasticsearch multi search request", t, func() {
Convey("Given new multi search request builder", func() {
b := NewMultiSearchRequestBuilder(0)
Convey("When adding one search request", func() {
b.Search(tsdb.Interval{Value: 15 * time.Second, Text: "15s"})
Convey("When building search request should contain one search request", func() {
mr, err := b.Build()
So(err, ShouldBeNil)
So(mr.Requests, ShouldHaveLength, 1)
})
})
Convey("When adding two search requests", func() {
b.Search(tsdb.Interval{Value: 15 * time.Second, Text: "15s"})
b.Search(tsdb.Interval{Value: 15 * time.Second, Text: "15s"})
Convey("When building search request should contain two search requests", func() {
mr, err := b.Build()
So(err, ShouldBeNil)
So(mr.Requests, ShouldHaveLength, 2)
})
})
})
})
}


@@ -0,0 +1,45 @@
package elasticsearch
import (
"context"
"fmt"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
)
// ElasticsearchExecutor represents a handler for elasticsearch datasource requests
type ElasticsearchExecutor struct{}
var (
glog log.Logger
intervalCalculator tsdb.IntervalCalculator
)
// NewElasticsearchExecutor creates a new elasticsearch executor
func NewElasticsearchExecutor(dsInfo *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
return &ElasticsearchExecutor{}, nil
}
func init() {
glog = log.New("tsdb.elasticsearch")
intervalCalculator = tsdb.NewIntervalCalculator(nil)
tsdb.RegisterTsdbQueryEndpoint("elasticsearch", NewElasticsearchExecutor)
}
// Query handles an elasticsearch datasource request
func (e *ElasticsearchExecutor) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
if len(tsdbQuery.Queries) == 0 {
return nil, fmt.Errorf("query contains no queries")
}
client, err := es.NewClient(ctx, dsInfo, tsdbQuery.TimeRange)
if err != nil {
return nil, err
}
query := newTimeSeriesQuery(client, tsdbQuery, intervalCalculator)
return query.execute()
}


@@ -0,0 +1,77 @@
package elasticsearch
import (
"github.com/grafana/grafana/pkg/components/simplejson"
)
// Query represents the time series query model of the datasource
type Query struct {
TimeField string `json:"timeField"`
RawQuery string `json:"query"`
BucketAggs []*BucketAgg `json:"bucketAggs"`
Metrics []*MetricAgg `json:"metrics"`
Alias string `json:"alias"`
Interval string
RefID string
}
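// An illustrative example of the JSON this model is parsed from (the shape
// matches the test fixtures; the values here are hypothetical):
//
//   {
//     "timeField": "@timestamp",
//     "query": "@metric:cpu",
//     "bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }],
//     "metrics": [{ "type": "avg", "field": "@value", "id": "1" }],
//     "alias": "{{field}}"
//   }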
// BucketAgg represents a bucket aggregation of the time series query model of the datasource
type BucketAgg struct {
Field string `json:"field"`
ID string `json:"id"`
Settings *simplejson.Json `json:"settings"`
Type string `json:"type"`
}
// MetricAgg represents a metric aggregation of the time series query model of the datasource
type MetricAgg struct {
Field string `json:"field"`
Hide bool `json:"hide"`
ID string `json:"id"`
PipelineAggregate string `json:"pipelineAgg"`
Settings *simplejson.Json `json:"settings"`
Meta *simplejson.Json `json:"meta"`
Type string `json:"type"`
}
var metricAggType = map[string]string{
"count": "Count",
"avg": "Average",
"sum": "Sum",
"max": "Max",
"min": "Min",
"extended_stats": "Extended Stats",
"percentiles": "Percentiles",
"cardinality": "Unique Count",
"moving_avg": "Moving Average",
"derivative": "Derivative",
"raw_document": "Raw Document",
}
var extendedStats = map[string]string{
"avg": "Avg",
"min": "Min",
"max": "Max",
"sum": "Sum",
"count": "Count",
"std_deviation": "Std Dev",
"std_deviation_bounds_upper": "Std Dev Upper",
"std_deviation_bounds_lower": "Std Dev Lower",
}
var pipelineAggType = map[string]string{
"moving_avg": "moving_avg",
"derivative": "derivative",
}
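// isPipelineAgg returns whether the given metric type is a pipeline
// aggregation, i.e. one computed from the output of another aggregation.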
func isPipelineAgg(metricType string) bool {
if _, ok := pipelineAggType[metricType]; ok {
return true
}
return false
}
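// describeMetric returns a human readable description of a metric, e.g.
// describeMetric("avg", "@value") yields "Average @value".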
func describeMetric(metricType, field string) string {
text := metricAggType[metricType]
return text + " " + field
}


@@ -0,0 +1,530 @@
package elasticsearch
import (
"errors"
"regexp"
"sort"
"strconv"
"strings"
"github.com/grafana/grafana/pkg/components/null"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
)
type responseParser struct {
Responses []*es.SearchResponse
Targets []*Query
}
var newResponseParser = func(responses []*es.SearchResponse, targets []*Query) *responseParser {
return &responseParser{
Responses: responses,
Targets: targets,
}
}
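// getTimeSeries converts the elasticsearch search responses into the tsdb
// response format, one query result per target keyed by refId.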
func (rp *responseParser) getTimeSeries() (*tsdb.Response, error) {
result := &tsdb.Response{}
result.Results = make(map[string]*tsdb.QueryResult)
if rp.Responses == nil {
return result, nil
}
for i, res := range rp.Responses {
target := rp.Targets[i]
if res.Error != nil {
result.Results[target.RefID] = getErrorFromElasticResponse(res)
continue
}
queryRes := tsdb.NewQueryResult()
props := make(map[string]string)
table := tsdb.Table{
Columns: make([]tsdb.TableColumn, 0),
Rows: make([]tsdb.RowValues, 0),
}
err := rp.processBuckets(res.Aggregations, target, &queryRes.Series, &table, props, 0)
if err != nil {
return nil, err
}
rp.nameSeries(&queryRes.Series, target)
rp.trimDatapoints(&queryRes.Series, target)
if len(table.Rows) > 0 {
queryRes.Tables = append(queryRes.Tables, &table)
}
result.Results[target.RefID] = queryRes
}
return result, nil
}
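// processBuckets walks the bucket aggregation tree depth-first. Keys of
// intermediate buckets are collected as props (series tags); at the deepest
// level, date histogram buckets become time series and all other bucket
// aggregations become table rows.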
func (rp *responseParser) processBuckets(aggs map[string]interface{}, target *Query, series *tsdb.TimeSeriesSlice, table *tsdb.Table, props map[string]string, depth int) error {
var err error
maxDepth := len(target.BucketAggs) - 1
aggIDs := make([]string, 0)
for k := range aggs {
aggIDs = append(aggIDs, k)
}
sort.Strings(aggIDs)
for _, aggID := range aggIDs {
v := aggs[aggID]
aggDef, _ := findAgg(target, aggID)
esAgg := simplejson.NewFromAny(v)
if aggDef == nil {
continue
}
if depth == maxDepth {
if aggDef.Type == "date_histogram" {
err = rp.processMetrics(esAgg, target, series, props)
} else {
err = rp.processAggregationDocs(esAgg, aggDef, target, table, props)
}
if err != nil {
return err
}
} else {
for _, b := range esAgg.Get("buckets").MustArray() {
bucket := simplejson.NewFromAny(b)
newProps := make(map[string]string)
for k, v := range props {
newProps[k] = v
}
if key, err := bucket.Get("key").String(); err == nil {
newProps[aggDef.Field] = key
} else if key, err := bucket.Get("key").Int64(); err == nil {
newProps[aggDef.Field] = strconv.FormatInt(key, 10)
}
if key, err := bucket.Get("key_as_string").String(); err == nil {
newProps[aggDef.Field] = key
}
err = rp.processBuckets(bucket.MustMap(), target, series, table, newProps, depth+1)
if err != nil {
return err
}
}
for k, v := range esAgg.Get("buckets").MustMap() {
bucket := simplejson.NewFromAny(v)
newProps := make(map[string]string)
for k, v := range props {
newProps[k] = v
}
newProps["filter"] = k
err = rp.processBuckets(bucket.MustMap(), target, series, table, newProps, depth+1)
if err != nil {
return err
}
}
}
}
return nil
}
func (rp *responseParser) processMetrics(esAgg *simplejson.Json, target *Query, series *tsdb.TimeSeriesSlice, props map[string]string) error {
for _, metric := range target.Metrics {
if metric.Hide {
continue
}
switch metric.Type {
case "count":
newSeries := tsdb.TimeSeries{
Tags: make(map[string]string),
}
for _, v := range esAgg.Get("buckets").MustArray() {
bucket := simplejson.NewFromAny(v)
value := castToNullFloat(bucket.Get("doc_count"))
key := castToNullFloat(bucket.Get("key"))
newSeries.Points = append(newSeries.Points, tsdb.TimePoint{value, key})
}
for k, v := range props {
newSeries.Tags[k] = v
}
newSeries.Tags["metric"] = "count"
*series = append(*series, &newSeries)
case "percentiles":
buckets := esAgg.Get("buckets").MustArray()
if len(buckets) == 0 {
break
}
firstBucket := simplejson.NewFromAny(buckets[0])
percentiles := firstBucket.GetPath(metric.ID, "values").MustMap()
percentileKeys := make([]string, 0)
for k := range percentiles {
percentileKeys = append(percentileKeys, k)
}
sort.Strings(percentileKeys)
for _, percentileName := range percentileKeys {
newSeries := tsdb.TimeSeries{
Tags: make(map[string]string),
}
for k, v := range props {
newSeries.Tags[k] = v
}
newSeries.Tags["metric"] = "p" + percentileName
newSeries.Tags["field"] = metric.Field
for _, v := range buckets {
bucket := simplejson.NewFromAny(v)
value := castToNullFloat(bucket.GetPath(metric.ID, "values", percentileName))
key := castToNullFloat(bucket.Get("key"))
newSeries.Points = append(newSeries.Points, tsdb.TimePoint{value, key})
}
*series = append(*series, &newSeries)
}
case "extended_stats":
buckets := esAgg.Get("buckets").MustArray()
metaKeys := make([]string, 0)
meta := metric.Meta.MustMap()
for k := range meta {
metaKeys = append(metaKeys, k)
}
sort.Strings(metaKeys)
for _, statName := range metaKeys {
v := meta[statName]
if enabled, ok := v.(bool); !ok || !enabled {
continue
}
newSeries := tsdb.TimeSeries{
Tags: make(map[string]string),
}
for k, v := range props {
newSeries.Tags[k] = v
}
newSeries.Tags["metric"] = statName
newSeries.Tags["field"] = metric.Field
for _, v := range buckets {
bucket := simplejson.NewFromAny(v)
key := castToNullFloat(bucket.Get("key"))
var value null.Float
if statName == "std_deviation_bounds_upper" {
value = castToNullFloat(bucket.GetPath(metric.ID, "std_deviation_bounds", "upper"))
} else if statName == "std_deviation_bounds_lower" {
value = castToNullFloat(bucket.GetPath(metric.ID, "std_deviation_bounds", "lower"))
} else {
value = castToNullFloat(bucket.GetPath(metric.ID, statName))
}
newSeries.Points = append(newSeries.Points, tsdb.TimePoint{value, key})
}
*series = append(*series, &newSeries)
}
default:
newSeries := tsdb.TimeSeries{
Tags: make(map[string]string),
}
for k, v := range props {
newSeries.Tags[k] = v
}
newSeries.Tags["metric"] = metric.Type
newSeries.Tags["field"] = metric.Field
for _, v := range esAgg.Get("buckets").MustArray() {
bucket := simplejson.NewFromAny(v)
key := castToNullFloat(bucket.Get("key"))
valueObj, err := bucket.Get(metric.ID).Map()
if err != nil {
continue
}
var value null.Float
if _, ok := valueObj["normalized_value"]; ok {
value = castToNullFloat(bucket.GetPath(metric.ID, "normalized_value"))
} else {
value = castToNullFloat(bucket.GetPath(metric.ID, "value"))
}
newSeries.Points = append(newSeries.Points, tsdb.TimePoint{value, key})
}
*series = append(*series, &newSeries)
}
}
return nil
}
func (rp *responseParser) processAggregationDocs(esAgg *simplejson.Json, aggDef *BucketAgg, target *Query, table *tsdb.Table, props map[string]string) error {
propKeys := make([]string, 0)
for k := range props {
propKeys = append(propKeys, k)
}
sort.Strings(propKeys)
if len(table.Columns) == 0 {
for _, propKey := range propKeys {
table.Columns = append(table.Columns, tsdb.TableColumn{Text: propKey})
}
table.Columns = append(table.Columns, tsdb.TableColumn{Text: aggDef.Field})
}
addMetricValue := func(values *tsdb.RowValues, metricName string, value null.Float) {
found := false
for _, c := range table.Columns {
if c.Text == metricName {
found = true
break
}
}
if !found {
table.Columns = append(table.Columns, tsdb.TableColumn{Text: metricName})
}
*values = append(*values, value)
}
for _, v := range esAgg.Get("buckets").MustArray() {
bucket := simplejson.NewFromAny(v)
values := make(tsdb.RowValues, 0)
for _, propKey := range propKeys {
values = append(values, props[propKey])
}
if key, err := bucket.Get("key").String(); err == nil {
values = append(values, key)
} else {
values = append(values, castToNullFloat(bucket.Get("key")))
}
for _, metric := range target.Metrics {
switch metric.Type {
case "count":
addMetricValue(&values, rp.getMetricName(metric.Type), castToNullFloat(bucket.Get("doc_count")))
case "extended_stats":
metaKeys := make([]string, 0)
meta := metric.Meta.MustMap()
for k := range meta {
metaKeys = append(metaKeys, k)
}
sort.Strings(metaKeys)
for _, statName := range metaKeys {
v := meta[statName]
if enabled, ok := v.(bool); !ok || !enabled {
continue
}
var value null.Float
if statName == "std_deviation_bounds_upper" {
value = castToNullFloat(bucket.GetPath(metric.ID, "std_deviation_bounds", "upper"))
} else if statName == "std_deviation_bounds_lower" {
value = castToNullFloat(bucket.GetPath(metric.ID, "std_deviation_bounds", "lower"))
} else {
value = castToNullFloat(bucket.GetPath(metric.ID, statName))
}
addMetricValue(&values, rp.getMetricName(metric.Type), value)
}
default:
metricName := rp.getMetricName(metric.Type)
otherMetrics := make([]*MetricAgg, 0)
for _, m := range target.Metrics {
if m.Type == metric.Type {
otherMetrics = append(otherMetrics, m)
}
}
if len(otherMetrics) > 1 {
metricName += " " + metric.Field
}
addMetricValue(&values, metricName, castToNullFloat(bucket.GetPath(metric.ID, "value")))
}
}
table.Rows = append(table.Rows, values)
}
return nil
}
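// trimDatapoints drops the number of points given by the "trimEdges"
// setting from both ends of each series when the query contains a date
// histogram, since the outermost buckets typically cover only part of an
// interval.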
func (rp *responseParser) trimDatapoints(series *tsdb.TimeSeriesSlice, target *Query) {
var histogram *BucketAgg
for _, bucketAgg := range target.BucketAggs {
if bucketAgg.Type == "date_histogram" {
histogram = bucketAgg
break
}
}
if histogram == nil {
return
}
trimEdges, err := histogram.Settings.Get("trimEdges").Int()
if err != nil {
return
}
for _, s := range *series {
if len(s.Points) > trimEdges*2 {
s.Points = s.Points[trimEdges : len(s.Points)-trimEdges]
}
}
}
func (rp *responseParser) nameSeries(seriesList *tsdb.TimeSeriesSlice, target *Query) {
set := make(map[string]string)
for _, v := range *seriesList {
if metricType, exists := v.Tags["metric"]; exists {
if _, ok := set[metricType]; !ok {
set[metricType] = ""
}
}
}
metricTypeCount := len(set)
for _, series := range *seriesList {
series.Name = rp.getSeriesName(series, target, metricTypeCount)
}
}
var aliasPatternRegex = regexp.MustCompile(`\{\{([\s\S]+?)\}\}`)
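// getSeriesName resolves the display name of a series. Alias patterns such
// as {{term @host}}, {{metric}} and {{field}} are expanded from the series
// tags, e.g. the alias "{{term @host}} {{metric}}" becomes "server1 Count"
// for a count metric in a terms bucket with key "server1"; unknown patterns
// are left as-is.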
func (rp *responseParser) getSeriesName(series *tsdb.TimeSeries, target *Query, metricTypeCount int) string {
metricType := series.Tags["metric"]
metricName := rp.getMetricName(metricType)
delete(series.Tags, "metric")
field := ""
if v, ok := series.Tags["field"]; ok {
field = v
delete(series.Tags, "field")
}
if target.Alias != "" {
seriesName := target.Alias
subMatches := aliasPatternRegex.FindAllStringSubmatch(target.Alias, -1)
for _, subMatch := range subMatches {
group := subMatch[0]
if len(subMatch) > 1 {
group = subMatch[1]
}
if strings.HasPrefix(group, "term ") {
seriesName = strings.Replace(seriesName, subMatch[0], series.Tags[group[5:]], 1)
}
if v, ok := series.Tags[group]; ok {
seriesName = strings.Replace(seriesName, subMatch[0], v, 1)
}
if group == "metric" {
seriesName = strings.Replace(seriesName, subMatch[0], metricName, 1)
}
if group == "field" {
seriesName = strings.Replace(seriesName, subMatch[0], field, 1)
}
}
return seriesName
}
// For pipeline aggregations, field holds the id of the metric the pipeline is based on
if field != "" && isPipelineAgg(metricType) {
found := false
for _, metric := range target.Metrics {
if metric.ID == field {
metricName += " " + describeMetric(metric.Type, field)
found = true
}
}
if !found {
metricName = "Unset"
}
} else if field != "" {
metricName += " " + field
}
if len(series.Tags) == 0 {
return metricName
}
name := ""
for _, v := range series.Tags {
name += v + " "
}
if metricTypeCount == 1 {
return strings.TrimSpace(name)
}
return strings.TrimSpace(name) + " " + metricName
}
func (rp *responseParser) getMetricName(metric string) string {
if text, ok := metricAggType[metric]; ok {
return text
}
if text, ok := extendedStats[metric]; ok {
return text
}
return metric
}
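// castToNullFloat converts a JSON value to a nullable float. Numbers and
// numeric strings are parsed; "NaN" (in any casing) and unparsable values
// yield a null float.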
func castToNullFloat(j *simplejson.Json) null.Float {
f, err := j.Float64()
if err == nil {
return null.FloatFrom(f)
}
if s, err := j.String(); err == nil {
if strings.ToLower(s) == "nan" {
return null.NewFloat(0, false)
}
if v, err := strconv.ParseFloat(s, 64); err == nil {
return null.FloatFromPtr(&v)
}
}
return null.NewFloat(0, false)
}
func findAgg(target *Query, aggID string) (*BucketAgg, error) {
for _, v := range target.BucketAggs {
if aggID == v.ID {
return v, nil
}
}
return nil, errors.New("could not find agg def, aggID: " + aggID)
}
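// getErrorFromElasticResponse extracts an error message from an
// elasticsearch error response, preferring the first root cause reason over
// the top level reason.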
func getErrorFromElasticResponse(response *es.SearchResponse) *tsdb.QueryResult {
result := tsdb.NewQueryResult()
json := simplejson.NewFromAny(response.Error)
reason := json.Get("reason").MustString()
rootCauseReason := json.Get("root_cause").GetIndex(0).Get("reason").MustString()
if rootCauseReason != "" {
result.ErrorString = rootCauseReason
} else if reason != "" {
result.ErrorString = reason
} else {
result.ErrorString = "Unknown elasticsearch error response"
}
return result
}


@@ -0,0 +1,880 @@
package elasticsearch
import (
"encoding/json"
"fmt"
"testing"
"time"
"github.com/grafana/grafana/pkg/components/null"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
)
func TestResponseParser(t *testing.T) {
Convey("Elasticsearch response parser test", t, func() {
Convey("Simple query and count", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "count", "id": "1" }],
"bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": [
{
"doc_count": 10,
"key": 1000
},
{
"doc_count": 15,
"key": 2000
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 1)
series := queryRes.Series[0]
So(series.Name, ShouldEqual, "Count")
So(series.Points, ShouldHaveLength, 2)
So(series.Points[0][0].Float64, ShouldEqual, 10)
So(series.Points[0][1].Float64, ShouldEqual, 1000)
So(series.Points[1][0].Float64, ShouldEqual, 15)
So(series.Points[1][1].Float64, ShouldEqual, 2000)
})
Convey("Simple query count & avg aggregation", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "count", "id": "1" }, {"type": "avg", "field": "value", "id": "2" }],
"bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "3" }]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"3": {
"buckets": [
{
"2": { "value": 88 },
"doc_count": 10,
"key": 1000
},
{
"2": { "value": 99 },
"doc_count": 15,
"key": 2000
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 2)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "Count")
So(seriesOne.Points, ShouldHaveLength, 2)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 10)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesOne.Points[1][0].Float64, ShouldEqual, 15)
So(seriesOne.Points[1][1].Float64, ShouldEqual, 2000)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "Average value")
So(seriesTwo.Points, ShouldHaveLength, 2)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, 88)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesTwo.Points[1][0].Float64, ShouldEqual, 99)
So(seriesTwo.Points[1][1].Float64, ShouldEqual, 2000)
})
Convey("Single group by query one metric", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "count", "id": "1" }],
"bucketAggs": [
{ "type": "terms", "field": "host", "id": "2" },
{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": [
{
"3": {
"buckets": [{ "doc_count": 1, "key": 1000 }, { "doc_count": 3, "key": 2000 }]
},
"doc_count": 4,
"key": "server1"
},
{
"3": {
"buckets": [{ "doc_count": 2, "key": 1000 }, { "doc_count": 8, "key": 2000 }]
},
"doc_count": 10,
"key": "server2"
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 2)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "server1")
So(seriesOne.Points, ShouldHaveLength, 2)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 1)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesOne.Points[1][0].Float64, ShouldEqual, 3)
So(seriesOne.Points[1][1].Float64, ShouldEqual, 2000)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "server2")
So(seriesTwo.Points, ShouldHaveLength, 2)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, 2)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesTwo.Points[1][0].Float64, ShouldEqual, 8)
So(seriesTwo.Points[1][1].Float64, ShouldEqual, 2000)
})
Convey("Single group by query two metrics", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "count", "id": "1" }, { "type": "avg", "field": "@value", "id": "4" }],
"bucketAggs": [
{ "type": "terms", "field": "host", "id": "2" },
{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": [
{
"3": {
"buckets": [
{ "4": { "value": 10 }, "doc_count": 1, "key": 1000 },
{ "4": { "value": 12 }, "doc_count": 3, "key": 2000 }
]
},
"doc_count": 4,
"key": "server1"
},
{
"3": {
"buckets": [
{ "4": { "value": 20 }, "doc_count": 1, "key": 1000 },
{ "4": { "value": 32 }, "doc_count": 3, "key": 2000 }
]
},
"doc_count": 10,
"key": "server2"
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 4)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "server1 Count")
So(seriesOne.Points, ShouldHaveLength, 2)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 1)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesOne.Points[1][0].Float64, ShouldEqual, 3)
So(seriesOne.Points[1][1].Float64, ShouldEqual, 2000)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "server1 Average @value")
So(seriesTwo.Points, ShouldHaveLength, 2)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, 10)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesTwo.Points[1][0].Float64, ShouldEqual, 12)
So(seriesTwo.Points[1][1].Float64, ShouldEqual, 2000)
seriesThree := queryRes.Series[2]
So(seriesThree.Name, ShouldEqual, "server2 Count")
So(seriesThree.Points, ShouldHaveLength, 2)
So(seriesThree.Points[0][0].Float64, ShouldEqual, 1)
So(seriesThree.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesThree.Points[1][0].Float64, ShouldEqual, 3)
So(seriesThree.Points[1][1].Float64, ShouldEqual, 2000)
seriesFour := queryRes.Series[3]
So(seriesFour.Name, ShouldEqual, "server2 Average @value")
So(seriesFour.Points, ShouldHaveLength, 2)
So(seriesFour.Points[0][0].Float64, ShouldEqual, 20)
So(seriesFour.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesFour.Points[1][0].Float64, ShouldEqual, 32)
So(seriesFour.Points[1][1].Float64, ShouldEqual, 2000)
})
Convey("With percentiles", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "percentiles", "settings": { "percents": [75, 90] }, "id": "1" }],
"bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "3" }]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"3": {
"buckets": [
{
"1": { "values": { "75": 3.3, "90": 5.5 } },
"doc_count": 10,
"key": 1000
},
{
"1": { "values": { "75": 2.3, "90": 4.5 } },
"doc_count": 15,
"key": 2000
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 2)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "p75")
So(seriesOne.Points, ShouldHaveLength, 2)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 3.3)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesOne.Points[1][0].Float64, ShouldEqual, 2.3)
So(seriesOne.Points[1][1].Float64, ShouldEqual, 2000)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "p90")
So(seriesTwo.Points, ShouldHaveLength, 2)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, 5.5)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesTwo.Points[1][0].Float64, ShouldEqual, 4.5)
So(seriesTwo.Points[1][1].Float64, ShouldEqual, 2000)
})
Convey("With extended stats", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "extended_stats", "meta": { "max": true, "std_deviation_bounds_upper": true, "std_deviation_bounds_lower": true }, "id": "1" }],
"bucketAggs": [
{ "type": "terms", "field": "host", "id": "3" },
{ "type": "date_histogram", "field": "@timestamp", "id": "4" }
]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"3": {
"buckets": [
{
"key": "server1",
"4": {
"buckets": [
{
"1": {
"max": 10.2,
"min": 5.5,
"std_deviation_bounds": { "upper": 3, "lower": -2 }
},
"doc_count": 10,
"key": 1000
}
]
}
},
{
"key": "server2",
"4": {
"buckets": [
{
"1": {
"max": 15.5,
"min": 3.4,
"std_deviation_bounds": { "upper": 4, "lower": -1 }
},
"doc_count": 10,
"key": 1000
}
]
}
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 6)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "server1 Max")
So(seriesOne.Points, ShouldHaveLength, 1)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 10.2)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 1000)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "server1 Std Dev Lower")
So(seriesTwo.Points, ShouldHaveLength, 1)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, -2)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 1000)
seriesThree := queryRes.Series[2]
So(seriesThree.Name, ShouldEqual, "server1 Std Dev Upper")
So(seriesThree.Points, ShouldHaveLength, 1)
So(seriesThree.Points[0][0].Float64, ShouldEqual, 3)
So(seriesThree.Points[0][1].Float64, ShouldEqual, 1000)
seriesFour := queryRes.Series[3]
So(seriesFour.Name, ShouldEqual, "server2 Max")
So(seriesFour.Points, ShouldHaveLength, 1)
So(seriesFour.Points[0][0].Float64, ShouldEqual, 15.5)
So(seriesFour.Points[0][1].Float64, ShouldEqual, 1000)
seriesFive := queryRes.Series[4]
So(seriesFive.Name, ShouldEqual, "server2 Std Dev Lower")
So(seriesFive.Points, ShouldHaveLength, 1)
So(seriesFive.Points[0][0].Float64, ShouldEqual, -1)
So(seriesFive.Points[0][1].Float64, ShouldEqual, 1000)
seriesSix := queryRes.Series[5]
So(seriesSix.Name, ShouldEqual, "server2 Std Dev Upper")
So(seriesSix.Points, ShouldHaveLength, 1)
So(seriesSix.Points[0][0].Float64, ShouldEqual, 4)
So(seriesSix.Points[0][1].Float64, ShouldEqual, 1000)
})
Convey("Single group by with alias pattern", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"alias": "{{term @host}} {{metric}} and {{not_exist}} {{@host}}",
"metrics": [{ "type": "count", "id": "1" }],
"bucketAggs": [
{ "type": "terms", "field": "@host", "id": "2" },
{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": [
{
"3": {
"buckets": [{ "doc_count": 1, "key": 1000 }, { "doc_count": 3, "key": 2000 }]
},
"doc_count": 4,
"key": "server1"
},
{
"3": {
"buckets": [{ "doc_count": 2, "key": 1000 }, { "doc_count": 8, "key": 2000 }]
},
"doc_count": 10,
"key": "server2"
},
{
"3": {
"buckets": [{ "doc_count": 2, "key": 1000 }, { "doc_count": 8, "key": 2000 }]
},
"doc_count": 10,
"key": 0
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 3)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "server1 Count and {{not_exist}} server1")
So(seriesOne.Points, ShouldHaveLength, 2)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 1)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesOne.Points[1][0].Float64, ShouldEqual, 3)
So(seriesOne.Points[1][1].Float64, ShouldEqual, 2000)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "server2 Count and {{not_exist}} server2")
So(seriesTwo.Points, ShouldHaveLength, 2)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, 2)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesTwo.Points[1][0].Float64, ShouldEqual, 8)
So(seriesTwo.Points[1][1].Float64, ShouldEqual, 2000)
seriesThree := queryRes.Series[2]
So(seriesThree.Name, ShouldEqual, "0 Count and {{not_exist}} 0")
So(seriesThree.Points, ShouldHaveLength, 2)
So(seriesThree.Points[0][0].Float64, ShouldEqual, 2)
So(seriesThree.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesThree.Points[1][0].Float64, ShouldEqual, 8)
So(seriesThree.Points[1][1].Float64, ShouldEqual, 2000)
})
Convey("Histogram response", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "count", "id": "1" }],
"bucketAggs": [{ "type": "histogram", "field": "bytes", "id": "3" }]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"3": {
"buckets": [{ "doc_count": 1, "key": 1000 }, { "doc_count": 3, "key": 2000 }, { "doc_count": 2, "key": 3000 }]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Tables, ShouldHaveLength, 1)
rows := queryRes.Tables[0].Rows
So(rows, ShouldHaveLength, 3)
cols := queryRes.Tables[0].Columns
So(cols, ShouldHaveLength, 2)
So(cols[0].Text, ShouldEqual, "bytes")
So(cols[1].Text, ShouldEqual, "Count")
So(rows[0][0].(null.Float).Float64, ShouldEqual, 1000)
So(rows[0][1].(null.Float).Float64, ShouldEqual, 1)
So(rows[1][0].(null.Float).Float64, ShouldEqual, 2000)
So(rows[1][1].(null.Float).Float64, ShouldEqual, 3)
So(rows[2][0].(null.Float).Float64, ShouldEqual, 3000)
So(rows[2][1].(null.Float).Float64, ShouldEqual, 2)
})
Convey("With two filters agg", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "count", "id": "1" }],
"bucketAggs": [
{
"type": "filters",
"id": "2",
"settings": {
"filters": [{ "query": "@metric:cpu" }, { "query": "@metric:logins.count" }]
}
},
{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": {
"@metric:cpu": {
"3": {
"buckets": [{ "doc_count": 1, "key": 1000 }, { "doc_count": 3, "key": 2000 }]
}
},
"@metric:logins.count": {
"3": {
"buckets": [{ "doc_count": 2, "key": 1000 }, { "doc_count": 8, "key": 2000 }]
}
}
}
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 2)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "@metric:cpu")
So(seriesOne.Points, ShouldHaveLength, 2)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 1)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesOne.Points[1][0].Float64, ShouldEqual, 3)
So(seriesOne.Points[1][1].Float64, ShouldEqual, 2000)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "@metric:logins.count")
So(seriesTwo.Points, ShouldHaveLength, 2)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, 2)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 1000)
So(seriesTwo.Points[1][0].Float64, ShouldEqual, 8)
So(seriesTwo.Points[1][1].Float64, ShouldEqual, 2000)
})
Convey("With dropfirst and last aggregation", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "avg", "id": "1" }, { "type": "count" }],
"bucketAggs": [
{
"type": "date_histogram",
"field": "@timestamp",
"id": "2",
"settings": { "trimEdges": 1 }
}
]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": [
{
"1": { "value": 1000 },
"key": 1,
"doc_count": 369
},
{
"1": { "value": 2000 },
"key": 2,
"doc_count": 200
},
{
"1": { "value": 2000 },
"key": 3,
"doc_count": 200
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Series, ShouldHaveLength, 2)
seriesOne := queryRes.Series[0]
So(seriesOne.Name, ShouldEqual, "Average")
So(seriesOne.Points, ShouldHaveLength, 1)
So(seriesOne.Points[0][0].Float64, ShouldEqual, 2000)
So(seriesOne.Points[0][1].Float64, ShouldEqual, 2)
seriesTwo := queryRes.Series[1]
So(seriesTwo.Name, ShouldEqual, "Count")
So(seriesTwo.Points, ShouldHaveLength, 1)
So(seriesTwo.Points[0][0].Float64, ShouldEqual, 200)
So(seriesTwo.Points[0][1].Float64, ShouldEqual, 2)
})
Convey("No group by time", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "avg", "id": "1" }, { "type": "count" }],
"bucketAggs": [{ "type": "terms", "field": "host", "id": "2" }]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": [
{
"1": { "value": 1000 },
"key": "server-1",
"doc_count": 369
},
{
"1": { "value": 2000 },
"key": "server-2",
"doc_count": 200
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Tables, ShouldHaveLength, 1)
rows := queryRes.Tables[0].Rows
So(rows, ShouldHaveLength, 2)
cols := queryRes.Tables[0].Columns
So(cols, ShouldHaveLength, 3)
So(cols[0].Text, ShouldEqual, "host")
So(cols[1].Text, ShouldEqual, "Average")
So(cols[2].Text, ShouldEqual, "Count")
So(rows[0][0].(string), ShouldEqual, "server-1")
So(rows[0][1].(null.Float).Float64, ShouldEqual, 1000)
So(rows[0][2].(null.Float).Float64, ShouldEqual, 369)
So(rows[1][0].(string), ShouldEqual, "server-2")
So(rows[1][1].(null.Float).Float64, ShouldEqual, 2000)
So(rows[1][2].(null.Float).Float64, ShouldEqual, 200)
})
Convey("Multiple metrics of same type", func() {
targets := map[string]string{
"A": `{
"timeField": "@timestamp",
"metrics": [{ "type": "avg", "field": "test", "id": "1" }, { "type": "avg", "field": "test2", "id": "2" }],
"bucketAggs": [{ "type": "terms", "field": "host", "id": "2" }]
}`,
}
response := `{
"responses": [
{
"aggregations": {
"2": {
"buckets": [
{
"1": { "value": 1000 },
"2": { "value": 3000 },
"key": "server-1",
"doc_count": 369
}
]
}
}
}
]
}`
rp, err := newResponseParserForTest(targets, response)
So(err, ShouldBeNil)
result, err := rp.getTimeSeries()
So(err, ShouldBeNil)
So(result.Results, ShouldHaveLength, 1)
queryRes := result.Results["A"]
So(queryRes, ShouldNotBeNil)
So(queryRes.Tables, ShouldHaveLength, 1)
rows := queryRes.Tables[0].Rows
So(rows, ShouldHaveLength, 1)
cols := queryRes.Tables[0].Columns
So(cols, ShouldHaveLength, 3)
So(cols[0].Text, ShouldEqual, "host")
So(cols[1].Text, ShouldEqual, "Average test")
So(cols[2].Text, ShouldEqual, "Average test2")
So(rows[0][0].(string), ShouldEqual, "server-1")
So(rows[0][1].(null.Float).Float64, ShouldEqual, 1000)
So(rows[0][2].(null.Float).Float64, ShouldEqual, 3000)
})
// Convey("Raw documents query", func() {
// targets := map[string]string{
// "A": `{
// "timeField": "@timestamp",
// "metrics": [{ "type": "raw_document", "id": "1" }]
// }`,
// }
// response := `{
// "responses": [
// {
// "hits": {
// "total": 100,
// "hits": [
// {
// "_id": "1",
// "_type": "type",
// "_index": "index",
// "_source": { "sourceProp": "asd" },
// "fields": { "fieldProp": "field" }
// },
// {
// "_source": { "sourceProp": "asd2" },
// "fields": { "fieldProp": "field2" }
// }
// ]
// }
// }
// ]
// }`
// rp, err := newResponseParserForTest(targets, response)
// So(err, ShouldBeNil)
// result, err := rp.getTimeSeries()
// So(err, ShouldBeNil)
// So(result.Results, ShouldHaveLength, 1)
// queryRes := result.Results["A"]
// So(queryRes, ShouldNotBeNil)
// So(queryRes.Tables, ShouldHaveLength, 1)
// rows := queryRes.Tables[0].Rows
// So(rows, ShouldHaveLength, 1)
// cols := queryRes.Tables[0].Columns
// So(cols, ShouldHaveLength, 3)
// So(cols[0].Text, ShouldEqual, "host")
// So(cols[1].Text, ShouldEqual, "Average test")
// So(cols[2].Text, ShouldEqual, "Average test2")
// So(rows[0][0].(string), ShouldEqual, "server-1")
// So(rows[0][1].(null.Float).Float64, ShouldEqual, 1000)
// So(rows[0][2].(null.Float).Float64, ShouldEqual, 3000)
// })
})
}
func newResponseParserForTest(tsdbQueries map[string]string, responseBody string) (*responseParser, error) {
from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)
fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
tsdbQuery := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{},
TimeRange: tsdb.NewTimeRange(fromStr, toStr),
}
for refID, tsdbQueryBody := range tsdbQueries {
tsdbQueryJSON, err := simplejson.NewJson([]byte(tsdbQueryBody))
if err != nil {
return nil, err
}
tsdbQuery.Queries = append(tsdbQuery.Queries, &tsdb.Query{
Model: tsdbQueryJSON,
RefId: refID,
})
}
var response es.MultiSearchResponse
err := json.Unmarshal([]byte(responseBody), &response)
if err != nil {
return nil, err
}
tsQueryParser := newTimeSeriesQueryParser()
queries, err := tsQueryParser.parse(tsdbQuery)
if err != nil {
return nil, err
}
return newResponseParser(response.Responses, queries), nil
}


@@ -0,0 +1,318 @@
package elasticsearch
import (
"fmt"
"strconv"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
)
type timeSeriesQuery struct {
client es.Client
tsdbQuery *tsdb.TsdbQuery
intervalCalculator tsdb.IntervalCalculator
}
var newTimeSeriesQuery = func(client es.Client, tsdbQuery *tsdb.TsdbQuery, intervalCalculator tsdb.IntervalCalculator) *timeSeriesQuery {
return &timeSeriesQuery{
client: client,
tsdbQuery: tsdbQuery,
intervalCalculator: intervalCalculator,
}
}
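// execute builds one search request per query, each with a time range
// filter, an optional query string filter and the configured bucket and
// metric aggregations, sends them as a single multisearch request and
// parses the responses into time series.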
func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
result := &tsdb.Response{}
result.Results = make(map[string]*tsdb.QueryResult)
tsQueryParser := newTimeSeriesQueryParser()
queries, err := tsQueryParser.parse(e.tsdbQuery)
if err != nil {
return nil, err
}
ms := e.client.MultiSearch()
from := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetFromAsMsEpoch())
to := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetToAsMsEpoch())
for _, q := range queries {
minInterval, err := e.client.GetMinInterval(q.Interval)
if err != nil {
return nil, err
}
interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)
b := ms.Search(interval)
b.Size(0)
filters := b.Query().Bool().Filter()
filters.AddDateRangeFilter(e.client.GetTimeField(), to, from, es.DateFormatEpochMS)
if q.RawQuery != "" {
filters.AddQueryStringFilter(q.RawQuery, true)
}
if len(q.BucketAggs) == 0 {
if len(q.Metrics) == 0 || q.Metrics[0].Type != "raw_document" {
result.Results[q.RefID] = &tsdb.QueryResult{
RefId: q.RefID,
Error: fmt.Errorf("invalid query, missing metrics and aggregations"),
ErrorString: "invalid query, missing metrics and aggregations",
}
continue
}
metric := q.Metrics[0]
b.Size(metric.Settings.Get("size").MustInt(500))
b.SortDesc("@timestamp", "boolean")
b.AddDocValueField("@timestamp")
continue
}
aggBuilder := b.Agg()
// each bucket aggregation is nested inside the previous one, building the aggregation tree top-down
for _, bucketAgg := range q.BucketAggs {
switch bucketAgg.Type {
case "date_histogram":
aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
case "histogram":
aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
case "filters":
aggBuilder = addFiltersAgg(aggBuilder, bucketAgg)
case "terms":
aggBuilder = addTermsAgg(aggBuilder, bucketAgg, q.Metrics)
case "geohash_grid":
aggBuilder = addGeoHashGridAgg(aggBuilder, bucketAgg)
}
}
for _, m := range q.Metrics {
if m.Type == "count" {
continue
}
if isPipelineAgg(m.Type) {
if _, err := strconv.Atoi(m.PipelineAggregate); err == nil {
aggBuilder.Pipeline(m.ID, m.Type, m.PipelineAggregate, func(a *es.PipelineAggregation) {
a.Settings = m.Settings.MustMap()
})
} else {
continue
}
} else {
aggBuilder.Metric(m.ID, m.Type, m.Field, func(a *es.MetricAggregation) {
a.Settings = m.Settings.MustMap()
})
}
}
}
req, err := ms.Build()
if err != nil {
return nil, err
}
res, err := e.client.ExecuteMultisearch(req)
if err != nil {
return nil, err
}
rp := newResponseParser(res.Responses, queries)
return rp.getTimeSeries()
}
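// addDateHistogramAgg adds a date histogram bucket aggregation bounded to
// the query time range. An "auto" interval is replaced by the "$__interval"
// placeholder, which is later resolved to the calculated interval.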
func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
aggBuilder.DateHistogram(bucketAgg.ID, bucketAgg.Field, func(a *es.DateHistogramAgg, b es.AggBuilder) {
a.Interval = bucketAgg.Settings.Get("interval").MustString("auto")
a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
a.ExtendedBounds = &es.ExtendedBounds{Min: timeFrom, Max: timeTo}
a.Format = bucketAgg.Settings.Get("format").MustString(es.DateFormatEpochMS)
if a.Interval == "auto" {
a.Interval = "$__interval"
}
if missing, err := bucketAgg.Settings.Get("missing").String(); err == nil {
a.Missing = &missing
}
aggBuilder = b
})
return aggBuilder
}
func addHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
aggBuilder.Histogram(bucketAgg.ID, bucketAgg.Field, func(a *es.HistogramAgg, b es.AggBuilder) {
a.Interval = bucketAgg.Settings.Get("interval").MustInt(1000)
a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
if missing, err := bucketAgg.Settings.Get("missing").Int(); err == nil {
a.Missing = &missing
}
aggBuilder = b
})
return aggBuilder
}
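// addTermsAgg adds a terms bucket aggregation. The size defaults to 500,
// and when "orderBy" references a metric aggregation by id, that metric is
// added as a child aggregation so elasticsearch can order the buckets by
// its value.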
func addTermsAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, metrics []*MetricAgg) es.AggBuilder {
aggBuilder.Terms(bucketAgg.ID, bucketAgg.Field, func(a *es.TermsAggregation, b es.AggBuilder) {
if size, err := bucketAgg.Settings.Get("size").Int(); err == nil {
a.Size = size
} else if size, err := bucketAgg.Settings.Get("size").String(); err == nil {
a.Size, err = strconv.Atoi(size)
if err != nil {
a.Size = 500
}
} else {
a.Size = 500
}
if minDocCount, err := bucketAgg.Settings.Get("min_doc_count").Int(); err == nil {
a.MinDocCount = &minDocCount
}
if missing, err := bucketAgg.Settings.Get("missing").String(); err == nil {
a.Missing = &missing
}
if orderBy, err := bucketAgg.Settings.Get("orderBy").String(); err == nil {
a.Order[orderBy] = bucketAgg.Settings.Get("order").MustString("desc")
if _, err := strconv.Atoi(orderBy); err == nil {
for _, m := range metrics {
if m.ID == orderBy {
b.Metric(m.ID, m.Type, m.Field, nil)
break
}
}
}
}
aggBuilder = b
})
return aggBuilder
}
func addFiltersAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
filters := make(map[string]interface{})
for _, filter := range bucketAgg.Settings.Get("filters").MustArray() {
json := simplejson.NewFromAny(filter)
query := json.Get("query").MustString()
label := json.Get("label").MustString()
if label == "" {
label = query
}
filters[label] = &es.QueryStringFilter{Query: query, AnalyzeWildcard: true}
}
if len(filters) > 0 {
aggBuilder.Filters(bucketAgg.ID, func(a *es.FiltersAggregation, b es.AggBuilder) {
a.Filters = filters
aggBuilder = b
})
}
return aggBuilder
}
func addGeoHashGridAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
aggBuilder.GeoHashGrid(bucketAgg.ID, bucketAgg.Field, func(a *es.GeoHashGridAggregation, b es.AggBuilder) {
a.Precision = bucketAgg.Settings.Get("precision").MustInt(3)
aggBuilder = b
})
return aggBuilder
}
type timeSeriesQueryParser struct{}
func newTimeSeriesQueryParser() *timeSeriesQueryParser {
return &timeSeriesQueryParser{}
}
func (p *timeSeriesQueryParser) parse(tsdbQuery *tsdb.TsdbQuery) ([]*Query, error) {
queries := make([]*Query, 0)
for _, q := range tsdbQuery.Queries {
model := q.Model
timeField, err := model.Get("timeField").String()
if err != nil {
return nil, err
}
rawQuery := model.Get("query").MustString()
bucketAggs, err := p.parseBucketAggs(model)
if err != nil {
return nil, err
}
metrics, err := p.parseMetrics(model)
if err != nil {
return nil, err
}
alias := model.Get("alias").MustString("")
interval := strconv.FormatInt(q.IntervalMs, 10) + "ms"
queries = append(queries, &Query{
TimeField: timeField,
RawQuery: rawQuery,
BucketAggs: bucketAggs,
Metrics: metrics,
Alias: alias,
Interval: interval,
RefID: q.RefId,
})
}
return queries, nil
}
func (p *timeSeriesQueryParser) parseBucketAggs(model *simplejson.Json) ([]*BucketAgg, error) {
var err error
var result []*BucketAgg
for _, t := range model.Get("bucketAggs").MustArray() {
aggJSON := simplejson.NewFromAny(t)
agg := &BucketAgg{}
agg.Type, err = aggJSON.Get("type").String()
if err != nil {
return nil, err
}
agg.ID, err = aggJSON.Get("id").String()
if err != nil {
return nil, err
}
agg.Field = aggJSON.Get("field").MustString()
agg.Settings = simplejson.NewFromAny(aggJSON.Get("settings").MustMap())
result = append(result, agg)
}
return result, nil
}
func (p *timeSeriesQueryParser) parseMetrics(model *simplejson.Json) ([]*MetricAgg, error) {
var err error
var result []*MetricAgg
for _, t := range model.Get("metrics").MustArray() {
metricJSON := simplejson.NewFromAny(t)
metric := &MetricAgg{}
metric.Field = metricJSON.Get("field").MustString()
metric.Hide = metricJSON.Get("hide").MustBool(false)
metric.ID = metricJSON.Get("id").MustString()
metric.PipelineAggregate = metricJSON.Get("pipelineAgg").MustString()
metric.Settings = simplejson.NewFromAny(metricJSON.Get("settings").MustMap())
metric.Meta = simplejson.NewFromAny(metricJSON.Get("meta").MustMap())
metric.Type, err = metricJSON.Get("type").String()
if err != nil {
return nil, err
}
result = append(result, metric)
}
return result, nil
}


@@ -0,0 +1,604 @@
package elasticsearch
import (
"fmt"
"testing"
"time"
"github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/tsdb"
. "github.com/smartystreets/goconvey/convey"
)
func TestExecuteTimeSeriesQuery(t *testing.T) {
from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)
fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
Convey("Test execute time series query", t, func() {
Convey("With defaults on es 2", func() {
c := newFakeClient(2)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }],
"metrics": [{"type": "count", "id": "0" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
rangeFilter := sr.Query.Bool.Filters[0].(*es.RangeFilter)
So(rangeFilter.Key, ShouldEqual, c.timeField)
So(rangeFilter.Lte, ShouldEqual, toStr)
So(rangeFilter.Gte, ShouldEqual, fromStr)
So(rangeFilter.Format, ShouldEqual, es.DateFormatEpochMS)
So(sr.Aggs[0].Key, ShouldEqual, "2")
dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
So(dateHistogramAgg.Field, ShouldEqual, "@timestamp")
So(dateHistogramAgg.ExtendedBounds.Min, ShouldEqual, fromStr)
So(dateHistogramAgg.ExtendedBounds.Max, ShouldEqual, toStr)
})
Convey("With defaults on es 5", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }],
"metrics": [{"type": "count", "id": "0" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
So(sr.Query.Bool.Filters[0].(*es.RangeFilter).Key, ShouldEqual, c.timeField)
So(sr.Aggs[0].Key, ShouldEqual, "2")
So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Min, ShouldEqual, fromStr)
So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Max, ShouldEqual, toStr)
})
Convey("With multiple bucket aggs", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{ "type": "terms", "field": "@host", "id": "2" },
{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
],
"metrics": [{"type": "count", "id": "1" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "2")
So(firstLevel.Aggregation.Aggregation.(*es.TermsAggregation).Field, ShouldEqual, "@host")
secondLevel := firstLevel.Aggregation.Aggs[0]
So(secondLevel.Key, ShouldEqual, "3")
So(secondLevel.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
})
Convey("With select field", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{ "type": "date_histogram", "field": "@timestamp", "id": "2" }
],
"metrics": [{"type": "avg", "field": "@value", "id": "1" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "2")
So(firstLevel.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
secondLevel := firstLevel.Aggregation.Aggs[0]
So(secondLevel.Key, ShouldEqual, "1")
So(secondLevel.Aggregation.Type, ShouldEqual, "avg")
So(secondLevel.Aggregation.Aggregation.(*es.MetricAggregation).Field, ShouldEqual, "@value")
})
Convey("With term agg and order by metric agg", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{
"type": "terms",
"field": "@host",
"id": "2",
"settings": { "size": "5", "order": "asc", "orderBy": "5" }
},
{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
],
"metrics": [
{"type": "count", "id": "1" },
{"type": "avg", "field": "@value", "id": "5" }
]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
avgAggOrderBy := sr.Aggs[0].Aggregation.Aggs[0]
So(avgAggOrderBy.Key, ShouldEqual, "5")
So(avgAggOrderBy.Aggregation.Type, ShouldEqual, "avg")
avgAgg := sr.Aggs[0].Aggregation.Aggs[1].Aggregation.Aggs[0]
So(avgAgg.Key, ShouldEqual, "5")
So(avgAgg.Aggregation.Type, ShouldEqual, "avg")
})
Convey("With metric percentiles", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{ "type": "date_histogram", "field": "@timestamp", "id": "3" }
],
"metrics": [
{
"id": "1",
"type": "percentiles",
"field": "@load_time",
"settings": {
"percents": [ "1", "2", "3", "4" ]
}
}
]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
percentilesAgg := sr.Aggs[0].Aggregation.Aggs[0]
So(percentilesAgg.Key, ShouldEqual, "1")
So(percentilesAgg.Aggregation.Type, ShouldEqual, "percentiles")
metricAgg := percentilesAgg.Aggregation.Aggregation.(*es.MetricAggregation)
percents := metricAgg.Settings["percents"].([]interface{})
So(percents, ShouldHaveLength, 4)
So(percents[0], ShouldEqual, "1")
So(percents[1], ShouldEqual, "2")
So(percents[2], ShouldEqual, "3")
So(percents[3], ShouldEqual, "4")
})
Convey("With filters aggs on es 2", func() {
c := newFakeClient(2)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{
"id": "2",
"type": "filters",
"settings": {
"filters": [ { "query": "@metric:cpu" }, { "query": "@metric:logins.count" } ]
}
},
{ "type": "date_histogram", "field": "@timestamp", "id": "4" }
],
"metrics": [{"type": "count", "id": "1" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
filtersAgg := sr.Aggs[0]
So(filtersAgg.Key, ShouldEqual, "2")
So(filtersAgg.Aggregation.Type, ShouldEqual, "filters")
fAgg := filtersAgg.Aggregation.Aggregation.(*es.FiltersAggregation)
So(fAgg.Filters["@metric:cpu"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:cpu")
So(fAgg.Filters["@metric:logins.count"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:logins.count")
dateHistogramAgg := sr.Aggs[0].Aggregation.Aggs[0]
So(dateHistogramAgg.Key, ShouldEqual, "4")
So(dateHistogramAgg.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
})
Convey("With filters aggs on es 5", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{
"id": "2",
"type": "filters",
"settings": {
"filters": [ { "query": "@metric:cpu" }, { "query": "@metric:logins.count" } ]
}
},
{ "type": "date_histogram", "field": "@timestamp", "id": "4" }
],
"metrics": [{"type": "count", "id": "1" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
filtersAgg := sr.Aggs[0]
So(filtersAgg.Key, ShouldEqual, "2")
So(filtersAgg.Aggregation.Type, ShouldEqual, "filters")
fAgg := filtersAgg.Aggregation.Aggregation.(*es.FiltersAggregation)
So(fAgg.Filters["@metric:cpu"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:cpu")
So(fAgg.Filters["@metric:logins.count"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:logins.count")
dateHistogramAgg := sr.Aggs[0].Aggregation.Aggs[0]
So(dateHistogramAgg.Key, ShouldEqual, "4")
So(dateHistogramAgg.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
})
Convey("With raw document metric", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [],
"metrics": [{ "id": "1", "type": "raw_document", "settings": {} }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
So(sr.Size, ShouldEqual, 500)
})
Convey("With raw document metric size set", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [],
"metrics": [{ "id": "1", "type": "raw_document", "settings": { "size": 1337 } }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
So(sr.Size, ShouldEqual, 1337)
})
Convey("With date histogram agg", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{
"id": "2",
"type": "date_histogram",
"field": "@timestamp",
"settings": { "interval": "auto", "min_doc_count": 2 }
}
],
"metrics": [{"type": "count", "id": "1" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "2")
So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
hAgg := firstLevel.Aggregation.Aggregation.(*es.DateHistogramAgg)
So(hAgg.Field, ShouldEqual, "@timestamp")
So(hAgg.Interval, ShouldEqual, "$__interval")
So(hAgg.MinDocCount, ShouldEqual, 2)
})
Convey("With histogram agg", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{
"id": "3",
"type": "histogram",
"field": "bytes",
"settings": { "interval": 10, "min_doc_count": 2, "missing": 5 }
}
],
"metrics": [{"type": "count", "id": "1" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "3")
So(firstLevel.Aggregation.Type, ShouldEqual, "histogram")
hAgg := firstLevel.Aggregation.Aggregation.(*es.HistogramAgg)
So(hAgg.Field, ShouldEqual, "bytes")
So(hAgg.Interval, ShouldEqual, 10)
So(hAgg.MinDocCount, ShouldEqual, 2)
So(*hAgg.Missing, ShouldEqual, 5)
})
Convey("With geo hash grid agg", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{
"id": "3",
"type": "geohash_grid",
"field": "@location",
"settings": { "precision": 3 }
}
],
"metrics": [{"type": "count", "id": "1" }]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "3")
So(firstLevel.Aggregation.Type, ShouldEqual, "geohash_grid")
ghGridAgg := firstLevel.Aggregation.Aggregation.(*es.GeoHashGridAggregation)
So(ghGridAgg.Field, ShouldEqual, "@location")
So(ghGridAgg.Precision, ShouldEqual, 3)
})
Convey("With moving average", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{ "type": "date_histogram", "field": "@timestamp", "id": "4" }
],
"metrics": [
{ "id": "3", "type": "sum", "field": "@value" },
{
"id": "2",
"type": "moving_avg",
"field": "3",
"pipelineAgg": "3"
}
]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "4")
So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
So(firstLevel.Aggregation.Aggs, ShouldHaveLength, 2)
sumAgg := firstLevel.Aggregation.Aggs[0]
So(sumAgg.Key, ShouldEqual, "3")
So(sumAgg.Aggregation.Type, ShouldEqual, "sum")
mAgg := sumAgg.Aggregation.Aggregation.(*es.MetricAggregation)
So(mAgg.Field, ShouldEqual, "@value")
movingAvgAgg := firstLevel.Aggregation.Aggs[1]
So(movingAvgAgg.Key, ShouldEqual, "2")
So(movingAvgAgg.Aggregation.Type, ShouldEqual, "moving_avg")
pl := movingAvgAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
So(pl.BucketPath, ShouldEqual, "3")
})
Convey("With broken moving average", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{ "type": "date_histogram", "field": "@timestamp", "id": "5" }
],
"metrics": [
{ "id": "3", "type": "sum", "field": "@value" },
{
"id": "2",
"type": "moving_avg",
"pipelineAgg": "3"
},
{
"id": "4",
"type": "moving_avg",
"pipelineAgg": "Metric to apply moving average"
}
]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "5")
So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
So(firstLevel.Aggregation.Aggs, ShouldHaveLength, 2)
movingAvgAgg := firstLevel.Aggregation.Aggs[1]
So(movingAvgAgg.Key, ShouldEqual, "2")
plAgg := movingAvgAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
So(plAgg.BucketPath, ShouldEqual, "3")
})
Convey("With derivative", func() {
c := newFakeClient(5)
_, err := executeTsdbQuery(c, `{
"timeField": "@timestamp",
"bucketAggs": [
{ "type": "date_histogram", "field": "@timestamp", "id": "4" }
],
"metrics": [
{ "id": "3", "type": "sum", "field": "@value" },
{
"id": "2",
"type": "derivative",
"pipelineAgg": "3"
}
]
}`, from, to, 15*time.Second)
So(err, ShouldBeNil)
sr := c.multisearchRequests[0].Requests[0]
firstLevel := sr.Aggs[0]
So(firstLevel.Key, ShouldEqual, "4")
So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
derivativeAgg := firstLevel.Aggregation.Aggs[1]
So(derivativeAgg.Key, ShouldEqual, "2")
plAgg := derivativeAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
So(plAgg.BucketPath, ShouldEqual, "3")
})
})
}
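
// fakeClient is a test double for es.Client that records every multisearch request passed to ExecuteMultisearch so tests can assert on the generated request bodies.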
type fakeClient struct {
version int
timeField string
multiSearchResponse *es.MultiSearchResponse
multiSearchError error
builder *es.MultiSearchRequestBuilder
multisearchRequests []*es.MultiSearchRequest
}
func newFakeClient(version int) *fakeClient {
return &fakeClient{
version: version,
timeField: "@timestamp",
multisearchRequests: make([]*es.MultiSearchRequest, 0),
multiSearchResponse: &es.MultiSearchResponse{},
}
}
func (c *fakeClient) GetVersion() int {
return c.version
}
func (c *fakeClient) GetTimeField() string {
return c.timeField
}
func (c *fakeClient) GetMinInterval(queryInterval string) (time.Duration, error) {
return 15 * time.Second, nil
}
func (c *fakeClient) ExecuteMultisearch(r *es.MultiSearchRequest) (*es.MultiSearchResponse, error) {
c.multisearchRequests = append(c.multisearchRequests, r)
return c.multiSearchResponse, c.multiSearchError
}
func (c *fakeClient) MultiSearch() *es.MultiSearchRequestBuilder {
c.builder = es.NewMultiSearchRequestBuilder(c.version)
return c.builder
}
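
// newTsdbQuery wraps a raw JSON query model in a tsdb.TsdbQuery without a time range; used by the parser tests.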
func newTsdbQuery(body string) (*tsdb.TsdbQuery, error) {
json, err := simplejson.NewJson([]byte(body))
if err != nil {
return nil, err
}
return &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: json,
},
},
}, nil
}
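
// executeTsdbQuery parses the given JSON model, attaches an epoch-millisecond time range and executes the time series query against the supplied client.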
func executeTsdbQuery(c es.Client, body string, from, to time.Time, minInterval time.Duration) (*tsdb.Response, error) {
json, err := simplejson.NewJson([]byte(body))
if err != nil {
return nil, err
}
fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
tsdbQuery := &tsdb.TsdbQuery{
Queries: []*tsdb.Query{
{
Model: json,
},
},
TimeRange: tsdb.NewTimeRange(fromStr, toStr),
}
query := newTimeSeriesQuery(c, tsdbQuery, tsdb.NewIntervalCalculator(&tsdb.IntervalOptions{MinInterval: minInterval}))
return query.execute()
}
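
// TestTimeSeriesQueryParser verifies that a complete query model, including metrics and bucket aggs, is parsed into the expected Query values.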
func TestTimeSeriesQueryParser(t *testing.T) {
Convey("Test time series query parser", t, func() {
p := newTimeSeriesQueryParser()
Convey("Should be able to parse query", func() {
body := `{
"timeField": "@timestamp",
"query": "@metric:cpu",
"alias": "{{@hostname}} {{metric}}",
"metrics": [
{
"field": "@value",
"id": "1",
"meta": {},
"settings": {
"percents": [
"90"
]
},
"type": "percentiles"
},
{
"type": "count",
"field": "select field",
"id": "4",
"settings": {},
"meta": {}
}
],
"bucketAggs": [
{
"fake": true,
"field": "@hostname",
"id": "3",
"settings": {
"min_doc_count": 1,
"order": "desc",
"orderBy": "_term",
"size": "10"
},
"type": "terms"
},
{
"field": "@timestamp",
"id": "2",
"settings": {
"interval": "5m",
"min_doc_count": 0,
"trimEdges": 0
},
"type": "date_histogram"
}
]
}`
tsdbQuery, err := newTsdbQuery(body)
So(err, ShouldBeNil)
queries, err := p.parse(tsdbQuery)
So(err, ShouldBeNil)
So(queries, ShouldHaveLength, 1)
q := queries[0]
So(q.TimeField, ShouldEqual, "@timestamp")
So(q.RawQuery, ShouldEqual, "@metric:cpu")
So(q.Alias, ShouldEqual, "{{@hostname}} {{metric}}")
So(q.Metrics, ShouldHaveLength, 2)
So(q.Metrics[0].Field, ShouldEqual, "@value")
So(q.Metrics[0].ID, ShouldEqual, "1")
So(q.Metrics[0].Type, ShouldEqual, "percentiles")
So(q.Metrics[0].Hide, ShouldBeFalse)
So(q.Metrics[0].PipelineAggregate, ShouldEqual, "")
So(q.Metrics[0].Settings.Get("percents").MustStringArray()[0], ShouldEqual, "90")
So(q.Metrics[1].Field, ShouldEqual, "select field")
So(q.Metrics[1].ID, ShouldEqual, "4")
So(q.Metrics[1].Type, ShouldEqual, "count")
So(q.Metrics[1].Hide, ShouldBeFalse)
So(q.Metrics[1].PipelineAggregate, ShouldEqual, "")
So(q.Metrics[1].Settings.MustMap(), ShouldBeEmpty)
So(q.BucketAggs, ShouldHaveLength, 2)
So(q.BucketAggs[0].Field, ShouldEqual, "@hostname")
So(q.BucketAggs[0].ID, ShouldEqual, "3")
So(q.BucketAggs[0].Type, ShouldEqual, "terms")
So(q.BucketAggs[0].Settings.Get("min_doc_count").MustInt64(), ShouldEqual, 1)
So(q.BucketAggs[0].Settings.Get("order").MustString(), ShouldEqual, "desc")
So(q.BucketAggs[0].Settings.Get("orderBy").MustString(), ShouldEqual, "_term")
So(q.BucketAggs[0].Settings.Get("size").MustString(), ShouldEqual, "10")
So(q.BucketAggs[1].Field, ShouldEqual, "@timestamp")
So(q.BucketAggs[1].ID, ShouldEqual, "2")
So(q.BucketAggs[1].Type, ShouldEqual, "date_histogram")
So(q.BucketAggs[1].Settings.Get("interval").MustString(), ShouldEqual, "5m")
So(q.BucketAggs[1].Settings.Get("min_doc_count").MustInt64(), ShouldEqual, 0)
So(q.BucketAggs[1].Settings.Get("trimEdges").MustInt64(), ShouldEqual, 0)
})
})
}

View File

@ -408,4 +408,65 @@ export class ElasticDatasource {
getTagValues(options) {
return this.getTerms({ field: options.key, query: '*' });
}
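
// Returns true if the target's query, alias, bucket aggregations or metrics reference a template variable.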
targetContainsTemplate(target) {
if (this.templateSrv.variableExists(target.query) || this.templateSrv.variableExists(target.alias)) {
return true;
}
for (let bucketAgg of target.bucketAggs) {
if (this.templateSrv.variableExists(bucketAgg.field) || this.objectContainsTemplate(bucketAgg.settings)) {
return true;
}
}
for (let metric of target.metrics) {
if (
this.templateSrv.variableExists(metric.field) ||
this.objectContainsTemplate(metric.settings) ||
this.objectContainsTemplate(metric.meta)
) {
return true;
}
}
return false;
}
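
// Treats null/undefined, strings, numbers and booleans as primitive values.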
private isPrimitive(obj) {
if (obj === null || obj === undefined) {
return true;
}
if (['string', 'number', 'boolean'].some(type => type === typeof obj)) {
return true;
}
return false;
}
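
// Recursively checks object properties, including arrays, for template variable references.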
private objectContainsTemplate(obj) {
if (!obj) {
return false;
}
for (let key of Object.keys(obj)) {
if (this.isPrimitive(obj[key])) {
if (this.templateSrv.variableExists(obj[key])) {
return true;
}
} else if (Array.isArray(obj[key])) {
for (let item of obj[key]) {
if (this.objectContainsTemplate(item)) {
return true;
}
}
} else {
if (this.objectContainsTemplate(obj[key])) {
return true;
}
}
}
return false;
}
}

View File

@ -20,6 +20,7 @@
"version": "5.0.0"
},
"alerting": true,
"annotations": true,
"metrics": true,