mirror of
https://github.com/grafana/grafana.git
synced 2025-02-25 18:55:37 -06:00
* improve reduce transformer * add measurment classes * sync with new grafana measure format * use address for live * use plural in URL * set the field name * fix build * find changes * POST http to channel * Yarn: Update lock file (#28014) * Loki: Run instant query only in Explore (#27974) * Run instant query only in Explore * Replace forEach with for loop * don't cast * Docs: Fixed row display in table (#28031) * Plugins: Let descendant plugins inherit their root's signature (#27970) * plugins: Let descendant plugins inherit their root's signature Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Registry: Fix service shutdown mode trigger location (#28025) * Add Alex Khomenko as member (#28032) * show history * fix confirm * fix confirm * add tests * fix lint * add more errors * set values * remove unrelated changes * unrelated changes * Update pkg/models/live.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * Update pkg/models/live.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * Update pkg/services/live/live.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * Update pkg/services/live/pluginHandler.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * Update pkg/services/live/pluginHandler.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * Update pkg/services/live/pluginHandler.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * use measurments for testdata endpoints * add live to testdata * add live to testdata * Update pkg/services/live/channel.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * Apply suggestions from code review Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * update comment formats * uprevert testdata * Apply suggestions from code review Co-authored-by: Will Browne <wbrowne@users.noreply.github.com> Co-authored-by: Ryan McKinley <ryantxu@gmail.com> Co-authored-by: Hugo Häggmark <hugo.haggmark@grafana.com> * Apply suggestions from code review * CloudWatch: Add 
EC2CapacityReservations Namespace (#28309) * API: Fix short URLs (#28300) * API: Fix short URLs Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Chore: Add cloud-middleware as code owners (#28310) Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * SQLStore: Run tests as integration tests (#28265) * sqlstore: Run tests as integration tests * Truncate database instead of re-creating it on each test * Fix test description See https://github.com/grafana/grafana/pull/12129 * Fix lint issues * Fix postgres dialect after review suggestion * Rename and document functions after review suggestion * Add periods * Fix auto-increment value for mysql dialect Co-authored-by: Emil Tullstedt <emil.tullstedt@grafana.com> * Drone: Fix grafana-mixin linting (#28308) * Drone: Fix Starlark script Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * grafana-mixin: Move build logic to scripts Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Drone: Use mixin scripts Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * CI build image: Install jsonnetfmt and mixtool Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Makefile: Print commands Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * should only ignore the file in the grafana mixin root folder (#28306) Signed-off-by: bergquist <carl.bergquist@gmail.com> * fix: for graph size not taking up full height or width * Graph NG: fix toggling queries and extract Graph component from graph3 panel (#28290) * Fix issue when data and config is not in sync * Extract GraphNG component from graph panel and add some tests coverage * Update packages/grafana-ui/src/components/uPlot/hooks.test.ts * Update packages/grafana-ui/src/components/uPlot/hooks.test.ts * Update packages/grafana-ui/src/components/uPlot/hooks.test.ts * Fix grid color and annotations refresh * Drone: Use ${DRONE_TAG} in release pipelines, since it should work (#28299) Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Explore: respect 
min_refresh_interval (#27988) * Explore: respect min_refresh_interval Fixes #27494 * fixup! Explore: respect min_refresh_interval * fixup! Explore: respect min_refresh_interval * UI: export defaultIntervals from refresh picker * fixup! Explore: respect min_refresh_interval Co-authored-by: Zoltán Bedi <zoltan.bedi@gmail.com> * Loki: Base maxDataPoints limits on query type (#28298) * Base maxLines and maxDataPoints based on query type * Allow overriding the limit to higher value * Bump tree-kill from 1.2.1 to 1.2.2 (#27405) Bumps [tree-kill](https://github.com/pkrumins/node-tree-kill) from 1.2.1 to 1.2.2. - [Release notes](https://github.com/pkrumins/node-tree-kill/releases) - [Commits](https://github.com/pkrumins/node-tree-kill/compare/v1.2.1...v1.2.2) Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump handlebars from 4.4.3 to 4.7.6 (#27416) Bumps [handlebars](https://github.com/wycats/handlebars.js) from 4.4.3 to 4.7.6. - [Release notes](https://github.com/wycats/handlebars.js/releases) - [Changelog](https://github.com/handlebars-lang/handlebars.js/blob/master/release-notes.md) - [Commits](https://github.com/wycats/handlebars.js/compare/v4.4.3...v4.7.6) Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Build(deps): Bump http-proxy from 1.18.0 to 1.18.1 (#27507) Bumps [http-proxy](https://github.com/http-party/node-http-proxy) from 1.18.0 to 1.18.1. 
- [Release notes](https://github.com/http-party/node-http-proxy/releases) - [Changelog](https://github.com/http-party/node-http-proxy/blob/master/CHANGELOG.md) - [Commits](https://github.com/http-party/node-http-proxy/compare/1.18.0...1.18.1) Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Automation: Add backport github action (#28318) * BackendSrv: Fixes queue countdown when unsubscribe is before response (#28323) * GraphNG: Use AxisSide enum (#28320) * IssueTriage: Needs more info automation and messages (#28137) * IssueTriage: Needs more info automation and messages * Updated * Updated * Updated wording * SAML: IdP-initiated SSO docs (#28280) * SAML: IdP-initiated SSO docs * Update docs/sources/enterprise/saml.md Co-authored-by: Emil Tullstedt <emil.tullstedt@grafana.com> * Apply suggestions from code review Co-authored-by: Emil Tullstedt <emil.tullstedt@grafana.com> Co-authored-by: Emil Tullstedt <emil.tullstedt@grafana.com> * Loki: Run instant query only when doing metric query (#28325) * Run instant query only when doing metric query * Update public/app/plugins/datasource/loki/datasource.ts Co-authored-by: Ivana Huckova <30407135+ivanahuckova@users.noreply.github.com> Co-authored-by: Ivana Huckova <30407135+ivanahuckova@users.noreply.github.com> * Automation: Tweaks to more info message (#28332) * AlertingNG: remove warn/crit from eval prototype (#28334) and misc cleanup * area/grafana/toolkit: update e2e docker image (#28335) * add xvfb to image * comment out toolkit inclusion * add latest tag * update packages for cypress * cleanup script * Update auth-proxy.md (#28339) Fix a minor grammar mistake: 'handling' to 'handle'. * Git: Create .gitattributes for windows line endings (#28340) With this set, Windows users will have text files converted from Windows style line endings (\r\n) to Unix style line endings (\n) when they’re added to the repository. 
https://www.edwardthomson.com/blog/git_for_windows_line_endings.html * Docs: Add docs for valuepicker (#28327) * Templating: Replace all '$tag' in tag values query (#28343) * Docs: Add missing records from grafana-ui 7.2.1 CHANGELOG (#28302) * Dashboard links: Places drop down list so it's always visible (#28330) * calculating whether to place the list on the right or left edge of the parent * change naming and add import of createRef * Automation: Update backport github action trigger (#28352) It seems like GitHub has solved the problem of running github actions on PRs from forks with access to secrets. https://github.blog/2020-08-03-github-actions-improvements-for-fork-and-pull-request-workflows/#improvements-for-public-repository-forks If I change the event that triggers it to pull_request_target the action is run in the context of the base instead of the merged PR branch * ColorSchemes: Adds more color schemes and text colors that depend on the background (#28305) * Adding more color modes and text colors that depend on the background color * Updates * Updated * Another big value fix * Fixing unit tests * Updated * Updated test * Update * Updated * Updated * Updated * Updated * Added new demo dashboard * Updated * updated * Updated * Updateed * added beta notice * Fixed e2e test * Fix typos Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * revert pseduo code * apply feedback * remove HTTP for now * fix backend test * change to datasource * clear input for streams * fix docs? 
* consistent measure vs measurements * better jsdocs * fix a few jsdoc errors * fix comment style * Remove commented out code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Clean up code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Clean up code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Clean up code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Clean up code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Clean up code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Clean up code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Clean up code Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * Update pkg/models/live.go Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> * Fix build Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com> * set the stringField Co-authored-by: Torkel Ödegaard <torkel@grafana.org> Co-authored-by: Ivana Huckova <30407135+ivanahuckova@users.noreply.github.com> Co-authored-by: ozhuang <ozhuang.95@gmail.com> Co-authored-by: Arve Knudsen <arve.knudsen@gmail.com> Co-authored-by: Amos Law <ahlaw.dev@gmail.com> Co-authored-by: Alex Khomenko <Clarity-89@users.noreply.github.com> Co-authored-by: Will Browne <wbrowne@users.noreply.github.com> Co-authored-by: Hugo Häggmark <hugo.haggmark@grafana.com> Co-authored-by: The Rock Guy <fabian.bracco@gvcgroup.com.au> Co-authored-by: Sofia Papagiannaki <papagian@users.noreply.github.com> Co-authored-by: Emil Tullstedt <emil.tullstedt@grafana.com> Co-authored-by: Carl Bergquist <carl@grafana.com> Co-authored-by: Jack Westbrook <jack.westbrook@gmail.com> Co-authored-by: Dominik Prokop <dominik.prokop@grafana.com> Co-authored-by: Elliot Pryde <elliot.pryde@elliotpryde.com> Co-authored-by: Zoltán Bedi <zoltan.bedi@gmail.com> Co-authored-by: Andrej Ocenas <mr.ocenas@gmail.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alexander Zobnin <alexanderzobnin@gmail.com> Co-authored-by: Kyle Brandt 
<kyle@grafana.com> Co-authored-by: Brian Gann <briangann@users.noreply.github.com> Co-authored-by: J-F-Far <joel.f.farthing@gmail.com> Co-authored-by: acoder77 <73009264+acoder77@users.noreply.github.com> Co-authored-by: Peter Holmberg <peterholmberg@users.noreply.github.com> Co-authored-by: Krzysztof Dąbrowski <krzysdabro@live.com> Co-authored-by: maknik <mooniczkam@gmail.com>
782 lines
21 KiB
Go
782 lines
21 KiB
Go
package testdatasource
|
|
|
|
import (
|
|
"encoding/json"
|
|
"fmt"
|
|
"math"
|
|
"math/rand"
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/grafana/grafana/pkg/components/simplejson"
|
|
"github.com/grafana/grafana/pkg/util/errutil"
|
|
|
|
"github.com/grafana/grafana/pkg/components/null"
|
|
"github.com/grafana/grafana/pkg/infra/log"
|
|
"github.com/grafana/grafana/pkg/tsdb"
|
|
)
|
|
|
|
// ScenarioHandler executes a test-data scenario for a single query within
// the given request context and returns the generated result.
type ScenarioHandler func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult

// Scenario describes one selectable test-data scenario.
type Scenario struct {
	Id          string          `json:"id"`           // unique id, used as the registry key
	Name        string          `json:"name"`         // human-readable name shown in the query editor
	StringInput string          `json:"stringOption"` // default value for the free-form string option
	Description string          `json:"description"`  // optional longer description for the UI
	Handler     ScenarioHandler `json:"-"`            // produces the scenario's data; not serialized
}

// ScenarioRegistry maps scenario ids to their definitions. It is populated
// once by init and read by the datasource when executing queries.
var ScenarioRegistry map[string]*Scenario
|
|
|
|
func init() {
|
|
ScenarioRegistry = make(map[string]*Scenario)
|
|
logger := log.New("tsdb.testdata")
|
|
|
|
logger.Debug("Initializing TestData Scenario")
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "exponential_heatmap_bucket_data",
|
|
Name: "Exponential heatmap bucket data",
|
|
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
to := context.TimeRange.GetToAsMsEpoch()
|
|
|
|
var series []*tsdb.TimeSeries
|
|
start := 1
|
|
factor := 2
|
|
for i := 0; i < 10; i++ {
|
|
timeWalkerMs := context.TimeRange.GetFromAsMsEpoch()
|
|
ts := &tsdb.TimeSeries{Name: strconv.Itoa(start)}
|
|
start *= factor
|
|
|
|
points := make(tsdb.TimeSeriesPoints, 0)
|
|
for j := int64(0); j < 100 && timeWalkerMs < to; j++ {
|
|
v := float64(rand.Int63n(100))
|
|
points = append(points, tsdb.NewTimePoint(null.FloatFrom(v), float64(timeWalkerMs)))
|
|
timeWalkerMs += query.IntervalMs * 50
|
|
}
|
|
|
|
ts.Points = points
|
|
series = append(series, ts)
|
|
}
|
|
|
|
queryRes := tsdb.NewQueryResult()
|
|
queryRes.Series = append(queryRes.Series, series...)
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "linear_heatmap_bucket_data",
|
|
Name: "Linear heatmap bucket data",
|
|
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
to := context.TimeRange.GetToAsMsEpoch()
|
|
|
|
var series []*tsdb.TimeSeries
|
|
for i := 0; i < 10; i++ {
|
|
timeWalkerMs := context.TimeRange.GetFromAsMsEpoch()
|
|
ts := &tsdb.TimeSeries{Name: strconv.Itoa(i * 10)}
|
|
|
|
points := make(tsdb.TimeSeriesPoints, 0)
|
|
for j := int64(0); j < 100 && timeWalkerMs < to; j++ {
|
|
v := float64(rand.Int63n(100))
|
|
points = append(points, tsdb.NewTimePoint(null.FloatFrom(v), float64(timeWalkerMs)))
|
|
timeWalkerMs += query.IntervalMs * 50
|
|
}
|
|
|
|
ts.Points = points
|
|
series = append(series, ts)
|
|
}
|
|
|
|
queryRes := tsdb.NewQueryResult()
|
|
queryRes.Series = append(queryRes.Series, series...)
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "random_walk",
|
|
Name: "Random Walk",
|
|
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
queryRes := tsdb.NewQueryResult()
|
|
|
|
seriesCount := query.Model.Get("seriesCount").MustInt(1)
|
|
|
|
for i := 0; i < seriesCount; i++ {
|
|
queryRes.Series = append(queryRes.Series, getRandomWalk(query, context, i))
|
|
}
|
|
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "predictable_pulse",
|
|
Name: "Predictable Pulse",
|
|
Handler: getPredictablePulse,
|
|
Description: PredictablePulseDesc,
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "predictable_csv_wave",
|
|
Name: "Predictable CSV Wave",
|
|
Handler: getPredictableCSVWave,
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "random_walk_table",
|
|
Name: "Random Walk Table",
|
|
Handler: getRandomWalkTable,
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "slow_query",
|
|
Name: "Slow Query",
|
|
StringInput: "5s",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
stringInput := query.Model.Get("stringInput").MustString()
|
|
parsedInterval, _ := time.ParseDuration(stringInput)
|
|
time.Sleep(parsedInterval)
|
|
|
|
queryRes := tsdb.NewQueryResult()
|
|
queryRes.Series = append(queryRes.Series, getRandomWalk(query, context, 0))
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "no_data_points",
|
|
Name: "No Data Points",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
return tsdb.NewQueryResult()
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "datapoints_outside_range",
|
|
Name: "Datapoints Outside Range",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
queryRes := tsdb.NewQueryResult()
|
|
|
|
series := newSeriesForQuery(query, 0)
|
|
outsideTime := context.TimeRange.MustGetFrom().Add(-1*time.Hour).Unix() * 1000
|
|
|
|
series.Points = append(series.Points, tsdb.NewTimePoint(null.FloatFrom(10), float64(outsideTime)))
|
|
queryRes.Series = append(queryRes.Series, series)
|
|
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "manual_entry",
|
|
Name: "Manual Entry",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
queryRes := tsdb.NewQueryResult()
|
|
|
|
points := query.Model.Get("points").MustArray()
|
|
|
|
series := newSeriesForQuery(query, 0)
|
|
startTime := context.TimeRange.GetFromAsMsEpoch()
|
|
endTime := context.TimeRange.GetToAsMsEpoch()
|
|
|
|
for _, val := range points {
|
|
pointValues := val.([]interface{})
|
|
|
|
var value null.Float
|
|
var time int64
|
|
|
|
if valueFloat, err := strconv.ParseFloat(string(pointValues[0].(json.Number)), 64); err == nil {
|
|
value = null.FloatFrom(valueFloat)
|
|
}
|
|
|
|
timeInt, err := strconv.ParseInt(string(pointValues[1].(json.Number)), 10, 64)
|
|
if err != nil {
|
|
continue
|
|
}
|
|
time = timeInt
|
|
|
|
if time >= startTime && time <= endTime {
|
|
series.Points = append(series.Points, tsdb.NewTimePoint(value, float64(time)))
|
|
}
|
|
}
|
|
|
|
queryRes.Series = append(queryRes.Series, series)
|
|
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "csv_metric_values",
|
|
Name: "CSV Metric Values",
|
|
StringInput: "1,20,90,30,5,0",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
queryRes := tsdb.NewQueryResult()
|
|
|
|
stringInput := query.Model.Get("stringInput").MustString()
|
|
stringInput = strings.ReplaceAll(stringInput, " ", "")
|
|
|
|
values := []null.Float{}
|
|
for _, strVal := range strings.Split(stringInput, ",") {
|
|
if strVal == "null" {
|
|
values = append(values, null.FloatFromPtr(nil))
|
|
}
|
|
if val, err := strconv.ParseFloat(strVal, 64); err == nil {
|
|
values = append(values, null.FloatFrom(val))
|
|
}
|
|
}
|
|
|
|
if len(values) == 0 {
|
|
return queryRes
|
|
}
|
|
|
|
series := newSeriesForQuery(query, 0)
|
|
startTime := context.TimeRange.GetFromAsMsEpoch()
|
|
endTime := context.TimeRange.GetToAsMsEpoch()
|
|
step := (endTime - startTime) / int64(len(values)-1)
|
|
|
|
for _, val := range values {
|
|
series.Points = append(series.Points, tsdb.TimePoint{val, null.FloatFrom(float64(startTime))})
|
|
startTime += step
|
|
}
|
|
|
|
queryRes.Series = append(queryRes.Series, series)
|
|
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "streaming_client",
|
|
Name: "Streaming Client",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
// Real work is in javascript client
|
|
return tsdb.NewQueryResult()
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "live",
|
|
Name: "Grafana Live",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
// Real work is in javascript client
|
|
return tsdb.NewQueryResult()
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "grafana_api",
|
|
Name: "Grafana API",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
// Real work is in javascript client
|
|
return tsdb.NewQueryResult()
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "arrow",
|
|
Name: "Load Apache Arrow Data",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
// Real work is in javascript client
|
|
return tsdb.NewQueryResult()
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "annotations",
|
|
Name: "Annotations",
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
return tsdb.NewQueryResult()
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "table_static",
|
|
Name: "Table Static",
|
|
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
timeWalkerMs := context.TimeRange.GetFromAsMsEpoch()
|
|
to := context.TimeRange.GetToAsMsEpoch()
|
|
|
|
table := tsdb.Table{
|
|
Columns: []tsdb.TableColumn{
|
|
{Text: "Time"},
|
|
{Text: "Message"},
|
|
{Text: "Description"},
|
|
{Text: "Value"},
|
|
},
|
|
Rows: []tsdb.RowValues{},
|
|
}
|
|
for i := int64(0); i < 10 && timeWalkerMs < to; i++ {
|
|
table.Rows = append(table.Rows, tsdb.RowValues{float64(timeWalkerMs), "This is a message", "Description", 23.1})
|
|
timeWalkerMs += query.IntervalMs
|
|
}
|
|
|
|
queryRes := tsdb.NewQueryResult()
|
|
queryRes.Tables = append(queryRes.Tables, &table)
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "random_walk_with_error",
|
|
Name: "Random Walk (with error)",
|
|
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
queryRes := tsdb.NewQueryResult()
|
|
queryRes.Series = append(queryRes.Series, getRandomWalk(query, context, 0))
|
|
queryRes.ErrorString = "This is an error. It can include URLs http://grafana.com/"
|
|
return queryRes
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "server_error_500",
|
|
Name: "Server Error (500)",
|
|
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
panic("Test Data Panic!")
|
|
},
|
|
})
|
|
|
|
registerScenario(&Scenario{
|
|
Id: "logs",
|
|
Name: "Logs",
|
|
|
|
Handler: func(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
from := context.TimeRange.GetFromAsMsEpoch()
|
|
to := context.TimeRange.GetToAsMsEpoch()
|
|
lines := query.Model.Get("lines").MustInt64(10)
|
|
includeLevelColumn := query.Model.Get("levelColumn").MustBool(false)
|
|
|
|
logLevelGenerator := newRandomStringProvider([]string{
|
|
"emerg",
|
|
"alert",
|
|
"crit",
|
|
"critical",
|
|
"warn",
|
|
"warning",
|
|
"err",
|
|
"eror",
|
|
"error",
|
|
"info",
|
|
"notice",
|
|
"dbug",
|
|
"debug",
|
|
"trace",
|
|
"",
|
|
})
|
|
containerIDGenerator := newRandomStringProvider([]string{
|
|
"f36a9eaa6d34310686f2b851655212023a216de955cbcc764210cefa71179b1a",
|
|
"5a354a630364f3742c602f315132e16def594fe68b1e4a195b2fce628e24c97a",
|
|
})
|
|
hostnameGenerator := newRandomStringProvider([]string{
|
|
"srv-001",
|
|
"srv-002",
|
|
})
|
|
|
|
table := tsdb.Table{
|
|
Columns: []tsdb.TableColumn{
|
|
{Text: "time"},
|
|
{Text: "message"},
|
|
{Text: "container_id"},
|
|
{Text: "hostname"},
|
|
},
|
|
Rows: []tsdb.RowValues{},
|
|
}
|
|
|
|
if includeLevelColumn {
|
|
table.Columns = append(table.Columns, tsdb.TableColumn{Text: "level"})
|
|
}
|
|
|
|
for i := int64(0); i < lines && to > from; i++ {
|
|
row := tsdb.RowValues{float64(to)}
|
|
|
|
logLevel := logLevelGenerator.Next()
|
|
timeFormatted := time.Unix(to/1000, 0).Format(time.RFC3339)
|
|
lvlString := ""
|
|
if !includeLevelColumn {
|
|
lvlString = fmt.Sprintf("lvl=%s ", logLevel)
|
|
}
|
|
|
|
row = append(row, fmt.Sprintf("t=%s %smsg=\"Request Completed\" logger=context userId=1 orgId=1 uname=admin method=GET path=/api/datasources/proxy/152/api/prom/label status=502 remote_addr=[::1] time_ms=1 size=0 referer=\"http://localhost:3000/explore?left=%%5B%%22now-6h%%22,%%22now%%22,%%22Prometheus%%202.x%%22,%%7B%%7D,%%7B%%22ui%%22:%%5Btrue,true,true,%%22none%%22%%5D%%7D%%5D\"", timeFormatted, lvlString))
|
|
row = append(row, containerIDGenerator.Next())
|
|
row = append(row, hostnameGenerator.Next())
|
|
|
|
if includeLevelColumn {
|
|
row = append(row, logLevel)
|
|
}
|
|
|
|
table.Rows = append(table.Rows, row)
|
|
to -= query.IntervalMs
|
|
}
|
|
|
|
queryRes := tsdb.NewQueryResult()
|
|
queryRes.Tables = append(queryRes.Tables, &table)
|
|
return queryRes
|
|
},
|
|
})
|
|
}
|
|
|
|
// PredictablePulseDesc is the description for the Predictable Pulse scenario,
// shown in the query editor UI.
const PredictablePulseDesc = `Predictable Pulse returns a pulse wave where there is a datapoint every timeStepSeconds.
The wave cycles at timeStepSeconds*(onCount+offCount).
The cycle of the wave is based off of absolute time (from the epoch) which makes it predictable.
Timestamps will line up evenly on timeStepSeconds (For example, 60 seconds means times will all end in :00 seconds).`
|
|
|
|
func getPredictablePulse(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
queryRes := tsdb.NewQueryResult()
|
|
|
|
// Process Input
|
|
var timeStep int64
|
|
var onCount int64
|
|
var offCount int64
|
|
var onValue null.Float
|
|
var offValue null.Float
|
|
|
|
options := query.Model.Get("pulseWave")
|
|
|
|
var err error
|
|
if timeStep, err = options.Get("timeStep").Int64(); err != nil {
|
|
queryRes.Error = fmt.Errorf("failed to parse timeStep value '%v' into integer: %v", options.Get("timeStep"), err)
|
|
return queryRes
|
|
}
|
|
if onCount, err = options.Get("onCount").Int64(); err != nil {
|
|
queryRes.Error = fmt.Errorf("failed to parse onCount value '%v' into integer: %v", options.Get("onCount"), err)
|
|
return queryRes
|
|
}
|
|
if offCount, err = options.Get("offCount").Int64(); err != nil {
|
|
queryRes.Error = fmt.Errorf("failed to parse offCount value '%v' into integer: %v", options.Get("offCount"), err)
|
|
return queryRes
|
|
}
|
|
|
|
onValue, err = fromStringOrNumber(options.Get("onValue"))
|
|
if err != nil {
|
|
queryRes.Error = fmt.Errorf("failed to parse onValue value '%v' into float: %v", options.Get("onValue"), err)
|
|
return queryRes
|
|
}
|
|
offValue, err = fromStringOrNumber(options.Get("offValue"))
|
|
if err != nil {
|
|
queryRes.Error = fmt.Errorf("failed to parse offValue value '%v' into float: %v", options.Get("offValue"), err)
|
|
return queryRes
|
|
}
|
|
|
|
timeStep *= 1000 // Seconds to Milliseconds
|
|
onFor := func(mod int64) (null.Float, error) { // How many items in the cycle should get the on value
|
|
var i int64
|
|
for i = 0; i < onCount; i++ {
|
|
if mod == i*timeStep {
|
|
return onValue, nil
|
|
}
|
|
}
|
|
return offValue, nil
|
|
}
|
|
points, err := predictableSeries(context.TimeRange, timeStep, onCount+offCount, onFor)
|
|
if err != nil {
|
|
queryRes.Error = err
|
|
return queryRes
|
|
}
|
|
|
|
series := newSeriesForQuery(query, 0)
|
|
series.Points = *points
|
|
series.Tags = parseLabels(query)
|
|
|
|
queryRes.Series = append(queryRes.Series, series)
|
|
return queryRes
|
|
}
|
|
|
|
func getPredictableCSVWave(query *tsdb.Query, context *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
queryRes := tsdb.NewQueryResult()
|
|
|
|
// Process Input
|
|
var timeStep int64
|
|
|
|
options := query.Model.Get("csvWave")
|
|
|
|
var err error
|
|
if timeStep, err = options.Get("timeStep").Int64(); err != nil {
|
|
queryRes.Error = fmt.Errorf("failed to parse timeStep value '%v' into integer: %v", options.Get("timeStep"), err)
|
|
return queryRes
|
|
}
|
|
rawValues := options.Get("valuesCSV").MustString()
|
|
rawValues = strings.TrimRight(strings.TrimSpace(rawValues), ",") // Strip Trailing Comma
|
|
rawValesCSV := strings.Split(rawValues, ",")
|
|
values := make([]null.Float, len(rawValesCSV))
|
|
for i, rawValue := range rawValesCSV {
|
|
val, err := null.FloatFromString(strings.TrimSpace(rawValue), "null")
|
|
if err != nil {
|
|
queryRes.Error = errutil.Wrapf(err, "failed to parse value '%v' into nullable float", rawValue)
|
|
return queryRes
|
|
}
|
|
values[i] = val
|
|
}
|
|
|
|
timeStep *= 1000 // Seconds to Milliseconds
|
|
valuesLen := int64(len(values))
|
|
getValue := func(mod int64) (null.Float, error) {
|
|
var i int64
|
|
for i = 0; i < valuesLen; i++ {
|
|
if mod == i*timeStep {
|
|
return values[i], nil
|
|
}
|
|
}
|
|
return null.Float{}, fmt.Errorf("did not get value at point in waveform - should not be here")
|
|
}
|
|
points, err := predictableSeries(context.TimeRange, timeStep, valuesLen, getValue)
|
|
if err != nil {
|
|
queryRes.Error = err
|
|
return queryRes
|
|
}
|
|
|
|
series := newSeriesForQuery(query, 0)
|
|
series.Points = *points
|
|
series.Tags = parseLabels(query)
|
|
|
|
queryRes.Series = append(queryRes.Series, series)
|
|
return queryRes
|
|
}
|
|
|
|
func predictableSeries(timeRange *tsdb.TimeRange, timeStep, length int64, getValue func(mod int64) (null.Float, error)) (*tsdb.TimeSeriesPoints, error) {
|
|
points := make(tsdb.TimeSeriesPoints, 0)
|
|
|
|
from := timeRange.GetFromAsMsEpoch()
|
|
to := timeRange.GetToAsMsEpoch()
|
|
|
|
timeCursor := from - (from % timeStep) // Truncate Start
|
|
wavePeriod := timeStep * length
|
|
maxPoints := 10000 // Don't return too many points
|
|
|
|
for i := 0; i < maxPoints && timeCursor < to; i++ {
|
|
val, err := getValue(timeCursor % wavePeriod)
|
|
if err != nil {
|
|
return &points, err
|
|
}
|
|
point := tsdb.NewTimePoint(val, float64(timeCursor))
|
|
points = append(points, point)
|
|
timeCursor += timeStep
|
|
}
|
|
return &points, nil
|
|
}
|
|
|
|
func getRandomWalk(query *tsdb.Query, tsdbQuery *tsdb.TsdbQuery, index int) *tsdb.TimeSeries {
|
|
timeWalkerMs := tsdbQuery.TimeRange.GetFromAsMsEpoch()
|
|
to := tsdbQuery.TimeRange.GetToAsMsEpoch()
|
|
series := newSeriesForQuery(query, index)
|
|
|
|
startValue := query.Model.Get("startValue").MustFloat64(rand.Float64() * 100)
|
|
spread := query.Model.Get("spread").MustFloat64(1)
|
|
noise := query.Model.Get("noise").MustFloat64(0)
|
|
|
|
min, err := query.Model.Get("min").Float64()
|
|
hasMin := err == nil
|
|
max, err := query.Model.Get("max").Float64()
|
|
hasMax := err == nil
|
|
|
|
points := make(tsdb.TimeSeriesPoints, 0)
|
|
walker := startValue
|
|
|
|
for i := int64(0); i < 10000 && timeWalkerMs < to; i++ {
|
|
nextValue := walker + (rand.Float64() * noise)
|
|
|
|
if hasMin && nextValue < min {
|
|
nextValue = min
|
|
walker = min
|
|
}
|
|
|
|
if hasMax && nextValue > max {
|
|
nextValue = max
|
|
walker = max
|
|
}
|
|
|
|
points = append(points, tsdb.NewTimePoint(null.FloatFrom(nextValue), float64(timeWalkerMs)))
|
|
|
|
walker += (rand.Float64() - 0.5) * spread
|
|
timeWalkerMs += query.IntervalMs
|
|
}
|
|
|
|
series.Points = points
|
|
series.Tags = parseLabels(query)
|
|
return series
|
|
}
|
|
|
|
/**
|
|
* Looks for a labels request and adds them as tags
|
|
*
|
|
* '{job="foo", instance="bar"} => {job: "foo", instance: "bar"}`
|
|
*/
|
|
func parseLabels(query *tsdb.Query) map[string]string {
|
|
tags := map[string]string{}
|
|
|
|
labelText := query.Model.Get("labels").MustString("")
|
|
if labelText == "" {
|
|
return map[string]string{}
|
|
}
|
|
|
|
text := strings.Trim(labelText, `{}`)
|
|
if len(text) < 2 {
|
|
return tags
|
|
}
|
|
|
|
tags = make(map[string]string)
|
|
|
|
for _, keyval := range strings.Split(text, ",") {
|
|
idx := strings.Index(keyval, "=")
|
|
key := strings.TrimSpace(keyval[:idx])
|
|
val := strings.TrimSpace(keyval[idx+1:])
|
|
val = strings.Trim(val, "\"")
|
|
tags[key] = val
|
|
}
|
|
|
|
return tags
|
|
}
|
|
|
|
func getRandomWalkTable(query *tsdb.Query, tsdbQuery *tsdb.TsdbQuery) *tsdb.QueryResult {
|
|
timeWalkerMs := tsdbQuery.TimeRange.GetFromAsMsEpoch()
|
|
to := tsdbQuery.TimeRange.GetToAsMsEpoch()
|
|
|
|
table := tsdb.Table{
|
|
Columns: []tsdb.TableColumn{
|
|
{Text: "Time"},
|
|
{Text: "Value"},
|
|
{Text: "Min"},
|
|
{Text: "Max"},
|
|
{Text: "Info"},
|
|
},
|
|
Rows: []tsdb.RowValues{},
|
|
}
|
|
|
|
withNil := query.Model.Get("withNil").MustBool(false)
|
|
walker := query.Model.Get("startValue").MustFloat64(rand.Float64() * 100)
|
|
spread := 2.5
|
|
var info strings.Builder
|
|
|
|
for i := int64(0); i < query.MaxDataPoints && timeWalkerMs < to; i++ {
|
|
delta := rand.Float64() - 0.5
|
|
walker += delta
|
|
|
|
info.Reset()
|
|
if delta > 0 {
|
|
info.WriteString("up")
|
|
} else {
|
|
info.WriteString("down")
|
|
}
|
|
if math.Abs(delta) > .4 {
|
|
info.WriteString(" fast")
|
|
}
|
|
row := tsdb.RowValues{
|
|
float64(timeWalkerMs),
|
|
walker,
|
|
walker - ((rand.Float64() * spread) + 0.01), // Min
|
|
walker + ((rand.Float64() * spread) + 0.01), // Max
|
|
info.String(),
|
|
}
|
|
|
|
// Add some random null values
|
|
if withNil && rand.Float64() > 0.8 {
|
|
for i := 1; i < 4; i++ {
|
|
if rand.Float64() > .2 {
|
|
row[i] = nil
|
|
}
|
|
}
|
|
}
|
|
|
|
table.Rows = append(table.Rows, row)
|
|
timeWalkerMs += query.IntervalMs
|
|
}
|
|
queryRes := tsdb.NewQueryResult()
|
|
queryRes.Tables = append(queryRes.Tables, &table)
|
|
return queryRes
|
|
}
|
|
|
|
// registerScenario adds the given scenario to ScenarioRegistry, keyed by its
// id. A later registration with the same id silently replaces the earlier one.
func registerScenario(scenario *Scenario) {
	ScenarioRegistry[scenario.Id] = scenario
}
|
|
|
|
func newSeriesForQuery(query *tsdb.Query, index int) *tsdb.TimeSeries {
|
|
alias := query.Model.Get("alias").MustString("")
|
|
suffix := ""
|
|
|
|
if index > 0 {
|
|
suffix = strconv.Itoa(index)
|
|
}
|
|
|
|
if alias == "" {
|
|
alias = fmt.Sprintf("%s-series%s", query.RefId, suffix)
|
|
}
|
|
|
|
if alias == "__server_names" && len(serverNames) > index {
|
|
alias = serverNames[index]
|
|
}
|
|
|
|
if alias == "__house_locations" && len(houseLocations) > index {
|
|
alias = houseLocations[index]
|
|
}
|
|
|
|
return &tsdb.TimeSeries{Name: alias}
|
|
}
|
|
|
|
func fromStringOrNumber(val *simplejson.Json) (null.Float, error) {
|
|
switch v := val.Interface().(type) {
|
|
case json.Number:
|
|
fV, err := v.Float64()
|
|
if err != nil {
|
|
return null.Float{}, err
|
|
}
|
|
return null.FloatFrom(fV), nil
|
|
case string:
|
|
return null.FloatFromString(v, "null")
|
|
default:
|
|
return null.Float{}, fmt.Errorf("failed to extract value")
|
|
}
|
|
}
|
|
|
|
// serverNames are demo server names used when a query sets
// alias=__server_names; series index i is named serverNames[i].
// NOTE(review): "SAN-02" appears twice (SAN-03 seems to be missing) and
// "Kaftka" looks like a typo for "Kafka" — left unchanged since demo
// dashboards may reference these exact strings.
var serverNames = []string{
	"Backend-ops-01",
	"Backend-ops-02",
	"Backend-ops-03",
	"Backend-ops-04",
	"Frontend-web-01",
	"Frontend-web-02",
	"Frontend-web-03",
	"Frontend-web-04",
	"MySQL-01",
	"MySQL-02",
	"MySQL-03",
	"MySQL-04",
	"Postgres-01",
	"Postgres-02",
	"Postgres-03",
	"Postgres-04",
	"DB-01",
	"DB-02",
	"SAN-01",
	"SAN-02",
	"SAN-02",
	"SAN-04",
	"Kaftka-01",
	"Kaftka-02",
	"Kaftka-03",
	"Zookeeper-01",
	"Zookeeper-02",
	"Zookeeper-03",
	"Zookeeper-04",
}
|
|
|
|
// houseLocations are demo room/location names used when a query sets
// alias=__house_locations; series index i is named houseLocations[i].
var houseLocations = []string{
	"Cellar",
	"Living room",
	"Porch",
	"Bedroom",
	"Guest room",
	"Kitchen",
	"Playroom",
	"Bathroom",
	"Outside",
	"Roof",
	"Terrace",
}
|