Merge remote-tracking branch 'origin/master' into stackdriver-plugin

commit 66c95991c1
Daniel Lee, 2018-09-26 17:59:56 +02:00
92 changed files with 547 additions and 193 deletions

.gitignore vendored

@ -72,3 +72,4 @@ debug.test
*.orig
/devenv/bulk-dashboards/*.json
/devenv/bulk_alerting_dashboards/*.json


@ -318,7 +318,7 @@ See [security announcement](https://community.grafana.com/t/grafana-5-2-3-and-4-
* **Dashboard**: Sizing and positioning of settings menu icons [#11572](https://github.com/grafana/grafana/pull/11572)
* **Dashboard**: Add search filter/tabs to new panel control [#10427](https://github.com/grafana/grafana/issues/10427)
* **Folders**: User with org viewer role should not be able to save/move dashboards in/to general folder [#11553](https://github.com/grafana/grafana/issues/11553)
* **Influxdb**: Dont assume the first column in table response is time. [#11476](https://github.com/grafana/grafana/issues/11476), thx [@hahnjo](https://github.com/hahnjo)
* **Influxdb**: Don't assume the first column in table response is time. [#11476](https://github.com/grafana/grafana/issues/11476), thx [@hahnjo](https://github.com/hahnjo)
### Tech
* Backend code simplification [#11613](https://github.com/grafana/grafana/pull/11613), thx [@knweiss](https://github.com/knweiss)
@ -505,7 +505,7 @@ See [security announcement](https://community.grafana.com/t/grafana-5-2-3-and-4-
# 4.6.2 (2017-11-16)
## Important
* **Prometheus**: Fixes bug with new prometheus alerts in Grafana. Make sure to download this version if your using Prometheus for alerting. More details in the issue. [#9777](https://github.com/grafana/grafana/issues/9777)
* **Prometheus**: Fixes bug with new prometheus alerts in Grafana. Make sure to download this version if you're using Prometheus for alerting. More details in the issue. [#9777](https://github.com/grafana/grafana/issues/9777)
## Fixes
* **Color picker**: Bug after using textbox input field to change/paste color string [#9769](https://github.com/grafana/grafana/issues/9769)
@ -1464,7 +1464,7 @@ Grafana 2.x is fundamentally different from 1.x; it now ships with an integrated
**New features**
- [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dash and data snapshot), save to local or save to public snapshot dashboard snapshots.raintank.io site
- [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embedd a single graph on another web site
- [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embed a single graph on another web site
- [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes in between the user is prompted with a warning if he really wants to overwrite the other's changes
- [Issue #1331](https://github.com/grafana/grafana/issues/1331). Graph & Singlestat: New axis/unit format selector and more units (kbytes, Joule, Watt, eV), and new design for graph axis & grid tab and single stat options tab views
- [Issue #1241](https://github.com/grafana/grafana/issues/1242). Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, useful when you want to ignore last minute because it contains incomplete data


@ -120,7 +120,6 @@ func main() {
createLinuxPackages()
}
case "pkg-rpm":
grunt(gruntBuildArg("release")...)
createRpmPackages()
@ -417,7 +416,7 @@ func test(pkg string) {
func build(binaryName, pkg string, tags []string) {
binary := fmt.Sprintf("./bin/%s-%s/%s", goos, goarch, binaryName)
if isDev {
//dont include os and arch in output path in dev environment
//don't include os and arch in output path in dev environment
binary = fmt.Sprintf("./bin/%s", binaryName)
}


@ -474,6 +474,10 @@ error_or_timeout = alerting
# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
nodata_or_nullvalues = no_data
# Alert notifications can include images, but rendering many images at the same time can overload the server
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
concurrent_render_limit = 5
#################################### Explore #############################
[explore]
# Enable the Explore section


@ -393,6 +393,10 @@ log_queries =
# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
;nodata_or_nullvalues = no_data
# Alert notifications can include images, but rendering many images at the same time can overload the server
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
;concurrent_render_limit = 5
#################################### Explore #############################
[explore]
# Enable the Explore section


@ -0,0 +1,9 @@
apiVersion: 1
providers:
- name: 'Bulk alerting dashboards'
folder: 'Bulk alerting dashboards'
type: file
options:
path: devenv/bulk_alerting_dashboards


@ -0,0 +1,168 @@
{
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"links": [],
"panels": [
{
"alert": {
"conditions": [
{
"evaluator": {
"params": [
65
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"A",
"5m",
"now"
]
},
"reducer": {
"params": [],
"type": "avg"
},
"type": "query"
}
],
"executionErrorState": "alerting",
"frequency": "10s",
"handler": 1,
"name": "bulk alerting",
"noDataState": "no_data",
"notifications": []
},
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "gdev-prometheus",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 5,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"$$hashKey": "object:117",
"expr": "go_goroutines",
"format": "time_series",
"intervalFactor": 1,
"refId": "A"
}
],
"thresholds": [
{
"colorMode": "critical",
"fill": true,
"line": true,
"op": "gt",
"value": 50
}
],
"timeFrom": null,
"timeShift": null,
"title": "Panel Title",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
]
}
],
"schemaVersion": 16,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "New dashboard",
"uid": null,
"version": 0
}


@ -8,7 +8,7 @@
# 'avg'. The name of the aggregate metric will be derived from
# 'output_template' filling in any captured fields from 'input_pattern'.
#
# For example, if you're metric naming scheme is:
# For example, if your metric naming scheme is:
#
# <env>.applications.<app>.<server>.<metric>
#


@ -11,7 +11,21 @@ bulkDashboard() {
let COUNTER=COUNTER+1
done
ln -s -f -r ./bulk-dashboards/bulk-dashboards.yaml ../conf/provisioning/dashboards/custom.yaml
ln -s -f ../../../devenv/bulk-dashboards/bulk-dashboards.yaml ../conf/provisioning/dashboards/custom.yaml
}
bulkAlertingDashboard() {
requiresJsonnet
COUNTER=0
MAX=100
while [ $COUNTER -lt $MAX ]; do
jsonnet -o "bulk_alerting_dashboards/alerting_dashboard${COUNTER}.json" -e "local bulkDash = import 'bulk_alerting_dashboards/bulkdash_alerting.jsonnet'; bulkDash + { uid: 'bd-${COUNTER}', title: 'alerting-title-${COUNTER}' }"
let COUNTER=COUNTER+1
done
ln -s -f ../../../devenv/bulk_alerting_dashboards/bulk_alerting_dashboards.yaml ../conf/provisioning/dashboards/custom.yaml
}
requiresJsonnet() {
@ -36,8 +50,9 @@ devDatasources() {
usage() {
echo -e "\n"
echo "Usage:"
echo " bulk-dashboards - create and provisioning 400 dashboards"
echo " no args - provisiong core datasources and dev dashboards"
echo " bulk-dashboards - create and provisioning 400 dashboards"
echo " bulk-alerting-dashboards - create and provisioning 400 dashboards with alerts"
echo " no args - provisiong core datasources and dev dashboards"
}
main() {
@ -48,7 +63,9 @@ main() {
local cmd=$1
if [[ $cmd == "bulk-dashboards" ]]; then
if [[ $cmd == "bulk-alerting-dashboards" ]]; then
bulkAlertingDashboard
elif [[ $cmd == "bulk-dashboards" ]]; then
bulkDashboard
else
devDashboards


@ -65,7 +65,7 @@ make docs-build
This will rebuild the docs docker container.
To be able to use the image your have to quit (CTRL-C) the `make watch` command (that you run in the same directory as this README). Then simply rerun `make watch`, it will restart the docs server but now with access to your image.
To be able to use the image you have to quit (CTRL-C) the `make watch` command (that you run in the same directory as this README). Then simply rerun `make watch`, it will restart the docs server but now with access to your image.
### Editing content


@ -200,7 +200,7 @@ providers:
folder: ''
type: file
disableDeletion: false
updateIntervalSeconds: 3 #how often Grafana will scan for changed dashboards
updateIntervalSeconds: 10 #how often Grafana will scan for changed dashboards
options:
path: /var/lib/grafana/dashboards
```


@ -174,6 +174,8 @@ allowed_organizations =
allowed_organizations =
```
> Note: It's important to ensure that the [root_url](/installation/configuration/#root-url) in Grafana is set in your Azure Application Reply URLs (App -> Settings -> Reply URLs)
## Set up OAuth2 with Centrify
1. Create a new Custom OpenID Connect application configuration in the Centrify dashboard.


@ -67,7 +67,7 @@ Making it possible to have users in multiple groups and have detailed access con
## Upgrade & Breaking changes
If your using https in grafana we now force you to use tls 1.2 and the most secure ciphers.
If you're using https in grafana we now force you to use tls 1.2 and the most secure ciphers.
We think it's better to be secure by default rather than making it configurable.
If you want to run https with lower versions of tls we suggest you put a reverse proxy in front of grafana.


@ -566,3 +566,11 @@ Default setting for new alert rules. Defaults to categorize error and timeouts a
> Available in 5.3 and above
Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
# concurrent_render_limit
> Available in 5.3 and above
Alert notifications can include images, but rendering many images at the same time can overload the server.
This limit will protect the server from render overloading and make sure notifications are sent out quickly. Default
value is `5`.
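
For context, this commit enforces the limit inside the rendering service: once more renders are in flight than the limit allows, the caller gets back a static placeholder image (`public/img/rendering_limit.png`) instead of a fresh render. A minimal standalone sketch of that behaviour (names are illustrative, not Grafana's actual API):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// renderer stands in for the rendering service: inFlight counts renders
// currently in progress, limit mirrors concurrent_render_limit.
type renderer struct {
	inFlight int64
	limit    int64
}

func (r *renderer) render(path string) string {
	if atomic.LoadInt64(&r.inFlight) >= r.limit {
		// Over the limit: return the static placeholder instead of rendering.
		return "public/img/rendering_limit.png"
	}
	atomic.AddInt64(&r.inFlight, 1)
	defer atomic.AddInt64(&r.inFlight, -1)
	return "rendered:" + path
}

func main() {
	r := &renderer{limit: 5}
	fmt.Println(r.render("/d/abc/my-dashboard"))
}
```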


@ -22,7 +22,7 @@ Setting up Grafana for high availability is fairly simple. It comes down to two
First, you need to set up MySQL or Postgres on another server and configure Grafana to use that database.
You can find the configuration for doing that in the [[database]]({{< relref "configuration.md" >}}#database) section in the grafana config.
Grafana will now persist all long term data in the database. How to configure the database for high availability is out of scope for this guide. We recommend finding an expert on for the database your using.
Grafana will now persist all long term data in the database. How to configure the database for high availability is out of scope for this guide. We recommend finding an expert on the database you're using.
## User sessions


@ -29,7 +29,7 @@ func TestFormatShort(t *testing.T) {
}
if parsed != tc.interval {
t.Errorf("expectes the parsed duration to equal the interval. Got %v expected: %v", parsed, tc.interval)
t.Errorf("expects the parsed duration to equal the interval. Got %v expected: %v", parsed, tc.interval)
}
}
}


@ -41,15 +41,16 @@ func (hs *HTTPServer) RenderToPng(c *m.ReqContext) {
}
result, err := hs.RenderService.Render(c.Req.Context(), rendering.Opts{
Width: width,
Height: height,
Timeout: time.Duration(timeout) * time.Second,
OrgId: c.OrgId,
UserId: c.UserId,
OrgRole: c.OrgRole,
Path: c.Params("*") + queryParams,
Timezone: queryReader.Get("tz", ""),
Encoding: queryReader.Get("encoding", ""),
Width: width,
Height: height,
Timeout: time.Duration(timeout) * time.Second,
OrgId: c.OrgId,
UserId: c.UserId,
OrgRole: c.OrgRole,
Path: c.Params("*") + queryParams,
Timezone: queryReader.Get("tz", ""),
Encoding: queryReader.Get("encoding", ""),
ConcurrentLimit: 30,
})
if err != nil && err == rendering.ErrTimeout {


@ -112,7 +112,7 @@ func SelectVersion(plugin m.Plugin, version string) (m.Version, error) {
}
}
return m.Version{}, errors.New("Could not find the version your looking for")
return m.Version{}, errors.New("Could not find the version you're looking for")
}
func RemoveGitBuildFromName(pluginName, filename string) string {


@ -52,7 +52,7 @@ func (az *AzureBlobUploader) Upload(ctx context.Context, imageDiskPath string) (
}
randomFileName := util.GetRandomString(30) + ".png"
// upload image
az.log.Debug("Uploading image to azure_blob", "conatiner_name", az.container_name, "blob_name", randomFileName)
az.log.Debug("Uploading image to azure_blob", "container_name", az.container_name, "blob_name", randomFileName)
resp, err := blob.FileUpload(az.container_name, randomFileName, file)
if err != nil {
return "", err
@ -274,10 +274,10 @@ func (a *Auth) canonicalizedHeaders(req *http.Request) string {
}
}
splitted := strings.Split(buffer.String(), "\n")
sort.Strings(splitted)
split := strings.Split(buffer.String(), "\n")
sort.Strings(split)
return strings.Join(splitted, "\n")
return strings.Join(split, "\n")
}
/*
@ -313,8 +313,8 @@ func (a *Auth) canonicalizedResource(req *http.Request) string {
buffer.WriteString(fmt.Sprintf("\n%s:%s", key, strings.Join(values, ",")))
}
splitted := strings.Split(buffer.String(), "\n")
sort.Strings(splitted)
split := strings.Split(buffer.String(), "\n")
sort.Strings(split)
return strings.Join(splitted, "\n")
return strings.Join(split, "\n")
}


@ -256,7 +256,7 @@ func (j *Json) StringArray() ([]string, error) {
// MustArray guarantees the return of a `[]interface{}` (with optional default)
//
// useful when you want to interate over array values in a succinct manner:
// useful when you want to iterate over array values in a succinct manner:
// for i, v := range js.Get("results").MustArray() {
// fmt.Println(i, v)
// }
@ -281,7 +281,7 @@ func (j *Json) MustArray(args ...[]interface{}) []interface{} {
// MustMap guarantees the return of a `map[string]interface{}` (with optional default)
//
// useful when you want to interate over map values in a succinct manner:
// useful when you want to iterate over map values in a succinct manner:
// for k, v := range js.Get("dictionary").MustMap() {
// fmt.Println(k, v)
// }
@ -329,7 +329,7 @@ func (j *Json) MustString(args ...string) string {
// MustStringArray guarantees the return of a `[]string` (with optional default)
//
// useful when you want to interate over array values in a succinct manner:
// useful when you want to iterate over array values in a succinct manner:
// for i, s := range js.Get("results").MustStringArray() {
// fmt.Println(i, s)
// }


@ -48,7 +48,7 @@ type LdapAttributeMap struct {
type LdapGroupToOrgRole struct {
GroupDN string `toml:"group_dn"`
OrgId int64 `toml:"org_id"`
IsGrafanaAdmin *bool `toml:"grafana_admin"` // This is a pointer to know if it was set or not (for backwards compatability)
IsGrafanaAdmin *bool `toml:"grafana_admin"` // This is a pointer to know if it was set or not (for backwards compatibility)
OrgRole m.RoleType `toml:"org_role"`
}
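
The pointer-valued field above is a common Go pattern; a minimal standalone sketch of why it matters: a *bool lets the config loader tell an absent key apart from an explicit false (JSON is used instead of TOML only to keep the example dependency-free):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// groupMapping mimics LdapGroupToOrgRole's optional grafana_admin key.
type groupMapping struct {
	GrafanaAdmin *bool `json:"grafana_admin"`
}

func main() {
	var explicit, absent groupMapping
	json.Unmarshal([]byte(`{"grafana_admin": false}`), &explicit)
	json.Unmarshal([]byte(`{}`), &absent)

	fmt.Println(explicit.GrafanaAdmin == nil) // false: key present, value false
	fmt.Println(absent.GrafanaAdmin == nil)   // true: key missing, keep old behaviour
}
```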


@ -98,7 +98,7 @@ type GetLatestNotificationQuery struct {
AlertId int64
NotifierId int64
Result *AlertNotificationJournal
Result []AlertNotificationJournal
}
type CleanNotificationJournalCommand struct {


@ -82,12 +82,13 @@ func (e *DashAlertExtractor) getAlertFromPanels(jsonWithPanels *simplejson.Json,
if collapsed && collapsedJSON.MustBool() {
// extract alerts from sub panels for collapsed panels
als, err := e.getAlertFromPanels(panel, validateAlertFunc)
alertSlice, err := e.getAlertFromPanels(panel,
validateAlertFunc)
if err != nil {
return nil, err
}
alerts = append(alerts, als...)
alerts = append(alerts, alertSlice...)
continue
}


@ -11,6 +11,7 @@ import (
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/services/rendering"
"github.com/grafana/grafana/pkg/setting"
m "github.com/grafana/grafana/pkg/models"
)
@ -67,7 +68,7 @@ func (n *notificationService) sendNotifications(evalContext *EvalContext, notifi
// Verify that we can send the notification again
// but this time within the same transaction.
if !evalContext.IsTestRun && !not.ShouldNotify(context.Background(), evalContext) {
if !evalContext.IsTestRun && !not.ShouldNotify(ctx, evalContext) {
return nil
}
@ -108,11 +109,12 @@ func (n *notificationService) uploadImage(context *EvalContext) (err error) {
}
renderOpts := rendering.Opts{
Width: 1000,
Height: 500,
Timeout: alertTimeout / 2,
OrgId: context.Rule.OrgId,
OrgRole: m.ROLE_ADMIN,
Width: 1000,
Height: 500,
Timeout: alertTimeout / 2,
OrgId: context.Rule.OrgId,
OrgRole: m.ROLE_ADMIN,
ConcurrentLimit: setting.AlertingRenderLimit,
}
ref, err := context.GetDashboardUID()


@ -42,12 +42,21 @@ func NewNotifierBase(model *models.AlertNotification) NotifierBase {
}
}
func defaultShouldNotify(context *alerting.EvalContext, sendReminder bool, frequency time.Duration, lastNotify time.Time) bool {
func defaultShouldNotify(context *alerting.EvalContext, sendReminder bool, frequency time.Duration, journals []models.AlertNotificationJournal) bool {
// Only notify on state change.
if context.PrevAlertState == context.Rule.State && !sendReminder {
return false
}
// get last successfully sent notification
lastNotify := time.Time{}
for _, j := range journals {
if j.Success {
lastNotify = time.Unix(j.SentAt, 0)
break
}
}
// Do not notify if interval has not elapsed
if sendReminder && !lastNotify.IsZero() && lastNotify.Add(frequency).After(time.Now()) {
return false
@ -75,20 +84,12 @@ func (n *NotifierBase) ShouldNotify(ctx context.Context, c *alerting.EvalContext
}
err := bus.DispatchCtx(ctx, cmd)
if err == models.ErrJournalingNotFound {
return true
}
if err != nil {
n.log.Error("Could not determine last time alert notifier fired", "Alert name", c.Rule.Name, "Error", err)
return false
}
if !cmd.Result.Success {
return true
}
return defaultShouldNotify(c, n.SendReminder, n.Frequency, time.Unix(cmd.Result.SentAt, 0))
return defaultShouldNotify(c, n.SendReminder, n.Frequency, cmd.Result)
}
func (n *NotifierBase) GetType() string {
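
A standalone sketch of the journal scan introduced above, assuming, as the store query guarantees, that entries arrive sorted by sent_at descending, so the first successful entry is the most recent notification actually sent:

```go
package main

import (
	"fmt"
	"time"
)

// journalEntry mirrors the fields of AlertNotificationJournal used here.
type journalEntry struct {
	SentAt  int64
	Success bool
}

// lastSuccessfulNotify returns the time of the newest successful entry,
// or the zero time if no notification has ever gone out.
func lastSuccessfulNotify(journal []journalEntry) time.Time {
	for _, j := range journal {
		if j.Success {
			return time.Unix(j.SentAt, 0)
		}
	}
	return time.Time{}
}

func main() {
	now := time.Now()
	journal := []journalEntry{
		{SentAt: now.Add(-time.Minute).Unix(), Success: false}, // recent failure is skipped
		{SentAt: now.Add(-time.Hour).Unix(), Success: true},    // last real send, an hour ago
	}
	frequency := 10 * time.Minute
	// Reminder is due: the last successful send is older than the frequency.
	fmt.Println(lastSuccessfulNotify(journal).Add(frequency).Before(now)) // true
}
```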


@ -15,51 +15,105 @@ import (
)
func TestShouldSendAlertNotification(t *testing.T) {
tnow := time.Now()
tcs := []struct {
name string
prevState m.AlertStateType
newState m.AlertStateType
expected bool
sendReminder bool
frequency time.Duration
journals []m.AlertNotificationJournal
expect bool
}{
{
name: "pending -> ok should not trigger an notification",
newState: m.AlertStatePending,
prevState: m.AlertStateOK,
expected: false,
name: "pending -> ok should not trigger an notification",
newState: m.AlertStatePending,
prevState: m.AlertStateOK,
sendReminder: false,
journals: []m.AlertNotificationJournal{},
expect: false,
},
{
name: "ok -> alerting should trigger an notification",
newState: m.AlertStateOK,
prevState: m.AlertStateAlerting,
expected: true,
name: "ok -> alerting should trigger an notification",
newState: m.AlertStateOK,
prevState: m.AlertStateAlerting,
sendReminder: false,
journals: []m.AlertNotificationJournal{},
expect: true,
},
{
name: "ok -> pending should not trigger an notification",
newState: m.AlertStateOK,
prevState: m.AlertStatePending,
expected: false,
name: "ok -> pending should not trigger an notification",
newState: m.AlertStateOK,
prevState: m.AlertStatePending,
sendReminder: false,
journals: []m.AlertNotificationJournal{},
expect: false,
},
{
name: "ok -> ok should not trigger an notification",
newState: m.AlertStateOK,
prevState: m.AlertStateOK,
expected: false,
sendReminder: false,
journals: []m.AlertNotificationJournal{},
expect: false,
},
{
name: "ok -> alerting should not trigger an notification",
name: "ok -> alerting should trigger an notification",
newState: m.AlertStateOK,
prevState: m.AlertStateAlerting,
expected: true,
sendReminder: true,
journals: []m.AlertNotificationJournal{},
expect: true,
},
{
name: "ok -> ok with reminder should not trigger an notification",
newState: m.AlertStateOK,
prevState: m.AlertStateOK,
expected: false,
sendReminder: true,
journals: []m.AlertNotificationJournal{},
expect: false,
},
{
name: "alerting -> alerting with reminder and no journaling should trigger",
newState: m.AlertStateAlerting,
prevState: m.AlertStateAlerting,
frequency: time.Minute * 10,
sendReminder: true,
journals: []m.AlertNotificationJournal{},
expect: true,
},
{
name: "alerting -> alerting with reminder and successful recent journal event should not trigger",
newState: m.AlertStateAlerting,
prevState: m.AlertStateAlerting,
frequency: time.Minute * 10,
sendReminder: true,
journals: []m.AlertNotificationJournal{
{SentAt: tnow.Add(-time.Minute).Unix(), Success: true},
},
expect: false,
},
{
name: "alerting -> alerting with reminder and failed recent journal event should trigger",
newState: m.AlertStateAlerting,
prevState: m.AlertStateAlerting,
frequency: time.Minute * 10,
sendReminder: true,
expect: true,
journals: []m.AlertNotificationJournal{
{SentAt: tnow.Add(-time.Minute).Unix(), Success: false}, // recent failed notification
{SentAt: tnow.Add(-time.Hour).Unix(), Success: true}, // old successful notification
},
},
}
@ -69,8 +123,8 @@ func TestShouldSendAlertNotification(t *testing.T) {
})
evalContext.Rule.State = tc.prevState
if defaultShouldNotify(evalContext, true, 0, time.Now()) != tc.expected {
t.Errorf("failed %s. expected %+v to return %v", tc.name, tc, tc.expected)
if defaultShouldNotify(evalContext, true, tc.frequency, tc.journals) != tc.expect {
t.Errorf("failed test %s.\n expected \n%+v \nto return: %v", tc.name, tc, tc.expect)
}
}
}
@ -87,16 +141,6 @@ func TestShouldNotifyWhenNoJournalingIsFound(t *testing.T) {
})
evalContext := alerting.NewEvalContext(context.TODO(), &alerting.Rule{})
Convey("should notify if no journaling is found", func() {
bus.AddHandlerCtx("", func(ctx context.Context, q *m.GetLatestNotificationQuery) error {
return m.ErrJournalingNotFound
})
if !notifier.ShouldNotify(context.Background(), evalContext) {
t.Errorf("should send notifications when ErrJournalingNotFound is returned")
}
})
Convey("should not notify query returns error", func() {
bus.AddHandlerCtx("", func(ctx context.Context, q *m.GetLatestNotificationQuery) error {
return errors.New("some kind of error unknown error")


@ -74,7 +74,7 @@ func (this *TeamsNotifier) Notify(evalContext *alerting.EvalContext) error {
}
message := ""
if evalContext.Rule.State != m.AlertStateOK { //dont add message when going back to alert state ok.
if evalContext.Rule.State != m.AlertStateOK { //don't add message when going back to alert state ok.
message = evalContext.Rule.Message
}


@ -100,7 +100,7 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
}
}
}
handler.notifier.SendIfNeeded(evalContext)
handler.notifier.SendIfNeeded(evalContext)
return nil
}


@ -83,7 +83,7 @@ func (cr *configReader) readConfig() ([]*DashboardsAsConfig, error) {
}
if dashboards[i].UpdateIntervalSeconds == 0 {
dashboards[i].UpdateIntervalSeconds = 3
dashboards[i].UpdateIntervalSeconds = 10
}
}


@ -70,7 +70,7 @@ func validateDashboardAsConfig(t *testing.T, cfg []*DashboardsAsConfig) {
So(len(ds.Options), ShouldEqual, 1)
So(ds.Options["path"], ShouldEqual, "/var/lib/grafana/dashboards")
So(ds.DisableDeletion, ShouldBeTrue)
So(ds.UpdateIntervalSeconds, ShouldEqual, 10)
So(ds.UpdateIntervalSeconds, ShouldEqual, 15)
ds2 := cfg[1]
So(ds2.Name, ShouldEqual, "default")
@ -81,5 +81,5 @@ func validateDashboardAsConfig(t *testing.T, cfg []*DashboardsAsConfig) {
So(len(ds2.Options), ShouldEqual, 1)
So(ds2.Options["path"], ShouldEqual, "/var/lib/grafana/dashboards")
So(ds2.DisableDeletion, ShouldBeFalse)
So(ds2.UpdateIntervalSeconds, ShouldEqual, 3)
So(ds2.UpdateIntervalSeconds, ShouldEqual, 10)
}


@ -6,7 +6,7 @@ providers:
folder: 'developers'
editable: true
disableDeletion: true
updateIntervalSeconds: 10
updateIntervalSeconds: 15
type: file
options:
path: /var/lib/grafana/dashboards


@ -3,7 +3,7 @@
folder: 'developers'
editable: true
disableDeletion: true
updateIntervalSeconds: 10
updateIntervalSeconds: 15
type: file
options:
path: /var/lib/grafana/dashboards


@ -4,7 +4,7 @@
# org_id: 1
# # list of datasources to insert/update depending
# # whats available in the datbase
# # what's available in the database
#datasources:
# # <string, required> name of the datasource. Required
# - name: Graphite


@ -70,7 +70,7 @@ func (rs *RenderingService) renderViaHttp(ctx context.Context, opts Opts) (*Rend
return nil, ErrTimeout
}
// if we didnt get a 200 response, something went wrong.
// if we didn't get a 200 response, something went wrong.
if resp.StatusCode != http.StatusOK {
rs.log.Error("Remote rendering request failed", "error", resp.Status)
return nil, fmt.Errorf("Remote rendering request failed. %d: %s", resp.StatusCode, resp.Status)
@ -83,7 +83,7 @@ func (rs *RenderingService) renderViaHttp(ctx context.Context, opts Opts) (*Rend
defer out.Close()
_, err = io.Copy(out, resp.Body)
if err != nil {
// check that we didnt timeout while receiving the response.
// check that we didn't timeout while receiving the response.
if reqContext.Err() == context.DeadlineExceeded {
rs.log.Info("Rendering timed out")
return nil, ErrTimeout


@ -13,15 +13,16 @@ var ErrNoRenderer = errors.New("No renderer plugin found nor is an external rend
var ErrPhantomJSNotInstalled = errors.New("PhantomJS executable not found")
type Opts struct {
Width int
Height int
Timeout time.Duration
OrgId int64
UserId int64
OrgRole models.RoleType
Path string
Encoding string
Timezone string
Width int
Height int
Timeout time.Duration
OrgId int64
UserId int64
OrgRole models.RoleType
Path string
Encoding string
Timezone string
ConcurrentLimit int
}
type RenderResult struct {
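
A sketch of a caller filling the widened struct, mirroring the two call sites touched in this commit (the HTTP render endpoint hard-codes a limit of 30, the alert notifier passes setting.AlertingRenderLimit). The rendering.Service name and the Path value are assumptions, not taken from this diff:

```go
// Hedged sketch, not a real call site: rs is assumed to expose the Render
// method shown in this commit; the Path value is hypothetical.
func renderPanelPng(ctx context.Context, rs rendering.Service, orgID, userID int64) (string, error) {
	result, err := rs.Render(ctx, rendering.Opts{
		Width:           1000,
		Height:          500,
		Timeout:         30 * time.Second,
		OrgId:           orgID,
		UserId:          userID,
		OrgRole:         models.ROLE_ADMIN,
		Path:            "d-solo/abc/my-dashboard?panelId=2", // hypothetical
		ConcurrentLimit: 5,
	})
	if err != nil {
		return "", err
	}
	return result.FilePath, nil
}
```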


@ -24,12 +24,13 @@ func init() {
}
type RenderingService struct {
log log.Logger
pluginClient *plugin.Client
grpcPlugin pluginModel.RendererPlugin
pluginInfo *plugins.RendererPlugin
renderAction renderFunc
domain string
log log.Logger
pluginClient *plugin.Client
grpcPlugin pluginModel.RendererPlugin
pluginInfo *plugins.RendererPlugin
renderAction renderFunc
domain string
inProgressCount int
Cfg *setting.Cfg `inject:""`
}
@ -45,7 +46,7 @@ func (rs *RenderingService) Init() error {
// set value used for domain attribute of renderKey cookie
if rs.Cfg.RendererUrl != "" {
// RendererCallbackUrl has already been passed, it wont generate an error.
// RendererCallbackUrl has already been passed, it won't generate an error.
u, _ := url.Parse(rs.Cfg.RendererCallbackUrl)
rs.domain = u.Hostname()
} else if setting.HttpAddr != setting.DEFAULT_HTTP_ADDR {
@ -90,6 +91,18 @@ func (rs *RenderingService) Run(ctx context.Context) error {
}
func (rs *RenderingService) Render(ctx context.Context, opts Opts) (*RenderResult, error) {
if rs.inProgressCount > opts.ConcurrentLimit {
return &RenderResult{
FilePath: filepath.Join(setting.HomePath, "public/img/rendering_limit.png"),
}, nil
}
defer func() {
rs.inProgressCount -= 1
}()
rs.inProgressCount += 1
if rs.renderAction != nil {
return rs.renderAction(ctx, opts)
} else {


@ -230,7 +230,7 @@ func UpdateAlertNotification(cmd *m.UpdateAlertNotificationCommand) error {
}
func RecordNotificationJournal(ctx context.Context, cmd *m.RecordNotificationJournalCommand) error {
return inTransactionCtx(ctx, func(sess *DBSession) error {
return withDbSession(ctx, func(sess *DBSession) error {
journalEntry := &m.AlertNotificationJournal{
OrgId: cmd.OrgId,
AlertId: cmd.AlertId,
@ -245,21 +245,19 @@ func RecordNotificationJournal(ctx context.Context, cmd *m.RecordNotificationJou
}
func GetLatestNotification(ctx context.Context, cmd *m.GetLatestNotificationQuery) error {
return inTransactionCtx(ctx, func(sess *DBSession) error {
nj := &m.AlertNotificationJournal{}
return withDbSession(ctx, func(sess *DBSession) error {
nj := []m.AlertNotificationJournal{}
_, err := sess.Desc("alert_notification_journal.sent_at").
Limit(1).
Where("alert_notification_journal.org_id = ? AND alert_notification_journal.alert_id = ? AND alert_notification_journal.notifier_id = ?", cmd.OrgId, cmd.AlertId, cmd.NotifierId).Get(nj)
err := sess.Desc("alert_notification_journal.sent_at").
Where("alert_notification_journal.org_id = ?", cmd.OrgId).
Where("alert_notification_journal.alert_id = ?", cmd.AlertId).
Where("alert_notification_journal.notifier_id = ?", cmd.NotifierId).
Find(&nj)
if err != nil {
return err
}
if nj.AlertId == 0 && nj.Id == 0 && nj.NotifierId == 0 && nj.OrgId == 0 {
return m.ErrJournalingNotFound
}
cmd.Result = nj
return nil
})


@ -15,16 +15,21 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
InitTestDB(t)
Convey("Alert notification journal", func() {
var alertId int64 = 5
var alertId int64 = 7
var orgId int64 = 5
var notifierId int64 = 5
var notifierId int64 = 10
Convey("Getting last journal should raise error if no one exists", func() {
query := &m.GetLatestNotificationQuery{AlertId: alertId, OrgId: orgId, NotifierId: notifierId}
err := GetLatestNotification(context.Background(), query)
So(err, ShouldEqual, m.ErrJournalingNotFound)
GetLatestNotification(context.Background(), query)
So(len(query.Result), ShouldEqual, 0)
Convey("shoulbe be able to record two journaling events", func() {
// recording a journal entry in another org to make sure the org filter works as expected.
journalInOtherOrg := &m.RecordNotificationJournalCommand{AlertId: alertId, NotifierId: notifierId, OrgId: 10, Success: true, SentAt: 1}
err := RecordNotificationJournal(context.Background(), journalInOtherOrg)
So(err, ShouldBeNil)
Convey("should be able to record two journaling events", func() {
createCmd := &m.RecordNotificationJournalCommand{AlertId: alertId, NotifierId: notifierId, OrgId: orgId, Success: true, SentAt: 1}
err := RecordNotificationJournal(context.Background(), createCmd)
@ -38,17 +43,20 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
Convey("get last journaling event", func() {
err := GetLatestNotification(context.Background(), query)
So(err, ShouldBeNil)
So(query.Result.SentAt, ShouldEqual, 1001)
So(len(query.Result), ShouldEqual, 2)
last := query.Result[0]
So(last.SentAt, ShouldEqual, 1001)
Convey("be able to clear all journaling for an notifier", func() {
cmd := &m.CleanNotificationJournalCommand{AlertId: alertId, NotifierId: notifierId, OrgId: orgId}
err := CleanNotificationJournal(context.Background(), cmd)
So(err, ShouldBeNil)
Convey("querying for last junaling should raise error", func() {
Convey("querying for last journaling should return no journal entries", func() {
query := &m.GetLatestNotificationQuery{AlertId: alertId, OrgId: orgId, NotifierId: notifierId}
err := GetLatestNotification(context.Background(), query)
So(err, ShouldEqual, m.ErrJournalingNotFound)
So(err, ShouldBeNil)
So(len(query.Result), ShouldEqual, 0)
})
})
})


@ -105,7 +105,7 @@ func addAnnotationMig(mg *Migrator) {
}))
//
// Convert epoch saved as seconds to miliseconds
// Convert epoch saved as seconds to milliseconds
//
updateEpochSql := "UPDATE annotation SET epoch = (epoch*1000) where epoch < 9999999999"
mg.AddMigration("Convert existing annotations from seconds to milliseconds", NewRawSqlMigration(updateEpochSql))


@ -39,7 +39,7 @@ func TestTransaction(t *testing.T) {
So(err, ShouldEqual, models.ErrInvalidApiKey)
})
Convey("wont update if one handler fails", func() {
Convey("won't update if one handler fails", func() {
err := ss.InTransaction(context.Background(), func(ctx context.Context) error {
err := DeleteApiKeyCtx(ctx, deleteApiKeyCmd)
if err != nil {


@ -271,9 +271,6 @@ func ChangeUserPassword(cmd *m.ChangeUserPasswordCommand) error {
func UpdateUserLastSeenAt(cmd *m.UpdateUserLastSeenAtCommand) error {
return inTransaction(func(sess *DBSession) error {
if cmd.UserId <= 0 {
}
user := m.User{
Id: cmd.UserId,
LastSeenAt: time.Now(),


@ -166,6 +166,7 @@ var (
// Alerting
AlertingEnabled bool
ExecuteAlerts bool
AlertingRenderLimit int
AlertingErrorOrTimeout string
AlertingNoDataOrNullValues string
@ -196,10 +197,13 @@ type Cfg struct {
Smtp SmtpSettings
// Rendering
ImagesDir string
PhantomDir string
RendererUrl string
RendererCallbackUrl string
ImagesDir string
PhantomDir string
RendererUrl string
RendererCallbackUrl string
RendererLimit int
RendererLimitAlerting int
DisableBruteForceLoginProtection bool
TempDataLifetime time.Duration
@ -677,6 +681,7 @@ func (cfg *Cfg) Load(args *CommandLineArgs) error {
alerting := iniFile.Section("alerting")
AlertingEnabled = alerting.Key("enabled").MustBool(true)
ExecuteAlerts = alerting.Key("execute_alerts").MustBool(true)
AlertingRenderLimit = alerting.Key("concurrent_render_limit").MustInt(5)
AlertingErrorOrTimeout = alerting.Key("error_or_timeout").MustString("alerting")
AlertingNoDataOrNullValues = alerting.Key("nodata_or_nullvalues").MustString("no_data")


@ -58,7 +58,8 @@ func (ts *TracingService) parseSettings() {
func (ts *TracingService) initGlobalTracer() error {
cfg := jaegercfg.Configuration{
Disabled: !ts.enabled,
ServiceName: "grafana",
Disabled: !ts.enabled,
Sampler: &jaegercfg.SamplerConfig{
Type: ts.samplerType,
Param: ts.samplerParam,
@ -78,7 +79,7 @@ func (ts *TracingService) initGlobalTracer() error {
options = append(options, jaegercfg.Tag(tag, value))
}
tracer, closer, err := cfg.New("grafana", options...)
tracer, closer, err := cfg.NewTracer(options...)
if err != nil {
return err
}


@ -196,7 +196,7 @@ func (e *CloudWatchExecutor) executeQuery(ctx context.Context, query *CloudWatch
params.ExtendedStatistics = query.ExtendedStatistics
}
// 1 minutes resolutin metrics is stored for 15 days, 15 * 24 * 60 = 21600
// 1 minute resolution metrics are stored for 15 days, 15 * 24 * 60 = 21600
if query.HighResolution && (((endTime.Unix() - startTime.Unix()) / int64(query.Period)) > 21600) {
return nil, errors.New("too long query period")
}
@ -267,7 +267,7 @@ func (e *CloudWatchExecutor) executeGetMetricDataQuery(ctx context.Context, regi
ScanBy: aws.String("TimestampAscending"),
}
for _, query := range queries {
// 1 minutes resolutin metrics is stored for 15 days, 15 * 24 * 60 = 21600
// 1 minute resolution metrics are stored for 15 days, 15 * 24 * 60 = 21600
if query.HighResolution && (((endTime.Unix() - startTime.Unix()) / int64(query.Period)) > 21600) {
return nil, errors.New("too long query period")
}


@ -40,7 +40,7 @@ func TestClient(t *testing.T) {
So(err, ShouldNotBeNil)
})
Convey("When unspported version set should return error", func() {
Convey("When unsupported version set should return error", func() {
ds := &models.DataSource{
JsonData: simplejson.NewFromAny(map[string]interface{}{
"esVersion": 6,


@ -56,9 +56,7 @@ func (b *SearchRequestBuilder) Build() (*SearchRequest, error) {
if err != nil {
return nil, err
}
for _, agg := range aggArray {
sr.Aggs = append(sr.Aggs, agg)
}
sr.Aggs = append(sr.Aggs, aggArray...)
}
}
@ -112,7 +110,7 @@ func (b *SearchRequestBuilder) Query() *QueryBuilder {
return b.queryBuilder
}
// Agg initaite and returns a new aggregation builder
// Agg initiates and returns a new aggregation builder
func (b *SearchRequestBuilder) Agg() AggBuilder {
aggBuilder := newAggBuilder()
b.aggBuilders = append(b.aggBuilders, aggBuilder)
@ -300,9 +298,7 @@ func (b *aggBuilderImpl) Build() (AggArray, error) {
return nil, err
}
for _, childAgg := range childAggs {
agg.Aggregation.Aggs = append(agg.Aggregation.Aggs, childAgg)
}
agg.Aggregation.Aggs = append(agg.Aggregation.Aggs, childAggs...)
}
aggs = append(aggs, agg)


@ -92,7 +92,7 @@ func (rp *responseParser) processBuckets(aggs map[string]interface{}, target *Qu
} else {
for _, b := range esAgg.Get("buckets").MustArray() {
bucket := simplejson.NewFromAny(b)
newProps := make(map[string]string, 0)
newProps := make(map[string]string)
for k, v := range props {
newProps[k] = v
@ -122,7 +122,7 @@ func (rp *responseParser) processBuckets(aggs map[string]interface{}, target *Qu
for _, bucketKey := range bucketKeys {
bucket := simplejson.NewFromAny(buckets[bucketKey])
newProps := make(map[string]string, 0)
newProps := make(map[string]string)
for k, v := range props {
newProps[k] = v
@ -314,7 +314,6 @@ func (rp *responseParser) processAggregationDocs(esAgg *simplejson.Json, aggDef
switch metric.Type {
case "count":
addMetricValue(&values, rp.getMetricName(metric.Type), castToNullFloat(bucket.Get("doc_count")))
break
case "extended_stats":
metaKeys := make([]string, 0)
meta := metric.Meta.MustMap()
@ -355,7 +354,6 @@ func (rp *responseParser) processAggregationDocs(esAgg *simplejson.Json, aggDef
}
addMetricValue(&values, metricName, castToNullFloat(bucket.GetPath(metric.ID, "value")))
break
}
}


@ -158,7 +158,7 @@ func TestInfluxdbQueryBuilder(t *testing.T) {
So(strings.Join(query.renderTags(), ""), ShouldEqual, `"key" < 10001`)
})
Convey("can render number greather then condition tags", func() {
Convey("can render number greater then condition tags", func() {
query := &Query{Tags: []*Tag{{Operator: ">", Value: "10001", Key: "key"}}}
So(strings.Join(query.renderTags(), ""), ShouldEqual, `"key" > 10001`)


@ -92,12 +92,12 @@ func (e *PrometheusExecutor) Query(ctx context.Context, dsInfo *models.DataSourc
return nil, err
}
querys, err := parseQuery(dsInfo, tsdbQuery.Queries, tsdbQuery)
queries, err := parseQuery(dsInfo, tsdbQuery.Queries, tsdbQuery)
if err != nil {
return nil, err
}
for _, query := range querys {
for _, query := range queries {
timeRange := apiv1.Range{
Start: query.Start,
End: query.End,


@ -3,14 +3,14 @@ package util
import "testing"
func TestMd5Sum(t *testing.T) {
input := "dont hash passwords with md5"
input := "don't hash passwords with md5"
have, err := Md5SumString(input)
if err != nil {
t.Fatal("expected err to be nil")
}
want := "2d6a56c82d09d374643b926d3417afba"
want := "dd1f7fdb3466c0d09c2e839d1f1530f8"
if have != want {
t.Fatalf("expected: %s got: %s", want, have)
}


@ -245,6 +245,9 @@ export function grafanaAppDirective(playlistSrv, contextSrv, $timeout, $rootScop
return;
}
// ensure dropdown menu doesn't impact z-index
body.find('.dropdown-menu-open').removeClass('dropdown-menu-open');
// for stuff that animates, slides out etc, clicking it needs to
// hide it right away
const clickAutoHide = target.closest('[data-click-hide]');


@ -34,6 +34,7 @@
</span>
<span class="search-item__body" ng-click="ctrl.onItemClick(item)">
<div class="search-item__body-title">{{::item.title}}</div>
<span class="search-item__body-folder-title">{{::item.folderTitle}}</span>
</span>
<span class="search-item__tags">
<span ng-click="ctrl.selectTag(tag, $event)" ng-repeat="tag in item.tags" tag-color-from-name="tag" class="label label-tag">


@ -118,6 +118,9 @@ export function metricSegment($compile, $sce) {
};
$scope.matcher = function(item) {
if (linkMode) {
return false;
}
let str = this.query;
if (str[0] === '/') {
str = str.substring(1);


@ -582,6 +582,7 @@ export class Explore extends React.Component<any, ExploreState> {
onClickHintFix={this.onModifyQueries}
onExecuteQuery={this.onSubmit}
onRemoveQueryRow={this.onRemoveQueryRow}
supportsLogs={supportsLogs}
/>
<div className="result-options">
{supportsGraph ? (


@ -147,12 +147,14 @@ interface PromQueryFieldProps {
onQueryChange?: (value: string, override?: boolean) => void;
portalPrefix?: string;
request?: (url: string) => any;
supportsLogs?: boolean; // To be removed after Logging gets its own query field
}
interface PromQueryFieldState {
histogramMetrics: string[];
labelKeys: { [index: string]: string[] }; // metric -> [labelKey,...]
labelValues: { [index: string]: { [index: string]: string[] } }; // metric -> labelKey -> [labelValue,...]
logLabelOptions: any[];
metrics: string[];
metricsByPrefix: CascaderOption[];
}
@ -184,16 +186,41 @@ class PromQueryField extends React.Component<PromQueryFieldProps, PromQueryField
histogramMetrics: props.histogramMetrics || [],
labelKeys: props.labelKeys || {},
labelValues: props.labelValues || {},
logLabelOptions: [],
metrics: props.metrics || [],
metricsByPrefix: props.metricsByPrefix || [],
};
}
componentDidMount() {
this.fetchMetricNames();
this.fetchHistogramMetrics();
// Temporarily reused by logging
const { supportsLogs } = this.props;
if (supportsLogs) {
this.fetchLogLabels();
} else {
// Usual actions
this.fetchMetricNames();
this.fetchHistogramMetrics();
}
}
onChangeLogLabels = (values: string[], selectedOptions: CascaderOption[]) => {
let query;
if (selectedOptions.length === 1) {
if (selectedOptions[0].children.length === 0) {
query = selectedOptions[0].value;
} else {
// Ignore click on group
return;
}
} else {
const key = selectedOptions[0].value;
const value = selectedOptions[1].value;
query = `{${key}="${value}"}`;
}
this.onChangeQuery(query, true);
};
onChangeMetrics = (values: string[], selectedOptions: CascaderOption[]) => {
let query;
if (selectedOptions.length === 1) {
@ -401,7 +428,8 @@ class PromQueryField extends React.Component<PromQueryFieldProps, PromQueryField
}
// Query labels for selector
if (selector && !this.state.labelValues[selector]) {
// Temporarily add skip for logging
if (selector && !this.state.labelValues[selector] && !this.props.supportsLogs) {
if (selector === EMPTY_SELECTOR) {
// Query label values for default labels
refresher = Promise.all(DEFAULT_KEYS.map(key => this.fetchLabelValues(key)));
@ -430,6 +458,38 @@ class PromQueryField extends React.Component<PromQueryFieldProps, PromQueryField
});
}
// Temporarily here while reusing this field for logging
async fetchLogLabels() {
const url = '/api/prom/label';
try {
const res = await this.request(url);
const body = await (res.data || res.json());
const labelKeys = body.data.slice().sort();
const labelKeysBySelector = {
...this.state.labelKeys,
[EMPTY_SELECTOR]: labelKeys,
};
const labelValuesByKey = {};
const logLabelOptions = [];
for (const key of labelKeys) {
const valuesUrl = `/api/prom/label/${key}/values`;
const res = await this.request(valuesUrl);
const body = await (res.data || res.json());
const values = body.data.slice().sort();
labelValuesByKey[key] = values;
logLabelOptions.push({
label: key,
value: key,
children: values.map(value => ({ label: value, value })),
});
}
const labelValues = { [EMPTY_SELECTOR]: labelValuesByKey };
this.setState({ labelKeys: labelKeysBySelector, labelValues, logLabelOptions });
} catch (e) {
console.error(e);
}
}
async fetchLabelValues(key: string) {
const url = `/api/v1/label/${key}/values`;
try {
@ -484,8 +544,8 @@ class PromQueryField extends React.Component<PromQueryFieldProps, PromQueryField
}
render() {
const { error, hint } = this.props;
const { histogramMetrics, metricsByPrefix } = this.state;
const { error, hint, supportsLogs } = this.props;
const { histogramMetrics, logLabelOptions, metricsByPrefix } = this.state;
const histogramOptions = histogramMetrics.map(hm => ({ label: hm, value: hm }));
const metricsOptions = [
{ label: 'Histograms', value: HISTOGRAM_GROUP, children: histogramOptions },
@ -495,9 +555,15 @@ class PromQueryField extends React.Component<PromQueryFieldProps, PromQueryField
return (
<div className="prom-query-field">
<div className="prom-query-field-tools">
<Cascader options={metricsOptions} onChange={this.onChangeMetrics}>
<button className="btn navbar-button navbar-button--tight">Metrics</button>
</Cascader>
{supportsLogs ? (
<Cascader options={logLabelOptions} onChange={this.onChangeLogLabels}>
<button className="btn navbar-button navbar-button--tight">Log labels</button>
</Cascader>
) : (
<Cascader options={metricsOptions} onChange={this.onChangeMetrics}>
<button className="btn navbar-button navbar-button--tight">Metrics</button>
</Cascader>
)}
</div>
<div className="prom-query-field-wrapper">
<div className="slate-query-field-wrapper">


@ -44,7 +44,7 @@ class QueryRow extends PureComponent<any, {}> {
};
render() {
const { edited, history, query, queryError, queryHint, request } = this.props;
const { edited, history, query, queryError, queryHint, request, supportsLogs } = this.props;
return (
<div className="query-row">
<div className="query-row-field">
@ -58,6 +58,7 @@ class QueryRow extends PureComponent<any, {}> {
onPressEnter={this.onPressEnter}
onQueryChange={this.onChangeQuery}
request={request}
supportsLogs={supportsLogs}
/>
</div>
<div className="query-row-tools">


@ -57,5 +57,8 @@ describe('parseSelector()', () => {
parsed = parseSelector('baz{foo="bar"}', 12);
expect(parsed.selector).toBe('{__name__="baz",foo="bar"}');
parsed = parseSelector('bar:metric:1m{}', 14);
expect(parsed.selector).toBe('{__name__="bar:metric:1m"}');
});
});


@ -32,7 +32,7 @@ const labelRegexp = /\b\w+="[^"\n]*?"/g;
export function parseSelector(query: string, cursorOffset = 1): { labelKeys: any[]; selector: string } {
if (!query.match(selectorRegexp)) {
// Special matcher for metrics
if (query.match(/^\w+$/)) {
if (query.match(/^[A-Za-z:][\w:]*$/)) {
return {
selector: `{__name__="${query}"}`,
labelKeys: ['__name__'],
@ -76,7 +76,7 @@ export function parseSelector(query: string, cursorOffset = 1): { labelKeys: any
// Add metric if there is one before the selector
const metricPrefix = query.slice(0, prefixOpen);
const metricMatch = metricPrefix.match(/\w+$/);
const metricMatch = metricPrefix.match(/[A-Za-z:][\w:]*$/);
if (metricMatch) {
labels['__name__'] = `"${metricMatch[0]}"`;
}
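
To see the effect of the widened matcher, here is a standalone check with the two regular expressions ported to Go syntax (they behave identically to the JS originals for these inputs): the old pattern rejects Prometheus recording-rule names containing colons, the new one accepts them.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldMatcher := regexp.MustCompile(`^\w+$`)             // previous metric matcher
	newMatcher := regexp.MustCompile(`^[A-Za-z:][\w:]*$`) // matcher after this change

	name := "bar:metric:1m" // recording-rule style name from the test above
	fmt.Println(oldMatcher.MatchString(name)) // false: ':' is not in \w
	fmt.Println(newMatcher.MatchString(name)) // true
}
```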


@ -1,4 +1,4 @@
import {GrafanaDatasource} from "../datasource";
import { GrafanaDatasource } from '../datasource';
import q from 'q';
import moment from 'moment';
@ -9,23 +9,19 @@ describe('grafana data source', () => {
get: (url, options) => {
calledBackendSrvParams = options;
return q.resolve([]);
}
},
};
const templateSrvStub = {
replace: val => {
return val
.replace('$var2', 'replaced|replaced2')
.replace('$var', 'replaced');
}
return val.replace('$var2', 'replaced|replaced2').replace('$var', 'replaced');
},
};
const ds = new GrafanaDatasource(backendSrvStub, q, templateSrvStub);
describe('with tags that have template variables', () => {
const options = setupAnnotationQueryOptions(
{tags: ['tag1:$var']}
);
const options = setupAnnotationQueryOptions({ tags: ['tag1:$var'] });
beforeEach(() => {
return ds.annotationQuery(options);
@ -37,9 +33,7 @@ describe('grafana data source', () => {
});
describe('with tags that have multi value template variables', () => {
const options = setupAnnotationQueryOptions(
{tags: ['$var2']}
);
const options = setupAnnotationQueryOptions({ tags: ['$var2'] });
beforeEach(() => {
return ds.annotationQuery(options);
@ -55,9 +49,9 @@ describe('grafana data source', () => {
const options = setupAnnotationQueryOptions(
{
type: 'dashboard',
tags: ['tag1']
tags: ['tag1'],
},
{id: 1}
{ id: 1 }
);
beforeEach(() => {
@ -77,8 +71,8 @@ function setupAnnotationQueryOptions(annotation, dashboard?) {
dashboard: dashboard,
range: {
from: moment(1432288354),
to: moment(1432288401)
to: moment(1432288401),
},
rangeRaw: {from: "now-24h", to: "now"}
rangeRaw: { from: 'now-24h', to: 'now' },
};
}


@ -144,6 +144,7 @@ table_schema IN (
let query = 'SELECT DISTINCT quote_literal(' + column + ')';
query += ' FROM ' + this.target.table;
query += ' WHERE $__timeFilter(' + this.target.timeColumn + ')';
query += ' AND ' + column + ' IS NOT NULL';
query += ' ORDER BY 1 LIMIT 100';
return query;
}


@ -116,7 +116,7 @@ export function setupAngularRoutes($routeProvider, $locationProvider) {
template: '<react-container />',
resolve: {
roles: () => ['Editor', 'Admin'],
component: () => import(/* webpackChunkName: "explore" */ 'app/containers/Explore/Wrapper'),
component: () => import(/* webpackChunkName: "explore" */ 'app/features/explore/Wrapper'),
},
})
.when('/org', {

(4 binary image files added, not shown; sizes 3.1 KiB, 3.8 KiB, 3.6 KiB, and 3.3 KiB.)


@ -17,10 +17,6 @@
padding-left: 0px;
}
.graph-legend-table {
width: auto;
}
.graph-legend-table .graph-legend-series {
display: table-row;
}
@ -35,7 +31,6 @@
}
.datapoints-warning {
pointer: none;
position: absolute;
top: 50%;
left: 50%;


@ -210,12 +210,20 @@
.search-item__body-title {
color: $list-item-link-color;
line-height: 14px;
}
.search-item__body-folder-title {
color: $text-color-weak;
font-size: $font-size-xs;
line-height: 11px;
}
.search-item__icon {
padding: 5px;
flex: 0 0 auto;
font-size: 19px;
line-height: 22px;
padding: 5px 2px 5px 10px;
}


@ -1129,7 +1129,7 @@ Licensed under the MIT license.
format.push({ x: true, number: true, required: true });
format.push({ y: true, number: true, required: true });
if (s.bars.show || (s.lines.show && s.lines.fill)) {
if (s.stack || s.bars.show || (s.lines.show && s.lines.fill)) {
var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero));
format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale });
if (s.bars.horizontal) {


@ -184,7 +184,7 @@
<div class="preloader__text">Loading Grafana</div>
<div class="preloader__text preloader__text--fail">
<p>
<strong>If your seeing this Grafana has failed to load its application files</strong>
<strong>If you're seeing this, Grafana has failed to load its application files</strong>
<br />
<br />
</p>