Merge branch 'master' into WPH95-feature/add_es_alerting

Commit 1324a67cbd (grafana/grafana)
@@ -13,6 +13,10 @@
* **Security**: Fix XSS vulnerabilities in dashboard links [#11813](https://github.com/grafana/grafana/pull/11813)
* **Singlestat**: Fix "time of last point" shows local time when dashboard timezone set to UTC [#10338](https://github.com/grafana/grafana/issues/10338)

# 5.1.3 (2018-05-16)

* **Scroll**: Graph panel / legend texts shifts on the left each time we move scrollbar on firefox [#11830](https://github.com/grafana/grafana/issues/11830)

# 5.1.2 (2018-05-09)

* **Database**: Fix MySql migration issue [#11862](https://github.com/grafana/grafana/issues/11862)
@@ -94,7 +94,7 @@ deleteDatasources:
orgId: 1

# list of datasources to insert/update depending
# whats available in the database
# what's available in the database
datasources:
# <string, required> name of the datasource. Required
- name: Graphite
@@ -154,7 +154,7 @@ Since not all datasources have the same configuration settings we only have the
| tlsAuthWithCACert | boolean | *All* | Enable TLS authentication using CA cert |
| tlsSkipVerify | boolean | *All* | Controls whether a client verifies the server's certificate chain and host name. |
| graphiteVersion | string | Graphite | Graphite version |
| timeInterval | string | Elastic, Influxdb & Prometheus | Lowest interval/step value that should be used for this data source |
| timeInterval | string | Elastic, InfluxDB & Prometheus | Lowest interval/step value that should be used for this data source |
| esVersion | string | Elastic | Elasticsearch version as a number (2/5/56) |
| timeField | string | Elastic | Which field should be used as timestamp |
| interval | string | Elastic | Index date time format |
@@ -162,9 +162,9 @@ Since not all datasources have the same configuration settings we only have the
| assumeRoleArn | string | Cloudwatch | ARN of Assume Role |
| defaultRegion | string | Cloudwatch | AWS region |
| customMetricsNamespaces | string | Cloudwatch | Namespaces of Custom Metrics |
| tsdbVersion | string | OpenTsdb | Version |
| tsdbResolution | string | OpenTsdb | Resolution |
| sslmode | string | Postgre | SSLmode. 'disable', 'require', 'verify-ca' or 'verify-full' |
| tsdbVersion | string | OpenTSDB | Version |
| tsdbResolution | string | OpenTSDB | Resolution |
| sslmode | string | PostgreSQL | SSLmode. 'disable', 'require', 'verify-ca' or 'verify-full' |

#### Secure Json Data
@@ -177,8 +177,8 @@ Secure json data is a map of settings that will be encrypted with [secret key](/
| tlsCACert | string | *All* | CA cert for outgoing requests |
| tlsClientCert | string | *All* | TLS Client cert for outgoing requests |
| tlsClientKey | string | *All* | TLS Client key for outgoing requests |
| password | string | Postgre | password |
| user | string | Postgre | user |
| password | string | PostgreSQL | password |
| user | string | PostgreSQL | user |
| accessKey | string | Cloudwatch | Access key for connecting to Cloudwatch |
| secretKey | string | Cloudwatch | Secret key for connecting to Cloudwatch |
@@ -11,7 +11,7 @@ weight = 3
+++

## Whats new in Grafana v4.1
## What's new in Grafana v4.1
- **Graph**: Support for shared tooltip on all graphs as you hover over one graph. [#1578](https://github.com/grafana/grafana/pull/1578), [#6274](https://github.com/grafana/grafana/pull/6274)
- **Victorops**: Add VictorOps notification integration [#6411](https://github.com/grafana/grafana/issues/6411), thx [@ichekrygin](https://github.com/ichekrygin)
- **Opsgenie**: Add OpsGenie notification integration [#6687](https://github.com/grafana/grafana/issues/6687), thx [@kylemcc](https://github.com/kylemcc)
@@ -10,7 +10,7 @@ parent = "whatsnew"
weight = -1
+++

## Whats new in Grafana v4.2
## What's new in Grafana v4.2

Grafana v4.2 Beta is now [available for download](https://grafana.com/grafana/download/4.2.0).
Just like the last release this one contains lots of bug fixes and minor improvements.
@@ -64,7 +64,7 @@ This makes exploring and filtering Prometheus data much easier.
* **Dataproxy**: Allow grafana to renegotiate tls connection [#9250](https://github.com/grafana/grafana/issues/9250)
* **HTTP**: set net.Dialer.DualStack to true for all http clients [#9367](https://github.com/grafana/grafana/pull/9367)
* **Alerting**: Add diff and percent diff as series reducers [#9386](https://github.com/grafana/grafana/pull/9386), thx [@shanhuhai5739](https://github.com/shanhuhai5739)
* **Slack**: Allow images to be uploaded to slack when Token is precent [#7175](https://github.com/grafana/grafana/issues/7175), thx [@xginn8](https://github.com/xginn8)
* **Slack**: Allow images to be uploaded to slack when Token is present [#7175](https://github.com/grafana/grafana/issues/7175), thx [@xginn8](https://github.com/xginn8)
* **Opsgenie**: Use their latest API instead of old version [#9399](https://github.com/grafana/grafana/pull/9399), thx [@cglrkn](https://github.com/cglrkn)
* **Table**: Add support for displaying the timestamp with milliseconds [#9429](https://github.com/grafana/grafana/pull/9429), thx [@s1061123](https://github.com/s1061123)
* **Hipchat**: Add metrics, message and image to hipchat notifications [#9110](https://github.com/grafana/grafana/issues/9110), thx [@eloo](https://github.com/eloo)
@@ -49,18 +49,15 @@ Content-Type: application/json
{
"id": 1,
"dashboardId": 1,
"dashboardUId": "ABcdEFghij"
"dashboardSlug": "sensors",
"panelId": 1,
"name": "fire place sensor",
"message": "Someone is trying to break in through the fire place",
"state": "alerting",
"message": "Someone is trying to break in through the fire place",
"newStateDate": "2018-05-14T05:55:20+02:00",
"evalDate": "0001-01-01T00:00:00Z",
"evalData": [
{
"metric": "fire",
"tags": null,
"value": 5.349999999999999
}
"newStateDate": "2016-12-25",
"evalData": null,
"executionError": "",
"url": "http://grafana.com/dashboard/db/sensors"
}
@@ -88,16 +85,35 @@ Content-Type: application/json
{
"id": 1,
"dashboardId": 1,
"dashboardUId": "ABcdEFghij"
"dashboardSlug": "sensors",
"panelId": 1,
"name": "fire place sensor",
"message": "Someone is trying to break in through the fire place",
"state": "alerting",
"newStateDate": "2016-12-25",
"message": "Someone is trying to break in through the fire place",
"newStateDate": "2018-05-14T05:55:20+02:00",
"evalDate": "0001-01-01T00:00:00Z",
"evalData": "evalMatches": [
{
"metric": "movement",
"tags": {
"name": "fireplace_chimney"
},
"value": 98.765
}
],
"executionError": "",
"url": "http://grafana.com/dashboard/db/sensors"
}
```
**Important Note**:
"evalMatches" data is cached in the db when and only when the state of the alert changes
(e.g. transitioning from "ok" to "alerting" state).

If data from one server triggers the alert first and, before that server is seen leaving alerting state,
a second server also enters a state that would trigger the alert, the second server will not be visible in "evalMatches" data.
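To make the payload shape concrete, here is a minimal Go sketch that decodes the alert response shown above. The struct is an assumption inferred from the example fields, not Grafana's internal type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// alertRule is inferred from the example payload above; it is not
// Grafana's own model.
type alertRule struct {
	Id           int64  `json:"id"`
	DashboardId  int64  `json:"dashboardId"`
	PanelId      int64  `json:"panelId"`
	Name         string `json:"name"`
	State        string `json:"state"`
	NewStateDate string `json:"newStateDate"`
	// evalData is only populated when the alert changed state on its
	// last evaluation; otherwise it stays null.
	EvalData json.RawMessage `json:"evalData"`
	Url      string          `json:"url"`
}

func main() {
	payload := []byte(`{"id":1,"dashboardId":1,"panelId":1,
		"name":"fire place sensor","state":"alerting",
		"newStateDate":"2018-05-14T05:55:20+02:00",
		"evalData":null,"url":"http://grafana.com/dashboard/db/sensors"}`)

	var rule alertRule
	if err := json.Unmarshal(payload, &rule); err != nil {
		panic(err)
	}
	// Prints: fire place sensor alerting null
	fmt.Println(rule.Name, rule.State, string(rule.EvalData))
}
```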
## Pause alert

`POST /api/alerts/:id/pause`
@@ -93,8 +93,6 @@ Directory where grafana will automatically scan and look for plugins

### provisioning

> This feature is available in 5.0+

Folder that contains [provisioning](/administration/provisioning) config files that grafana will apply on startup. Dashboards will be reloaded when the json files change

## [server]
@@ -717,7 +715,7 @@ Analytics ID here. By default this feature is disabled.

## [dashboards]

### versions_to_keep (introduced in v5.0)
### versions_to_keep

Number of dashboard versions to keep (per dashboard). Default: 20, Minimum: 1.
@@ -15,7 +15,7 @@ weight = 1

Description | Download
------------ | -------------
Stable for Debian-based Linux | [grafana_5.1.2_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.2_amd64.deb)
Stable for Debian-based Linux | [grafana_5.1.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.3_amd64.deb)
<!--
Beta for Debian-based Linux | [grafana_5.1.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.0-beta1_amd64.deb)
-->
@@ -27,9 +27,9 @@ installation.

```bash
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.2_amd64.deb
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.3_amd64.deb
sudo apt-get install -y adduser libfontconfig
sudo dpkg -i grafana_5.1.2_amd64.deb
sudo dpkg -i grafana_5.1.3_amd64.deb
```

<!-- ## Install Latest Beta
@@ -15,7 +15,7 @@ weight = 2

Description | Download
------------ | -------------
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.2 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2-1.x86_64.rpm)
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.3-1.x86_64.rpm)
<!--
Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.0-beta1.x86_64.rpm)
-->
@@ -28,7 +28,7 @@ installation.
You can install Grafana using Yum directly.

```bash
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2-1.x86_64.rpm
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.3-1.x86_64.rpm
```

<!-- ## Install Beta
@@ -42,15 +42,15 @@ Or install manually using `rpm`.
#### On CentOS / Fedora / Redhat:

```bash
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2-1.x86_64.rpm
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.3-1.x86_64.rpm
$ sudo yum install initscripts fontconfig
$ sudo rpm -Uvh grafana-5.1.2-1.x86_64.rpm
$ sudo rpm -Uvh grafana-5.1.3-1.x86_64.rpm
```

#### On OpenSuse:

```bash
$ sudo rpm -i --nodeps grafana-5.1.2-1.x86_64.rpm
$ sudo rpm -i --nodeps grafana-5.1.3-1.x86_64.rpm
```

## Install via YUM Repository
@@ -12,7 +12,7 @@ weight = 3

Description | Download
------------ | -------------
Latest stable package for Windows | [grafana-5.1.2.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2.windows-x64.zip)
Latest stable package for Windows | [grafana-5.1.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.3.windows-x64.zip)

<!--
Latest beta package for Windows | [grafana.5.1.0-beta1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.0-beta5.windows-x64.zip)
@@ -25,7 +25,7 @@ export class MyPanelCtrl extends PanelCtrl {
...
```

In this case, make sure the template has a single `<div>...</div>` root. The plugin loader will modifiy that element adding a scrollbar.
In this case, make sure the template has a single `<div>...</div>` root. The plugin loader will modify that element adding a scrollbar.
@@ -94,7 +94,7 @@ weight = 10
</a>
<figcaption>
<a href="https://youtu.be/FC13uhFRsVw?list=PLDGkOdUX1Ujo3wHw9-z5Vo12YLqXRjzg2" target="_blank" rel="noopener noreferrer">
#3 Whats New In Grafana 2.0
#3 What's New In Grafana 2.0
</a>
</figcaption>
</figure>
@@ -22,7 +22,9 @@ func runDbCommand(command func(commandLine CommandLine) error) func(context *cli
Args: flag.Args(),
})

sqlstore.NewEngine()
engine := &sqlstore.SqlStore{}
engine.Cfg = cfg
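// Init reads the database config, runs migrations and ensures the admin user; its error return is discarded at this call site.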
engine.Init()

if err := command(cmd); err != nil {
logger.Errorf("\n%s: ", color.RedString("Error"))
@@ -8,7 +8,6 @@ import (
"net"
"os"
"path/filepath"
"reflect"
"strconv"
"time"
@@ -16,14 +15,12 @@ import (
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/dashboards"

"golang.org/x/sync/errgroup"

"github.com/grafana/grafana/pkg/api"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/login"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"

"github.com/grafana/grafana/pkg/social"
@@ -37,6 +34,7 @@ import (
_ "github.com/grafana/grafana/pkg/services/notifications"
_ "github.com/grafana/grafana/pkg/services/provisioning"
_ "github.com/grafana/grafana/pkg/services/search"
_ "github.com/grafana/grafana/pkg/services/sqlstore"
_ "github.com/grafana/grafana/pkg/tracing"
)
@@ -70,17 +68,12 @@ func (g *GrafanaServerImpl) Run() error {
g.loadConfiguration()
g.writePIDFile()

// initSql
sqlstore.NewEngine() // TODO: this should return an error
sqlstore.EnsureAdminUser()

login.Init()
social.NewOAuthService()

serviceGraph := inject.Graph{}
serviceGraph.Provide(&inject.Object{Value: bus.GetBus()})
serviceGraph.Provide(&inject.Object{Value: g.cfg})
serviceGraph.Provide(&inject.Object{Value: dashboards.NewProvisioningService()})
serviceGraph.Provide(&inject.Object{Value: api.NewRouteRegister(middleware.RequestMetrics, middleware.RequestTracing)})

// self registered services
@@ -88,7 +81,7 @@ func (g *GrafanaServerImpl) Run() error {

// Add all services to dependency graph
for _, service := range services {
serviceGraph.Provide(&inject.Object{Value: service})
serviceGraph.Provide(&inject.Object{Value: service.Instance})
}

serviceGraph.Provide(&inject.Object{Value: g})
@@ -100,25 +93,27 @@ func (g *GrafanaServerImpl) Run() error {

// Init & start services
for _, service := range services {
if registry.IsDisabled(service) {
if registry.IsDisabled(service.Instance) {
continue
}

g.log.Info("Initializing " + reflect.TypeOf(service).Elem().Name())
g.log.Info("Initializing " + service.Name)

if err := service.Init(); err != nil {
if err := service.Instance.Init(); err != nil {
return fmt.Errorf("Service init failed: %v", err)
}
}

// Start background services
for index := range services {
service, ok := services[index].(registry.BackgroundService)
for _, srv := range services {
// variable needed for accessing loop variable in function callback
descriptor := srv
service, ok := srv.Instance.(registry.BackgroundService)
if !ok {
continue
}

if registry.IsDisabled(services[index]) {
if registry.IsDisabled(descriptor.Instance) {
continue
}
@@ -133,9 +128,9 @@ func (g *GrafanaServerImpl) Run() error {

// If error is not canceled then the service crashed
if err != context.Canceled && err != nil {
g.log.Error("Stopped "+reflect.TypeOf(service).Elem().Name(), "reason", err)
g.log.Error("Stopped "+descriptor.Name, "reason", err)
} else {
g.log.Info("Stopped "+reflect.TypeOf(service).Elem().Name(), "reason", err)
g.log.Info("Stopped "+descriptor.Name, "reason", err)
}

// Mark that we are in shutdown mode
@@ -6,6 +6,7 @@ import (
"fmt"

"github.com/grafana/grafana/pkg/components/null"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"

@@ -79,6 +80,14 @@ func (tw *DatasourcePluginWrapper) Query(ctx context.Context, ds *models.DataSou
qr.ErrorString = r.Error
}
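// Plugin responses may carry a JSON "meta" payload; it is parsed into the
// query result here. On a parse failure the error is logged and qr.Meta
// ends up nil, since metaJson is nil in that case.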
if r.MetaJson != "" {
metaJson, err := simplejson.NewJson([]byte(r.MetaJson))
if err != nil {
tw.logger.Error("Error parsing JSON Meta field: " + err.Error())
}
qr.Meta = metaJson
}

for _, s := range r.GetSeries() {
points := tsdb.TimeSeriesPoints{}
@@ -2,15 +2,35 @@ package registry

import (
"context"
"reflect"
"sort"
)

var services = []Service{}

func RegisterService(srv Service) {
services = append(services, srv)
type Descriptor struct {
Name string
Instance Service
InitPriority Priority
}

func GetServices() []Service {
var services []*Descriptor

func RegisterService(instance Service) {
services = append(services, &Descriptor{
Name: reflect.TypeOf(instance).Elem().Name(),
Instance: instance,
InitPriority: Low,
})
}

func Register(descriptor *Descriptor) {
services = append(services, descriptor)
}

func GetServices() []*Descriptor {
sort.Slice(services, func(i, j int) bool {
return services[i].InitPriority > services[j].InitPriority
})

return services
}
@@ -27,7 +47,18 @@ type BackgroundService interface {
Run(ctx context.Context) error
}

type HasInitPriority interface {
GetInitPriority() Priority
}

func IsDisabled(srv Service) bool {
canBeDisabled, ok := srv.(CanBeDisabled)
return ok && canBeDisabled.IsDisabled()
}

type Priority int

const (
High Priority = 100
Low Priority = 0
)
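For orientation, a minimal sketch of how a service could hook into this registry. MyService is hypothetical; Descriptor, Register, registry.High and the Init contract come from the code above:

```go
package myservice

import "github.com/grafana/grafana/pkg/registry"

// MyService is a hypothetical example service; it only needs to satisfy
// the registry's Init contract (and optionally BackgroundService).
type MyService struct{}

func (s *MyService) Init() error { return nil }

func init() {
	// GetServices sorts descriptors by InitPriority in descending order,
	// so a High-priority service is initialized before the default
	// Low-priority ones registered via RegisterService.
	registry.Register(&registry.Descriptor{
		Name:         "MyService",
		Instance:     &MyService{},
		InitPriority: registry.High,
	})
}
```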
@@ -8,9 +8,9 @@ import (
)

var (
simpleDashboardConfig = "./test-configs/dashboards-from-disk"
oldVersion = "./test-configs/version-0"
brokenConfigs = "./test-configs/broken-configs"
simpleDashboardConfig = "./testdata/test-configs/dashboards-from-disk"
oldVersion = "./testdata/test-configs/version-0"
brokenConfigs = "./testdata/test-configs/broken-configs"
)

func TestDashboardsAsConfig(t *testing.T) {
@@ -15,10 +15,10 @@ import (
)

var (
defaultDashboards = "./test-dashboards/folder-one"
brokenDashboards = "./test-dashboards/broken-dashboards"
oneDashboard = "./test-dashboards/one-dashboard"
containingId = "./test-dashboards/containing-id"
defaultDashboards = "./testdata/test-dashboards/folder-one"
brokenDashboards = "./testdata/test-dashboards/broken-dashboards"
oneDashboard = "./testdata/test-dashboards/one-dashboard"
containingId = "./testdata/test-dashboards/containing-id"

fakeService *fakeDashboardProvisioningService
)
@@ -4,7 +4,6 @@ import (
"testing"
"time"

"github.com/go-xorm/xorm"
. "github.com/smartystreets/goconvey/convey"

"github.com/grafana/grafana/pkg/components/simplejson"

@@ -110,14 +109,14 @@ func TestDashboardSnapshotDBAccess(t *testing.T) {
}

func TestDeleteExpiredSnapshots(t *testing.T) {
x := InitTestDB(t)
sqlstore := InitTestDB(t)

Convey("Testing dashboard snapshots clean up", t, func() {
setting.SnapShotRemoveExpired = true

notExpiredsnapshot := createTestSnapshot(x, "key1", 1200)
createTestSnapshot(x, "key2", -1200)
createTestSnapshot(x, "key3", -1200)
notExpiredsnapshot := createTestSnapshot(sqlstore, "key1", 48000)
createTestSnapshot(sqlstore, "key2", -1200)
createTestSnapshot(sqlstore, "key3", -1200)

err := DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{})
So(err, ShouldBeNil)
@@ -146,7 +145,7 @@ func TestDeleteExpiredSnapshots(t *testing.T) {
})
}

func createTestSnapshot(x *xorm.Engine, key string, expires int64) *m.DashboardSnapshot {
func createTestSnapshot(sqlstore *SqlStore, key string, expires int64) *m.DashboardSnapshot {
cmd := m.CreateDashboardSnapshotCommand{
Key: key,
DeleteKey: "delete" + key,

@@ -163,7 +162,7 @@ func createTestSnapshot(x *xorm.Engine, key string, expires int64) *m.DashboardS
// Set expiry date manually - to be able to create expired snapshots
if expires < 0 {
expireDate := time.Now().Add(time.Second * time.Duration(expires))
_, err = x.Exec("UPDATE dashboard_snapshot SET expires = ? WHERE id = ?", expireDate, cmd.Result.Id)
_, err = sqlstore.engine.Exec("UPDATE dashboard_snapshot SET expires = ? WHERE id = ?", expireDate, cmd.Result.Id)
So(err, ShouldBeNil)
}
@@ -39,7 +39,7 @@ func TestMigrations(t *testing.T) {
has, err := x.SQL(sql).Get(&r)
So(err, ShouldBeNil)
So(has, ShouldBeTrue)
expectedMigrations := mg.MigrationsCount() - 2 //we currently skip two migrations. We should rewrite skipped migrations to write in the log as well. until then we have to keep this
expectedMigrations := mg.MigrationsCount() //we currently skip two migrations. We should rewrite skipped migrations to write in the log as well. until then we have to keep this
So(r.Count, ShouldEqual, expectedMigrations)

mg = NewMigrator(x)
@@ -48,27 +48,6 @@ func addOrgMigrations(mg *Migrator) {
mg.AddMigration("create org_user table v1", NewAddTableMigration(orgUserV1))
addTableIndicesMigrations(mg, "v1", orgUserV1)

//------- copy data from old table-------------------
mg.AddMigration("copy data account to org", NewCopyTableDataMigration("org", "account", map[string]string{
"id": "id",
"version": "version",
"name": "name",
"created": "created",
"updated": "updated",
}).IfTableExists("account"))

mg.AddMigration("copy data account_user to org_user", NewCopyTableDataMigration("org_user", "account_user", map[string]string{
"id": "id",
"org_id": "account_id",
"user_id": "user_id",
"role": "role",
"created": "created",
"updated": "updated",
}).IfTableExists("account_user"))

mg.AddMigration("Drop old table account", NewDropTableMigration("account"))
mg.AddMigration("Drop old table account_user", NewDropTableMigration("account_user"))

mg.AddMigration("Update org table charset", NewTableCharsetMigration("org", []*Column{
{Name: "name", Type: DB_NVarchar, Length: 190, Nullable: false},
{Name: "address1", Type: DB_NVarchar, Length: 255, Nullable: true},
@@ -125,7 +125,7 @@ func (mg *Migrator) exec(m Migration, sess *xorm.Session) error {
sql, args := condition.Sql(mg.dialect)
results, err := sess.SQL(sql).Query(args...)
if err != nil || len(results) == 0 {
mg.Logger.Info("Skipping migration condition not fulfilled", "id", m.Id())
mg.Logger.Debug("Skipping migration condition not fulfilled", "id", m.Id())
return sess.Rollback()
}
}
@@ -43,6 +43,7 @@ func TestQuotaCommandsAndQueries(t *testing.T) {
Name: "TestOrg",
UserId: 1,
}

err := CreateOrg(&userCmd)
So(err, ShouldBeNil)
orgId = userCmd.Result.Id
@@ -13,6 +13,7 @@ import (
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/log"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/annotations"
"github.com/grafana/grafana/pkg/services/sqlstore/migrations"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"

@@ -27,39 +28,72 @@ import (
_ "github.com/grafana/grafana/pkg/tsdb/mssql"
)

type DatabaseConfig struct {
Type, Host, Name, User, Pwd, Path, SslMode string
CaCertPath string
ClientKeyPath string
ClientCertPath string
ServerCertName string
MaxOpenConn int
MaxIdleConn int
ConnMaxLifetime int
}

var (
x *xorm.Engine
dialect migrator.Dialect

HasEngine bool

DbCfg DatabaseConfig

UseSQLite3 bool
sqlog log.Logger = log.New("sqlstore")
)

func EnsureAdminUser() {
func init() {
registry.Register(&registry.Descriptor{
Name: "SqlStore",
Instance: &SqlStore{},
InitPriority: registry.High,
})
}

type SqlStore struct {
Cfg *setting.Cfg `inject:""`

dbCfg DatabaseConfig
engine *xorm.Engine
log log.Logger
skipEnsureAdmin bool
}

func (ss *SqlStore) Init() error {
ss.log = log.New("sqlstore")
ss.readConfig()

engine, err := ss.getEngine()

if err != nil {
return fmt.Errorf("Fail to connect to database: %v", err)
}

ss.engine = engine

// temporarily still set global var
x = engine
dialect = migrator.NewDialect(x)
migrator := migrator.NewMigrator(x)
migrations.AddMigrations(migrator)

if err := migrator.Start(); err != nil {
return fmt.Errorf("Migration failed err: %v", err)
}

// Init repo instances
annotations.SetRepository(&SqlAnnotationRepo{})

// ensure admin user
if ss.skipEnsureAdmin {
return nil
}

return ss.ensureAdminUser()
}

func (ss *SqlStore) ensureAdminUser() error {
statsQuery := m.GetSystemStatsQuery{}

if err := bus.Dispatch(&statsQuery); err != nil {
log.Fatal(3, "Could not determine if admin user exists: %v", err)
return
return fmt.Errorf("Could not determine if admin user exists: %v", err)
}

if statsQuery.Result.Users > 0 {
return
return nil
}

cmd := m.CreateUserCommand{}
@@ -69,109 +103,89 @@ func EnsureAdminUser() {
cmd.IsAdmin = true

if err := bus.Dispatch(&cmd); err != nil {
log.Error(3, "Failed to create default admin user", err)
return
return fmt.Errorf("Failed to create admin user: %v", err)
}

log.Info("Created default admin user: %v", setting.AdminUser)
}
ss.log.Info("Created default admin user: %v", setting.AdminUser)

func NewEngine() *xorm.Engine {
x, err := getEngine()

if err != nil {
sqlog.Crit("Fail to connect to database", "error", err)
os.Exit(1)
}

err = SetEngine(x)

if err != nil {
sqlog.Error("Fail to initialize orm engine", "error", err)
os.Exit(1)
}

return x
}

func SetEngine(engine *xorm.Engine) (err error) {
x = engine
dialect = migrator.NewDialect(x)

migrator := migrator.NewMigrator(x)
migrations.AddMigrations(migrator)

if err := migrator.Start(); err != nil {
return fmt.Errorf("Sqlstore::Migration failed err: %v\n", err)
}

// Init repo instances
annotations.SetRepository(&SqlAnnotationRepo{})
return nil
}

func getEngine() (*xorm.Engine, error) {
LoadConfig()
func (ss *SqlStore) buildConnectionString() (string, error) {
cnnstr := ss.dbCfg.ConnectionString

cnnstr := ""
switch DbCfg.Type {
// special case used by integration tests
if cnnstr != "" {
return cnnstr, nil
}

switch ss.dbCfg.Type {
case migrator.MYSQL:
protocol := "tcp"
if strings.HasPrefix(DbCfg.Host, "/") {
if strings.HasPrefix(ss.dbCfg.Host, "/") {
protocol = "unix"
}

cnnstr = fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&allowNativePasswords=true",
url.QueryEscape(DbCfg.User), url.QueryEscape(DbCfg.Pwd), protocol, DbCfg.Host, url.PathEscape(DbCfg.Name))
ss.dbCfg.User, ss.dbCfg.Pwd, protocol, ss.dbCfg.Host, ss.dbCfg.Name)

if DbCfg.SslMode == "true" || DbCfg.SslMode == "skip-verify" {
tlsCert, err := makeCert("custom", DbCfg)
if ss.dbCfg.SslMode == "true" || ss.dbCfg.SslMode == "skip-verify" {
tlsCert, err := makeCert("custom", ss.dbCfg)
if err != nil {
return nil, err
return "", err
}
mysql.RegisterTLSConfig("custom", tlsCert)
cnnstr += "&tls=custom"
}
case migrator.POSTGRES:
var host, port = "127.0.0.1", "5432"
fields := strings.Split(DbCfg.Host, ":")
fields := strings.Split(ss.dbCfg.Host, ":")
if len(fields) > 0 && len(strings.TrimSpace(fields[0])) > 0 {
host = fields[0]
}
if len(fields) > 1 && len(strings.TrimSpace(fields[1])) > 0 {
port = fields[1]
}
cnnstr = fmt.Sprintf("user='%s' password='%s' host='%s' port='%s' dbname='%s' sslmode='%s' sslcert='%s' sslkey='%s' sslrootcert='%s'",
strings.Replace(DbCfg.User, `'`, `\'`, -1),
strings.Replace(DbCfg.Pwd, `'`, `\'`, -1),
strings.Replace(host, `'`, `\'`, -1),
strings.Replace(port, `'`, `\'`, -1),
strings.Replace(DbCfg.Name, `'`, `\'`, -1),
strings.Replace(DbCfg.SslMode, `'`, `\'`, -1),
strings.Replace(DbCfg.ClientCertPath, `'`, `\'`, -1),
strings.Replace(DbCfg.ClientKeyPath, `'`, `\'`, -1),
strings.Replace(DbCfg.CaCertPath, `'`, `\'`, -1),
)
case migrator.SQLITE:
if !filepath.IsAbs(DbCfg.Path) {
DbCfg.Path = filepath.Join(setting.DataPath, DbCfg.Path)
if ss.dbCfg.Pwd == "" {
ss.dbCfg.Pwd = "''"
}
os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm)
cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc"
if ss.dbCfg.User == "" {
ss.dbCfg.User = "''"
}
cnnstr = fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s sslcert=%s sslkey=%s sslrootcert=%s", ss.dbCfg.User, ss.dbCfg.Pwd, host, port, ss.dbCfg.Name, ss.dbCfg.SslMode, ss.dbCfg.ClientCertPath, ss.dbCfg.ClientKeyPath, ss.dbCfg.CaCertPath)
case migrator.SQLITE:
// special case for tests
if !filepath.IsAbs(ss.dbCfg.Path) {
ss.dbCfg.Path = filepath.Join(setting.DataPath, ss.dbCfg.Path)
}
os.MkdirAll(path.Dir(ss.dbCfg.Path), os.ModePerm)
cnnstr = "file:" + ss.dbCfg.Path + "?cache=shared&mode=rwc"
default:
return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type)
return "", fmt.Errorf("Unknown database type: %s", ss.dbCfg.Type)
}

sqlog.Info("Initializing DB", "dbtype", DbCfg.Type)
engine, err := xorm.NewEngine(DbCfg.Type, cnnstr)
return cnnstr, nil
}

func (ss *SqlStore) getEngine() (*xorm.Engine, error) {
connectionString, err := ss.buildConnectionString()

if err != nil {
return nil, err
}

engine.SetMaxOpenConns(DbCfg.MaxOpenConn)
engine.SetMaxIdleConns(DbCfg.MaxIdleConn)
engine.SetConnMaxLifetime(time.Second * time.Duration(DbCfg.ConnMaxLifetime))
debugSql := setting.Raw.Section("database").Key("log_queries").MustBool(false)
sqlog.Info("Connecting to DB", "dbtype", ss.dbCfg.Type)
engine, err := xorm.NewEngine(ss.dbCfg.Type, connectionString)
if err != nil {
return nil, err
}

engine.SetMaxOpenConns(ss.dbCfg.MaxOpenConn)
engine.SetMaxIdleConns(ss.dbCfg.MaxIdleConn)
engine.SetConnMaxLifetime(time.Second * time.Duration(ss.dbCfg.ConnMaxLifetime))

// configure sql logging
debugSql := ss.Cfg.Raw.Section("database").Key("log_queries").MustBool(false)
if !debugSql {
engine.SetLogger(&xorm.DiscardLogger{})
} else {
|
||||
return engine, nil
|
||||
}
|
||||
|
||||
func LoadConfig() {
|
||||
sec := setting.Raw.Section("database")
|
||||
func (ss *SqlStore) readConfig() {
|
||||
sec := ss.Cfg.Raw.Section("database")
|
||||
|
||||
cfgURL := sec.Key("url").String()
|
||||
if len(cfgURL) != 0 {
|
||||
dbURL, _ := url.Parse(cfgURL)
|
||||
DbCfg.Type = dbURL.Scheme
|
||||
DbCfg.Host = dbURL.Host
|
||||
ss.dbCfg.Type = dbURL.Scheme
|
||||
ss.dbCfg.Host = dbURL.Host
|
||||
|
||||
pathSplit := strings.Split(dbURL.Path, "/")
|
||||
if len(pathSplit) > 1 {
|
||||
DbCfg.Name = pathSplit[1]
|
||||
ss.dbCfg.Name = pathSplit[1]
|
||||
}
|
||||
|
||||
userInfo := dbURL.User
|
||||
if userInfo != nil {
|
||||
DbCfg.User = userInfo.Username()
|
||||
DbCfg.Pwd, _ = userInfo.Password()
|
||||
ss.dbCfg.User = userInfo.Username()
|
||||
ss.dbCfg.Pwd, _ = userInfo.Password()
|
||||
}
|
||||
} else {
|
||||
DbCfg.Type = sec.Key("type").String()
|
||||
DbCfg.Host = sec.Key("host").String()
|
||||
DbCfg.Name = sec.Key("name").String()
|
||||
DbCfg.User = sec.Key("user").String()
|
||||
if len(DbCfg.Pwd) == 0 {
|
||||
DbCfg.Pwd = sec.Key("password").String()
|
||||
}
|
||||
}
|
||||
DbCfg.MaxOpenConn = sec.Key("max_open_conn").MustInt(0)
|
||||
DbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(0)
|
||||
DbCfg.ConnMaxLifetime = sec.Key("conn_max_lifetime").MustInt(14400)
|
||||
|
||||
if DbCfg.Type == "sqlite3" {
|
||||
UseSQLite3 = true
|
||||
// only allow one connection as sqlite3 has multi threading issues that cause table locks
|
||||
// DbCfg.MaxIdleConn = 1
|
||||
// DbCfg.MaxOpenConn = 1
|
||||
}
|
||||
DbCfg.SslMode = sec.Key("ssl_mode").String()
|
||||
DbCfg.CaCertPath = sec.Key("ca_cert_path").String()
|
||||
DbCfg.ClientKeyPath = sec.Key("client_key_path").String()
|
||||
DbCfg.ClientCertPath = sec.Key("client_cert_path").String()
|
||||
DbCfg.ServerCertName = sec.Key("server_cert_name").String()
|
||||
DbCfg.Path = sec.Key("path").MustString("data/grafana.db")
|
||||
ss.dbCfg.Type = sec.Key("type").String()
|
||||
ss.dbCfg.Host = sec.Key("host").String()
|
||||
ss.dbCfg.Name = sec.Key("name").String()
|
||||
ss.dbCfg.User = sec.Key("user").String()
|
||||
ss.dbCfg.ConnectionString = sec.Key("connection_string").String()
|
||||
ss.dbCfg.Pwd = sec.Key("password").String()
|
||||
}
|
||||
|
||||
func InitTestDB(t *testing.T) *xorm.Engine {
|
||||
selectedDb := migrator.SQLITE
|
||||
// selectedDb := migrator.MYSQL
|
||||
// selectedDb := migrator.POSTGRES
|
||||
ss.dbCfg.MaxOpenConn = sec.Key("max_open_conn").MustInt(0)
|
||||
ss.dbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(2)
|
||||
ss.dbCfg.ConnMaxLifetime = sec.Key("conn_max_lifetime").MustInt(14400)
|
||||
|
||||
var x *xorm.Engine
|
||||
var err error
|
||||
ss.dbCfg.SslMode = sec.Key("ssl_mode").String()
|
||||
ss.dbCfg.CaCertPath = sec.Key("ca_cert_path").String()
|
||||
ss.dbCfg.ClientKeyPath = sec.Key("client_key_path").String()
|
||||
ss.dbCfg.ClientCertPath = sec.Key("client_cert_path").String()
|
||||
ss.dbCfg.ServerCertName = sec.Key("server_cert_name").String()
|
||||
ss.dbCfg.Path = sec.Key("path").MustString("data/grafana.db")
|
||||
}
|
||||
|
||||
func InitTestDB(t *testing.T) *SqlStore {
|
||||
sqlstore := &SqlStore{}
|
||||
sqlstore.skipEnsureAdmin = true
|
||||
|
||||
dbType := migrator.SQLITE
|
||||
|
||||
// environment variable present for test db?
|
||||
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
|
||||
selectedDb = db
|
||||
dbType = db
|
||||
}
|
||||
|
||||
switch strings.ToLower(selectedDb) {
|
||||
case migrator.MYSQL:
|
||||
x, err = xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr)
|
||||
case migrator.POSTGRES:
|
||||
x, err = xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, sqlutil.TestDB_Postgres.ConnStr)
|
||||
// set test db config
|
||||
sqlstore.Cfg = setting.NewCfg()
|
||||
sec, _ := sqlstore.Cfg.Raw.NewSection("database")
|
||||
sec.NewKey("type", dbType)
|
||||
|
||||
switch dbType {
|
||||
case "mysql":
|
||||
sec.NewKey("connection_string", sqlutil.TestDB_Mysql.ConnStr)
|
||||
case "postgres":
|
||||
sec.NewKey("connection_string", sqlutil.TestDB_Postgres.ConnStr)
|
||||
default:
|
||||
x, err = xorm.NewEngine(sqlutil.TestDB_Sqlite3.DriverName, sqlutil.TestDB_Sqlite3.ConnStr)
|
||||
sec.NewKey("connection_string", sqlutil.TestDB_Sqlite3.ConnStr)
|
||||
}
|
||||
|
||||
x.DatabaseTZ = time.UTC
|
||||
x.TZLocation = time.UTC
|
||||
|
||||
// need to get engine to clean db before we init
|
||||
engine, err := xorm.NewEngine(dbType, sec.Key("connection_string").String())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init test database: %v", err)
|
||||
}
|
||||
|
||||
dialect = migrator.NewDialect(x)
|
||||
|
||||
err = dialect.CleanDB()
|
||||
if err != nil {
|
||||
dialect = migrator.NewDialect(engine)
|
||||
if err := dialect.CleanDB(); err != nil {
|
||||
t.Fatalf("Failed to clean test db %v", err)
|
||||
}
|
||||
|
||||
if err := SetEngine(x); err != nil {
|
||||
t.Fatal(err)
|
||||
if err := sqlstore.Init(); err != nil {
|
||||
t.Fatalf("Failed to init test database: %v", err)
|
||||
}
|
||||
|
||||
// x.ShowSQL()
|
||||
//// sqlstore.engine.DatabaseTZ = time.UTC
|
||||
//// sqlstore.engine.TZLocation = time.UTC
|
||||
|
||||
return x
|
||||
return sqlstore
|
||||
}
|
||||
|
||||
func IsTestDbMySql() bool {
|
||||
@@ -289,3 +298,15 @@ func IsTestDbPostgres() bool {

return false
}

type DatabaseConfig struct {
Type, Host, Name, User, Pwd, Path, SslMode string
CaCertPath string
ClientKeyPath string
ClientCertPath string
ServerCertName string
ConnectionString string
MaxOpenConn int
MaxIdleConn int
ConnMaxLifetime int
}
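As a usage note: the reworked InitTestDB above now returns a *SqlStore rather than a bare xorm engine. A minimal sketch of a test built on it, modeled on the updated snapshot test earlier in this diff; the test name and query are hypothetical:

```go
package sqlstore

import "testing"

// TestMyQuery is a hypothetical example; InitTestDB and the unexported
// engine field come from the code above.
func TestMyQuery(t *testing.T) {
	// InitTestDB cleans the test database and runs all migrations,
	// skipping admin-user creation via skipEnsureAdmin.
	ss := InitTestDB(t)

	// The raw xorm engine is still reachable for fixture setup,
	// as createTestSnapshot does above.
	if _, err := ss.engine.Exec("SELECT 1"); err != nil {
		t.Fatal(err)
	}
}
```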
@@ -495,7 +495,9 @@ func validateStaticRootPath() error {
}

func NewCfg() *Cfg {
return &Cfg{}
return &Cfg{
Raw: ini.Empty(),
}
}

func (cfg *Cfg) Load(args *CommandLineArgs) error {
@@ -989,17 +989,17 @@ kbn.getUnitFormats = function() {
{
text: 'velocity',
submenu: [
{ text: 'm/s', value: 'velocityms' },
{ text: 'km/h', value: 'velocitykmh' },
{ text: 'mph', value: 'velocitymph' },
{ text: 'metres/second (m/s)', value: 'velocityms' },
{ text: 'kilometers/hour (km/h)', value: 'velocitykmh' },
{ text: 'miles/hour (mph)', value: 'velocitymph' },
{ text: 'knot (kn)', value: 'velocityknot' },
],
},
{
text: 'volume',
submenu: [
{ text: 'millilitre', value: 'mlitre' },
{ text: 'litre', value: 'litre' },
{ text: 'millilitre (mL)', value: 'mlitre' },
{ text: 'litre (L)', value: 'litre' },
{ text: 'cubic metre', value: 'm3' },
{ text: 'Normal cubic metre', value: 'Nm3' },
{ text: 'cubic decimetre', value: 'dm3' },
@@ -312,7 +312,7 @@ class MetricsPanelCtrl extends PanelCtrl {

getAdditionalMenuItems() {
const items = [];
if (this.datasource.supportsExplore) {
if (this.datasource && this.datasource.supportsExplore) {
items.push({
text: 'Explore',
click: 'ctrl.explore();',
public/app/features/panel/specs/metrics_panel_ctrl.jest.ts (new file, 65 lines)
@@ -0,0 +1,65 @@
jest.mock('app/core/core', () => ({}));

import { MetricsPanelCtrl } from '../metrics_panel_ctrl';
import q from 'q';
import { PanelModel } from 'app/features/dashboard/panel_model';

describe('MetricsPanelCtrl', () => {
let ctrl;

beforeEach(() => {
ctrl = setupController();
});

describe('when getting additional menu items', () => {
let additionalItems;

describe('and has no datasource set', () => {
beforeEach(() => {
additionalItems = ctrl.getAdditionalMenuItems();
});

it('should not return any items', () => {
expect(additionalItems.length).toBe(0);
});
});

describe('and has datasource set that supports explore', () => {
beforeEach(() => {
ctrl.datasource = { supportsExplore: true };
additionalItems = ctrl.getAdditionalMenuItems();
});

it('should return one item', () => {
expect(additionalItems.length).toBe(1);
});
});
});
});

function setupController() {
const injectorStub = {
get: type => {
switch (type) {
case '$q': {
return q;
}
default: {
return jest.fn();
}
}
},
};

const scope = {
panel: { events: [] },
appEvent: jest.fn(),
onAppEvent: jest.fn(),
$on: jest.fn(),
colors: [],
};

MetricsPanelCtrl.prototype.panel = new PanelModel({ type: 'test' });

return new MetricsPanelCtrl(scope, injectorStub);
}
@@ -11,23 +11,14 @@ export default class ResponseParser {
return [];
}

var influxdb11format = query.toLowerCase().indexOf('show tag values') >= 0;

var res = {};
_.each(influxResults.series, serie => {
_.each(serie.values, value => {
if (_.isArray(value)) {
// In general, there are 2 possible shapes for the returned value.
// The first one is a two-element array,
// where the first element is somewhat a metadata value:
// the tag name for SHOW TAG VALUES queries,
// the time field for SELECT queries, etc.
// The second shape is an one-element array,
// that is containing an immediate value.
// For example, SHOW FIELD KEYS queries return such shape.
// Note, pre-0.11 versions return
// the second shape for SHOW TAG VALUES queries
// (while the newer versions—first).
if (value[1] !== undefined) {
addUnique(res, value[1]);
if (influxdb11format) {
addUnique(res, value[1] || value[0]);
} else {
addUnique(res, value[0]);
}
@@ -38,7 +29,7 @@ export default class ResponseParser {
});

return _.map(res, value => {
return { text: value.toString() };
return { text: value };
});
}
}
@@ -85,32 +85,6 @@ describe('influxdb response parser', () => {
});
});

describe('SELECT response', () => {
var query = 'SELECT "usage_iowait" FROM "cpu" LIMIT 10';
var response = {
results: [
{
series: [
{
name: 'cpu',
columns: ['time', 'usage_iowait'],
values: [[1488465190006040638, 0.0], [1488465190006040638, 15.0], [1488465190006040638, 20.2]],
},
],
},
],
};

var result = parser.parse(query, response);

it('should return second column', () => {
expect(_.size(result)).toBe(3);
expect(result[0].text).toBe('0');
expect(result[1].text).toBe('15');
expect(result[2].text).toBe('20.2');
});
});

describe('SHOW FIELD response', () => {
var query = 'SHOW FIELD KEYS FROM "cpu"';
describe('response from 0.10.0', () => {
@@ -27,6 +27,7 @@ export class PrometheusDatasource {
withCredentials: any;
metricsNameCache: any;
interval: string;
queryTimeout: string;
httpMethod: string;
resultTransformer: ResultTransformer;

@@ -42,6 +43,7 @@ export class PrometheusDatasource {
this.basicAuth = instanceSettings.basicAuth;
this.withCredentials = instanceSettings.withCredentials;
this.interval = instanceSettings.jsonData.timeInterval || '15s';
this.queryTimeout = instanceSettings.jsonData.queryTimeout;
this.httpMethod = instanceSettings.jsonData.httpMethod || 'GET';
this.resultTransformer = new ResultTransformer(templateSrv);
}
@@ -107,10 +109,18 @@ export class PrometheusDatasource {
return this.templateSrv.variableExists(target.expr);
}

clampRange(start, end, step) {
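// Aligns the query window to a multiple of the step: the end is rounded
// up to the next step boundary and the start is pulled back so the range
// stays a whole number of steps. For example (matching the spec below),
// start=63, end=183, step=60 becomes start=120, end=240.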
const clampedEnd = Math.ceil(end / step) * step;
const clampedRange = Math.floor((end - start) / step) * step;
return {
end: clampedEnd,
start: clampedEnd - clampedRange,
};
}

query(options) {
var start = this.getPrometheusTime(options.range.from, false);
var end = this.getPrometheusTime(options.range.to, true);
var range = Math.ceil(end - start);

var queries = [];
var activeTargets = [];
@@ -123,7 +133,7 @@ export class PrometheusDatasource {
}

activeTargets.push(target);
queries.push(this.createQuery(target, options, range));
queries.push(this.createQuery(target, options, start, end));
}

// No valid targets, return the empty result to save a round trip.
@@ -133,7 +143,7 @@ export class PrometheusDatasource {

var allQueryPromise = _.map(queries, query => {
if (!query.instant) {
return this.performTimeSeriesQuery(query, start, end);
return this.performTimeSeriesQuery(query, query.start, query.end);
} else {
return this.performInstantQuery(query, end);
}
@@ -147,7 +157,8 @@ export class PrometheusDatasource {
throw response.error;
}

let transformerOptions = {
// Keeping original start/end for transformers
const transformerOptions = {
format: activeTargets[index].format,
step: queries[index].step,
legendFormat: activeTargets[index].legendFormat,
@@ -165,9 +176,10 @@ export class PrometheusDatasource {
});
}

createQuery(target, options, range) {
createQuery(target, options, start, end) {
var query: any = {};
query.instant = target.instant;
var range = Math.ceil(end - start);

var interval = kbn.interval_to_seconds(options.interval);
// Minimum interval ("Min step"), if specified for the query, or same as interval otherwise
@@ -191,6 +203,12 @@ export class PrometheusDatasource {
// Only replace vars in expression after having (possibly) updated interval vars
query.expr = this.templateSrv.replace(target.expr, scopedVars, this.interpolateQueryExpr);
query.requestId = options.panelId + target.refId;

// Align query interval with step
const adjusted = this.clampRange(start, end, query.step);
query.start = adjusted.start;
query.end = adjusted.end;

return query;
}
@@ -215,6 +233,9 @@ export class PrometheusDatasource {
end: end,
step: query.step,
};
if (this.queryTimeout) {
data['timeout'] = this.queryTimeout;
}
return this._request(url, data, { requestId: query.requestId });
}
@@ -224,6 +245,9 @@ export class PrometheusDatasource {
query: query.expr,
time: time,
};
if (this.queryTimeout) {
data['timeout'] = this.queryTimeout;
}
return this._request(url, data, { requestId: query.requestId });
}
@@ -270,22 +294,18 @@ export class PrometheusDatasource {
return this.$q.when([]);
}

var interpolated = this.templateSrv.replace(expr, {}, this.interpolateQueryExpr);

var step = '60s';
if (annotation.step) {
step = this.templateSrv.replace(annotation.step);
}

var step = annotation.step || '60s';
var start = this.getPrometheusTime(options.range.from, false);
var end = this.getPrometheusTime(options.range.to, true);
var query = {
expr: interpolated,
step: this.adjustInterval(kbn.interval_to_seconds(step), 0, Math.ceil(end - start), 1) + 's',
// Unsetting min interval
const queryOptions = {
...options,
interval: '0s',
};
const query = this.createQuery({ expr, interval: step }, queryOptions, start, end);

var self = this;
return this.performTimeSeriesQuery(query, start, end).then(function(results) {
return this.performTimeSeriesQuery(query, query.start, query.end).then(function(results) {
var eventList = [];
tagKeys = tagKeys.split(',');
@@ -7,8 +7,18 @@
<span class="gf-form-label width-8">Scrape interval</span>
<input type="text" class="gf-form-input width-8" ng-model="ctrl.current.jsonData.timeInterval" spellcheck='false' placeholder="15s"></input>
<info-popover mode="right-absolute">
Set this to your global scrape interval defined in your Prometheus config file. This will be used as a lower limit for
the Prometheus step query parameter.
Set this to your global scrape interval defined in your Prometheus config file. This will be used as a lower limit for the
Prometheus step query parameter.
</info-popover>
</div>
</div>

<div class="gf-form-inline">
<div class="gf-form">
<span class="gf-form-label width-8">Query timeout</span>
<input type="text" class="gf-form-input width-8" ng-model="ctrl.current.jsonData.queryTimeout" spellcheck='false' placeholder="60s"></input>
<info-popover mode="right-absolute">
Set the Prometheus query timeout.
</info-popover>
</div>
</div>
@@ -14,8 +14,8 @@
data-min-length=0 data-items=1000 ng-model-onblur ng-change="ctrl.refreshMetricData()">
</input>
<info-popover mode="right-absolute">
Controls the name of the time series, using name or pattern. For example <span ng-non-bindable>{{hostname}}</span> will be replaced with label value for
the label hostname.
Controls the name of the time series, using name or pattern. For example
<span ng-non-bindable>{{hostname}}</span> will be replaced with label value for the label hostname.
</info-popover>
</div>
@@ -25,7 +25,8 @@
placeholder="{{ctrl.panelCtrl.interval}}" data-min-length=0 data-items=100 ng-model-onblur ng-change="ctrl.refreshMetricData()"
/>
<info-popover mode="right-absolute">
Leave blank for auto handling based on time range and panel width
Leave blank for auto handling based on time range and panel width. Note that the actual dates used in the query will be adjusted
to a multiple of the interval step.
</info-popover>
</div>
@@ -4,6 +4,12 @@ import $ from 'jquery';
import helpers from 'test/specs/helpers';
import { PrometheusDatasource } from '../datasource';

const SECOND = 1000;
const MINUTE = 60 * SECOND;
const HOUR = 60 * MINUTE;

const time = ({ hours = 0, seconds = 0, minutes = 0 }) => moment(hours * HOUR + minutes * MINUTE + seconds * SECOND);

describe('PrometheusDatasource', function() {
var ctx = new helpers.ServiceTestContext();
var instanceSettings = {
@@ -29,18 +35,16 @@ describe('PrometheusDatasource', function() {
$httpBackend.when('GET', /\.html$/).respond('');
})
);

describe('When querying prometheus with one target using query editor target spec', function() {
var results;
var urlExpected =
'proxied/api/v1/query_range?query=' +
encodeURIComponent('test{job="testjob"}') +
'&start=1443438675&end=1443460275&step=60';
var query = {
range: { from: moment(1443438674760), to: moment(1443460274760) },
range: { from: time({ seconds: 63 }), to: time({ seconds: 183 }) },
targets: [{ expr: 'test{job="testjob"}', format: 'time_series' }],
interval: '60s',
};
// Interval alignment with step
var urlExpected =
'proxied/api/v1/query_range?query=' + encodeURIComponent('test{job="testjob"}') + '&start=120&end=240&step=60';
var response = {
status: 'success',
data: {
@@ -48,7 +52,7 @@ describe('PrometheusDatasource', function() {
result: [
{
metric: { __name__: 'test', job: 'testjob' },
values: [[1443454528, '3846']],
values: [[60, '3846']],
},
],
},
@@ -70,8 +74,8 @@ describe('PrometheusDatasource', function() {
});
describe('When querying prometheus with one target which return multiple series', function() {
var results;
var start = 1443438675;
var end = 1443460275;
var start = 60;
var end = 360;
var step = 60;
var urlExpected =
'proxied/api/v1/query_range?query=' +
@@ -83,7 +87,7 @@ describe('PrometheusDatasource', function() {
'&step=' +
step;
var query = {
range: { from: moment(1443438674760), to: moment(1443460274760) },
range: { from: time({ seconds: start }), to: time({ seconds: end }) },
targets: [{ expr: 'test{job="testjob"}', format: 'time_series' }],
interval: '60s',
};
@@ -139,9 +143,9 @@ describe('PrometheusDatasource', function() {
});
describe('When querying prometheus with one target and instant = true', function() {
var results;
var urlExpected = 'proxied/api/v1/query?query=' + encodeURIComponent('test{job="testjob"}') + '&time=1443460275';
var urlExpected = 'proxied/api/v1/query?query=' + encodeURIComponent('test{job="testjob"}') + '&time=123';
var query = {
range: { from: moment(1443438674760), to: moment(1443460274760) },
range: { from: time({ seconds: 63 }), to: time({ seconds: 123 }) },
targets: [{ expr: 'test{job="testjob"}', format: 'time_series', instant: true }],
interval: '60s',
};
@@ -152,7 +156,7 @@ describe('PrometheusDatasource', function() {
result: [
{
metric: { __name__: 'test', job: 'testjob' },
value: [1443454528, '3846'],
value: [123, '3846'],
},
],
},
@@ -177,7 +181,7 @@ describe('PrometheusDatasource', function() {
var urlExpected =
'proxied/api/v1/query_range?query=' +
encodeURIComponent('ALERTS{alertstate="firing"}') +
'&start=1443438675&end=1443460275&step=60s';
'&start=120&end=180&step=60';
var options = {
annotation: {
expr: 'ALERTS{alertstate="firing"}',
@@ -186,8 +190,8 @@ describe('PrometheusDatasource', function() {
textFormat: '{{instance}}',
},
range: {
from: moment(1443438674760),
to: moment(1443460274760),
from: time({ seconds: 63 }),
to: time({ seconds: 123 }),
},
};
var response = {
var response = {
|
||||
@ -203,7 +207,7 @@ describe('PrometheusDatasource', function() {
|
||||
instance: 'testinstance',
|
||||
job: 'testjob',
|
||||
},
|
||||
values: [[1443454528, '1']],
|
||||
values: [[123, '1']],
|
||||
},
|
||||
],
|
||||
},
|
||||
@ -221,15 +225,15 @@ describe('PrometheusDatasource', function() {
|
||||
expect(results[0].tags).to.contain('testjob');
|
||||
expect(results[0].title).to.be('InstanceDown');
|
||||
expect(results[0].text).to.be('testinstance');
|
||||
expect(results[0].time).to.be(1443454528 * 1000);
|
||||
expect(results[0].time).to.be(123 * 1000);
|
||||
});
|
||||
});

describe('When resultFormat is table and instant = true', function() {
  var results;
  var urlExpected = 'proxied/api/v1/query?query=' + encodeURIComponent('test{job="testjob"}') + '&time=1443460275';
  var urlExpected = 'proxied/api/v1/query?query=' + encodeURIComponent('test{job="testjob"}') + '&time=123';
  var query = {
    range: { from: moment(1443438674760), to: moment(1443460274760) },
    range: { from: time({ seconds: 63 }), to: time({ seconds: 123 }) },
    targets: [{ expr: 'test{job="testjob"}', format: 'time_series', instant: true }],
    interval: '60s',
  };
@ -240,7 +244,7 @@ describe('PrometheusDatasource', function() {
    result: [
      {
        metric: { __name__: 'test', job: 'testjob' },
        value: [1443454528, '3846'],
        value: [123, '3846'],
      },
    ],
  },
@ -270,8 +274,8 @@ describe('PrometheusDatasource', function() {

  it('should be min interval when greater than auto interval', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'test',
@ -280,7 +284,7 @@ describe('PrometheusDatasource', function() {
      ],
      interval: '5s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1443460275&step=10';
    var urlExpected = 'proxied/api/v1/query_range?query=test&start=60&end=420&step=10';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -288,12 +292,12 @@ describe('PrometheusDatasource', function() {

  it('step should never go below 1', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1508318768202), to: moment(1508318770118) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [{ expr: 'test' }],
      interval: '100ms',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test&start=1508318769&end=1508318771&step=1';
    var urlExpected = 'proxied/api/v1/query_range?query=test&start=60&end=420&step=1';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -301,8 +305,8 @@ describe('PrometheusDatasource', function() {

  it('should be auto interval when greater than min interval', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'test',
@ -311,7 +315,7 @@ describe('PrometheusDatasource', function() {
      ],
      interval: '10s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1443460275&step=10';
    var urlExpected = 'proxied/api/v1/query_range?query=test&start=60&end=420&step=10';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -319,19 +323,21 @@ describe('PrometheusDatasource', function() {
  it('should result in querying fewer than 11000 data points', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      range: { from: time({ hours: 1 }), to: time({ hours: 7 }) },
      targets: [{ expr: 'test' }],
      interval: '1s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1443460275&step=2';
    var end = 7 * 60 * 60;
    var start = 60 * 60;
    var urlExpected = 'proxied/api/v1/query_range?query=test&start=' + start + '&end=' + end + '&step=2';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
  });
  it('should not apply min interval when interval * intervalFactor greater', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'test',
@ -341,15 +347,16 @@ describe('PrometheusDatasource', function() {
      ],
      interval: '5s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1443460275&step=50';
    // times get rounded up to interval
    var urlExpected = 'proxied/api/v1/query_range?query=test&start=100&end=450&step=50';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
  });
  it('should apply min interval when interval * intervalFactor smaller', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'test',
@ -359,15 +366,15 @@ describe('PrometheusDatasource', function() {
      ],
      interval: '5s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1443460275&step=15';
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=60&end=420&step=15';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
  });
  it('should apply intervalFactor to auto interval when greater', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'test',
@ -377,7 +384,8 @@ describe('PrometheusDatasource', function() {
      ],
      interval: '10s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1443460275&step=100';
    // times get rounded up to interval
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=200&end=500&step=100';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -385,7 +393,7 @@ describe('PrometheusDatasource', function() {
  it('should not be affected by the 11000 data points limit when large enough', function() {
    var query = {
      // 1 week range
      range: { from: moment(1443438674760), to: moment(1444043474760) },
      range: { from: time({}), to: time({ hours: 7 * 24 }) },
      targets: [
        {
          expr: 'test',
@ -394,7 +402,9 @@ describe('PrometheusDatasource', function() {
      ],
      interval: '10s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1444043475&step=100';
    var end = 7 * 24 * 60 * 60;
    var start = 0;
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=' + start + '&end=' + end + '&step=100';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -402,7 +412,7 @@ describe('PrometheusDatasource', function() {
  it('should be determined by the 11000 data points limit when too small', function() {
    var query = {
      // 1 week range
      range: { from: moment(1443438674760), to: moment(1444043474760) },
      range: { from: time({}), to: time({ hours: 7 * 24 }) },
      targets: [
        {
          expr: 'test',
@ -411,12 +421,15 @@ describe('PrometheusDatasource', function() {
      ],
      interval: '5s',
    };
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=1443438675&end=1444043475&step=60';
    var end = 7 * 24 * 60 * 60;
    var start = 0;
    var urlExpected = 'proxied/api/v1/query_range?query=test' + '&start=' + start + '&end=' + end + '&step=60';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
  });
});

describe('The __interval and __interval_ms template variables', function() {
  var response = {
    status: 'success',
@ -428,8 +441,8 @@ describe('PrometheusDatasource', function() {

  it('should be unchanged when auto interval is greater than min interval', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'rate(test[$__interval])',
@ -443,9 +456,7 @@ describe('PrometheusDatasource', function() {
      },
    };
    var urlExpected =
      'proxied/api/v1/query_range?query=' +
      encodeURIComponent('rate(test[10s])') +
      '&start=1443438675&end=1443460275&step=10';
      'proxied/api/v1/query_range?query=' + encodeURIComponent('rate(test[10s])') + '&start=60&end=420&step=10';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -457,8 +468,8 @@ describe('PrometheusDatasource', function() {
  });
  it('should be min interval when it is greater than auto interval', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'rate(test[$__interval])',
@ -472,9 +483,7 @@ describe('PrometheusDatasource', function() {
      },
    };
    var urlExpected =
      'proxied/api/v1/query_range?query=' +
      encodeURIComponent('rate(test[10s])') +
      '&start=1443438675&end=1443460275&step=10';
      'proxied/api/v1/query_range?query=' + encodeURIComponent('rate(test[10s])') + '&start=60&end=420&step=10';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -486,8 +495,8 @@ describe('PrometheusDatasource', function() {
  });
  it('should account for intervalFactor', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'rate(test[$__interval])',
@ -502,9 +511,7 @@ describe('PrometheusDatasource', function() {
      },
    };
    var urlExpected =
      'proxied/api/v1/query_range?query=' +
      encodeURIComponent('rate(test[100s])') +
      '&start=1443438675&end=1443460275&step=100';
      'proxied/api/v1/query_range?query=' + encodeURIComponent('rate(test[100s])') + '&start=200&end=500&step=100';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -516,8 +523,8 @@ describe('PrometheusDatasource', function() {
  });
  it('should be interval * intervalFactor when greater than min interval', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'rate(test[$__interval])',
@ -532,9 +539,7 @@ describe('PrometheusDatasource', function() {
      },
    };
    var urlExpected =
      'proxied/api/v1/query_range?query=' +
      encodeURIComponent('rate(test[50s])') +
      '&start=1443438675&end=1443460275&step=50';
      'proxied/api/v1/query_range?query=' + encodeURIComponent('rate(test[50s])') + '&start=100&end=450&step=50';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -546,8 +551,8 @@ describe('PrometheusDatasource', function() {
  });
  it('should be min interval when greater than interval * intervalFactor', function() {
    var query = {
      // 6 hour range
      range: { from: moment(1443438674760), to: moment(1443460274760) },
      // 6 minute range
      range: { from: time({ minutes: 1 }), to: time({ minutes: 7 }) },
      targets: [
        {
          expr: 'rate(test[$__interval])',
@ -562,9 +567,7 @@ describe('PrometheusDatasource', function() {
      },
    };
    var urlExpected =
      'proxied/api/v1/query_range?query=' +
      encodeURIComponent('rate(test[15s])') +
      '&start=1443438675&end=1443460275&step=15';
      'proxied/api/v1/query_range?query=' + encodeURIComponent('rate(test[15s])') + '&start=60&end=420&step=15';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -577,7 +580,7 @@ describe('PrometheusDatasource', function() {
  it('should be determined by the 11000 data points limit, accounting for intervalFactor', function() {
    var query = {
      // 1 week range
      range: { from: moment(1443438674760), to: moment(1444043474760) },
      range: { from: time({}), to: time({ hours: 7 * 24 }) },
      targets: [
        {
          expr: 'rate(test[$__interval])',
@ -590,10 +593,16 @@ describe('PrometheusDatasource', function() {
        __interval_ms: { text: 5 * 1000, value: 5 * 1000 },
      },
    };
    var end = 7 * 24 * 60 * 60;
    var start = 0;
    var urlExpected =
      'proxied/api/v1/query_range?query=' +
      encodeURIComponent('rate(test[60s])') +
      '&start=1443438675&end=1444043475&step=60';
      '&start=' +
      start +
      '&end=' +
      end +
      '&step=60';
    ctx.$httpBackend.expect('GET', urlExpected).respond(response);
    ctx.ds.query(query);
    ctx.$httpBackend.verifyNoOutstandingExpectation();
@ -604,6 +613,29 @@ describe('PrometheusDatasource', function() {
    expect(query.scopedVars.__interval_ms.value).to.be(5 * 1000);
  });
});
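These specs drive $__interval and $__interval_ms through query.scopedVars and assert both the expanded PromQL and the step parameter. As a rough sketch of the substitution step (an assumption about the surrounding datasource code, using the templateSrv.replace(expr, scopedVars) call Grafana datasources of this era rely on; not part of this diff):

// Sketch: the datasource publishes the computed interval via scopedVars,
// and the template service expands $__interval inside the expression.
const scopedVars = {
  __interval: { text: '10s', value: '10s' },
  __interval_ms: { text: 10 * 1000, value: 10 * 1000 },
};
const expr = templateSrv.replace('rate(test[$__interval])', scopedVars);
// expr === 'rate(test[10s])', which is what the expected URLs above encode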

describe('Step alignment of intervals', function() {
  it('does not modify already aligned intervals with perfect step', function() {
    const range = ctx.ds.clampRange(0, 3, 3);
    expect(range.start).to.be(0);
    expect(range.end).to.be(3);
  });
  it('does modify end-aligned intervals to reflect number of steps possible', function() {
    const range = ctx.ds.clampRange(1, 6, 3);
    expect(range.start).to.be(3);
    expect(range.end).to.be(6);
  });
  it('does align intervals that are a multiple of steps', function() {
    const range = ctx.ds.clampRange(1, 4, 3);
    expect(range.start).to.be(3);
    expect(range.end).to.be(6);
  });
  it('does align intervals that are not a multiple of steps', function() {
    const range = ctx.ds.clampRange(1, 5, 3);
    expect(range.start).to.be(3);
    expect(range.end).to.be(6);
  });
});
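The four expectations above fully pin down the alignment rule: the end of the range is rounded up to the next step boundary, and the start is pulled back so the window covers the whole number of steps that fit in the original range. A clampRange implementation consistent with every expected URL in this file (a sketch, not necessarily the exact production code):

// Sketch satisfying the expectations above: align end up to the step grid,
// then keep floor((end - start) / step) whole steps before it.
clampRange(start: number, end: number, step: number) {
  const clampedEnd = Math.ceil(end / step) * step;
  const clampedRange = Math.floor((end - start) / step) * step;
  return { start: clampedEnd - clampedRange, end: clampedEnd };
}

For example, clampRange(1, 5, 3) aligns the end to 6, keeps floor(4 / 3) = 1 step, and returns { start: 3, end: 6 }, exactly as the last test expects.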
});

describe('PrometheusDatasource for POST', function() {
@ -635,12 +667,12 @@ describe('PrometheusDatasource for POST', function() {
  var urlExpected = 'proxied/api/v1/query_range';
  var dataExpected = $.param({
    query: 'test{job="testjob"}',
    start: 1443438675,
    end: 1443460275,
    start: 2 * 60,
    end: 3 * 60,
    step: 60,
  });
  var query = {
    range: { from: moment(1443438674760), to: moment(1443460274760) },
    range: { from: time({ minutes: 1, seconds: 3 }), to: time({ minutes: 2, seconds: 3 }) },
    targets: [{ expr: 'test{job="testjob"}', format: 'time_series' }],
    interval: '60s',
  };
@ -651,7 +683,7 @@ describe('PrometheusDatasource for POST', function() {
  result: [
    {
      metric: { __name__: 'test', job: 'testjob' },
      values: [[1443454528, '3846']],
      values: [[2 * 60, '3846']],
    },
  ],
},
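The expected POST body above follows the same alignment rule as the GET URLs: for the 63 s to 123 s range with a 60 s step, the end rounds up to 180 (3 * 60) and the start is pulled back by one whole step to 120 (2 * 60), which is exactly what dataExpected encodes.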

@ -674,7 +674,7 @@ function graphDirective(timeSrv, popoverSrv, contextSrv) {
      return;
    }

    if ((ranges.ctrlKey || ranges.metaKey) && dashboard.meta.canEdit) {
    if ((ranges.ctrlKey || ranges.metaKey) && (dashboard.meta.canEdit || dashboard.meta.canMakeEditable)) {
      // Add annotation
      setTimeout(() => {
        eventManager.updateTime(ranges.xaxis);
@ -695,7 +695,7 @@ function graphDirective(timeSrv, popoverSrv, contextSrv) {
      return;
    }

    if ((pos.ctrlKey || pos.metaKey) && dashboard.meta.canEdit) {
    if ((pos.ctrlKey || pos.metaKey) && (dashboard.meta.canEdit || dashboard.meta.canMakeEditable)) {
      // Skip if range selected (added in "plotselected" event handler)
      let isRangeSelection = pos.x !== pos.x1;
      if (!isRangeSelection) {

@ -287,6 +287,10 @@ module.directive('graphLegend', function(popoverSrv, $timeout) {
      destroyScrollbar();
      legendScrollbar = baron(scrollbarParams);
    }

    // #11830 - compensates for Firefox scrollbar calculation error in the baron framework
    scroller[0].style.marginRight = '-' + (scroller[0].offsetWidth - scroller[0].clientWidth) + 'px';

    legendScrollbar.scroll();
  }

@ -44,10 +44,18 @@ div.flot-text {
  padding: $panel-padding;
  height: calc(100% - 27px);
  position: relative;

  // Fixes scrolling on mobile devices
  overflow: auto;
}

// For larger screens, set back to hidden to avoid double scroll bars
@include media-breakpoint-up(md) {
  .panel-content {
    overflow: hidden;
  }
}

.panel-title-container {
  min-height: 9px;
  cursor: move;

scripts/tag_release.sh (new executable file, 40 lines)
@ -0,0 +1,40 @@
#!/bin/bash

# abort if we get any error
set -e

_tag=$1
_branch="$(git rev-parse --abbrev-ref HEAD)"

if [ "${_tag}" == "" ]; then
  echo "Missing version param. ex './scripts/tag_release.sh v5.1.1'"
  exit 1
fi

if [ "${_branch}" == "master" ]; then
  echo "you cannot tag releases from the master branch"
  echo "please checkout the release branch"
  echo "ex 'git checkout v5.1.x'"
  exit 1
fi

# always make sure to pull latest changes from origin
echo "pulling latest changes from ${_branch}"
git pull origin "${_branch}"

# create signed tag for latest commit
git tag -s "${_tag}" -m "release ${_tag}"

# verify the signed tag
git tag -v "${_tag}"

echo "Make sure the tag is signed as expected"
echo "press [y] to push the tags"

read -n 1 confirm

if [ "${confirm}" == "y" ]; then
  git push origin "${_branch}" --tags
else
  echo "Abort!"
fi
@ -31,11 +31,24 @@ const entries = HOT ? {
  vendor: require('./dependencies'),
};

const output = HOT ? {
  path: path.resolve(__dirname, '../../public/build'),
  filename: '[name].[hash].js',
  publicPath: "/public/build/",
} : {
  path: path.resolve(__dirname, '../../public/build'),
  filename: '[name].[hash].js',
  // Keep publicPath relative for host.com/grafana/ deployments
  publicPath: "public/build/",
};

module.exports = merge(common, {
  devtool: "cheap-module-source-map",

  entry: entries,

  output: output,

  resolve: {
    extensions: ['.scss', '.ts', '.tsx', '.es6', '.js', '.json', '.svg', '.woff2', '.png'],
  },
@ -66,23 +79,20 @@ module.exports = merge(common, {
  {
    test: /\.tsx?$/,
    exclude: /node_modules/,
    use: [
      {
        loader: 'babel-loader',
        options: {
          plugins: [
            'syntax-dynamic-import',
            'react-hot-loader/babel',
          ],
        },
      },
      {
    use: {
      loader: 'awesome-typescript-loader',
      options: {
        useCache: true,
        useBabel: HOT,
        babelOptions: {
          babelrc: false,
          plugins: [
            'syntax-dynamic-import',
            'react-hot-loader/babel'
          ]
        }
      },
    }
  ]
  },
  require('./sass.rule.js')({
    sourceMap: true, minimize: false, preserveUrl: HOT