Mirror of https://github.com/grafana/grafana.git

Merge remote-tracking branch 'upstream/master' into update-xorm
Commit: 1cef373d16

CHANGELOG.md (16 lines changed)
@ -1,10 +1,12 @@
|
||||
# 5.1.0 (unreleased)
|
||||
|
||||
* **MSSQL**: New Microsoft SQL Server data source [#10093](https://github.com/grafana/grafana/pull/10093), [#11298](https://github.com/grafana/grafana/pull/11298), thx [@linuxchips](https://github.com/linuxchips)
|
||||
* **Prometheus**: The heatmap panel now support Prometheus histograms [#10009](https://github.com/grafana/grafana/issues/10009)
|
||||
* **Postgres/MySQL**: Ability to insert 0s or nulls for missing intervals [#9487](https://github.com/grafana/grafana/issues/9487), thanks [@svenklemm](https://github.com/svenklemm)
|
||||
* **Graph**: Thresholds for Right Y axis [#7107](https://github.com/grafana/grafana/issues/7107), thx [@ilgizar](https://github.com/ilgizar)
|
||||
* **Graph**: Support multiple series stacking in histogram mode [#8151](https://github.com/grafana/grafana/issues/8151), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Alerting**: Pausing/un-pausing alerts now updates new_state_date [#10942](https://github.com/grafana/grafana/pull/10942)
|
||||
* **Alerting**: Support Pagerduty notification channel using Pagerduty V2 API [#10531](https://github.com/grafana/grafana/issues/10531), thx [@jbaublitz](https://github.com/jbaublitz)
|
||||
* **Templating**: Add comma templating format [#10632](https://github.com/grafana/grafana/issues/10632), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Prometheus**: Support POST for query and query_range [#9859](https://github.com/grafana/grafana/pull/9859), thx [@mtanda](https://github.com/mtanda)
|
||||
|
||||
@ -12,12 +14,22 @@
|
||||
* **OpsGenie**: Add triggered alerts as description [#11046](https://github.com/grafana/grafana/pull/11046), thx [@llamashoes](https://github.com/llamashoes)
|
||||
* **Cloudwatch**: Support high resolution metrics [#10925](https://github.com/grafana/grafana/pull/10925), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Cloudwatch**: Add dimension filtering to CloudWatch `dimension_values()` [#10029](https://github.com/grafana/grafana/issues/10029), thx [@willyhutw](https://github.com/willyhutw)
|
||||
* **Units**: Second to HH:mm:ss formatter [#11107](https://github.com/grafana/grafana/issues/11107), thx [@gladdiologist](https://github.com/gladdiologist)
|
||||
* **Singlestat**: Add color to prefix and postfix in singlestat panel [#11143](https://github.com/grafana/grafana/pull/11143), thx [@ApsOps](https://github.com/ApsOps)
|
||||
|
||||
# 5.0.2 (unrelease)
|
||||
# 5.0.4 (unreleased)
|
||||
* **Dashboard**: Fixed bug where collapsed panels could not be directly linked to/rendered [#11114](https://github.com/grafana/grafana/issues/11114) & [#11086](https://github.com/grafana/grafana/issues/11086)
|
||||
|
||||
# 5.0.3 (2018-03-16)
|
||||
* **Mysql**: Mysql panic occurring occasionally upon Grafana dashboard access (a bigger patch than the one in 5.0.2) [#11155](https://github.com/grafana/grafana/issues/11155)
|
||||
|
||||
# 5.0.2 (2018-03-14)
|
||||
* **Mysql**: Mysql panic occurring occasionally upon Grafana dashboard access [#11155](https://github.com/grafana/grafana/issues/11155)
|
||||
* **Dashboards**: Should be possible to browse dashboard using only uid [#11231](https://github.com/grafana/grafana/issues/11231)
|
||||
* **Alerting**: Fixes bug where alerts from hidden panels were deleted [#11222](https://github.com/grafana/grafana/issues/11222)
|
||||
* **Import**: Fixes bug where dashboards with alerts couldn't be imported [#11227](https://github.com/grafana/grafana/issues/11227)
|
||||
* **Teams**: Remove quota restrictions from teams [#11220](https://github.com/grafana/grafana/issues/11220)
|
||||
* **Render**: Fixes bug with legacy url redirection for panel rendering [#11180](https://github.com/grafana/grafana/issues/11180)
|
||||
|
||||
# 5.0.1 (2018-03-08)
|
||||
|
||||
|
Gopkg.lock (generated, 16 lines changed)
@ -103,6 +103,14 @@
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/denisenkom/go-mssqldb"
|
||||
packages = [
|
||||
".",
|
||||
"internal/cp"
|
||||
]
|
||||
revision = "270bc3860bb94dd3a3ffd047377d746c5e276726"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/fatih/color"
|
||||
packages = ["."]
|
||||
@ -145,7 +153,6 @@
|
||||
packages = [
|
||||
".",
|
||||
"memcache",
|
||||
"mysql",
|
||||
"postgres",
|
||||
"redis"
|
||||
]
|
||||
@ -464,7 +471,10 @@
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["pbkdf2"]
|
||||
packages = [
|
||||
"md4",
|
||||
"pbkdf2"
|
||||
]
|
||||
revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b"
|
||||
|
||||
[[projects]]
|
||||
@ -633,6 +643,6 @@
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "112ccff73f668c8c4dbe3d41c37ebee65fd7d839f5a4fa0665c593cae0095dad"
|
||||
inputs-digest = "8a9e651fb8ea49dfd3c6ddc99bd3242b39e453ea9edd11321da79bd2c865e9d1"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
@ -195,3 +195,7 @@ ignored = [
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/teris-io/shortid"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/denisenkom/go-mssqldb"
|
||||
revision = "270bc3860bb94dd3a3ffd047377d746c5e276726"
|
||||
|
@ -9,9 +9,6 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
|
||||
|
||||
![](http://docs.grafana.org/assets/img/features/dashboard_ex1.png)
|
||||
|
||||
## Grafana v5 Alpha Preview
|
||||
Grafana master is now v5.0 alpha. This is going to be the biggest and most foundational release Grafana has ever had, coming with a ton of UX improvements, a new dashboard grid engine, dashboard folders, user teams and permissions. Check out this [video preview](https://www.youtube.com/watch?v=BC_YRNpqj5k) of Grafana v5.
|
||||
|
||||
## Installation
|
||||
Head to [docs.grafana.org](http://docs.grafana.org/installation/) and [download](https://grafana.com/get)
|
||||
the latest release.
|
||||
@ -27,7 +24,7 @@ the latest master builds [here](https://grafana.com/grafana/download)
|
||||
|
||||
### Dependencies
|
||||
|
||||
- Go 1.9
|
||||
- Go 1.10
|
||||
- NodeJS LTS
|
||||
|
||||
### Building the backend
|
||||
|
@ -6,18 +6,21 @@ But it will give you an idea of our current vision and plan.
|
||||
### Short term (1-2 months)
|
||||
|
||||
- v5.1
|
||||
- Crossplatform builds & build speed improvements
|
||||
- Build speed improvements & integration test execution
|
||||
- Kubernetes friendly docker container
|
||||
- Enterprise LDAP
|
||||
- Provisioning workflow
|
||||
- First login registration view
|
||||
- IFQL Initial support
|
||||
- MSSQL datasource
|
||||
|
||||
### Mid term (2-4 months)
|
||||
|
||||
- v5.2
|
||||
- Azure monitor backend rewrite
|
||||
- Elasticsearch alerting
|
||||
- First login registration view
|
||||
- Backend plugins? (alert notifiers, auth)
|
||||
- Crossplatform builds
|
||||
- IFQL Initial support
|
||||
|
||||
### Long term (4 - 8 months)
|
||||
|
||||
|
build.go (4 lines changed)
@ -79,6 +79,10 @@ func main() {
|
||||
case "setup":
|
||||
setup()
|
||||
|
||||
case "build-srv":
|
||||
clean()
|
||||
build("grafana-server", "./pkg/cmd/grafana-server", []string{})
|
||||
|
||||
case "build-cli":
|
||||
clean()
|
||||
build("grafana-cli", "./pkg/cmd/grafana-cli", []string{})
|
||||
|
@ -4,10 +4,10 @@
|
||||
# change
|
||||
|
||||
# possible values : production, development
|
||||
; app_mode = production
|
||||
;app_mode = production
|
||||
|
||||
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
|
||||
; instance_name = ${HOSTNAME}
|
||||
;instance_name = ${HOSTNAME}
|
||||
|
||||
#################################### Paths ####################################
|
||||
[paths]
|
||||
@ -21,7 +21,7 @@
|
||||
;plugins = /var/lib/grafana/plugins
|
||||
|
||||
# folder that contains provisioning config files that grafana will apply on startup and while running.
|
||||
; provisioning = conf/provisioning
|
||||
;provisioning = conf/provisioning
|
||||
|
||||
#################################### Server ####################################
|
||||
[server]
|
||||
@ -124,7 +124,6 @@ log_queries =
|
||||
# This enables data proxy logging, default is false
|
||||
;logging = false
|
||||
|
||||
|
||||
#################################### Analytics ####################################
|
||||
[analytics]
|
||||
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
|
||||
@ -326,7 +325,6 @@ log_queries =
|
||||
# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
|
||||
;filters =
|
||||
|
||||
|
||||
# For "console" mode only
|
||||
[log.console]
|
||||
;level =
|
||||
@ -372,7 +370,6 @@ log_queries =
|
||||
# Syslog tag. By default, the process' argv[0] is used.
|
||||
;tag =
|
||||
|
||||
|
||||
#################################### Alerting ############################
|
||||
[alerting]
|
||||
# Disable alerting engine & UI features
|
||||
|
@ -6,3 +6,10 @@
|
||||
- "9300:9300"
|
||||
volumes:
|
||||
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
|
||||
|
||||
fake-elastic-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: elasticsearch
|
||||
FD_PORT: 9200
|
||||
|
@ -6,3 +6,10 @@
|
||||
ports:
|
||||
- "10200:9200"
|
||||
- "10300:9300"
|
||||
|
||||
fake-elastic5-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: elasticsearch
|
||||
FD_PORT: 10200
|
||||
|
docker/blocks/graphite1/big-dashboard.json (new file, 1161 lines; diff suppressed because it is too large)
docker/blocks/graphite11/big-dashboard.json (new file, 1179 lines; diff suppressed because it is too large)
docker/blocks/graphite11/docker-compose.yaml (new file, 18 lines)
@ -0,0 +1,18 @@
|
||||
graphite11:
|
||||
image: graphiteapp/graphite-statsd
|
||||
ports:
|
||||
- "8180:80"
|
||||
- "2103-2104:2003-2004"
|
||||
- "2123-2124:2023-2024"
|
||||
- "8225:8125/udp"
|
||||
- "8226:8126"
|
||||
|
||||
fake-graphite11-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: graphite
|
||||
FD_PORT: 2103
|
||||
FD_GRAPHITE_VERSION: 1.1
|
||||
depends_on:
|
||||
- graphite11
|
docker/blocks/mssql/build/Dockerfile (new file, 5 lines)
@ -0,0 +1,5 @@
|
||||
FROM microsoft/mssql-server-linux:2017-CU4
|
||||
WORKDIR /usr/setup
|
||||
COPY . /usr/setup
|
||||
RUN chmod +x /usr/setup/setup.sh
|
||||
CMD /bin/bash ./entrypoint.sh
|
docker/blocks/mssql/build/entrypoint.sh (new file, 2 lines)
@ -0,0 +1,2 @@
|
||||
#start SQL Server and run setup script
|
||||
/usr/setup/setup.sh & /opt/mssql/bin/sqlservr
|
docker/blocks/mssql/build/setup.sh (new executable file, 12 lines)
@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
#wait for the SQL Server to come up
|
||||
sleep 15s
|
||||
|
||||
cat /usr/setup/setup.sql.template | awk '{
|
||||
gsub(/%%DB%%/,"'$MSSQL_DATABASE'");
|
||||
gsub(/%%USER%%/,"'$MSSQL_USER'");
|
||||
gsub(/%%PWD%%/,"'$MSSQL_PASSWORD'")
|
||||
}1' > /usr/setup/setup.sql
|
||||
|
||||
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P $MSSQL_SA_PASSWORD -d master -i /usr/setup/setup.sql
|
docker/blocks/mssql/build/setup.sql.template (new file, 14 lines)
@ -0,0 +1,14 @@
|
||||
CREATE LOGIN %%USER%% WITH PASSWORD = '%%PWD%%'
|
||||
GO
|
||||
|
||||
CREATE DATABASE %%DB%%;
|
||||
GO
|
||||
|
||||
USE %%DB%%;
|
||||
GO
|
||||
|
||||
CREATE USER %%USER%% FOR LOGIN %%USER%%;
|
||||
GO
|
||||
|
||||
EXEC sp_addrolemember 'db_owner', '%%USER%%';
|
||||
GO
|
docker/blocks/mssql/dashboard.json (new file, 539 lines)
@ -0,0 +1,539 @@
|
||||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_MSSQL",
|
||||
"label": "MSSQL",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "mssql",
|
||||
"pluginName": "MSSQL"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "5.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "graph",
|
||||
"name": "Graph",
|
||||
"version": "5.0.0"
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "mssql",
|
||||
"name": "MSSQL",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "table",
|
||||
"name": "Table",
|
||||
"version": "5.0.0"
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "A dashboard visualizing data generated from grafana/fake-data-gen",
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"iteration": 1520976748896,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {
|
||||
"total avg": "#6ed0e0"
|
||||
},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_MSSQL}",
|
||||
"fill": 2,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [
|
||||
{
|
||||
"alias": "total avg",
|
||||
"fill": 0,
|
||||
"pointradius": 3,
|
||||
"points": true
|
||||
}
|
||||
],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n avg(value) as value,\n hostname as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'logins.count' AND\n hostname IN($host)\nGROUP BY $__timeGroup(createdAt,'$summarize'), hostname\nORDER BY 1",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n min(value) as value,\n 'total avg' as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'logins.count'\nGROUP BY $__timeGroup(createdAt,'$summarize')\nORDER BY 1",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Average logins / $summarize",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"decimals": null,
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_MSSQL}",
|
||||
"fill": 2,
|
||||
"gridPos": {
|
||||
"h": 18,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 8,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n avg(value) as value,\n 'started' as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'payment.started'\nGROUP BY $__timeGroup(createdAt,'$summarize')\nORDER BY 1",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n avg(value) as value,\n 'ended' as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'payment.ended'\nGROUP BY $__timeGroup(createdAt,'$summarize')\nORDER BY 1",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Average payments started/ended / $summarize",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_MSSQL}",
|
||||
"fill": 2,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 9
|
||||
},
|
||||
"id": 6,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time,\n max(value) as value,\n hostname as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'cpu' AND\n hostname IN($host)\nGROUP BY $__timeGroup(createdAt,'$summarize'), hostname\nORDER BY 1",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Max CPU / $summarize",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"columns": [],
|
||||
"datasource": "${DS_MSSQL}",
|
||||
"fontSize": "100%",
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 18
|
||||
},
|
||||
"id": 4,
|
||||
"links": [],
|
||||
"pageSize": null,
|
||||
"scroll": true,
|
||||
"showHeader": true,
|
||||
"sort": {
|
||||
"col": 0,
|
||||
"desc": true
|
||||
},
|
||||
"styles": [
|
||||
{
|
||||
"alias": "Time",
|
||||
"dateFormat": "YYYY-MM-DD HH:mm:ss",
|
||||
"pattern": "Time",
|
||||
"type": "date"
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"colorMode": null,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"decimals": 2,
|
||||
"pattern": "/.*/",
|
||||
"thresholds": [],
|
||||
"type": "number",
|
||||
"unit": "short"
|
||||
}
|
||||
],
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "table",
|
||||
"rawSql": "SELECT createdAt as Time, source, datacenter, hostname, value FROM grafana_metric WHERE hostname in($host)",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Values",
|
||||
"transform": "table",
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"schemaVersion": 16,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {},
|
||||
"datasource": "${DS_MSSQL}",
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "Datacenter",
|
||||
"multi": false,
|
||||
"name": "datacenter",
|
||||
"options": [],
|
||||
"query": "SELECT DISTINCT datacenter FROM grafana_metric",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tags": [],
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
},
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {},
|
||||
"datasource": "${DS_MSSQL}",
|
||||
"hide": 0,
|
||||
"includeAll": true,
|
||||
"label": "Hostname",
|
||||
"multi": true,
|
||||
"name": "host",
|
||||
"options": [],
|
||||
"query": "SELECT DISTINCT hostname FROM grafana_metric WHERE datacenter='$datacenter'",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tags": [],
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
},
|
||||
{
|
||||
"auto": false,
|
||||
"auto_count": 30,
|
||||
"auto_min": "10s",
|
||||
"current": {
|
||||
"text": "1m",
|
||||
"value": "1m"
|
||||
},
|
||||
"hide": 0,
|
||||
"label": "Summarize",
|
||||
"name": "summarize",
|
||||
"options": [
|
||||
{
|
||||
"selected": false,
|
||||
"text": "1s",
|
||||
"value": "1s"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "10s",
|
||||
"value": "10s"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "30s",
|
||||
"value": "30s"
|
||||
},
|
||||
{
|
||||
"selected": true,
|
||||
"text": "1m",
|
||||
"value": "1m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "5m",
|
||||
"value": "5m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "10m",
|
||||
"value": "10m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "30m",
|
||||
"value": "30m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "1h",
|
||||
"value": "1h"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "6h",
|
||||
"value": "6h"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "12h",
|
||||
"value": "12h"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "1d",
|
||||
"value": "1d"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "7d",
|
||||
"value": "7d"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "14d",
|
||||
"value": "14d"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "30d",
|
||||
"value": "30d"
|
||||
}
|
||||
],
|
||||
"query": "1s,10s,30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
|
||||
"refresh": 2,
|
||||
"type": "interval"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"time_options": [
|
||||
"5m",
|
||||
"15m",
|
||||
"1h",
|
||||
"6h",
|
||||
"12h",
|
||||
"24h",
|
||||
"2d",
|
||||
"7d",
|
||||
"30d"
|
||||
]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "Grafana Fake Data Gen - MSSQL",
|
||||
"uid": "86Js1xRmk",
|
||||
"version": 11
|
||||
}
|
docker/blocks/mssql/docker-compose.yaml (new file, 19 lines)
@ -0,0 +1,19 @@
|
||||
mssql:
|
||||
build:
|
||||
context: blocks/mssql/build
|
||||
environment:
|
||||
ACCEPT_EULA: Y
|
||||
MSSQL_SA_PASSWORD: Password!
|
||||
MSSQL_PID: Express
|
||||
MSSQL_DATABASE: grafana
|
||||
MSSQL_USER: grafana
|
||||
MSSQL_PASSWORD: Password!
|
||||
ports:
|
||||
- "1433:1433"
|
||||
|
||||
fake-mssql-data:
|
||||
image: grafana/fake-data-gen
|
||||
network_mode: bridge
|
||||
environment:
|
||||
FD_DATASOURCE: mssql
|
||||
FD_PORT: 1433
|
docker/blocks/mssql_tests/dashboard.json (new file, 2242 lines; diff suppressed because it is too large)
docker/blocks/mssql_tests/docker-compose.yaml (new file, 12 lines)
@ -0,0 +1,12 @@
|
||||
mssqltests:
|
||||
build:
|
||||
context: blocks/mssql/build
|
||||
environment:
|
||||
ACCEPT_EULA: Y
|
||||
MSSQL_SA_PASSWORD: Password!
|
||||
MSSQL_PID: Express
|
||||
MSSQL_DATABASE: grafanatest
|
||||
MSSQL_USER: grafana
|
||||
MSSQL_PASSWORD: Password!
|
||||
ports:
|
||||
- "1433:1433"
|
@ -28,4 +28,4 @@
|
||||
build: blocks/prometheus_random_data
|
||||
network_mode: host
|
||||
ports:
|
||||
- "8080:8080"
|
||||
- "8081:8080"
|
||||
|
@ -36,4 +36,4 @@ scrape_configs:
|
||||
|
||||
- job_name: 'prometheus-random-data'
|
||||
static_configs:
|
||||
- targets: ['127.0.0.1:8080']
|
||||
- targets: ['127.0.0.1:8081']
|
||||
|
@ -28,4 +28,4 @@
|
||||
build: blocks/prometheus_random_data
|
||||
network_mode: host
|
||||
ports:
|
||||
- "8080:8080"
|
||||
- "8081:8080"
|
||||
|
@ -36,4 +36,4 @@ scrape_configs:
|
||||
|
||||
- job_name: 'prometheus-random-data'
|
||||
static_configs:
|
||||
- targets: ['127.0.0.1:8080']
|
||||
- targets: ['127.0.0.1:8081']
|
||||
|
@ -58,6 +58,8 @@ Recipient | allows you to override the Slack recipient.
|
||||
Mention | make it possible to include a mention in the Slack notification sent by Grafana. Ex @here or @channel
|
||||
Token | If provided, Grafana will upload the generated image via Slack's file.upload API method, not the external image destination.
|
||||
|
||||
If you are using the token for a Slack bot, then you have to invite the bot to the channel you want to send notifications to, and add the channel to the recipient field.
|
||||
|
||||
### PagerDuty
|
||||
|
||||
To set up PagerDuty, all you have to do is to provide an API key.
|
||||
|
@ -15,7 +15,7 @@ weight = 1
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Stable for Debian-based Linux | [grafana_5.0.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.1_amd64.deb)
|
||||
Stable for Debian-based Linux | [grafana_5.0.3_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.3_amd64.deb)
|
||||
|
||||
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||
installation.
|
||||
@ -24,9 +24,9 @@ installation.
|
||||
|
||||
|
||||
```bash
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.1_amd64.deb
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.3_amd64.deb
|
||||
sudo apt-get install -y adduser libfontconfig
|
||||
sudo dpkg -i grafana_5.0.1_amd64.deb
|
||||
sudo dpkg -i grafana_5.0.3_amd64.deb
|
||||
```
|
||||
|
||||
## APT Repository
|
||||
|
@ -83,7 +83,7 @@ $ docker run \
|
||||
-d \
|
||||
-p 3000:3000 \
|
||||
--name grafana \
|
||||
grafana/grafana:4.5.2
|
||||
grafana/grafana:5.0.2
|
||||
```
|
||||
|
||||
## Configuring AWS Credentials for CloudWatch Support
|
||||
|
@ -15,7 +15,7 @@ weight = 2
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.0.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.1-1.x86_64.rpm)
|
||||
Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.0.3 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3-1.x86_64.rpm)
|
||||
|
||||
|
||||
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||
@ -26,7 +26,7 @@ installation.
|
||||
You can install Grafana using Yum directly.
|
||||
|
||||
```bash
|
||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.1-1.x86_64.rpm
|
||||
$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3-1.x86_64.rpm
|
||||
```
|
||||
|
||||
Or install manually using `rpm`.
|
||||
@ -34,15 +34,15 @@ Or install manually using `rpm`.
|
||||
#### On CentOS / Fedora / Redhat:
|
||||
|
||||
```bash
|
||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.1-1.x86_64.rpm
|
||||
$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3-1.x86_64.rpm
|
||||
$ sudo yum install initscripts fontconfig
|
||||
$ sudo rpm -Uvh grafana-5.0.1-1.x86_64.rpm
|
||||
$ sudo rpm -Uvh grafana-5.0.3-1.x86_64.rpm
|
||||
```
|
||||
|
||||
#### On OpenSuse:
|
||||
|
||||
```bash
|
||||
$ sudo rpm -i --nodeps grafana-5.0.1-1.x86_64.rpm
|
||||
$ sudo rpm -i --nodeps grafana-5.0.3-1.x86_64.rpm
|
||||
```
|
||||
|
||||
## Install via YUM Repository
|
||||
|
@ -13,7 +13,7 @@ weight = 3
|
||||
|
||||
Description | Download
|
||||
------------ | -------------
|
||||
Latest stable package for Windows | [grafana-5.0.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.1.windows-x64.zip)
|
||||
Latest stable package for Windows | [grafana-5.0.3.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.3.windows-x64.zip)
|
||||
|
||||
Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
|
||||
installation.
|
||||
|
@ -9,30 +9,38 @@ weight = 10
|
||||
|
||||
# How to set up Grafana for high availability
|
||||
|
||||
> Alerting does not support high availability yet.
|
||||
|
||||
Setting up Grafana for high availability is fairly simple. It comes down to two things:
|
||||
|
||||
* Use a shared database for multiple grafana instances.
|
||||
* Consider how user sessions are stored.
|
||||
1. Use a shared database for storing dashboards, users, and other persistent data
|
||||
2. Decide how to store session data.
|
||||
|
||||
<div class="text-center">
|
||||
<img src="/img/docs/tutorials/grafana-high-availability.png" max-width= "800px" class="center"></img>
|
||||
</div>
|
||||
|
||||
## Configure multiple servers to use the same database
|
||||
|
||||
First you need to do is to setup mysql or postgres on another server and configure Grafana to use that database.
|
||||
First, you need to set up MySQL or Postgres on another server and configure Grafana to use that database.
|
||||
You can find the configuration for doing that in the [[database]]({{< relref "configuration.md" >}}#database) section in the grafana config.
|
||||
Grafana will now persist all long term data in the database.
|
||||
It also worth considering how to setup the database for high availability but thats outside the scope of this guide.
|
||||
Grafana will now persist all long-term data in the database. How to configure the database for high availability is out of scope for this guide. We recommend consulting an expert on the database you are using.
|
||||
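As an illustration only (not part of this commit), a shared-database setup could look roughly like this in each server's configuration file; the host and credentials below are placeholders:

```ini
; Hypothetical example: point every Grafana instance at the same MySQL server.
[database]
type = mysql
host = mysql.example.internal:3306
name = grafana
user = grafana
password = changeme
```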
|
||||
## User sessions
|
||||
|
||||
The second thing to consider is how to deal with user sessions and how to balance the load between servers.
|
||||
By default Grafana stores user sessions on disk which works fine if you use `sticky sessions` in your load balancer.
|
||||
Grafana also supports storing the session data in the database, redis or memcache which makes it possible to use round robin in your load balancer.
|
||||
If you use mysql/postgres for session storage you first need a table to store the session data in. More details about that in [[sessions]]({{< relref "configuration.md" >}}#session)
|
||||
The second thing to consider is how to deal with user sessions and how to configure your load balancer in front of Grafana.
|
||||
Grafana supports two ways of storing session data: locally on disk or in a database/cache server.
|
||||
If you want to store sessions on disk you can use `sticky sessions` in your load balancer. If you prefer to store session data in a database/cache server
|
||||
you can use any stateless routing strategy in your load balancer (e.g. round robin or least connections).
|
||||
|
||||
For Grafana itself it doesn't really matter if you store your sessions on disk or database/redis/memcache.
|
||||
But we suggest that you store the session in redis/memcache since it makes it easier to add/remote instances from the group.
|
||||
### Sticky sessions
|
||||
Using sticky sessions, all traffic for one user will always be sent to the same server, which means that session-related data can be stored on disk rather than in a shared database. This is the default behavior for Grafana, and if you only want multiple servers for failover it is a good solution since it requires the least amount of work.
|
||||
|
||||
### Stateless sessions
|
||||
You can also choose to store session data in Redis/Memcache/Postgres/MySQL, which means that the load balancer can send a user to any Grafana server without the user having to log in on each server. This requires a little more work from the operator, but it enables you to add/remove Grafana servers without impacting the user experience.
|
||||
If you use MySQL/Postgres for session storage, you first need a table to store the session data in. More details about that in [[sessions]]({{< relref "configuration.md" >}}#session)
|
||||
|
||||
For Grafana itself it doesn't really matter if you store the session data on disk or in a database/redis/memcache. But we recommend using a database/redis/memcache since it makes it easier to manage the Grafana servers.
|
||||
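As an illustration only (not from this commit), storing sessions in Redis could look roughly like this; the address and database name are placeholders, and the exact options are documented in the [[session]] section of the configuration docs:

```ini
; Hypothetical example: keep sessions in Redis so the load balancer can use round robin.
[session]
provider = redis
provider_config = addr=127.0.0.1:6379,pool_size=100,db=grafana
```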
|
||||
## Alerting
|
||||
|
||||
Currently alerting supports a limited form of high availability. Since v4.2.0 of Grafana, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server but no duplicate alert notifications are sent due to the deduping logic. Proper load balancing of alerts will be introduced in the future.
|
||||
Currently alerting supports a limited form of high availability. Since v4.2.0, alert notifications are deduped when running multiple servers. This means all alerts are executed on every server, but alert notifications are only sent once per alert. Grafana does not support distributing the alert rule execution between servers; that might be added in the future, but right now we prefer to keep it simple.
|
||||
|
@ -118,7 +118,7 @@
|
||||
"prettier --write",
|
||||
"git add"
|
||||
],
|
||||
"*.go": [
|
||||
"*pkg/**/*.go": [
|
||||
"gofmt -w -s",
|
||||
"git add"
|
||||
]
|
||||
|
@ -1,5 +1,5 @@
|
||||
#! /usr/bin/env bash
|
||||
version=5.0.1
|
||||
version=5.0.2
|
||||
|
||||
wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb
|
||||
|
||||
|
@ -1,6 +1,7 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
@ -21,6 +22,14 @@ func AdminGetSettings(c *m.ReqContext) {
|
||||
if strings.Contains(keyName, "secret") || strings.Contains(keyName, "password") || (strings.Contains(keyName, "provider_config")) {
|
||||
value = "************"
|
||||
}
|
||||
if strings.Contains(keyName, "url") {
|
||||
var rgx = regexp.MustCompile(`.*:\/\/([^:]*):([^@]*)@.*?$`)
|
||||
var subs = rgx.FindAllSubmatch([]byte(value), -1)
|
||||
if subs != nil && len(subs[0]) == 3 {
|
||||
value = strings.Replace(value, string(subs[0][1]), "******", 1)
|
||||
value = strings.Replace(value, string(subs[0][2]), "******", 1)
|
||||
}
|
||||
}
|
||||
|
||||
jsonSec[keyName] = value
|
||||
}
|
||||
|
@ -66,6 +66,7 @@ func (hs *HttpServer) registerRoutes() {
|
||||
r.Get("/plugins/:id/page/:page", reqSignedIn, Index)
|
||||
|
||||
r.Get("/d/:uid/:slug", reqSignedIn, Index)
|
||||
r.Get("/d/:uid", reqSignedIn, Index)
|
||||
r.Get("/dashboard/db/:slug", reqSignedIn, redirectFromLegacyDashboardUrl, Index)
|
||||
r.Get("/dashboard/script/*", reqSignedIn, Index)
|
||||
r.Get("/dashboard-solo/snapshot/*", Index)
|
||||
|
@ -72,7 +72,9 @@ func RenderToPng(params *RenderOpts) (string, error) {
|
||||
localDomain = setting.HttpAddr
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s://%s:%s/%s", setting.Protocol, localDomain, setting.HttpPort, params.Path)
|
||||
// &render=1 signals to the legacy redirect layer to
|
||||
// avoid redirecting these requests.
|
||||
url := fmt.Sprintf("%s://%s:%s/%s&render=1", setting.Protocol, localDomain, setting.HttpPort, params.Path)
|
||||
|
||||
binPath, _ := filepath.Abs(filepath.Join(setting.PhantomDir, executable))
|
||||
scriptPath, _ := filepath.Abs(filepath.Join(setting.PhantomDir, "render.js"))
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
m "github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"gopkg.in/macaron.v1"
|
||||
)
|
||||
|
||||
@ -36,9 +37,14 @@ func RedirectFromLegacyDashboardUrl() macaron.Handler {
|
||||
func RedirectFromLegacyDashboardSoloUrl() macaron.Handler {
|
||||
return func(c *m.ReqContext) {
|
||||
slug := c.Params("slug")
|
||||
renderRequest := c.QueryBool("render")
|
||||
|
||||
if slug != "" {
|
||||
if url, err := getDashboardUrlBySlug(c.OrgId, slug); err == nil {
|
||||
if renderRequest && strings.Contains(url, setting.AppSubUrl) {
|
||||
url = strings.Replace(url, setting.AppSubUrl, "", 1)
|
||||
}
|
||||
|
||||
url = strings.Replace(url, "/d/", "/d-solo/", 1)
|
||||
url = fmt.Sprintf("%s?%s", url, c.Req.URL.RawQuery)
|
||||
c.Redirect(url, 301)
|
||||
|
@ -19,6 +19,7 @@ const (
|
||||
DS_PROMETHEUS = "prometheus"
|
||||
DS_POSTGRES = "postgres"
|
||||
DS_MYSQL = "mysql"
|
||||
DS_MSSQL = "mssql"
|
||||
DS_ACCESS_DIRECT = "direct"
|
||||
DS_ACCESS_PROXY = "proxy"
|
||||
)
|
||||
@ -68,6 +69,7 @@ var knownDatasourcePlugins map[string]bool = map[string]bool{
|
||||
DS_OPENTSDB: true,
|
||||
DS_POSTGRES: true,
|
||||
DS_MYSQL: true,
|
||||
DS_MSSQL: true,
|
||||
"opennms": true,
|
||||
"abhisant-druid-datasource": true,
|
||||
"dalmatinerdb-datasource": true,
|
||||
|
@ -80,7 +80,7 @@ func ImportDashboard(cmd *ImportDashboardCommand) error {
|
||||
User: cmd.User,
|
||||
}
|
||||
|
||||
savedDash, err := dashboards.NewService().SaveDashboard(dto)
|
||||
savedDash, err := dashboards.NewService().ImportDashboard(dto)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -74,6 +74,21 @@ func (e *DashAlertExtractor) GetAlertFromPanels(jsonWithPanels *simplejson.Json)
|
||||
|
||||
for _, panelObj := range jsonWithPanels.Get("panels").MustArray() {
|
||||
panel := simplejson.NewFromAny(panelObj)
|
||||
|
||||
collapsedJson, collapsed := panel.CheckGet("collapsed")
|
||||
// check if the panel is collapsed
|
||||
if collapsed && collapsedJson.MustBool() {
|
||||
|
||||
// extract alerts from sub panels for collapsed panels
|
||||
als, err := e.GetAlertFromPanels(panel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
alerts = append(alerts, als...)
|
||||
continue
|
||||
}
|
||||
|
||||
jsonAlert, hasAlert := panel.CheckGet("alert")
|
||||
|
||||
if !hasAlert {
|
||||
|
@ -22,6 +22,7 @@ func TestAlertRuleExtraction(t *testing.T) {
|
||||
defaultDs := &m.DataSource{Id: 12, OrgId: 1, Name: "I am default", IsDefault: true}
|
||||
graphite2Ds := &m.DataSource{Id: 15, OrgId: 1, Name: "graphite2"}
|
||||
influxDBDs := &m.DataSource{Id: 16, OrgId: 1, Name: "InfluxDB"}
|
||||
prom := &m.DataSource{Id: 17, OrgId: 1, Name: "Prometheus"}
|
||||
|
||||
bus.AddHandler("test", func(query *m.GetDataSourcesQuery) error {
|
||||
query.Result = []*m.DataSource{defaultDs, graphite2Ds}
|
||||
@ -38,6 +39,10 @@ func TestAlertRuleExtraction(t *testing.T) {
|
||||
if query.Name == influxDBDs.Name {
|
||||
query.Result = influxDBDs
|
||||
}
|
||||
if query.Name == prom.Name {
|
||||
query.Result = prom
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
@ -214,5 +219,26 @@ func TestAlertRuleExtraction(t *testing.T) {
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Should be able to extract collapsed panels", func() {
|
||||
json, err := ioutil.ReadFile("./test-data/collapsed-panels.json")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
dashJson, err := simplejson.NewJson(json)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
dash := m.NewDashboardFromJson(dashJson)
|
||||
extractor := NewDashAlertExtractor(dash, 1)
|
||||
|
||||
alerts, err := extractor.GetAlerts()
|
||||
|
||||
Convey("Get rules without error", func() {
|
||||
So(err, ShouldBeNil)
|
||||
})
|
||||
|
||||
Convey("should be able to extract collapsed alerts", func() {
|
||||
So(len(alerts), ShouldEqual, 4)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
@ -46,25 +46,49 @@ type AlertmanagerNotifier struct {
|
||||
}
|
||||
|
||||
func (this *AlertmanagerNotifier) ShouldNotify(evalContext *alerting.EvalContext) bool {
|
||||
this.log.Debug("Should notify", "ruleId", evalContext.Rule.Id, "state", evalContext.Rule.State, "previousState", evalContext.PrevAlertState)
|
||||
|
||||
// Do not notify when we become OK for the first time.
|
||||
if (evalContext.PrevAlertState == m.AlertStatePending) && (evalContext.Rule.State == m.AlertStateOK) {
|
||||
return false
|
||||
}
|
||||
// Notify on Alerting -> OK to resolve before alertmanager timeout.
|
||||
if (evalContext.PrevAlertState == m.AlertStateAlerting) && (evalContext.Rule.State == m.AlertStateOK) {
|
||||
return true
|
||||
}
|
||||
return evalContext.Rule.State == m.AlertStateAlerting
|
||||
}
|
||||
|
||||
func (this *AlertmanagerNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
func (this *AlertmanagerNotifier) createAlert(evalContext *alerting.EvalContext, match *alerting.EvalMatch, ruleUrl string) *simplejson.Json {
|
||||
alertJSON := simplejson.New()
|
||||
alertJSON.Set("startsAt", evalContext.StartTime.UTC().Format(time.RFC3339))
|
||||
if evalContext.Rule.State == m.AlertStateOK {
|
||||
alertJSON.Set("endsAt", time.Now().UTC().Format(time.RFC3339))
|
||||
}
|
||||
alertJSON.Set("generatorURL", ruleUrl)
|
||||
|
||||
alerts := make([]interface{}, 0)
|
||||
for _, match := range evalContext.EvalMatches {
|
||||
alertJSON := simplejson.New()
|
||||
alertJSON.Set("startsAt", evalContext.StartTime.UTC().Format(time.RFC3339))
|
||||
|
||||
if ruleUrl, err := evalContext.GetRuleUrl(); err == nil {
|
||||
alertJSON.Set("generatorURL", ruleUrl)
|
||||
// Annotations (summary and description are very commonly used).
|
||||
alertJSON.SetPath([]string{"annotations", "summary"}, evalContext.Rule.Name)
|
||||
description := ""
|
||||
if evalContext.Rule.Message != "" {
|
||||
description += evalContext.Rule.Message
|
||||
}
|
||||
if evalContext.Error != nil {
|
||||
if description != "" {
|
||||
description += "\n"
|
||||
}
|
||||
description += "Error: " + evalContext.Error.Error()
|
||||
}
|
||||
if description != "" {
|
||||
alertJSON.SetPath([]string{"annotations", "description"}, description)
|
||||
}
|
||||
if evalContext.ImagePublicUrl != "" {
|
||||
alertJSON.SetPath([]string{"annotations", "image"}, evalContext.ImagePublicUrl)
|
||||
}
|
||||
|
||||
if evalContext.Rule.Message != "" {
|
||||
alertJSON.SetPath([]string{"annotations", "description"}, evalContext.Rule.Message)
|
||||
}
|
||||
|
||||
tags := make(map[string]string)
|
||||
// Labels (from metrics tags + mandatory alertname).
|
||||
tags := make(map[string]string)
|
||||
if match != nil {
|
||||
if len(match.Tags) == 0 {
|
||||
tags["metric"] = match.Metric
|
||||
} else {
|
||||
@ -72,10 +96,32 @@ func (this *AlertmanagerNotifier) Notify(evalContext *alerting.EvalContext) erro
|
||||
tags[k] = v
|
||||
}
|
||||
}
|
||||
tags["alertname"] = evalContext.Rule.Name
|
||||
alertJSON.Set("labels", tags)
|
||||
}
|
||||
tags["alertname"] = evalContext.Rule.Name
|
||||
alertJSON.Set("labels", tags)
|
||||
return alertJSON
|
||||
}
|
||||
|
||||
alerts = append(alerts, alertJSON)
|
||||
func (this *AlertmanagerNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending Alertmanager alert", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
|
||||
ruleUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
this.log.Error("Failed get rule link", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Send one alert per matching series.
|
||||
alerts := make([]interface{}, 0)
|
||||
for _, match := range evalContext.EvalMatches {
|
||||
alert := this.createAlert(evalContext, match, ruleUrl)
|
||||
alerts = append(alerts, alert)
|
||||
}
|
||||
|
||||
// This happens on ExecutionError or NoData
|
||||
if len(alerts) == 0 {
|
||||
alert := this.createAlert(evalContext, nil, ruleUrl)
|
||||
alerts = append(alerts, alert)
|
||||
}
|
||||
|
||||
bodyJSON := simplejson.NewFromAny(alerts)
|
||||
|
@ -27,15 +27,21 @@ func NewNotifierBase(id int64, isDefault bool, name, notifierType string, model
|
||||
}
|
||||
|
||||
func defaultShouldNotify(context *alerting.EvalContext) bool {
|
||||
// Only notify on state change.
|
||||
if context.PrevAlertState == context.Rule.State {
|
||||
return false
|
||||
}
|
||||
// Do not notify when we become OK for the first time.
|
||||
if (context.PrevAlertState == m.AlertStatePending) && (context.Rule.State == m.AlertStateOK) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *NotifierBase) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (n *NotifierBase) GetType() string {
|
||||
return n.Type
|
||||
}
|
||||
|
@ -38,10 +38,6 @@ func NewDingDingNotifier(model *m.AlertNotification) (alerting.Notifier, error)
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (this *DingDingNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
type DingDingNotifier struct {
|
||||
NotifierBase
|
||||
Url string
|
||||
|
@ -58,10 +58,6 @@ func NewEmailNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (this *EmailNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *EmailNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending alert notification to", "addresses", this.Addresses)
|
||||
|
||||
|
@ -75,10 +75,6 @@ type HipChatNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *HipChatNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *HipChatNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing hipchat notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
|
||||
|
@ -57,10 +57,6 @@ type KafkaNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *KafkaNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *KafkaNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
|
||||
state := evalContext.Rule.State
|
||||
|
@ -51,10 +51,6 @@ type LineNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *LineNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *LineNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing line notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
|
||||
|
@ -72,10 +72,6 @@ type OpsGenieNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *OpsGenieNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *OpsGenieNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
|
||||
var err error
|
||||
|
@ -65,10 +65,6 @@ type PagerdutyNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *PagerdutyNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *PagerdutyNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
|
||||
if evalContext.Rule.State == m.AlertStateOK && !this.AutoResolve {
|
||||
|
@ -123,10 +123,6 @@ type PushoverNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *PushoverNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *PushoverNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
ruleUrl, err := evalContext.GetRuleUrl()
|
||||
if err != nil {
|
||||
|
@ -71,10 +71,6 @@ type SensuNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *SensuNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *SensuNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending sensu result")
|
||||
|
||||
|
@ -98,10 +98,6 @@ type SlackNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *SlackNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *SlackNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing slack notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
|
||||
|
@ -47,10 +47,6 @@ type TeamsNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *TeamsNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *TeamsNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing teams notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
|
||||
|
@ -208,6 +208,7 @@ func generateImageCaption(evalContext *alerting.EvalContext, ruleUrl string, met
|
||||
|
||||
return message
|
||||
}
|
||||
|
||||
func appendIfPossible(message string, extra string, sizeLimit int) string {
|
||||
if len(extra)+len(message) <= sizeLimit {
|
||||
return message + extra
|
||||
@ -216,10 +217,6 @@ func appendIfPossible(message string, extra string, sizeLimit int) string {
|
||||
return message
|
||||
}
|
||||
|
||||
func (this *TelegramNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *TelegramNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
var cmd *m.SendWebhookSync
|
||||
if evalContext.ImagePublicUrl == "" && this.UploadImage == true {
|
||||
|
@ -114,10 +114,6 @@ func NewThreemaNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (this *ThreemaNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (notifier *ThreemaNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
notifier.log.Info("Sending alert notification from", "threema_id", notifier.GatewayID)
|
||||
notifier.log.Info("Sending alert notification to", "threema_id", notifier.RecipientID)
|
||||
|
@ -68,10 +68,6 @@ type VictoropsNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *VictoropsNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
// Notify sends notification to Victorops via POST to URL endpoint
|
||||
func (this *VictoropsNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Executing victorops notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
|
||||
|
@ -65,10 +65,6 @@ type WebhookNotifier struct {
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func (this *WebhookNotifier) ShouldNotify(context *alerting.EvalContext) bool {
|
||||
return defaultShouldNotify(context)
|
||||
}
|
||||
|
||||
func (this *WebhookNotifier) Notify(evalContext *alerting.EvalContext) error {
|
||||
this.log.Info("Sending webhook")
|
||||
|
||||
|
pkg/services/alerting/test-data/collapsed-panels.json (new file, 597 lines)
@ -0,0 +1,597 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": 127,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 9,
|
||||
"title": "Row title",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"alert": {
|
||||
"conditions": [
|
||||
{
|
||||
"evaluator": {
|
||||
"params": [
|
||||
200
|
||||
],
|
||||
"type": "gt"
|
||||
},
|
||||
"operator": {
|
||||
"type": "and"
|
||||
},
|
||||
"query": {
|
||||
"params": [
|
||||
"A",
|
||||
"5m",
|
||||
"now"
|
||||
]
|
||||
},
|
||||
"reducer": {
|
||||
"params": [],
|
||||
"type": "avg"
|
||||
},
|
||||
"type": "query"
|
||||
}
|
||||
],
|
||||
"executionErrorState": "alerting",
|
||||
"frequency": "10s",
|
||||
"handler": 1,
|
||||
"name": "Panel Title alert",
|
||||
"noDataState": "no_data",
|
||||
"notifications": []
|
||||
},
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "Prometheus",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 1
|
||||
},
|
||||
"id": 10,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "go_goroutines",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{job}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [
|
||||
{
|
||||
"colorMode": "critical",
|
||||
"fill": true,
|
||||
"line": true,
|
||||
"op": "gt",
|
||||
"value": 200
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Panel Title",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 1
|
||||
},
|
||||
"id": 14,
|
||||
"limit": 10,
|
||||
"links": [],
|
||||
"onlyAlertsOnDashboard": true,
|
||||
"show": "current",
|
||||
"sortOrder": 1,
|
||||
"stateFilter": [],
|
||||
"title": "Panel Title",
|
||||
"type": "alertlist"
|
||||
},
|
||||
{
|
||||
"collapsed": true,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 10
|
||||
},
|
||||
"id": 6,
|
||||
"panels": [
|
||||
{
|
||||
"alert": {
|
||||
"conditions": [
|
||||
{
|
||||
"evaluator": {
|
||||
"params": [
|
||||
200
|
||||
],
|
||||
"type": "gt"
|
||||
},
|
||||
"operator": {
|
||||
"type": "and"
|
||||
},
|
||||
"query": {
|
||||
"params": [
|
||||
"A",
|
||||
"5m",
|
||||
"now"
|
||||
]
|
||||
},
|
||||
"reducer": {
|
||||
"params": [],
|
||||
"type": "avg"
|
||||
},
|
||||
"type": "query"
|
||||
}
|
||||
],
|
||||
"executionErrorState": "alerting",
|
||||
"frequency": "10s",
|
||||
"handler": 1,
|
||||
"name": "Panel 2 alert",
|
||||
"noDataState": "no_data",
|
||||
"notifications": []
|
||||
},
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "Prometheus",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 11
|
||||
},
|
||||
"id": 11,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "go_goroutines",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{job}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [
|
||||
{
|
||||
"colorMode": "critical",
|
||||
"fill": true,
|
||||
"line": true,
|
||||
"op": "gt",
|
||||
"value": 200
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Panel 2",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"alert": {
|
||||
"conditions": [
|
||||
{
|
||||
"evaluator": {
|
||||
"params": [
|
||||
200
|
||||
],
|
||||
"type": "gt"
|
||||
},
|
||||
"operator": {
|
||||
"type": "and"
|
||||
},
|
||||
"query": {
|
||||
"params": [
|
||||
"A",
|
||||
"5m",
|
||||
"now"
|
||||
]
|
||||
},
|
||||
"reducer": {
|
||||
"params": [],
|
||||
"type": "avg"
|
||||
},
|
||||
"type": "query"
|
||||
}
|
||||
],
|
||||
"executionErrorState": "alerting",
|
||||
"frequency": "10s",
|
||||
"handler": 1,
|
||||
"name": "Panel 4 alert",
|
||||
"noDataState": "no_data",
|
||||
"notifications": []
|
||||
},
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "Prometheus",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 11
|
||||
},
|
||||
"id": 15,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "go_goroutines",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{job}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [
|
||||
{
|
||||
"colorMode": "critical",
|
||||
"fill": true,
|
||||
"line": true,
|
||||
"op": "gt",
|
||||
"value": 200
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Panel 4",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"title": "Row title",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 11
|
||||
},
|
||||
"id": 4,
|
||||
"title": "Row title",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"alert": {
|
||||
"conditions": [
|
||||
{
|
||||
"evaluator": {
|
||||
"params": [
|
||||
200
|
||||
],
|
||||
"type": "gt"
|
||||
},
|
||||
"operator": {
|
||||
"type": "and"
|
||||
},
|
||||
"query": {
|
||||
"params": [
|
||||
"A",
|
||||
"5m",
|
||||
"now"
|
||||
]
|
||||
},
|
||||
"reducer": {
|
||||
"params": [],
|
||||
"type": "avg"
|
||||
},
|
||||
"type": "query"
|
||||
}
|
||||
],
|
||||
"executionErrorState": "alerting",
|
||||
"frequency": "10s",
|
||||
"handler": 1,
|
||||
"name": "Panel 3 alert",
|
||||
"noDataState": "no_data",
|
||||
"notifications": []
|
||||
},
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "Prometheus",
|
||||
"fill": 1,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 12
|
||||
},
|
||||
"id": 12,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "go_goroutines",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{job}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"thresholds": [
|
||||
{
|
||||
"colorMode": "critical",
|
||||
"fill": true,
|
||||
"line": true,
|
||||
"op": "gt",
|
||||
"value": 200
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Panel 3",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"schemaVersion": 16,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"time_options": [
|
||||
"5m",
|
||||
"15m",
|
||||
"1h",
|
||||
"6h",
|
||||
"12h",
|
||||
"24h",
|
||||
"2d",
|
||||
"7d",
|
||||
"30d"
|
||||
]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "New dashboard Copy",
|
||||
"uid": "6v5pg36zk",
|
||||
"version": 17
|
||||
}
|
@@ -13,6 +13,7 @@ import (

// DashboardService service for operating on dashboards
type DashboardService interface {
    SaveDashboard(dto *SaveDashboardDTO) (*models.Dashboard, error)
    ImportDashboard(dto *SaveDashboardDTO) (*models.Dashboard, error)
}

// DashboardProvisioningService service for operating on provisioned dashboards
@@ -214,6 +215,20 @@ func (dr *dashboardServiceImpl) SaveDashboard(dto *SaveDashboardDTO) (*models.Da
    return cmd.Result, nil
}

func (dr *dashboardServiceImpl) ImportDashboard(dto *SaveDashboardDTO) (*models.Dashboard, error) {
    cmd, err := dr.buildSaveDashboardCommand(dto, false)
    if err != nil {
        return nil, err
    }

    err = bus.Dispatch(cmd)
    if err != nil {
        return nil, err
    }

    return cmd.Result, nil
}

type FakeDashboardService struct {
    SaveDashboardResult *models.Dashboard
    SaveDashboardError  error
@@ -230,6 +245,10 @@ func (s *FakeDashboardService) SaveDashboard(dto *SaveDashboardDTO) (*models.Das
    return s.SaveDashboardResult, s.SaveDashboardError
}

func (s *FakeDashboardService) ImportDashboard(dto *SaveDashboardDTO) (*models.Dashboard, error) {
    return s.SaveDashboard(dto)
}

func MockDashboardService(mock *FakeDashboardService) {
    NewService = func() DashboardService {
        return mock
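A minimal sketch, not part of this commit, of how the new ImportDashboard path and FakeDashboardService could be exercised together in a test; the test name, dashboard title and empty DTO are assumptions made for illustration.

```go
package dashboards

import (
    "testing"

    "github.com/grafana/grafana/pkg/models"
)

func TestImportDashboardViaFake(t *testing.T) {
    // Swap the package-level constructor for the fake, as MockDashboardService does above.
    fake := &FakeDashboardService{
        SaveDashboardResult: &models.Dashboard{Title: "imported"}, // value the fake hands back
    }
    MockDashboardService(fake)

    svc := NewService()
    dash, err := svc.ImportDashboard(&SaveDashboardDTO{}) // the fake delegates to its SaveDashboard
    if err != nil || dash.Title != "imported" {
        t.Fatalf("unexpected import result: %+v, %v", dash, err)
    }
}
```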
@@ -106,6 +106,18 @@ type SessionWrapper struct {
}

func (s *SessionWrapper) Start(c *macaron.Context) error {
    // See https://github.com/grafana/grafana/issues/11155 for details on why
    // a recover and retry is needed
    defer func() error {
        if err := recover(); err != nil {
            var retryErr error
            s.session, retryErr = s.manager.Start(c)
            return retryErr
        }

        return nil
    }()

    var err error
    s.session, err = s.manager.Start(c)
    return err
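For reference, a self-contained sketch of the recover-and-retry pattern used in Start above, with a stubbed starter in place of the session manager; every name in it is illustrative only.

```go
package main

import "fmt"

// startOnce stands in for s.manager.Start(c); the first call panics the way
// the session manager could per issue #11155.
func startOnce(attempt int) (string, error) {
    if attempt == 0 {
        panic("transient failure on first start")
    }
    return "session", nil
}

// startWithRetry recovers from the panic and retries once, reporting the
// retry's outcome through the named return values.
func startWithRetry() (s string, err error) {
    defer func() {
        if r := recover(); r != nil {
            s, err = startOnce(1)
        }
    }()
    return startOnce(0)
}

func main() {
    s, err := startWithRetry()
    fmt.Println(s, err) // prints: session <nil>
}
```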
@@ -24,6 +24,8 @@ import (
    "github.com/go-xorm/xorm"
    _ "github.com/lib/pq"
    _ "github.com/mattn/go-sqlite3"

    _ "github.com/grafana/grafana/pkg/tsdb/mssql"
)

type DatabaseConfig struct {
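The blank import above exists only for its side effect: the mssql package registers its query endpoint from init() (see pkg/tsdb/mssql/mssql.go later in this commit). A small, hypothetical sketch of that registration pattern, where Register stands in for tsdb.RegisterTsdbQueryEndpoint:

```go
package main

import "fmt"

var registry = map[string]func() string{}

// Register mimics tsdb.RegisterTsdbQueryEndpoint in spirit: it maps a data
// source type name to a factory. The real signature differs; illustrative only.
func Register(name string, factory func() string) {
    registry[name] = factory
}

// In the real code this init lives in pkg/tsdb/mssql, so a blank (`_`) import
// of that package is enough to make the "mssql" endpoint resolvable by name.
func init() {
    Register("mssql", func() string { return "mssql query endpoint" })
}

func main() {
    if factory, ok := registry["mssql"]; ok {
        fmt.Println(factory()) // prints: mssql query endpoint
    }
}
```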
@@ -14,6 +14,7 @@ type TestDB struct {
var TestDB_Sqlite3 = TestDB{DriverName: "sqlite3", ConnStr: ":memory:"}
var TestDB_Mysql = TestDB{DriverName: "mysql", ConnStr: "grafana:password@tcp(localhost:3306)/grafana_tests?collation=utf8mb4_unicode_ci"}
var TestDB_Postgres = TestDB{DriverName: "postgres", ConnStr: "user=grafanatest password=grafanatest host=localhost port=5432 dbname=grafanatest sslmode=disable"}
var TestDB_Mssql = TestDB{DriverName: "mssql", ConnStr: "server=localhost;port=1433;database=grafanatest;user id=grafana;password=Password!"}

func CleanDB(x *xorm.Engine) {
    if x.DriverName() == "postgres" {
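A hedged sketch of how the new TestDB_Mssql entry gets consumed, mirroring InitMSSQLTestDB in pkg/tsdb/mssql/mssql_test.go further down; it assumes a SQL Server reachable with the credentials above (for example the docker-compose block mentioned in that test file).

```go
package main

import (
    "log"

    _ "github.com/denisenkom/go-mssqldb" // mssql driver, same blank import as pkg/tsdb/mssql
    "github.com/go-xorm/xorm"

    "github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
)

func main() {
    x, err := xorm.NewEngine(sqlutil.TestDB_Mssql.DriverName, sqlutil.TestDB_Mssql.ConnStr)
    if err != nil {
        log.Fatalf("failed to init mssql test db: %v", err)
    }
    defer x.Close()

    // Reset the test database between runs (behaviour is driver-specific).
    sqlutil.CleanDB(x)
}
```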
129 pkg/tsdb/mssql/macros.go Normal file
@@ -0,0 +1,129 @@
package mssql

import (
    "fmt"
    "regexp"
    "strings"
    "time"

    "strconv"

    "github.com/grafana/grafana/pkg/tsdb"
)

const rsIdentifier = `([_a-zA-Z0-9]+)`
const sExpr = `\$` + rsIdentifier + `\(([^\)]*)\)`

type MsSqlMacroEngine struct {
    TimeRange *tsdb.TimeRange
    Query     *tsdb.Query
}

func NewMssqlMacroEngine() tsdb.SqlMacroEngine {
    return &MsSqlMacroEngine{}
}

func (m *MsSqlMacroEngine) Interpolate(query *tsdb.Query, timeRange *tsdb.TimeRange, sql string) (string, error) {
    m.TimeRange = timeRange
    m.Query = query
    rExp, _ := regexp.Compile(sExpr)
    var macroError error

    sql = replaceAllStringSubmatchFunc(rExp, sql, func(groups []string) string {
        args := strings.Split(groups[2], ",")
        for i, arg := range args {
            args[i] = strings.Trim(arg, " ")
        }
        res, err := m.evaluateMacro(groups[1], args)
        if err != nil && macroError == nil {
            macroError = err
            return "macro_error()"
        }
        return res
    })

    if macroError != nil {
        return "", macroError
    }

    return sql, nil
}

func replaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string {
    result := ""
    lastIndex := 0

    for _, v := range re.FindAllSubmatchIndex([]byte(str), -1) {
        groups := []string{}
        for i := 0; i < len(v); i += 2 {
            groups = append(groups, str[v[i]:v[i+1]])
        }

        result += str[lastIndex:v[0]] + repl(groups)
        lastIndex = v[1]
    }

    return result + str[lastIndex:]
}

func (m *MsSqlMacroEngine) evaluateMacro(name string, args []string) (string, error) {
    switch name {
    case "__time":
        if len(args) == 0 {
            return "", fmt.Errorf("missing time column argument for macro %v", name)
        }
        return fmt.Sprintf("%s AS time", args[0]), nil
    case "__utcTime":
        if len(args) == 0 {
            return "", fmt.Errorf("missing time column argument for macro %v", name)
        }
        return fmt.Sprintf("DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), %s) AS time", args[0]), nil
    case "__timeEpoch":
        if len(args) == 0 {
            return "", fmt.Errorf("missing time column argument for macro %v", name)
        }
        return fmt.Sprintf("DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), %s) ) AS time", args[0]), nil
    case "__timeFilter":
        if len(args) == 0 {
            return "", fmt.Errorf("missing time column argument for macro %v", name)
        }
        return fmt.Sprintf("%s >= DATEADD(s, %d+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01') AND %s <= DATEADD(s, %d+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01')", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    case "__timeFrom":
        return fmt.Sprintf("DATEADD(second, %d+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01')", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
    case "__timeTo":
        return fmt.Sprintf("DATEADD(second, %d+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01')", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    case "__timeGroup":
        if len(args) < 2 {
            return "", fmt.Errorf("macro %v needs time column and interval", name)
        }
        interval, err := time.ParseDuration(strings.Trim(args[1], `'"`))
        if err != nil {
            return "", fmt.Errorf("error parsing interval %v", args[1])
        }
        if len(args) == 3 {
            m.Query.Model.Set("fill", true)
            m.Query.Model.Set("fillInterval", interval.Seconds())
            if args[2] == "NULL" {
                m.Query.Model.Set("fillNull", true)
            } else {
                floatVal, err := strconv.ParseFloat(args[2], 64)
                if err != nil {
                    return "", fmt.Errorf("error parsing fill value %v", args[2])
                }
                m.Query.Model.Set("fillValue", floatVal)
            }
        }
        return fmt.Sprintf("cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), %s))/%.0f as int)*%.0f as int)", args[0], interval.Seconds(), interval.Seconds()), nil
    case "__unixEpochFilter":
        if len(args) == 0 {
            return "", fmt.Errorf("missing time column argument for macro %v", name)
        }
        return fmt.Sprintf("%s >= %d AND %s <= %d", args[0], uint64(m.TimeRange.GetFromAsMsEpoch()/1000), args[0], uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    case "__unixEpochFrom":
        return fmt.Sprintf("%d", uint64(m.TimeRange.GetFromAsMsEpoch()/1000)), nil
    case "__unixEpochTo":
        return fmt.Sprintf("%d", uint64(m.TimeRange.GetToAsMsEpoch()/1000)), nil
    default:
        return "", fmt.Errorf("Unknown macro %v", name)
    }
}
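A usage sketch of the macro engine above, modelled on the setup used in macros_test.go (the next file); the raw SQL, table and column names are invented for illustration.

```go
package main

import (
    "fmt"

    "github.com/grafana/grafana/pkg/components/simplejson"
    "github.com/grafana/grafana/pkg/tsdb"
    "github.com/grafana/grafana/pkg/tsdb/mssql"
)

func main() {
    engine := &mssql.MsSqlMacroEngine{}
    query := &tsdb.Query{Model: simplejson.New()}
    timeRange := &tsdb.TimeRange{From: "5m", To: "now"}

    rawSQL := "SELECT $__timeEpoch(time), avg(value) AS value FROM metric " +
        "WHERE $__timeFilter(time) GROUP BY $__timeGroup(time, '5m')"

    // Each $__ macro is rewritten into the DATEADD/DATEDIFF expression built in
    // evaluateMacro above; with a third argument, __timeGroup would also record
    // fill settings on the query model.
    sql, err := engine.Interpolate(query, timeRange, rawSQL)
    if err != nil {
        panic(err)
    }
    fmt.Println(sql)
}
```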
131 pkg/tsdb/mssql/macros_test.go Normal file
@@ -0,0 +1,131 @@
package mssql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestMacroEngine(t *testing.T) {
|
||||
Convey("MacroEngine", t, func() {
|
||||
engine := &MsSqlMacroEngine{}
|
||||
timeRange := &tsdb.TimeRange{From: "5m", To: "now"}
|
||||
query := &tsdb.Query{
|
||||
Model: simplejson.New(),
|
||||
}
|
||||
|
||||
Convey("interpolate __time function", func() {
|
||||
sql, err := engine.Interpolate(query, nil, "select $__time(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select time_column AS time")
|
||||
})
|
||||
|
||||
Convey("interpolate __utcTime function", func() {
|
||||
sql, err := engine.Interpolate(query, nil, "select $__utcTime(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time_column) AS time")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeEpoch function", func() {
|
||||
sql, err := engine.Interpolate(query, nil, "select $__timeEpoch(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time_column) ) AS time")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeEpoch function wrapped in aggregation", func() {
|
||||
sql, err := engine.Interpolate(query, nil, "select min($__timeEpoch(time_column))")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select min(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time_column) ) AS time)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFilter function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "WHERE $__timeFilter(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "WHERE time_column >= DATEADD(s, 18446744066914186738+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01') AND time_column <= DATEADD(s, 18446744066914187038+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01')")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeGroup function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m')")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "GROUP BY cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time_column))/300 as int)*300 as int)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeGroup function with spaces around arguments", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column , '5m')")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "GROUP BY cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time_column))/300 as int)*300 as int)")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeGroup function with fill (value = NULL)", func() {
|
||||
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', NULL)")
|
||||
|
||||
fill := query.Model.Get("fill").MustBool()
|
||||
fillNull := query.Model.Get("fillNull").MustBool()
|
||||
fillInterval := query.Model.Get("fillInterval").MustInt()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(fill, ShouldBeTrue)
|
||||
So(fillNull, ShouldBeTrue)
|
||||
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
|
||||
})
|
||||
|
||||
Convey("interpolate __timeGroup function with fill (value = float)", func() {
|
||||
_, err := engine.Interpolate(query, timeRange, "GROUP BY $__timeGroup(time_column,'5m', 1.5)")
|
||||
|
||||
fill := query.Model.Get("fill").MustBool()
|
||||
fillValue := query.Model.Get("fillValue").MustFloat64()
|
||||
fillInterval := query.Model.Get("fillInterval").MustInt()
|
||||
|
||||
So(err, ShouldBeNil)
|
||||
So(fill, ShouldBeTrue)
|
||||
So(fillValue, ShouldEqual, 1.5)
|
||||
So(fillInterval, ShouldEqual, 5*time.Minute.Seconds())
|
||||
})
|
||||
|
||||
Convey("interpolate __timeFrom function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "select $__timeFrom(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select DATEADD(second, 18446744066914186738+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01')")
|
||||
})
|
||||
|
||||
Convey("interpolate __timeTo function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "select $__timeTo(time_column)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select DATEADD(second, 18446744066914187038+DATEDIFF(second,GETUTCDATE(),GETDATE()), '1970-01-01')")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFilter function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFilter(18446744066914186738)")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738 >= 18446744066914186738 AND 18446744066914186738 <= 18446744066914187038")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochFrom function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochFrom()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914186738")
|
||||
})
|
||||
|
||||
Convey("interpolate __unixEpochTo function", func() {
|
||||
sql, err := engine.Interpolate(query, timeRange, "select $__unixEpochTo()")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
So(sql, ShouldEqual, "select 18446744066914187038")
|
||||
})
|
||||
})
|
||||
}
|
328 pkg/tsdb/mssql/mssql.go Normal file
@@ -0,0 +1,328 @@
package mssql
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"time"
|
||||
|
||||
"math"
|
||||
|
||||
_ "github.com/denisenkom/go-mssqldb"
|
||||
"github.com/go-xorm/core"
|
||||
"github.com/grafana/grafana/pkg/components/null"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/models"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
)
|
||||
|
||||
type MssqlQueryEndpoint struct {
|
||||
sqlEngine tsdb.SqlEngine
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
func init() {
|
||||
tsdb.RegisterTsdbQueryEndpoint("mssql", NewMssqlQueryEndpoint)
|
||||
}
|
||||
|
||||
func NewMssqlQueryEndpoint(datasource *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
|
||||
endpoint := &MssqlQueryEndpoint{
|
||||
log: log.New("tsdb.mssql"),
|
||||
}
|
||||
|
||||
endpoint.sqlEngine = &tsdb.DefaultSqlEngine{
|
||||
MacroEngine: NewMssqlMacroEngine(),
|
||||
}
|
||||
|
||||
cnnstr := generateConnectionString(datasource)
|
||||
endpoint.log.Debug("getEngine", "connection", cnnstr)
|
||||
|
||||
if err := endpoint.sqlEngine.InitEngine("mssql", datasource, cnnstr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
func generateConnectionString(datasource *models.DataSource) string {
|
||||
password := ""
|
||||
for key, value := range datasource.SecureJsonData.Decrypt() {
|
||||
if key == "password" {
|
||||
password = value
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
hostParts := strings.Split(datasource.Url, ":")
|
||||
if len(hostParts) < 2 {
|
||||
hostParts = append(hostParts, "1433")
|
||||
}
|
||||
|
||||
server, port := hostParts[0], hostParts[1]
|
||||
return fmt.Sprintf("server=%s;port=%s;database=%s;user id=%s;password=%s;",
|
||||
server,
|
||||
port,
|
||||
datasource.Database,
|
||||
datasource.User,
|
||||
password,
|
||||
)
|
||||
}
|
||||
|
||||
// Query is the main function for the MssqlQueryEndpoint
|
||||
func (e *MssqlQueryEndpoint) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
|
||||
return e.sqlEngine.Query(ctx, dsInfo, tsdbQuery, e.transformToTimeSeries, e.transformToTable)
|
||||
}
|
||||
|
||||
func (e MssqlQueryEndpoint) transformToTable(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult, tsdbQuery *tsdb.TsdbQuery) error {
|
||||
columnNames, err := rows.Columns()
|
||||
columnCount := len(columnNames)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rowLimit := 1000000
|
||||
rowCount := 0
|
||||
timeIndex := -1
|
||||
|
||||
table := &tsdb.Table{
|
||||
Columns: make([]tsdb.TableColumn, columnCount),
|
||||
Rows: make([]tsdb.RowValues, 0),
|
||||
}
|
||||
|
||||
for i, name := range columnNames {
|
||||
table.Columns[i].Text = name
|
||||
|
||||
// check if there is a column named time
|
||||
switch name {
|
||||
case "time":
|
||||
timeIndex = i
|
||||
}
|
||||
}
|
||||
|
||||
columnTypes, err := rows.ColumnTypes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for ; rows.Next(); rowCount++ {
|
||||
if rowCount > rowLimit {
|
||||
return fmt.Errorf("MsSQL query row limit exceeded, limit %d", rowLimit)
|
||||
}
|
||||
|
||||
values, err := e.getTypedRowData(columnTypes, rows)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// convert column named time to unix timestamp to make
|
||||
// native datetime mssql types work in annotation queries
|
||||
if timeIndex != -1 {
|
||||
switch value := values[timeIndex].(type) {
|
||||
case time.Time:
|
||||
values[timeIndex] = float64(value.Unix())
|
||||
}
|
||||
}
|
||||
|
||||
table.Rows = append(table.Rows, values)
|
||||
}
|
||||
|
||||
result.Tables = append(result.Tables, table)
|
||||
result.Meta.Set("rowCount", rowCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e MssqlQueryEndpoint) getTypedRowData(types []*sql.ColumnType, rows *core.Rows) (tsdb.RowValues, error) {
|
||||
values := make([]interface{}, len(types))
|
||||
valuePtrs := make([]interface{}, len(types))
|
||||
|
||||
for i, stype := range types {
|
||||
e.log.Debug("type", "type", stype)
|
||||
valuePtrs[i] = &values[i]
|
||||
}
|
||||
|
||||
if err := rows.Scan(valuePtrs...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert types not handled by denisenkom/go-mssqldb
|
||||
// unhandled types are returned as []byte
|
||||
for i := 0; i < len(types); i++ {
|
||||
if value, ok := values[i].([]byte); ok == true {
|
||||
switch types[i].DatabaseTypeName() {
|
||||
case "MONEY", "SMALLMONEY", "DECIMAL":
|
||||
if v, err := strconv.ParseFloat(string(value), 64); err == nil {
|
||||
values[i] = v
|
||||
} else {
|
||||
e.log.Debug("Rows", "Error converting numeric to float", value)
|
||||
}
|
||||
default:
|
||||
e.log.Debug("Rows", "Unknown database type", types[i].DatabaseTypeName(), "value", value)
|
||||
values[i] = string(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func (e MssqlQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult, tsdbQuery *tsdb.TsdbQuery) error {
|
||||
pointsBySeries := make(map[string]*tsdb.TimeSeries)
|
||||
seriesByQueryOrder := list.New()
|
||||
|
||||
columnNames, err := rows.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
columnTypes, err := rows.ColumnTypes()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rowLimit := 1000000
|
||||
rowCount := 0
|
||||
timeIndex := -1
|
||||
metricIndex := -1
|
||||
|
||||
// check columns of resultset: a column named time is mandatory
|
||||
// the first text column is treated as metric name unless a column named metric is present
|
||||
for i, col := range columnNames {
|
||||
switch col {
|
||||
case "time":
|
||||
timeIndex = i
|
||||
case "metric":
|
||||
metricIndex = i
|
||||
default:
|
||||
if metricIndex == -1 {
|
||||
switch columnTypes[i].DatabaseTypeName() {
|
||||
case "VARCHAR", "CHAR", "NVARCHAR", "NCHAR":
|
||||
metricIndex = i
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if timeIndex == -1 {
|
||||
return fmt.Errorf("Found no column named time")
|
||||
}
|
||||
|
||||
fillMissing := query.Model.Get("fill").MustBool(false)
|
||||
var fillInterval float64
|
||||
fillValue := null.Float{}
|
||||
if fillMissing {
|
||||
fillInterval = query.Model.Get("fillInterval").MustFloat64() * 1000
|
||||
if query.Model.Get("fillNull").MustBool(false) == false {
|
||||
fillValue.Float64 = query.Model.Get("fillValue").MustFloat64()
|
||||
fillValue.Valid = true
|
||||
}
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
var timestamp float64
|
||||
var value null.Float
|
||||
var metric string
|
||||
|
||||
if rowCount > rowLimit {
|
||||
return fmt.Errorf("MSSQL query row limit exceeded, limit %d", rowLimit)
|
||||
}
|
||||
|
||||
values, err := e.getTypedRowData(columnTypes, rows)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch columnValue := values[timeIndex].(type) {
|
||||
case int64:
|
||||
timestamp = float64(columnValue * 1000)
|
||||
case float64:
|
||||
timestamp = columnValue * 1000
|
||||
case time.Time:
|
||||
timestamp = (float64(columnValue.Unix()) * 1000) + float64(columnValue.Nanosecond()/1e6) // in case someone is trying to map times beyond 2262 :D
|
||||
default:
|
||||
return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp")
|
||||
}
|
||||
|
||||
if metricIndex >= 0 {
|
||||
if columnValue, ok := values[metricIndex].(string); ok == true {
|
||||
metric = columnValue
|
||||
} else {
|
||||
return fmt.Errorf("Column metric must be of type CHAR, VARCHAR, NCHAR or NVARCHAR. metric column name: %s type: %s but datatype is %T", columnNames[metricIndex], columnTypes[metricIndex].DatabaseTypeName(), values[metricIndex])
|
||||
}
|
||||
}
|
||||
|
||||
for i, col := range columnNames {
|
||||
if i == timeIndex || i == metricIndex {
|
||||
continue
|
||||
}
|
||||
|
||||
switch columnValue := values[i].(type) {
|
||||
case int64:
|
||||
value = null.FloatFrom(float64(columnValue))
|
||||
case float64:
|
||||
value = null.FloatFrom(columnValue)
|
||||
case nil:
|
||||
value.Valid = false
|
||||
default:
|
||||
return fmt.Errorf("Value column must have numeric datatype, column: %s type: %T value: %v", col, columnValue, columnValue)
|
||||
}
|
||||
if metricIndex == -1 {
|
||||
metric = col
|
||||
}
|
||||
|
||||
series, exist := pointsBySeries[metric]
|
||||
if exist == false {
|
||||
series = &tsdb.TimeSeries{Name: metric}
|
||||
pointsBySeries[metric] = series
|
||||
seriesByQueryOrder.PushBack(metric)
|
||||
}
|
||||
|
||||
if fillMissing {
|
||||
var intervalStart float64
|
||||
if exist == false {
|
||||
intervalStart = float64(tsdbQuery.TimeRange.MustGetFrom().UnixNano() / 1e6)
|
||||
} else {
|
||||
intervalStart = series.Points[len(series.Points)-1][1].Float64 + fillInterval
|
||||
}
|
||||
|
||||
// align interval start
|
||||
intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval
|
||||
|
||||
for i := intervalStart; i < timestamp; i += fillInterval {
|
||||
series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
|
||||
rowCount++
|
||||
}
|
||||
}
|
||||
|
||||
series.Points = append(series.Points, tsdb.TimePoint{value, null.FloatFrom(timestamp)})
|
||||
|
||||
e.log.Debug("Rows", "metric", metric, "time", timestamp, "value", value)
|
||||
}
|
||||
}
|
||||
|
||||
for elem := seriesByQueryOrder.Front(); elem != nil; elem = elem.Next() {
|
||||
key := elem.Value.(string)
|
||||
result.Series = append(result.Series, pointsBySeries[key])
|
||||
|
||||
if fillMissing {
|
||||
series := pointsBySeries[key]
|
||||
// fill in values from last fetched value till interval end
|
||||
intervalStart := series.Points[len(series.Points)-1][1].Float64
|
||||
intervalEnd := float64(tsdbQuery.TimeRange.MustGetTo().UnixNano() / 1e6)
|
||||
|
||||
// align interval start
|
||||
intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval
|
||||
for i := intervalStart + fillInterval; i < intervalEnd; i += fillInterval {
|
||||
series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
|
||||
rowCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
result.Meta.Set("rowCount", rowCount)
|
||||
return nil
|
||||
}
|
686 pkg/tsdb/mssql/mssql_test.go Normal file
@@ -0,0 +1,686 @@
package mssql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-xorm/xorm"
|
||||
"github.com/grafana/grafana/pkg/components/simplejson"
|
||||
"github.com/grafana/grafana/pkg/log"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/sqlutil"
|
||||
"github.com/grafana/grafana/pkg/tsdb"
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
// To run this test, remove the Skip from SkipConvey
|
||||
// and set up a MSSQL db named grafanatest and a user/password grafana/Password!
|
||||
// Use the docker/blocks/mssql_tests/docker-compose.yaml to spin up a
|
||||
// preconfigured MSSQL server suitable for running these tests.
|
||||
// If needed, change the variable below to the IP address of the database.
|
||||
var serverIP string = "localhost"
|
||||
|
||||
func TestMSSQL(t *testing.T) {
|
||||
SkipConvey("MSSQL", t, func() {
|
||||
x := InitMSSQLTestDB(t)
|
||||
|
||||
endpoint := &MssqlQueryEndpoint{
|
||||
sqlEngine: &tsdb.DefaultSqlEngine{
|
||||
MacroEngine: NewMssqlMacroEngine(),
|
||||
XormEngine: x,
|
||||
},
|
||||
log: log.New("tsdb.mssql"),
|
||||
}
|
||||
|
||||
sess := x.NewSession()
|
||||
defer sess.Close()
|
||||
|
||||
fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC)
|
||||
|
||||
Convey("Given a table with different native data types", func() {
|
||||
sql := `
|
||||
IF OBJECT_ID('dbo.[mssql_types]', 'U') IS NOT NULL
|
||||
DROP TABLE dbo.[mssql_types]
|
||||
|
||||
CREATE TABLE [mssql_types] (
|
||||
c_bit bit,
|
||||
|
||||
c_tinyint tinyint,
|
||||
c_smallint smallint,
|
||||
c_int int,
|
||||
c_bigint bigint,
|
||||
|
||||
c_money money,
|
||||
c_smallmoney smallmoney,
|
||||
c_numeric numeric(10,5),
|
||||
c_real real,
|
||||
c_decimal decimal(10,2),
|
||||
c_float float,
|
||||
|
||||
c_char char(10),
|
||||
c_varchar varchar(10),
|
||||
c_text text,
|
||||
|
||||
c_nchar nchar(12),
|
||||
c_nvarchar nvarchar(12),
|
||||
c_ntext ntext,
|
||||
|
||||
c_datetime datetime,
|
||||
c_datetime2 datetime2,
|
||||
c_smalldatetime smalldatetime,
|
||||
c_date date,
|
||||
c_time time,
|
||||
c_datetimeoffset datetimeoffset
|
||||
)
|
||||
`
|
||||
|
||||
_, err := sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
dt := time.Date(2018, 3, 14, 21, 20, 6, 527e6, time.UTC)
|
||||
dtFormat := "2006-01-02 15:04:05.999999999"
|
||||
d := dt.Format(dtFormat)
|
||||
dt2 := time.Date(2018, 3, 14, 21, 20, 6, 8896406e2, time.UTC)
|
||||
dt2Format := "2006-01-02 15:04:05.999999999 -07:00"
|
||||
d2 := dt2.Format(dt2Format)
|
||||
|
||||
sql = fmt.Sprintf(`
|
||||
INSERT INTO [mssql_types]
|
||||
SELECT
|
||||
1, 5, 20020, 980300, 1420070400, '$20000.15', '£2.15', 12345.12,
|
||||
1.11, 2.22, 3.33,
|
||||
'char10', 'varchar10', 'text',
|
||||
N'☺nchar12☺', N'☺nvarchar12☺', N'☺text☺',
|
||||
CAST('%s' AS DATETIME), CAST('%s' AS DATETIME2), CAST('%s' AS SMALLDATETIME), CAST('%s' AS DATE), CAST('%s' AS TIME), SWITCHOFFSET(CAST('%s' AS DATETIMEOFFSET), '-07:00')
|
||||
`, d, d2, d, d, d, d2)
|
||||
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("When doing a table query should map MSSQL column types to Go types", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT * FROM mssql_types",
|
||||
"format": "table",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
column := queryResult.Tables[0].Rows[0]
|
||||
|
||||
So(column[0].(bool), ShouldEqual, true)
|
||||
|
||||
So(column[1].(int64), ShouldEqual, 5)
|
||||
So(column[2].(int64), ShouldEqual, 20020)
|
||||
So(column[3].(int64), ShouldEqual, 980300)
|
||||
So(column[4].(int64), ShouldEqual, 1420070400)
|
||||
|
||||
So(column[5].(float64), ShouldEqual, 20000.15)
|
||||
So(column[6].(float64), ShouldEqual, 2.15)
|
||||
So(column[7].(float64), ShouldEqual, 12345.12)
|
||||
So(column[8].(float64), ShouldEqual, 1.1100000143051147)
|
||||
So(column[9].(float64), ShouldEqual, 2.22)
|
||||
So(column[10].(float64), ShouldEqual, 3.33)
|
||||
|
||||
So(column[11].(string), ShouldEqual, "char10 ")
|
||||
So(column[12].(string), ShouldEqual, "varchar10")
|
||||
So(column[13].(string), ShouldEqual, "text")
|
||||
|
||||
So(column[14].(string), ShouldEqual, "☺nchar12☺ ")
|
||||
So(column[15].(string), ShouldEqual, "☺nvarchar12☺")
|
||||
So(column[16].(string), ShouldEqual, "☺text☺")
|
||||
|
||||
So(column[17].(time.Time), ShouldEqual, dt)
|
||||
So(column[18].(time.Time), ShouldEqual, dt2)
|
||||
So(column[19].(time.Time), ShouldEqual, dt.Truncate(time.Minute))
|
||||
So(column[20].(time.Time), ShouldEqual, dt.Truncate(24*time.Hour))
|
||||
So(column[21].(time.Time), ShouldEqual, time.Date(1, 1, 1, dt.Hour(), dt.Minute(), dt.Second(), dt.Nanosecond(), time.UTC))
|
||||
So(column[22].(time.Time), ShouldEqual, dt2.In(time.FixedZone("UTC", int(-7*time.Hour))))
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Given a table with metrics that lacks data for some series ", func() {
|
||||
sql := `
|
||||
IF OBJECT_ID('dbo.[metric]', 'U') IS NOT NULL
|
||||
DROP TABLE dbo.[metric]
|
||||
|
||||
CREATE TABLE [metric] (
|
||||
time datetime,
|
||||
value int
|
||||
)
|
||||
`
|
||||
|
||||
_, err := sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
type metric struct {
|
||||
Time time.Time
|
||||
Value int64
|
||||
}
|
||||
|
||||
series := []*metric{}
|
||||
firstRange := genTimeRangeByInterval(fromStart, 10*time.Minute, 10*time.Second)
|
||||
secondRange := genTimeRangeByInterval(fromStart.Add(20*time.Minute), 10*time.Minute, 10*time.Second)
|
||||
|
||||
for _, t := range firstRange {
|
||||
series = append(series, &metric{
|
||||
Time: t,
|
||||
Value: 15,
|
||||
})
|
||||
}
|
||||
|
||||
for _, t := range secondRange {
|
||||
series = append(series, &metric{
|
||||
Time: t,
|
||||
Value: 20,
|
||||
})
|
||||
}
|
||||
|
||||
dtFormat := "2006-01-02 15:04:05.999999999"
|
||||
for _, s := range series {
|
||||
sql = fmt.Sprintf(`
|
||||
INSERT INTO metric (time, value)
|
||||
VALUES(CAST('%s' AS DATETIME), %d)
|
||||
`, s.Time.Format(dtFormat), s.Value)
|
||||
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
|
||||
Convey("When doing a metric query using timeGroup", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT $__timeGroup(time, '5m') AS time, avg(value) as value FROM metric GROUP BY $__timeGroup(time, '5m') ORDER BY 1",
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
|
||||
points := queryResult.Series[0].Points
|
||||
|
||||
So(len(points), ShouldEqual, 4)
|
||||
actualValueFirst := points[0][0].Float64
|
||||
actualTimeFirst := time.Unix(int64(points[0][1].Float64)/1000, 0)
|
||||
So(actualValueFirst, ShouldEqual, 15)
|
||||
So(actualTimeFirst, ShouldEqual, fromStart)
|
||||
|
||||
actualValueLast := points[3][0].Float64
|
||||
actualTimeLast := time.Unix(int64(points[3][1].Float64)/1000, 0)
|
||||
So(actualValueLast, ShouldEqual, 20)
|
||||
So(actualTimeLast, ShouldEqual, fromStart.Add(25*time.Minute))
|
||||
})
|
||||
|
||||
Convey("When doing a metric query using timeGroup with NULL fill enabled", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT $__timeGroup(time, '5m', NULL) AS time, avg(value) as value FROM metric GROUP BY $__timeGroup(time, '5m') ORDER BY 1",
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
TimeRange: &tsdb.TimeRange{
|
||||
From: fmt.Sprintf("%v", fromStart.Unix()*1000),
|
||||
To: fmt.Sprintf("%v", fromStart.Add(34*time.Minute).Unix()*1000),
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
|
||||
points := queryResult.Series[0].Points
|
||||
|
||||
So(len(points), ShouldEqual, 7)
|
||||
actualValueFirst := points[0][0].Float64
|
||||
actualTimeFirst := time.Unix(int64(points[0][1].Float64)/1000, 0)
|
||||
So(actualValueFirst, ShouldEqual, 15)
|
||||
So(actualTimeFirst, ShouldEqual, fromStart)
|
||||
|
||||
actualNullPoint := points[3][0]
|
||||
actualNullTime := time.Unix(int64(points[3][1].Float64)/1000, 0)
|
||||
So(actualNullPoint.Valid, ShouldBeFalse)
|
||||
So(actualNullTime, ShouldEqual, fromStart.Add(15*time.Minute))
|
||||
|
||||
actualValueLast := points[5][0].Float64
|
||||
actualTimeLast := time.Unix(int64(points[5][1].Float64)/1000, 0)
|
||||
So(actualValueLast, ShouldEqual, 20)
|
||||
So(actualTimeLast, ShouldEqual, fromStart.Add(25*time.Minute))
|
||||
|
||||
actualLastNullPoint := points[6][0]
|
||||
actualLastNullTime := time.Unix(int64(points[6][1].Float64)/1000, 0)
|
||||
So(actualLastNullPoint.Valid, ShouldBeFalse)
|
||||
So(actualLastNullTime, ShouldEqual, fromStart.Add(30*time.Minute))
|
||||
|
||||
})
|
||||
|
||||
Convey("When doing a metric query using timeGroup with float fill enabled", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT $__timeGroup(time, '5m', 1.5) AS time, avg(value) as value FROM metric GROUP BY $__timeGroup(time, '5m') ORDER BY 1",
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
TimeRange: &tsdb.TimeRange{
|
||||
From: fmt.Sprintf("%v", fromStart.Unix()*1000),
|
||||
To: fmt.Sprintf("%v", fromStart.Add(34*time.Minute).Unix()*1000),
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
|
||||
points := queryResult.Series[0].Points
|
||||
|
||||
So(points[6][0].Float64, ShouldEqual, 1.5)
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Given a table with metrics having multiple values and measurements", func() {
|
||||
sql := `
|
||||
IF OBJECT_ID('dbo.[metric_values]', 'U') IS NOT NULL
|
||||
DROP TABLE dbo.[metric_values]
|
||||
|
||||
CREATE TABLE [metric_values] (
|
||||
time datetime,
|
||||
measurement nvarchar(100),
|
||||
valueOne int,
|
||||
valueTwo int,
|
||||
)
|
||||
`
|
||||
|
||||
_, err := sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
type metricValues struct {
|
||||
Time time.Time
|
||||
Measurement string
|
||||
ValueOne int64
|
||||
ValueTwo int64
|
||||
}
|
||||
|
||||
rand.Seed(time.Now().Unix())
|
||||
rnd := func(min, max int64) int64 {
|
||||
return rand.Int63n(max-min) + min
|
||||
}
|
||||
|
||||
series := []*metricValues{}
|
||||
for _, t := range genTimeRangeByInterval(fromStart.Add(-30*time.Minute), 90*time.Minute, 5*time.Minute) {
|
||||
series = append(series, &metricValues{
|
||||
Time: t,
|
||||
Measurement: "Metric A",
|
||||
ValueOne: rnd(0, 100),
|
||||
ValueTwo: rnd(0, 100),
|
||||
})
|
||||
series = append(series, &metricValues{
|
||||
Time: t,
|
||||
Measurement: "Metric B",
|
||||
ValueOne: rnd(0, 100),
|
||||
ValueTwo: rnd(0, 100),
|
||||
})
|
||||
}
|
||||
|
||||
dtFormat := "2006-01-02 15:04:05"
|
||||
for _, s := range series {
|
||||
sql = fmt.Sprintf(`
|
||||
INSERT metric_values (time, measurement, valueOne, valueTwo)
|
||||
VALUES(CAST('%s' AS DATETIME), '%s', %d, %d)
|
||||
`, s.Time.Format(dtFormat), s.Measurement, s.ValueOne, s.ValueTwo)
|
||||
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
|
||||
Convey("When doing a metric query grouping by time and select metric column should return correct series", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT $__timeEpoch(time), measurement + ' - value one' as metric, valueOne FROM metric_values ORDER BY 1",
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
|
||||
So(len(queryResult.Series), ShouldEqual, 2)
|
||||
So(queryResult.Series[0].Name, ShouldEqual, "Metric A - value one")
|
||||
So(queryResult.Series[1].Name, ShouldEqual, "Metric B - value one")
|
||||
})
|
||||
|
||||
Convey("When doing a metric query grouping by time should return correct series", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT $__timeEpoch(time), valueOne, valueTwo FROM metric_values ORDER BY 1",
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
|
||||
So(len(queryResult.Series), ShouldEqual, 2)
|
||||
So(queryResult.Series[0].Name, ShouldEqual, "valueOne")
|
||||
So(queryResult.Series[1].Name, ShouldEqual, "valueTwo")
|
||||
})
|
||||
|
||||
Convey("Given a stored procedure that takes @from and @to in epoch time", func() {
|
||||
sql := `
|
||||
IF object_id('sp_test_epoch') IS NOT NULL
|
||||
DROP PROCEDURE sp_test_epoch
|
||||
`
|
||||
|
||||
_, err := sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
sql = `
|
||||
CREATE PROCEDURE sp_test_epoch(
|
||||
@from int,
|
||||
@to int
|
||||
) AS
|
||||
BEGIN
|
||||
SELECT
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/600 as int)*600 as int) as time,
|
||||
measurement + ' - value one' as metric,
|
||||
avg(valueOne) as value
|
||||
FROM
|
||||
metric_values
|
||||
WHERE
|
||||
time >= DATEADD(s, @from, '1970-01-01') AND time <= DATEADD(s, @to, '1970-01-01')
|
||||
GROUP BY
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/600 as int)*600 as int),
|
||||
measurement
|
||||
UNION ALL
|
||||
SELECT
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/600 as int)*600 as int) as time,
|
||||
measurement + ' - value two' as metric,
|
||||
avg(valueTwo) as value
|
||||
FROM
|
||||
metric_values
|
||||
WHERE
|
||||
time >= DATEADD(s, @from, '1970-01-01') AND time <= DATEADD(s, @to, '1970-01-01')
|
||||
GROUP BY
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second,GETDATE(),GETUTCDATE()), time))/600 as int)*600 as int),
|
||||
measurement
|
||||
ORDER BY 1
|
||||
END
|
||||
`
|
||||
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("When doing a metric query using stored procedure should return correct result", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": `DECLARE
|
||||
@from int = $__unixEpochFrom(),
|
||||
@to int = $__unixEpochTo()
|
||||
|
||||
EXEC dbo.sp_test_epoch @from, @to`,
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
TimeRange: &tsdb.TimeRange{
|
||||
From: "1521117000000",
|
||||
To: "1521122100000",
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
|
||||
So(len(queryResult.Series), ShouldEqual, 4)
|
||||
So(queryResult.Series[0].Name, ShouldEqual, "Metric A - value one")
|
||||
So(queryResult.Series[1].Name, ShouldEqual, "Metric B - value one")
|
||||
So(queryResult.Series[2].Name, ShouldEqual, "Metric A - value two")
|
||||
So(queryResult.Series[3].Name, ShouldEqual, "Metric B - value two")
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Given a stored procedure that takes @from and @to in datetime", func() {
|
||||
sql := `
|
||||
IF object_id('sp_test_datetime') IS NOT NULL
|
||||
DROP PROCEDURE sp_test_datetime
|
||||
`
|
||||
|
||||
_, err := sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
sql = `
|
||||
CREATE PROCEDURE sp_test_datetime(
|
||||
@from datetime,
|
||||
@to datetime
|
||||
) AS
|
||||
BEGIN
|
||||
SELECT
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/600 as int)*600 as int) as time,
|
||||
measurement + ' - value one' as metric,
|
||||
avg(valueOne) as value
|
||||
FROM
|
||||
metric_values
|
||||
WHERE
|
||||
time >= @from AND time <= @to
|
||||
GROUP BY
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/600 as int)*600 as int),
|
||||
measurement
|
||||
UNION ALL
|
||||
SELECT
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/600 as int)*600 as int) as time,
|
||||
measurement + ' - value two' as metric,
|
||||
avg(valueTwo) as value
|
||||
FROM
|
||||
metric_values
|
||||
WHERE
|
||||
time >= @from AND time <= @to
|
||||
GROUP BY
|
||||
cast(cast(DATEDIFF(second, {d '1970-01-01'}, time)/600 as int)*600 as int),
|
||||
measurement
|
||||
ORDER BY 1
|
||||
END
|
||||
`
|
||||
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
Convey("When doing a metric query using stored procedure should return correct result", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": `DECLARE
|
||||
@from int = $__unixEpochFrom(),
|
||||
@to int = $__unixEpochTo()
|
||||
|
||||
EXEC dbo.sp_test_epoch @from, @to`,
|
||||
"format": "time_series",
|
||||
}),
|
||||
RefId: "A",
|
||||
},
|
||||
},
|
||||
TimeRange: &tsdb.TimeRange{
|
||||
From: "1521117000000",
|
||||
To: "1521122100000",
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["A"]
|
||||
So(err, ShouldBeNil)
|
||||
So(queryResult.Error, ShouldBeNil)
|
||||
|
||||
So(len(queryResult.Series), ShouldEqual, 4)
|
||||
So(queryResult.Series[0].Name, ShouldEqual, "Metric A - value one")
|
||||
So(queryResult.Series[1].Name, ShouldEqual, "Metric B - value one")
|
||||
So(queryResult.Series[2].Name, ShouldEqual, "Metric A - value two")
|
||||
So(queryResult.Series[3].Name, ShouldEqual, "Metric B - value two")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Convey("Given a table with event data", func() {
|
||||
sql := `
|
||||
IF OBJECT_ID('dbo.[event]', 'U') IS NOT NULL
|
||||
DROP TABLE dbo.[event]
|
||||
|
||||
CREATE TABLE [event] (
|
||||
time_sec bigint,
|
||||
description nvarchar(100),
|
||||
tags nvarchar(100),
|
||||
)
|
||||
`
|
||||
|
||||
_, err := sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
type event struct {
|
||||
TimeSec int64
|
||||
Description string
|
||||
Tags string
|
||||
}
|
||||
|
||||
events := []*event{}
|
||||
for _, t := range genTimeRangeByInterval(fromStart.Add(-20*time.Minute), 60*time.Minute, 25*time.Minute) {
|
||||
events = append(events, &event{
|
||||
TimeSec: t.Unix(),
|
||||
Description: "Someone deployed something",
|
||||
Tags: "deploy",
|
||||
})
|
||||
events = append(events, &event{
|
||||
TimeSec: t.Add(5 * time.Minute).Unix(),
|
||||
Description: "New support ticket registered",
|
||||
Tags: "ticket",
|
||||
})
|
||||
}
|
||||
|
||||
for _, e := range events {
|
||||
sql = fmt.Sprintf(`
|
||||
INSERT [event] (time_sec, description, tags)
|
||||
VALUES(%d, '%s', '%s')
|
||||
`, e.TimeSec, e.Description, e.Tags)
|
||||
|
||||
_, err = sess.Exec(sql)
|
||||
So(err, ShouldBeNil)
|
||||
}
|
||||
|
||||
Convey("When doing an annotation query of deploy events should return expected result", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT time_sec as time, description as [text], tags FROM [event] WHERE $__unixEpochFilter(time_sec) AND tags='deploy' ORDER BY 1 ASC",
|
||||
"format": "table",
|
||||
}),
|
||||
RefId: "Deploys",
|
||||
},
|
||||
},
|
||||
TimeRange: &tsdb.TimeRange{
|
||||
From: fmt.Sprintf("%v", fromStart.Add(-20*time.Minute).Unix()*1000),
|
||||
To: fmt.Sprintf("%v", fromStart.Add(40*time.Minute).Unix()*1000),
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["Deploys"]
|
||||
So(err, ShouldBeNil)
|
||||
So(len(queryResult.Tables[0].Rows), ShouldEqual, 3)
|
||||
})
|
||||
|
||||
Convey("When doing an annotation query of ticket events should return expected result", func() {
|
||||
query := &tsdb.TsdbQuery{
|
||||
Queries: []*tsdb.Query{
|
||||
{
|
||||
Model: simplejson.NewFromAny(map[string]interface{}{
|
||||
"rawSql": "SELECT time_sec as time, description as [text], tags FROM [event] WHERE $__unixEpochFilter(time_sec) AND tags='ticket' ORDER BY 1 ASC",
|
||||
"format": "table",
|
||||
}),
|
||||
RefId: "Tickets",
|
||||
},
|
||||
},
|
||||
TimeRange: &tsdb.TimeRange{
|
||||
From: fmt.Sprintf("%v", fromStart.Add(-20*time.Minute).Unix()*1000),
|
||||
To: fmt.Sprintf("%v", fromStart.Add(40*time.Minute).Unix()*1000),
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := endpoint.Query(nil, nil, query)
|
||||
queryResult := resp.Results["Tickets"]
|
||||
So(err, ShouldBeNil)
|
||||
So(len(queryResult.Tables[0].Rows), ShouldEqual, 3)
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func InitMSSQLTestDB(t *testing.T) *xorm.Engine {
|
||||
x, err := xorm.NewEngine(sqlutil.TestDB_Mssql.DriverName, strings.Replace(sqlutil.TestDB_Mssql.ConnStr, "localhost", serverIP, 1))
|
||||
|
||||
// x.ShowSQL()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to init mssql db %v", err)
|
||||
}
|
||||
|
||||
sqlutil.CleanDB(x)
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
func genTimeRangeByInterval(from time.Time, duration time.Duration, interval time.Duration) []time.Time {
|
||||
durationSec := int64(duration.Seconds())
|
||||
intervalSec := int64(interval.Seconds())
|
||||
timeRange := []time.Time{}
|
||||
|
||||
for i := int64(0); i < durationSec; i += intervalSec {
|
||||
timeRange = append(timeRange, from)
|
||||
from = from.Add(time.Duration(int64(time.Second) * intervalSec))
|
||||
}
|
||||
|
||||
return timeRange
|
||||
}
|
@@ -36,6 +36,8 @@ import 'brace/mode/text';
import 'brace/snippets/text';
import 'brace/mode/sql';
import 'brace/snippets/sql';
import 'brace/mode/sqlserver';
import 'brace/snippets/sqlserver';
import 'brace/mode/markdown';
import 'brace/snippets/markdown';
import 'brace/mode/json';
@@ -1,7 +1,7 @@
import React from "react";
import classNames from "classnames";
import { observer } from "mobx-react";
import { store } from "app/stores/store";
import React from 'react';
import classNames from 'classnames';
import { observer } from 'mobx-react';
import { store } from 'app/stores/store';

export interface SearchResultProps {
  search: any;
@@ -13,7 +13,7 @@ export class SearchResult extends React.Component<SearchResultProps, any> {
    super(props);

    this.state = {
      search: store.search
      search: store.search,
    };

    store.search.query();
@@ -56,29 +56,20 @@ export class SearchResultSection extends React.Component<SectionProps, any> {
  render() {
    let collapseClassNames = classNames({
      fa: true,
      "fa-plus": !this.props.section.expanded,
      "fa-minus": this.props.section.expanded,
      "search-section__header__toggle": true
      'fa-plus': !this.props.section.expanded,
      'fa-minus': this.props.section.expanded,
      'search-section__header__toggle': true,
    });

    return (
      <div className="search-section" key={this.props.section.id}>
        <div className="search-section__header">
          <i
            className={classNames(
              "search-section__header__icon",
              this.props.section.icon
            )}
          />
          <span className="search-section__header__text">
            {this.props.section.title}
          </span>
          <i className={classNames('search-section__header__icon', this.props.section.icon)} />
          <span className="search-section__header__text">{this.props.section.title}</span>
          <i className={collapseClassNames} onClick={this.toggleSection} />
        </div>
        {this.props.section.expanded && (
          <div className="search-section__items">
            {this.props.section.items.map(this.renderItem)}
          </div>
          <div className="search-section__items">{this.props.section.items.map(this.renderItem)}</div>
        )}
      </div>
    );
@ -133,12 +133,12 @@ kbn.secondsToHms = function(seconds) {
|
||||
|
||||
kbn.secondsToHhmmss = function(seconds) {
|
||||
var strings = [];
|
||||
var numhours = Math.floor(seconds/3600);
|
||||
var numminutes = Math.floor((seconds%3600)/60);
|
||||
var numseconds = Math.floor((seconds%3600)%60);
|
||||
numhours > 9 ? strings.push(''+numhours) : strings.push('0'+numhours);
|
||||
numminutes > 9 ? strings.push(''+numminutes) : strings.push('0'+numminutes);
|
||||
numseconds > 9 ? strings.push(''+numseconds) : strings.push('0'+numseconds);
|
||||
var numhours = Math.floor(seconds / 3600);
|
||||
var numminutes = Math.floor((seconds % 3600) / 60);
|
||||
var numseconds = Math.floor((seconds % 3600) % 60);
|
||||
numhours > 9 ? strings.push('' + numhours) : strings.push('0' + numhours);
|
||||
numminutes > 9 ? strings.push('' + numminutes) : strings.push('0' + numminutes);
|
||||
numseconds > 9 ? strings.push('' + numseconds) : strings.push('0' + numseconds);
|
||||
return strings.join(':');
|
||||
};
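A quick sanity check of the formatter above (not part of the original diff); the expected output follows directly from the padding logic shown:

// Each component is zero-padded to two digits.
kbn.secondsToHhmmss(3661); // "01:01:01"
kbn.secondsToHhmmss(45296); // "12:34:56"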
|
||||
|
||||
@ -866,9 +866,9 @@ kbn.getUnitFormats = function() {
|
||||
{ text: 'Hryvnias (₴)', value: 'currencyUAH' },
|
||||
{ text: 'Real (R$)', value: 'currencyBRL' },
|
||||
{ text: 'Danish Krone (kr)', value: 'currencyDKK' },
|
||||
{ text: 'Icelandic Krone (kr)', value: 'currencyISK' },
|
||||
{ text: 'Icelandic Króna (kr)', value: 'currencyISK' },
|
||||
{ text: 'Norwegian Krone (kr)', value: 'currencyNOK' },
|
||||
{ text: 'Swedish Krone (kr)', value: 'currencySEK' },
|
||||
{ text: 'Swedish Krona (kr)', value: 'currencySEK' },
|
||||
],
|
||||
},
|
||||
{
|
||||
|
@ -30,7 +30,10 @@ describe('when updating view state', function() {
|
||||
beforeEach(
|
||||
angularMocks.inject(function(dashboardViewStateSrv, $location, $rootScope) {
|
||||
$rootScope.onAppEvent = function() {};
|
||||
$rootScope.dashboard = { meta: {} };
|
||||
$rootScope.dashboard = {
|
||||
meta: {},
|
||||
panels: [],
|
||||
};
|
||||
viewState = dashboardViewStateSrv.create($rootScope);
|
||||
location = $location;
|
||||
})
|
||||
|
@ -1,6 +1,7 @@
|
||||
import angular from 'angular';
|
||||
import _ from 'lodash';
|
||||
import config from 'app/core/config';
|
||||
import { DashboardModel } from './dashboard_model';
|
||||
|
||||
// represents the transient view state
|
||||
// like fullscreen panel & edit
|
||||
@ -8,7 +9,7 @@ export class DashboardViewState {
|
||||
state: any;
|
||||
panelScopes: any;
|
||||
$scope: any;
|
||||
dashboard: any;
|
||||
dashboard: DashboardModel;
|
||||
editStateChanged: any;
|
||||
fullscreenPanel: any;
|
||||
oldTimeRange: any;
|
||||
@ -89,6 +90,12 @@ export class DashboardViewState {
|
||||
}
|
||||
}
|
||||
|
||||
if ((this.state.fullscreen || this.dashboard.meta.soloMode) && this.state.panelId) {
|
||||
// Trying to render a panel in fullscreen while it is inside a collapsed row causes an issue,
// so in this case expand the collapsed row first.
|
||||
this.toggleCollapsedPanelRow(this.state.panelId);
|
||||
}
|
||||
|
||||
// if no edit state, clean up the tab param
|
||||
if (!this.state.edit) {
|
||||
delete this.state.tab;
|
||||
@ -103,6 +110,19 @@ export class DashboardViewState {
|
||||
this.syncState();
|
||||
}
|
||||
|
||||
toggleCollapsedPanelRow(panelId) {
|
||||
for (let panel of this.dashboard.panels) {
|
||||
if (panel.collapsed) {
|
||||
for (let rowPanel of panel.panels) {
|
||||
if (rowPanel.id === panelId) {
|
||||
this.dashboard.toggleRow(panel);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
syncState() {
|
||||
if (this.panelScopes.length === 0) {
|
||||
return;
|
||||
|
@ -8,6 +8,7 @@ import * as mixedPlugin from 'app/plugins/datasource/mixed/module';
|
||||
import * as mysqlPlugin from 'app/plugins/datasource/mysql/module';
|
||||
import * as postgresPlugin from 'app/plugins/datasource/postgres/module';
|
||||
import * as prometheusPlugin from 'app/plugins/datasource/prometheus/module';
|
||||
import * as mssqlPlugin from 'app/plugins/datasource/mssql/module';
|
||||
|
||||
import * as textPanel from 'app/plugins/panel/text/module';
|
||||
import * as graphPanel from 'app/plugins/panel/graph/module';
|
||||
@ -32,6 +33,7 @@ const builtInPlugins = {
|
||||
'app/plugins/datasource/mixed/module': mixedPlugin,
|
||||
'app/plugins/datasource/mysql/module': mysqlPlugin,
|
||||
'app/plugins/datasource/postgres/module': postgresPlugin,
|
||||
'app/plugins/datasource/mssql/module': mssqlPlugin,
|
||||
'app/plugins/datasource/prometheus/module': prometheusPlugin,
|
||||
'app/plugins/app/testdata/module': testDataAppPlugin,
|
||||
'app/plugins/app/testdata/datasource/module': testDataDSPlugin,
|
||||
|
@ -1,28 +1,24 @@
|
||||
define([
|
||||
'angular',
|
||||
'lodash',
|
||||
'./query_def',
|
||||
],
|
||||
function (angular, _, queryDef) {
|
||||
'use strict';
|
||||
import angular from 'angular';
|
||||
import _ from 'lodash';
|
||||
import * as queryDef from './query_def';
|
||||
|
||||
var module = angular.module('grafana.directives');
|
||||
export function elasticBucketAgg() {
|
||||
return {
|
||||
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html',
|
||||
controller: 'ElasticBucketAggCtrl',
|
||||
restrict: 'E',
|
||||
scope: {
|
||||
target: '=',
|
||||
index: '=',
|
||||
onChange: '&',
|
||||
getFields: '&',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
module.directive('elasticBucketAgg', function() {
|
||||
return {
|
||||
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html',
|
||||
controller: 'ElasticBucketAggCtrl',
|
||||
restrict: 'E',
|
||||
scope: {
|
||||
target: "=",
|
||||
index: "=",
|
||||
onChange: "&",
|
||||
getFields: "&",
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
module.controller('ElasticBucketAggCtrl', function($scope, uiSegmentSrv, $q, $rootScope) {
|
||||
export class ElasticBucketAggCtrl {
|
||||
/** @ngInject */
|
||||
constructor($scope, uiSegmentSrv, $q, $rootScope) {
|
||||
var bucketAggs = $scope.target.bucketAggs;
|
||||
|
||||
$scope.orderByOptions = [];
|
||||
@ -39,9 +35,13 @@ function (angular, _, queryDef) {
|
||||
return queryDef.sizeOptions;
|
||||
};
|
||||
|
||||
$rootScope.onAppEvent('elastic-query-updated', function() {
|
||||
$scope.validateModel();
|
||||
}, $scope);
|
||||
$rootScope.onAppEvent(
|
||||
'elastic-query-updated',
|
||||
function() {
|
||||
$scope.validateModel();
|
||||
},
|
||||
$scope
|
||||
);
|
||||
|
||||
$scope.init = function() {
|
||||
$scope.agg = bucketAggs[$scope.index];
|
||||
@ -56,10 +56,10 @@ function (angular, _, queryDef) {
|
||||
$scope.agg.settings = {};
|
||||
$scope.showOptions = false;
|
||||
|
||||
switch($scope.agg.type) {
|
||||
switch ($scope.agg.type) {
|
||||
case 'date_histogram':
|
||||
case 'histogram':
|
||||
case 'terms': {
|
||||
case 'terms': {
|
||||
delete $scope.agg.query;
|
||||
$scope.agg.field = 'select field';
|
||||
break;
|
||||
@ -84,15 +84,15 @@ function (angular, _, queryDef) {
|
||||
$scope.isFirst = $scope.index === 0;
|
||||
$scope.bucketAggCount = bucketAggs.length;
|
||||
|
||||
var settingsLinkText = "";
|
||||
var settingsLinkText = '';
|
||||
var settings = $scope.agg.settings || {};
|
||||
|
||||
switch($scope.agg.type) {
|
||||
switch ($scope.agg.type) {
|
||||
case 'terms': {
|
||||
settings.order = settings.order || "desc";
|
||||
settings.size = settings.size || "10";
|
||||
settings.order = settings.order || 'desc';
|
||||
settings.size = settings.size || '10';
|
||||
settings.min_doc_count = settings.min_doc_count || 1;
|
||||
settings.orderBy = settings.orderBy || "_term";
|
||||
settings.orderBy = settings.orderBy || '_term';
|
||||
|
||||
if (settings.size !== '0') {
|
||||
settingsLinkText = queryDef.describeOrder(settings.order) + ' ' + settings.size + ', ';
|
||||
@ -111,13 +111,17 @@ function (angular, _, queryDef) {
|
||||
break;
|
||||
}
|
||||
case 'filters': {
|
||||
settings.filters = settings.filters || [{query: '*'}];
|
||||
settingsLinkText = _.reduce(settings.filters, function(memo, value, index) {
|
||||
memo += 'Q' + (index + 1) + ' = ' + value.query + ' ';
|
||||
return memo;
|
||||
}, '');
|
||||
settings.filters = settings.filters || [{ query: '*' }];
|
||||
settingsLinkText = _.reduce(
|
||||
settings.filters,
|
||||
function(memo, value, index) {
|
||||
memo += 'Q' + (index + 1) + ' = ' + value.query + ' ';
|
||||
return memo;
|
||||
},
|
||||
''
|
||||
);
|
||||
if (settingsLinkText.length > 50) {
|
||||
settingsLinkText = settingsLinkText.substr(0, 50) + "...";
|
||||
settingsLinkText = settingsLinkText.substr(0, 50) + '...';
|
||||
}
|
||||
settingsLinkText = 'Filter Queries (' + settings.filters.length + ')';
|
||||
break;
|
||||
@ -165,7 +169,7 @@ function (angular, _, queryDef) {
|
||||
};
|
||||
|
||||
$scope.addFiltersQuery = function() {
|
||||
$scope.agg.settings.filters.push({query: '*'});
|
||||
$scope.agg.settings.filters.push({ query: '*' });
|
||||
};
|
||||
|
||||
$scope.removeFiltersQuery = function(filter) {
|
||||
@ -182,7 +186,7 @@ function (angular, _, queryDef) {
|
||||
|
||||
$scope.getFieldsInternal = function() {
|
||||
if ($scope.agg.type === 'date_histogram') {
|
||||
return $scope.getFields({$fieldType: 'date'});
|
||||
return $scope.getFields({ $fieldType: 'date' });
|
||||
} else {
|
||||
return $scope.getFields();
|
||||
}
|
||||
@ -198,14 +202,18 @@ function (angular, _, queryDef) {
|
||||
var addIndex = bucketAggs.length - 1;
|
||||
|
||||
if (lastBucket && lastBucket.type === 'date_histogram') {
|
||||
addIndex - 1;
|
||||
addIndex -= 1;
|
||||
}
|
||||
|
||||
var id = _.reduce($scope.target.bucketAggs.concat($scope.target.metrics), function(max, val) {
|
||||
return parseInt(val.id) > max ? parseInt(val.id) : max;
|
||||
}, 0);
|
||||
var id = _.reduce(
|
||||
$scope.target.bucketAggs.concat($scope.target.metrics),
|
||||
function(max, val) {
|
||||
return parseInt(val.id) > max ? parseInt(val.id) : max;
|
||||
},
|
||||
0
|
||||
);
|
||||
|
||||
bucketAggs.splice(addIndex, 0, {type: "terms", field: "select field", id: (id+1).toString(), fake: true});
|
||||
bucketAggs.splice(addIndex, 0, { type: 'terms', field: 'select field', id: (id + 1).toString(), fake: true });
|
||||
$scope.onChange();
|
||||
};
|
||||
|
||||
@ -215,7 +223,9 @@ function (angular, _, queryDef) {
|
||||
};
|
||||
|
||||
$scope.init();
|
||||
}
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
});
|
||||
var module = angular.module('grafana.directives');
|
||||
module.directive('elasticBucketAgg', elasticBucketAgg);
|
||||
module.controller('ElasticBucketAggCtrl', ElasticBucketAggCtrl);
|
@ -1,31 +1,25 @@
|
||||
define([
|
||||
'angular',
|
||||
'lodash',
|
||||
'./query_def'
|
||||
],
|
||||
function (angular, _, queryDef) {
|
||||
'use strict';
|
||||
import angular from 'angular';
|
||||
import _ from 'lodash';
|
||||
import * as queryDef from './query_def';
|
||||
|
||||
var module = angular.module('grafana.directives');
|
||||
export function elasticMetricAgg() {
|
||||
return {
|
||||
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/metric_agg.html',
|
||||
controller: 'ElasticMetricAggCtrl',
|
||||
restrict: 'E',
|
||||
scope: {
|
||||
target: '=',
|
||||
index: '=',
|
||||
onChange: '&',
|
||||
getFields: '&',
|
||||
esVersion: '=',
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
module.directive('elasticMetricAgg', function() {
|
||||
return {
|
||||
templateUrl: 'public/app/plugins/datasource/elasticsearch/partials/metric_agg.html',
|
||||
controller: 'ElasticMetricAggCtrl',
|
||||
restrict: 'E',
|
||||
scope: {
|
||||
target: "=",
|
||||
index: "=",
|
||||
onChange: "&",
|
||||
getFields: "&",
|
||||
esVersion: '='
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
module.controller('ElasticMetricAggCtrl', function($scope, uiSegmentSrv, $q, $rootScope) {
|
||||
export class ElasticMetricAggCtrl {
|
||||
constructor($scope, uiSegmentSrv, $q, $rootScope) {
|
||||
var metricAggs = $scope.target.metrics;
|
||||
|
||||
$scope.metricAggTypes = queryDef.getMetricAggTypes($scope.esVersion);
|
||||
$scope.extendedStats = queryDef.extendedStats;
|
||||
$scope.pipelineAggOptions = [];
|
||||
@ -41,17 +35,21 @@ function (angular, _, queryDef) {
|
||||
$scope.pipelineAggOptions = queryDef.getPipelineAggOptions($scope.target);
|
||||
};
|
||||
|
||||
$rootScope.onAppEvent('elastic-query-updated', function() {
|
||||
$scope.index = _.indexOf(metricAggs, $scope.agg);
|
||||
$scope.updatePipelineAggOptions();
|
||||
$scope.validateModel();
|
||||
}, $scope);
|
||||
$rootScope.onAppEvent(
|
||||
'elastic-query-updated',
|
||||
function() {
|
||||
$scope.index = _.indexOf(metricAggs, $scope.agg);
|
||||
$scope.updatePipelineAggOptions();
|
||||
$scope.validateModel();
|
||||
},
|
||||
$scope
|
||||
);
|
||||
|
||||
$scope.validateModel = function() {
|
||||
$scope.isFirst = $scope.index === 0;
|
||||
$scope.isSingle = metricAggs.length === 1;
|
||||
$scope.settingsLinkText = '';
|
||||
$scope.aggDef = _.find($scope.metricAggTypes, {value: $scope.agg.type});
|
||||
$scope.aggDef = _.find($scope.metricAggTypes, { value: $scope.agg.type });
|
||||
|
||||
if (queryDef.isPipelineAgg($scope.agg.type)) {
|
||||
$scope.agg.pipelineAgg = $scope.agg.pipelineAgg || 'select metric';
|
||||
@ -67,30 +65,34 @@ function (angular, _, queryDef) {
|
||||
} else if (!$scope.agg.field) {
|
||||
$scope.agg.field = 'select field';
|
||||
}
|
||||
switch($scope.agg.type) {
|
||||
switch ($scope.agg.type) {
|
||||
case 'cardinality': {
|
||||
var precision_threshold = $scope.agg.settings.precision_threshold || '';
|
||||
$scope.settingsLinkText = 'Precision threshold: ' + precision_threshold;
|
||||
break;
|
||||
}
|
||||
case 'percentiles': {
|
||||
$scope.agg.settings.percents = $scope.agg.settings.percents || [25,50,75,95,99];
|
||||
$scope.agg.settings.percents = $scope.agg.settings.percents || [25, 50, 75, 95, 99];
|
||||
$scope.settingsLinkText = 'Values: ' + $scope.agg.settings.percents.join(',');
|
||||
break;
|
||||
}
|
||||
case 'extended_stats': {
|
||||
if (_.keys($scope.agg.meta).length === 0) {
|
||||
if (_.keys($scope.agg.meta).length === 0) {
|
||||
$scope.agg.meta.std_deviation_bounds_lower = true;
|
||||
$scope.agg.meta.std_deviation_bounds_upper = true;
|
||||
}
|
||||
|
||||
var stats = _.reduce($scope.agg.meta, function(memo, val, key) {
|
||||
if (val) {
|
||||
var def = _.find($scope.extendedStats, {value: key});
|
||||
memo.push(def.text);
|
||||
}
|
||||
return memo;
|
||||
}, []);
|
||||
var stats = _.reduce(
|
||||
$scope.agg.meta,
|
||||
function(memo, val, key) {
|
||||
if (val) {
|
||||
var def = _.find($scope.extendedStats, { value: key });
|
||||
memo.push(def.text);
|
||||
}
|
||||
return memo;
|
||||
},
|
||||
[]
|
||||
);
|
||||
|
||||
$scope.settingsLinkText = 'Stats: ' + stats.join(', ');
|
||||
break;
|
||||
@ -103,8 +105,8 @@ function (angular, _, queryDef) {
|
||||
}
|
||||
case 'raw_document': {
|
||||
$scope.agg.settings.size = $scope.agg.settings.size || 500;
|
||||
$scope.settingsLinkText = 'Size: ' + $scope.agg.settings.size ;
|
||||
$scope.target.metrics.splice(0,$scope.target.metrics.length, $scope.agg);
|
||||
$scope.settingsLinkText = 'Size: ' + $scope.agg.settings.size;
|
||||
$scope.target.metrics.splice(0, $scope.target.metrics.length, $scope.agg);
|
||||
|
||||
$scope.target.bucketAggs = [];
|
||||
break;
|
||||
@ -115,7 +117,7 @@ function (angular, _, queryDef) {
|
||||
// but having it like this simplifies the query_builder
|
||||
var inlineScript = $scope.agg.inlineScript;
|
||||
if (inlineScript) {
|
||||
$scope.agg.settings.script = {inline: inlineScript};
|
||||
$scope.agg.settings.script = { inline: inlineScript };
|
||||
} else {
|
||||
delete $scope.agg.settings.script;
|
||||
}
|
||||
@ -135,15 +137,15 @@ function (angular, _, queryDef) {
|
||||
$scope.onChange();
|
||||
};
|
||||
|
||||
$scope.updateMovingAvgModelSettings = function () {
|
||||
$scope.updateMovingAvgModelSettings = function() {
|
||||
var modelSettingsKeys = [];
|
||||
var modelSettings = queryDef.getMovingAvgSettings($scope.agg.settings.model, false);
|
||||
for (var i=0; i < modelSettings.length; i++) {
|
||||
for (var i = 0; i < modelSettings.length; i++) {
|
||||
modelSettingsKeys.push(modelSettings[i].value);
|
||||
}
|
||||
|
||||
for (var key in $scope.agg.settings.settings) {
|
||||
if (($scope.agg.settings.settings[key] === null) || (modelSettingsKeys.indexOf(key) === -1)) {
|
||||
if ($scope.agg.settings.settings[key] === null || modelSettingsKeys.indexOf(key) === -1) {
|
||||
delete $scope.agg.settings.settings[key];
|
||||
}
|
||||
}
|
||||
@ -166,17 +168,21 @@ function (angular, _, queryDef) {
|
||||
if ($scope.agg.type === 'cardinality') {
|
||||
return $scope.getFields();
|
||||
}
|
||||
return $scope.getFields({$fieldType: 'number'});
|
||||
return $scope.getFields({ $fieldType: 'number' });
|
||||
};
|
||||
|
||||
$scope.addMetricAgg = function() {
|
||||
var addIndex = metricAggs.length;
|
||||
|
||||
var id = _.reduce($scope.target.bucketAggs.concat($scope.target.metrics), function(max, val) {
|
||||
return parseInt(val.id) > max ? parseInt(val.id) : max;
|
||||
}, 0);
|
||||
var id = _.reduce(
|
||||
$scope.target.bucketAggs.concat($scope.target.metrics),
|
||||
function(max, val) {
|
||||
return parseInt(val.id) > max ? parseInt(val.id) : max;
|
||||
},
|
||||
0
|
||||
);
|
||||
|
||||
metricAggs.splice(addIndex, 0, {type: "count", field: "select field", id: (id+1).toString()});
|
||||
metricAggs.splice(addIndex, 0, { type: 'count', field: 'select field', id: (id + 1).toString() });
|
||||
$scope.onChange();
|
||||
};
|
||||
|
||||
@ -194,7 +200,9 @@ function (angular, _, queryDef) {
|
||||
};
|
||||
|
||||
$scope.init();
|
||||
}
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
});
|
||||
var module = angular.module('grafana.directives');
|
||||
module.directive('elasticMetricAgg', elasticMetricAgg);
|
||||
module.controller('ElasticMetricAggCtrl', ElasticMetricAggCtrl);
|
12
public/app/plugins/datasource/mssql/README.md
Normal file
@ -0,0 +1,12 @@
|
||||
# Grafana Microsoft SQL Server Data Source - Native Plugin
|
||||
|
||||
Grafana ships with a built-in Microsoft SQL Server (MSSQL) data source plugin that lets you query and visualize data from Microsoft SQL Server 2005 or newer.
|
||||
|
||||
## Adding the data source
|
||||
|
||||
1. Open the side menu by clicking the Grafana icon in the top header.
|
||||
2. In the side menu under the `Configuration` link you should find a link named `Data Sources`.
|
||||
3. Click the `+ Add data source` button in the top header.
|
||||
4. Select *Microsoft SQL Server* from the *Type* dropdown.
|
||||
|
||||
For more information, check the [docs](http://docs.grafana.org/).
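As a rough sketch (not part of the plugin code), the connection details collected by the configuration page added later in this diff look roughly like this; the field names mirror the bindings in `partials/config.html`, and all values are placeholders:

// Example values only; the password is stored via secureJsonData.
const exampleMssqlSettings = {
  url: 'localhost:1433', // Host
  database: 'grafana', // Database
  user: 'grafana_reader', // User
  secureJsonData: { password: 'change-me' }, // Password
};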
|
151
public/app/plugins/datasource/mssql/datasource.ts
Normal file
@ -0,0 +1,151 @@
|
||||
import _ from 'lodash';
|
||||
import ResponseParser from './response_parser';
|
||||
|
||||
export class MssqlDatasource {
|
||||
id: any;
|
||||
name: any;
|
||||
responseParser: ResponseParser;
|
||||
|
||||
/** @ngInject **/
|
||||
constructor(instanceSettings, private backendSrv, private $q, private templateSrv) {
|
||||
this.name = instanceSettings.name;
|
||||
this.id = instanceSettings.id;
|
||||
this.responseParser = new ResponseParser(this.$q);
|
||||
}
|
||||
|
||||
interpolateVariable(value, variable) {
|
||||
if (typeof value === 'string') {
|
||||
if (variable.multi || variable.includeAll) {
|
||||
return "'" + value + "'";
|
||||
} else {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof value === 'number') {
|
||||
return value;
|
||||
}
|
||||
|
||||
var quotedValues = _.map(value, function(val) {
|
||||
if (typeof value === 'number') {
|
||||
return value;
|
||||
}
|
||||
|
||||
return "'" + val + "'";
|
||||
});
|
||||
return quotedValues.join(',');
|
||||
}
|
||||
|
||||
query(options) {
|
||||
var queries = _.filter(options.targets, item => {
|
||||
return item.hide !== true;
|
||||
}).map(item => {
|
||||
return {
|
||||
refId: item.refId,
|
||||
intervalMs: options.intervalMs,
|
||||
maxDataPoints: options.maxDataPoints,
|
||||
datasourceId: this.id,
|
||||
rawSql: this.templateSrv.replace(item.rawSql, options.scopedVars, this.interpolateVariable),
|
||||
format: item.format,
|
||||
};
|
||||
});
|
||||
|
||||
if (queries.length === 0) {
|
||||
return this.$q.when({ data: [] });
|
||||
}
|
||||
|
||||
return this.backendSrv
|
||||
.datasourceRequest({
|
||||
url: '/api/tsdb/query',
|
||||
method: 'POST',
|
||||
data: {
|
||||
from: options.range.from.valueOf().toString(),
|
||||
to: options.range.to.valueOf().toString(),
|
||||
queries: queries,
|
||||
},
|
||||
})
|
||||
.then(this.responseParser.processQueryResult);
|
||||
}
|
||||
|
||||
annotationQuery(options) {
|
||||
if (!options.annotation.rawQuery) {
|
||||
return this.$q.reject({ message: 'Query missing in annotation definition' });
|
||||
}
|
||||
|
||||
const query = {
|
||||
refId: options.annotation.name,
|
||||
datasourceId: this.id,
|
||||
rawSql: this.templateSrv.replace(options.annotation.rawQuery, options.scopedVars, this.interpolateVariable),
|
||||
format: 'table',
|
||||
};
|
||||
|
||||
return this.backendSrv
|
||||
.datasourceRequest({
|
||||
url: '/api/tsdb/query',
|
||||
method: 'POST',
|
||||
data: {
|
||||
from: options.range.from.valueOf().toString(),
|
||||
to: options.range.to.valueOf().toString(),
|
||||
queries: [query],
|
||||
},
|
||||
})
|
||||
.then(data => this.responseParser.transformAnnotationResponse(options, data));
|
||||
}
|
||||
|
||||
metricFindQuery(query, optionalOptions) {
|
||||
let refId = 'tempvar';
|
||||
if (optionalOptions && optionalOptions.variable && optionalOptions.variable.name) {
|
||||
refId = optionalOptions.variable.name;
|
||||
}
|
||||
|
||||
const interpolatedQuery = {
|
||||
refId: refId,
|
||||
datasourceId: this.id,
|
||||
rawSql: this.templateSrv.replace(query, {}, this.interpolateVariable),
|
||||
format: 'table',
|
||||
};
|
||||
|
||||
return this.backendSrv
|
||||
.datasourceRequest({
|
||||
url: '/api/tsdb/query',
|
||||
method: 'POST',
|
||||
data: {
|
||||
queries: [interpolatedQuery],
|
||||
},
|
||||
})
|
||||
.then(data => this.responseParser.parseMetricFindQueryResult(refId, data));
|
||||
}
|
||||
|
||||
testDatasource() {
|
||||
return this.backendSrv
|
||||
.datasourceRequest({
|
||||
url: '/api/tsdb/query',
|
||||
method: 'POST',
|
||||
data: {
|
||||
from: '5m',
|
||||
to: 'now',
|
||||
queries: [
|
||||
{
|
||||
refId: 'A',
|
||||
intervalMs: 1,
|
||||
maxDataPoints: 1,
|
||||
datasourceId: this.id,
|
||||
rawSql: 'SELECT 1',
|
||||
format: 'table',
|
||||
},
|
||||
],
|
||||
},
|
||||
})
|
||||
.then(res => {
|
||||
return { status: 'success', message: 'Database Connection OK' };
|
||||
})
|
||||
.catch(err => {
|
||||
console.log(err);
|
||||
if (err.data && err.data.message) {
|
||||
return { status: 'error', message: err.data.message };
|
||||
} else {
|
||||
return { status: 'error', message: err.status };
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
36
public/app/plugins/datasource/mssql/module.ts
Normal file
@ -0,0 +1,36 @@
|
||||
import { MssqlDatasource } from './datasource';
|
||||
import { MssqlQueryCtrl } from './query_ctrl';
|
||||
|
||||
class MssqlConfigCtrl {
|
||||
static templateUrl = 'partials/config.html';
|
||||
}
|
||||
|
||||
const defaultQuery = `SELECT
|
||||
<time_column> as time,
|
||||
<text_column> as text,
|
||||
<tags_column> as tags
|
||||
FROM
|
||||
<table name>
|
||||
WHERE
|
||||
$__timeFilter(time_column)
|
||||
ORDER BY
|
||||
<time_column> ASC`;
|
||||
|
||||
class MssqlAnnotationsQueryCtrl {
|
||||
static templateUrl = 'partials/annotations.editor.html';
|
||||
|
||||
annotation: any;
|
||||
|
||||
/** @ngInject **/
|
||||
constructor() {
|
||||
this.annotation.rawQuery = this.annotation.rawQuery || defaultQuery;
|
||||
}
|
||||
}
|
||||
|
||||
export {
|
||||
MssqlDatasource,
|
||||
MssqlDatasource as Datasource,
|
||||
MssqlQueryCtrl as QueryCtrl,
|
||||
MssqlConfigCtrl as ConfigCtrl,
|
||||
MssqlAnnotationsQueryCtrl as AnnotationsQueryCtrl,
|
||||
};
|
@ -0,0 +1,42 @@
|
||||
|
||||
<div class="gf-form-group">
|
||||
<div class="gf-form-inline">
|
||||
<div class="gf-form gf-form--grow">
|
||||
<textarea rows="10" class="gf-form-input" ng-model="ctrl.annotation.rawQuery" spellcheck="false" placeholder="query expression" data-min-length=0 data-items=100 ng-model-onblur ng-change="ctrl.panelCtrl.refresh()"></textarea>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="gf-form-inline">
|
||||
<div class="gf-form">
|
||||
<label class="gf-form-label query-keyword" ng-click="ctrl.showHelp = !ctrl.showHelp">
|
||||
Show Help
|
||||
<i class="fa fa-caret-down" ng-show="ctrl.showHelp"></i>
|
||||
<i class="fa fa-caret-right" ng-hide="ctrl.showHelp"></i>
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="gf-form" ng-show="ctrl.showHelp">
|
||||
<pre class="gf-form-pre alert alert-info"><h6>Annotation Query Format</h6>
|
||||
An annotation is an event that is overlaid on top of graphs. The query can have up to three columns per row; the <b>time</b> column is mandatory. Annotation rendering is expensive, so it is important to limit the number of rows returned.

- column with alias: <b>time</b> for the annotation event time (in UTC). Use a unix timestamp in seconds or any native date data type.
- column with alias: <b>text</b> for the annotation text.
- column with alias: <b>tags</b> for annotation tags. This is a comma-separated string of tags e.g. 'tag1,tag2'.
|
||||
|
||||
|
||||
Macros:
|
||||
- $__time(column) -> column AS time
|
||||
- $__utcTime(column) -> DATEADD(second, DATEDIFF(second, GETDATE(), GETUTCDATE()), column) AS time
|
||||
- $__timeEpoch(column) -> DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second, GETDATE(), GETUTCDATE()), column) ) AS time
|
||||
- $__timeFilter(column) -> column > DATEADD(s, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01') AND column < DATEADD(s, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01')
|
||||
- $__unixEpochFilter(column) -> column > 1492750877 AND column < 1492750877
|
||||
|
||||
Or build your own conditionals using these macros which just return the values:
|
||||
- $__timeFrom() -> DATEADD(second, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01')
|
||||
- $__timeTo() -> DATEADD(second, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01')
|
||||
- $__unixEpochFrom() -> 1492750877
|
||||
- $__unixEpochTo() -> 1492750877
|
||||
</pre>
|
||||
</div>
|
||||
</div>
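For reference, a minimal annotation query in the format described above. It mirrors the `[event]` table used by the MSSQL backend tests earlier in this diff; the table, column and tag names are only illustrative and should be adapted to your own schema:

// Sketch of an annotation query (TypeScript template literal, as the plugin stores rawSql).
// [event], time_sec, description and the 'deploy' tag come from the backend test fixtures.
const deployAnnotationsSql = `
  SELECT
    time_sec as time,
    description as [text],
    tags
  FROM [event]
  WHERE $__unixEpochFilter(time_sec) AND tags = 'deploy'
  ORDER BY 1 ASC`;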
|
43
public/app/plugins/datasource/mssql/partials/config.html
Normal file
@ -0,0 +1,43 @@
|
||||
|
||||
<h3 class="page-heading">MSSQL Connection</h3>
|
||||
|
||||
<div class="gf-form-group">
|
||||
<div class="gf-form max-width-30">
|
||||
<span class="gf-form-label width-7">Host</span>
|
||||
<input type="text" class="gf-form-input" ng-model='ctrl.current.url' placeholder="localhost:1433" bs-typeahead="{{['localhost', 'localhost:1433']}}" required></input>
|
||||
</div>
|
||||
|
||||
<div class="gf-form max-width-30">
|
||||
<span class="gf-form-label width-7">Database</span>
|
||||
<input type="text" class="gf-form-input" ng-model='ctrl.current.database' placeholder="database name" required></input>
|
||||
</div>
|
||||
|
||||
<div class="gf-form-inline">
|
||||
<div class="gf-form max-width-15">
|
||||
<span class="gf-form-label width-7">User</span>
|
||||
<input type="text" class="gf-form-input" ng-model='ctrl.current.user' placeholder="user"></input>
|
||||
</div>
|
||||
<div class="gf-form max-width-15" ng-if="!ctrl.current.secureJsonFields.password">
|
||||
<span class="gf-form-label width-7">Password</span>
|
||||
<input type="password" class="gf-form-input" ng-model='ctrl.current.secureJsonData.password' placeholder="password"></input>
|
||||
</div>
|
||||
<div class="gf-form max-width-19" ng-if="ctrl.current.secureJsonFields.password">
|
||||
<span class="gf-form-label width-7">Password</span>
|
||||
<input type="text" class="gf-form-input" disabled="disabled" value="configured">
|
||||
<a class="btn btn-secondary gf-form-btn" href="#" ng-click="ctrl.current.secureJsonFields.password = false">reset</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="gf-form-group">
|
||||
<div class="grafana-info-box">
|
||||
<h5>User Permission</h5>
|
||||
<p>
|
||||
The database user should only be granted SELECT permissions on the specified database and tables you want to query.
Grafana does not validate that queries are safe, so they can contain any SQL statement. For example, statements
like <code>USE otherdb;</code> and <code>DROP TABLE user;</code> would be executed. To protect against this we
<strong>highly</strong> recommend you create a specific MSSQL user with restricted permissions.
|
||||
</p>
|
||||
</div>
|
||||
</div>
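A minimal sketch of the restricted database user the info box above recommends, assuming a `grafana_reader` login and a `dbo.metrics` table; this T-SQL would be run directly against SQL Server by a DBA, not through Grafana:

// Assumed names: grafana_reader and dbo.metrics. Grant SELECT only.
const createRestrictedUserSql = `
  CREATE LOGIN grafana_reader WITH PASSWORD = 'change-me';
  CREATE USER grafana_reader FOR LOGIN grafana_reader;
  GRANT SELECT ON dbo.metrics TO grafana_reader;`;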
|
||||
|
@ -0,0 +1,71 @@
|
||||
<query-editor-row query-ctrl="ctrl" can-collapse="false">
|
||||
<div class="gf-form-inline">
|
||||
<div class="gf-form gf-form--grow">
|
||||
<code-editor content="ctrl.target.rawSql" datasource="ctrl.datasource" on-change="ctrl.panelCtrl.refresh()" data-mode="sqlserver">
|
||||
</code-editor>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="gf-form-inline">
|
||||
<div class="gf-form">
|
||||
<label class="gf-form-label query-keyword">Format as</label>
|
||||
<div class="gf-form-select-wrapper">
|
||||
<select class="gf-form-input gf-size-auto" ng-model="ctrl.target.format" ng-options="f.value as f.text for f in ctrl.formats" ng-change="ctrl.refresh()"></select>
|
||||
</div>
|
||||
</div>
|
||||
<div class="gf-form">
|
||||
<label class="gf-form-label query-keyword" ng-click="ctrl.showHelp = !ctrl.showHelp">
|
||||
Show Help
|
||||
<i class="fa fa-caret-down" ng-show="ctrl.showHelp"></i>
|
||||
<i class="fa fa-caret-right" ng-hide="ctrl.showHelp"></i>
|
||||
</label>
|
||||
</div>
|
||||
<div class="gf-form" ng-show="ctrl.lastQueryMeta">
|
||||
<label class="gf-form-label query-keyword" ng-click="ctrl.showLastQuerySQL = !ctrl.showLastQuerySQL">
|
||||
Generated SQL
|
||||
<i class="fa fa-caret-down" ng-show="ctrl.showLastQuerySQL"></i>
|
||||
<i class="fa fa-caret-right" ng-hide="ctrl.showLastQuerySQL"></i>
|
||||
</label>
|
||||
</div>
|
||||
<div class="gf-form gf-form--grow">
|
||||
<div class="gf-form-label gf-form-label--grow"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="gf-form" ng-show="ctrl.showLastQuerySQL">
|
||||
<pre class="gf-form-pre">{{ctrl.lastQueryMeta.sql}}</pre>
|
||||
</div>
|
||||
|
||||
<div class="gf-form" ng-show="ctrl.showHelp">
|
||||
<pre class="gf-form-pre alert alert-info">Time series:
|
||||
- return a column named time (in UTC), as a unix timestamp or any SQL native date data type. You can use the macros below.
- optional: return a column named metric to represent the series names.
- any other columns returned will be the time point values.
- if multiple value columns are present and a metric column is provided, the series name will be the combination of "MetricName - ValueColumnName".
|
||||
|
||||
Table:
|
||||
- return any set of columns
|
||||
|
||||
Macros:
|
||||
- $__time(column) -> column AS time
|
||||
- $__utcTime(column) -> DATEADD(second, DATEDIFF(second, GETDATE(), GETUTCDATE()), column) AS time
|
||||
- $__timeEpoch(column) -> DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second, GETDATE(), GETUTCDATE()), column) ) AS time
|
||||
- $__timeFilter(column) -> column > DATEADD(s, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01') AND column < DATEADD(s, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01')
|
||||
- $__unixEpochFilter(column) -> column > 1492750877 AND column < 1492750877
|
||||
- $__timeGroup(column, '5m'[, fillvalue]) -> cast(cast(DATEDIFF(second, {d '1970-01-01'}, DATEADD(second, DATEDIFF(second, GETDATE(), GETUTCDATE()), column))/300 as int)*300 as int). Providing a <i>fillValue</i> of <i>NULL</i> or a float value will automatically fill empty series in the time range with that value.
|
||||
|
||||
Or build your own conditionals using these macros which just return the values:
|
||||
- $__timeFrom() -> DATEADD(second, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01')
|
||||
- $__timeTo() -> DATEADD(second, 1492750877+DATEDIFF(second, GETUTCDATE(), GETDATE()), '1970-01-01')
|
||||
- $__unixEpochFrom() -> 1492750877
|
||||
- $__unixEpochTo() -> 1492750877
|
||||
</pre>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="gf-form" ng-show="ctrl.lastQueryError">
|
||||
<pre class="gf-form-pre alert alert-error">{{ctrl.lastQueryError}}</pre>
|
||||
</div>
|
||||
|
||||
</query-editor-row>
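A hedged example of a query in the default "Time series" format, using the column conventions and macros documented in the help text above; the table and column names (cpu_load, created_at, value, hostname) are placeholders, not part of the plugin:

// Placeholder schema: cpu_load(created_at, value, hostname).
const cpuTimeSeriesSql = `
  SELECT
    $__timeEpoch(created_at),
    value as value,
    hostname as metric
  FROM cpu_load
  WHERE $__timeFilter(created_at)
  ORDER BY created_at ASC`;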
|
21
public/app/plugins/datasource/mssql/plugin.json
Normal file
@ -0,0 +1,21 @@
|
||||
{
|
||||
"type": "datasource",
|
||||
"name": "Microsoft SQL Server",
|
||||
"id": "mssql",
|
||||
|
||||
"info": {
|
||||
"description": "Microsoft SQL Server Data Source for Grafana",
|
||||
"author": {
|
||||
"name": "Grafana Project",
|
||||
"url": "https://grafana.com"
|
||||
},
|
||||
"logos": {
|
||||
"small": "",
|
||||
"large": ""
|
||||
}
|
||||
},
|
||||
|
||||
"alerting": true,
|
||||
"annotations": true,
|
||||
"metrics": true
|
||||
}
|
77
public/app/plugins/datasource/mssql/query_ctrl.ts
Normal file
@ -0,0 +1,77 @@
|
||||
import _ from 'lodash';
|
||||
import { QueryCtrl } from 'app/plugins/sdk';
|
||||
|
||||
export interface MssqlQuery {
|
||||
refId: string;
|
||||
format: string;
|
||||
alias: string;
|
||||
rawSql: string;
|
||||
}
|
||||
|
||||
export interface QueryMeta {
|
||||
sql: string;
|
||||
}
|
||||
|
||||
const defaultQuery = `SELECT
|
||||
$__timeEpoch(<time_column>),
|
||||
<value column> as value,
|
||||
<series name column> as metric
|
||||
FROM
|
||||
<table name>
|
||||
WHERE
|
||||
$__timeFilter(time_column)
|
||||
ORDER BY
|
||||
<time_column> ASC`;
|
||||
|
||||
export class MssqlQueryCtrl extends QueryCtrl {
|
||||
static templateUrl = 'partials/query.editor.html';
|
||||
|
||||
showLastQuerySQL: boolean;
|
||||
formats: any[];
|
||||
target: MssqlQuery;
|
||||
lastQueryMeta: QueryMeta;
|
||||
lastQueryError: string;
|
||||
showHelp: boolean;
|
||||
|
||||
/** @ngInject **/
|
||||
constructor($scope, $injector) {
|
||||
super($scope, $injector);
|
||||
|
||||
this.target.format = this.target.format || 'time_series';
|
||||
this.target.alias = '';
|
||||
this.formats = [{ text: 'Time series', value: 'time_series' }, { text: 'Table', value: 'table' }];
|
||||
|
||||
if (!this.target.rawSql) {
|
||||
// special handling when in table panel
|
||||
if (this.panelCtrl.panel.type === 'table') {
|
||||
this.target.format = 'table';
|
||||
this.target.rawSql = 'SELECT 1';
|
||||
} else {
|
||||
this.target.rawSql = defaultQuery;
|
||||
}
|
||||
}
|
||||
|
||||
this.panelCtrl.events.on('data-received', this.onDataReceived.bind(this), $scope);
|
||||
this.panelCtrl.events.on('data-error', this.onDataError.bind(this), $scope);
|
||||
}
|
||||
|
||||
onDataReceived(dataList) {
|
||||
this.lastQueryMeta = null;
|
||||
this.lastQueryError = null;
|
||||
|
||||
let anySeriesFromQuery = _.find(dataList, { refId: this.target.refId });
|
||||
if (anySeriesFromQuery) {
|
||||
this.lastQueryMeta = anySeriesFromQuery.meta;
|
||||
}
|
||||
}
|
||||
|
||||
onDataError(err) {
|
||||
if (err.data && err.data.results) {
|
||||
let queryRes = err.data.results[this.target.refId];
|
||||
if (queryRes) {
|
||||
this.lastQueryMeta = queryRes.meta;
|
||||
this.lastQueryError = queryRes.error;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
139
public/app/plugins/datasource/mssql/response_parser.ts
Normal file
@ -0,0 +1,139 @@
|
||||
import _ from 'lodash';
|
||||
|
||||
export default class ResponseParser {
|
||||
constructor(private $q) {}
|
||||
|
||||
processQueryResult(res) {
|
||||
var data = [];
|
||||
|
||||
if (!res.data.results) {
|
||||
return { data: data };
|
||||
}
|
||||
|
||||
for (let key in res.data.results) {
|
||||
let queryRes = res.data.results[key];
|
||||
|
||||
if (queryRes.series) {
|
||||
for (let series of queryRes.series) {
|
||||
data.push({
|
||||
target: series.name,
|
||||
datapoints: series.points,
|
||||
refId: queryRes.refId,
|
||||
meta: queryRes.meta,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (queryRes.tables) {
|
||||
for (let table of queryRes.tables) {
|
||||
table.type = 'table';
|
||||
table.refId = queryRes.refId;
|
||||
table.meta = queryRes.meta;
|
||||
data.push(table);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { data: data };
|
||||
}
|
||||
|
||||
parseMetricFindQueryResult(refId, results) {
|
||||
if (!results || results.data.length === 0 || results.data.results[refId].meta.rowCount === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const columns = results.data.results[refId].tables[0].columns;
|
||||
const rows = results.data.results[refId].tables[0].rows;
|
||||
const textColIndex = this.findColIndex(columns, '__text');
|
||||
const valueColIndex = this.findColIndex(columns, '__value');
|
||||
|
||||
if (columns.length === 2 && textColIndex !== -1 && valueColIndex !== -1) {
|
||||
return this.transformToKeyValueList(rows, textColIndex, valueColIndex);
|
||||
}
|
||||
|
||||
return this.transformToSimpleList(rows);
|
||||
}
|
||||
|
||||
transformToKeyValueList(rows, textColIndex, valueColIndex) {
|
||||
const res = [];
|
||||
|
||||
for (let i = 0; i < rows.length; i++) {
|
||||
if (!this.containsKey(res, rows[i][textColIndex])) {
|
||||
res.push({ text: rows[i][textColIndex], value: rows[i][valueColIndex] });
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
transformToSimpleList(rows) {
|
||||
const res = [];
|
||||
|
||||
for (let i = 0; i < rows.length; i++) {
|
||||
for (let j = 0; j < rows[i].length; j++) {
|
||||
const value = rows[i][j];
|
||||
if (res.indexOf(value) === -1) {
|
||||
res.push(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return _.map(res, value => {
|
||||
return { text: value };
|
||||
});
|
||||
}
|
||||
|
||||
findColIndex(columns, colName) {
|
||||
for (let i = 0; i < columns.length; i++) {
|
||||
if (columns[i].text === colName) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
containsKey(res, key) {
|
||||
for (let i = 0; i < res.length; i++) {
|
||||
if (res[i].text === key) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
transformAnnotationResponse(options, data) {
|
||||
const table = data.data.results[options.annotation.name].tables[0];
|
||||
|
||||
let timeColumnIndex = -1;
|
||||
let textColumnIndex = -1;
|
||||
let tagsColumnIndex = -1;
|
||||
|
||||
for (let i = 0; i < table.columns.length; i++) {
|
||||
if (table.columns[i].text === 'time') {
|
||||
timeColumnIndex = i;
|
||||
} else if (table.columns[i].text === 'text') {
|
||||
textColumnIndex = i;
|
||||
} else if (table.columns[i].text === 'tags') {
|
||||
tagsColumnIndex = i;
|
||||
}
|
||||
}
|
||||
|
||||
if (timeColumnIndex === -1) {
|
||||
return this.$q.reject({ message: 'Missing mandatory time column (with time column alias) in annotation query.' });
|
||||
}
|
||||
|
||||
const list = [];
|
||||
for (let i = 0; i < table.rows.length; i++) {
|
||||
const row = table.rows[i];
|
||||
list.push({
|
||||
annotation: options.annotation,
|
||||
time: Math.floor(row[timeColumnIndex]) * 1000,
|
||||
text: row[textColumnIndex],
|
||||
tags: row[tagsColumnIndex] ? row[tagsColumnIndex].trim().split(/\s*,\s*/) : [],
|
||||
});
|
||||
}
|
||||
|
||||
return list;
|
||||
}
|
||||
}
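To illustrate the key/value convention handled by `parseMetricFindQueryResult` above: when a template variable query aliases two columns as `__text` and `__value`, `findColIndex` locates them and each row becomes a `{ text, value }` option. A sketch, with an assumed `host` table:

// Rows come back as { text: hostname, value: host_id } options for the variable.
const hostVariableSql = `
  SELECT hostname AS __text, host_id AS __value
  FROM host`;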
|
233
public/app/plugins/datasource/mssql/specs/datasource_specs.ts
Normal file
@ -0,0 +1,233 @@
|
||||
import { describe, beforeEach, it, expect, angularMocks } from 'test/lib/common';
|
||||
import moment from 'moment';
|
||||
import helpers from 'test/specs/helpers';
|
||||
import { MssqlDatasource } from '../datasource';
|
||||
import { CustomVariable } from 'app/features/templating/custom_variable';
|
||||
|
||||
describe('MSSQLDatasource', function() {
|
||||
var ctx = new helpers.ServiceTestContext();
|
||||
var instanceSettings = { name: 'mssql' };
|
||||
|
||||
beforeEach(angularMocks.module('grafana.core'));
|
||||
beforeEach(angularMocks.module('grafana.services'));
|
||||
beforeEach(ctx.providePhase(['backendSrv']));
|
||||
|
||||
beforeEach(
|
||||
angularMocks.inject(function($q, $rootScope, $httpBackend, $injector) {
|
||||
ctx.$q = $q;
|
||||
ctx.$httpBackend = $httpBackend;
|
||||
ctx.$rootScope = $rootScope;
|
||||
ctx.ds = $injector.instantiate(MssqlDatasource, { instanceSettings: instanceSettings });
|
||||
$httpBackend.when('GET', /\.html$/).respond('');
|
||||
})
|
||||
);
|
||||
|
||||
describe('When performing annotationQuery', function() {
|
||||
let results;
|
||||
|
||||
const annotationName = 'MyAnno';
|
||||
|
||||
const options = {
|
||||
annotation: {
|
||||
name: annotationName,
|
||||
rawQuery: 'select time, text, tags from table;',
|
||||
},
|
||||
range: {
|
||||
from: moment(1432288354),
|
||||
to: moment(1432288401),
|
||||
},
|
||||
};
|
||||
|
||||
const response = {
|
||||
results: {
|
||||
MyAnno: {
|
||||
refId: annotationName,
|
||||
tables: [
|
||||
{
|
||||
columns: [{ text: 'time' }, { text: 'text' }, { text: 'tags' }],
|
||||
rows: [
|
||||
[1432288355, 'some text', 'TagA,TagB'],
|
||||
[1432288390, 'some text2', ' TagB , TagC'],
|
||||
[1432288400, 'some text3'],
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
beforeEach(function() {
|
||||
ctx.backendSrv.datasourceRequest = function(options) {
|
||||
return ctx.$q.when({ data: response, status: 200 });
|
||||
};
|
||||
ctx.ds.annotationQuery(options).then(function(data) {
|
||||
results = data;
|
||||
});
|
||||
ctx.$rootScope.$apply();
|
||||
});
|
||||
|
||||
it('should return annotation list', function() {
|
||||
expect(results.length).to.be(3);
|
||||
|
||||
expect(results[0].text).to.be('some text');
|
||||
expect(results[0].tags[0]).to.be('TagA');
|
||||
expect(results[0].tags[1]).to.be('TagB');
|
||||
|
||||
expect(results[1].tags[0]).to.be('TagB');
|
||||
expect(results[1].tags[1]).to.be('TagC');
|
||||
|
||||
expect(results[2].tags.length).to.be(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('When performing metricFindQuery', function() {
|
||||
let results;
|
||||
const query = 'select * from atable';
|
||||
const response = {
|
||||
results: {
|
||||
tempvar: {
|
||||
meta: {
|
||||
rowCount: 3,
|
||||
},
|
||||
refId: 'tempvar',
|
||||
tables: [
|
||||
{
|
||||
columns: [{ text: 'title' }, { text: 'text' }],
|
||||
rows: [['aTitle', 'some text'], ['aTitle2', 'some text2'], ['aTitle3', 'some text3']],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
beforeEach(function() {
|
||||
ctx.backendSrv.datasourceRequest = function(options) {
|
||||
return ctx.$q.when({ data: response, status: 200 });
|
||||
};
|
||||
ctx.ds.metricFindQuery(query).then(function(data) {
|
||||
results = data;
|
||||
});
|
||||
ctx.$rootScope.$apply();
|
||||
});
|
||||
|
||||
it('should return list of all column values', function() {
|
||||
expect(results.length).to.be(6);
|
||||
expect(results[0].text).to.be('aTitle');
|
||||
expect(results[5].text).to.be('some text3');
|
||||
});
|
||||
});
|
||||
|
||||
describe('When performing metricFindQuery with key, value columns', function() {
|
||||
let results;
|
||||
const query = 'select * from atable';
|
||||
const response = {
|
||||
results: {
|
||||
tempvar: {
|
||||
meta: {
|
||||
rowCount: 3,
|
||||
},
|
||||
refId: 'tempvar',
|
||||
tables: [
|
||||
{
|
||||
columns: [{ text: '__value' }, { text: '__text' }],
|
||||
rows: [['value1', 'aTitle'], ['value2', 'aTitle2'], ['value3', 'aTitle3']],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
beforeEach(function() {
|
||||
ctx.backendSrv.datasourceRequest = function(options) {
|
||||
return ctx.$q.when({ data: response, status: 200 });
|
||||
};
|
||||
ctx.ds.metricFindQuery(query).then(function(data) {
|
||||
results = data;
|
||||
});
|
||||
ctx.$rootScope.$apply();
|
||||
});
|
||||
|
||||
it('should return list of as text, value', function() {
|
||||
expect(results.length).to.be(3);
|
||||
expect(results[0].text).to.be('aTitle');
|
||||
expect(results[0].value).to.be('value1');
|
||||
expect(results[2].text).to.be('aTitle3');
|
||||
expect(results[2].value).to.be('value3');
|
||||
});
|
||||
});
|
||||
|
||||
describe('When performing metricFindQuery with key, value columns and with duplicate keys', function() {
|
||||
let results;
|
||||
const query = 'select * from atable';
|
||||
const response = {
|
||||
results: {
|
||||
tempvar: {
|
||||
meta: {
|
||||
rowCount: 3,
|
||||
},
|
||||
refId: 'tempvar',
|
||||
tables: [
|
||||
{
|
||||
columns: [{ text: '__text' }, { text: '__value' }],
|
||||
rows: [['aTitle', 'same'], ['aTitle', 'same'], ['aTitle', 'diff']],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
beforeEach(function() {
|
||||
ctx.backendSrv.datasourceRequest = function(options) {
|
||||
return ctx.$q.when({ data: response, status: 200 });
|
||||
};
|
||||
ctx.ds.metricFindQuery(query).then(function(data) {
|
||||
results = data;
|
||||
});
|
||||
ctx.$rootScope.$apply();
|
||||
});
|
||||
|
||||
it('should return list of unique keys', function() {
|
||||
expect(results.length).to.be(1);
|
||||
expect(results[0].text).to.be('aTitle');
|
||||
expect(results[0].value).to.be('same');
|
||||
});
|
||||
});
|
||||
|
||||
describe('When interpolating variables', () => {
|
||||
beforeEach(function() {
|
||||
ctx.variable = new CustomVariable({}, {});
|
||||
});
|
||||
|
||||
describe('and value is a string', () => {
|
||||
it('should return an unquoted value', () => {
|
||||
expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql('abc');
|
||||
});
|
||||
});
|
||||
|
||||
describe('and value is a number', () => {
|
||||
it('should return an unquoted value', () => {
|
||||
expect(ctx.ds.interpolateVariable(1000, ctx.variable)).to.eql(1000);
|
||||
});
|
||||
});
|
||||
|
||||
describe('and value is an array of strings', () => {
|
||||
it('should return comma separated quoted values', () => {
|
||||
expect(ctx.ds.interpolateVariable(['a', 'b', 'c'], ctx.variable)).to.eql("'a','b','c'");
|
||||
});
|
||||
});
|
||||
|
||||
describe('and variable allows multi-value and value is a string', () => {
|
||||
it('should return a quoted value', () => {
|
||||
ctx.variable.multi = true;
|
||||
expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql("'abc'");
|
||||
});
|
||||
});
|
||||
|
||||
describe('and variable allows all and value is a string', () => {
|
||||
it('should return a quoted value', () => {
|
||||
ctx.variable.includeAll = true;
|
||||
expect(ctx.ds.interpolateVariable('abc', ctx.variable)).to.eql("'abc'");
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
@ -1,476 +0,0 @@
|
||||
define([
|
||||
'angular',
|
||||
'lodash',
|
||||
'app/core/utils/datemath',
|
||||
'moment',
|
||||
],
|
||||
function (angular, _, dateMath) {
|
||||
'use strict';
|
||||
|
||||
/** @ngInject */
|
||||
function OpenTsDatasource(instanceSettings, $q, backendSrv, templateSrv) {
|
||||
this.type = 'opentsdb';
|
||||
this.url = instanceSettings.url;
|
||||
this.name = instanceSettings.name;
|
||||
this.withCredentials = instanceSettings.withCredentials;
|
||||
this.basicAuth = instanceSettings.basicAuth;
|
||||
instanceSettings.jsonData = instanceSettings.jsonData || {};
|
||||
this.tsdbVersion = instanceSettings.jsonData.tsdbVersion || 1;
|
||||
this.tsdbResolution = instanceSettings.jsonData.tsdbResolution || 1;
|
||||
this.supportMetrics = true;
|
||||
this.tagKeys = {};
|
||||
|
||||
// Called once per panel (graph)
|
||||
this.query = function(options) {
|
||||
var start = convertToTSDBTime(options.rangeRaw.from, false);
|
||||
var end = convertToTSDBTime(options.rangeRaw.to, true);
|
||||
var qs = [];
|
||||
|
||||
_.each(options.targets, function(target) {
|
||||
if (!target.metric) { return; }
|
||||
qs.push(convertTargetToQuery(target, options, this.tsdbVersion));
|
||||
}.bind(this));
|
||||
|
||||
var queries = _.compact(qs);
|
||||
|
||||
// No valid targets, return the empty result to save a round trip.
|
||||
if (_.isEmpty(queries)) {
|
||||
var d = $q.defer();
|
||||
d.resolve({ data: [] });
|
||||
return d.promise;
|
||||
}
|
||||
|
||||
var groupByTags = {};
|
||||
_.each(queries, function(query) {
|
||||
if (query.filters && query.filters.length > 0) {
|
||||
_.each(query.filters, function(val) {
|
||||
groupByTags[val.tagk] = true;
|
||||
});
|
||||
} else {
|
||||
_.each(query.tags, function(val, key) {
|
||||
groupByTags[key] = true;
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
options.targets = _.filter(options.targets, function(query) {
|
||||
return query.hide !== true;
|
||||
});
|
||||
|
||||
return this.performTimeSeriesQuery(queries, start, end).then(function(response) {
|
||||
var metricToTargetMapping = mapMetricsToTargets(response.data, options, this.tsdbVersion);
|
||||
var result = _.map(response.data, function(metricData, index) {
|
||||
index = metricToTargetMapping[index];
|
||||
if (index === -1) {
|
||||
index = 0;
|
||||
}
|
||||
this._saveTagKeys(metricData);
|
||||
|
||||
return transformMetricData(metricData, groupByTags, options.targets[index], options, this.tsdbResolution);
|
||||
}.bind(this));
|
||||
return { data: result };
|
||||
}.bind(this));
|
||||
};
|
||||
|
||||
this.annotationQuery = function(options) {
|
||||
var start = convertToTSDBTime(options.rangeRaw.from, false);
|
||||
var end = convertToTSDBTime(options.rangeRaw.to, true);
|
||||
var qs = [];
|
||||
var eventList = [];
|
||||
|
||||
qs.push({ aggregator:"sum", metric:options.annotation.target });
|
||||
|
||||
var queries = _.compact(qs);
|
||||
|
||||
return this.performTimeSeriesQuery(queries, start, end).then(function(results) {
|
||||
if(results.data[0]) {
|
||||
var annotationObject = results.data[0].annotations;
|
||||
if(options.annotation.isGlobal){
|
||||
annotationObject = results.data[0].globalAnnotations;
|
||||
}
|
||||
if(annotationObject) {
|
||||
_.each(annotationObject, function(annotation) {
|
||||
var event = {
|
||||
text: annotation.description,
|
||||
time: Math.floor(annotation.startTime) * 1000,
|
||||
annotation: options.annotation
|
||||
};
|
||||
|
||||
eventList.push(event);
|
||||
});
|
||||
}
|
||||
}
|
||||
return eventList;
|
||||
|
||||
}.bind(this));
|
||||
};
|
||||
|
||||
this.targetContainsTemplate = function(target) {
|
||||
if (target.filters && target.filters.length > 0) {
|
||||
for (var i = 0; i < target.filters.length; i++) {
|
||||
if (templateSrv.variableExists(target.filters[i].filter)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (target.tags && Object.keys(target.tags).length > 0) {
|
||||
for (var tagKey in target.tags) {
|
||||
if (templateSrv.variableExists(target.tags[tagKey])) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
|
||||
this.performTimeSeriesQuery = function(queries, start, end) {
|
||||
var msResolution = false;
|
||||
if (this.tsdbResolution === 2) {
|
||||
msResolution = true;
|
||||
}
|
||||
var reqBody = {
|
||||
start: start,
|
||||
queries: queries,
|
||||
msResolution: msResolution,
|
||||
globalAnnotations: true
|
||||
};
|
||||
if (this.tsdbVersion === 3) {
|
||||
reqBody.showQuery = true;
|
||||
}
|
||||
|
||||
// Relative queries (e.g. last hour) don't include an end time
|
||||
if (end) {
|
||||
reqBody.end = end;
|
||||
}
|
||||
|
||||
var options = {
|
||||
method: 'POST',
|
||||
url: this.url + '/api/query',
|
||||
data: reqBody
|
||||
};
|
||||
|
||||
this._addCredentialOptions(options);
|
||||
return backendSrv.datasourceRequest(options);
|
||||
};
|
||||
|
||||
this.suggestTagKeys = function(metric) {
|
||||
return $q.when(this.tagKeys[metric] || []);
|
||||
};
|
||||
|
||||
this._saveTagKeys = function(metricData) {
|
||||
var tagKeys = Object.keys(metricData.tags);
|
||||
_.each(metricData.aggregateTags, function(tag) {
|
||||
tagKeys.push(tag);
|
||||
});
|
||||
|
||||
this.tagKeys[metricData.metric] = tagKeys;
|
||||
};
|
||||
|
||||
this._performSuggestQuery = function(query, type) {
|
||||
return this._get('/api/suggest', {type: type, q: query, max: 1000}).then(function(result) {
|
||||
return result.data;
|
||||
});
|
||||
};
|
||||
|
||||
this._performMetricKeyValueLookup = function(metric, keys) {
|
||||
|
||||
if(!metric || !keys) {
|
||||
return $q.when([]);
|
||||
}
|
||||
|
||||
var keysArray = keys.split(",").map(function(key) {
|
||||
return key.trim();
|
||||
});
|
||||
var key = keysArray[0];
|
||||
var keysQuery = key + "=*";
|
||||
|
||||
if (keysArray.length > 1) {
|
||||
keysQuery += "," + keysArray.splice(1).join(",");
|
||||
}
|
||||
|
||||
var m = metric + "{" + keysQuery + "}";
|
||||
|
||||
return this._get('/api/search/lookup', {m: m, limit: 3000}).then(function(result) {
|
||||
result = result.data.results;
|
||||
var tagvs = [];
|
||||
_.each(result, function(r) {
|
||||
if (tagvs.indexOf(r.tags[key]) === -1) {
|
||||
tagvs.push(r.tags[key]);
|
||||
}
|
||||
});
|
||||
return tagvs;
|
||||
});
|
||||
};
|
||||
|
||||
this._performMetricKeyLookup = function(metric) {
|
||||
if(!metric) { return $q.when([]); }
|
||||
|
||||
return this._get('/api/search/lookup', {m: metric, limit: 1000}).then(function(result) {
|
||||
result = result.data.results;
|
||||
var tagks = [];
|
||||
_.each(result, function(r) {
|
||||
_.each(r.tags, function(tagv, tagk) {
|
||||
if(tagks.indexOf(tagk) === -1) {
|
||||
tagks.push(tagk);
|
||||
}
|
||||
});
|
||||
});
|
||||
return tagks;
|
||||
});
|
||||
};
|
||||
|
||||
this._get = function(relativeUrl, params) {
|
||||
var options = {
|
||||
method: 'GET',
|
||||
url: this.url + relativeUrl,
|
||||
params: params,
|
||||
};
|
||||
|
||||
this._addCredentialOptions(options);
|
||||
|
||||
return backendSrv.datasourceRequest(options);
|
||||
};
|
||||
|
||||
this._addCredentialOptions = function(options) {
|
||||
if (this.basicAuth || this.withCredentials) {
|
||||
options.withCredentials = true;
|
||||
}
|
||||
if (this.basicAuth) {
|
||||
options.headers = {"Authorization": this.basicAuth};
|
||||
}
|
||||
};
|
||||
|
||||
this.metricFindQuery = function(query) {
|
||||
if (!query) { return $q.when([]); }
|
||||
|
||||
var interpolated;
|
||||
try {
|
||||
interpolated = templateSrv.replace(query, {}, 'distributed');
|
||||
}
|
||||
catch (err) {
|
||||
return $q.reject(err);
|
||||
}
|
||||
|
||||
var responseTransform = function(result) {
|
||||
return _.map(result, function(value) {
|
||||
return {text: value};
|
||||
});
|
||||
};
|
||||
|
||||
var metrics_regex = /metrics\((.*)\)/;
|
||||
var tag_names_regex = /tag_names\((.*)\)/;
|
||||
var tag_values_regex = /tag_values\((.*?),\s?(.*)\)/;
|
||||
var tag_names_suggest_regex = /suggest_tagk\((.*)\)/;
|
||||
var tag_values_suggest_regex = /suggest_tagv\((.*)\)/;
|
||||
|
||||
var metrics_query = interpolated.match(metrics_regex);
|
||||
if (metrics_query) {
|
||||
return this._performSuggestQuery(metrics_query[1], 'metrics').then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_names_query = interpolated.match(tag_names_regex);
|
||||
if (tag_names_query) {
|
||||
return this._performMetricKeyLookup(tag_names_query[1]).then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_values_query = interpolated.match(tag_values_regex);
|
||||
if (tag_values_query) {
|
||||
return this._performMetricKeyValueLookup(tag_values_query[1], tag_values_query[2]).then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_names_suggest_query = interpolated.match(tag_names_suggest_regex);
|
||||
if (tag_names_suggest_query) {
|
||||
return this._performSuggestQuery(tag_names_suggest_query[1], 'tagk').then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_values_suggest_query = interpolated.match(tag_values_suggest_regex);
|
||||
if (tag_values_suggest_query) {
|
||||
return this._performSuggestQuery(tag_values_suggest_query[1], 'tagv').then(responseTransform);
|
||||
}
|
||||
|
||||
return $q.when([]);
|
||||
};
|
||||
|
||||
this.testDatasource = function() {
|
||||
return this._performSuggestQuery('cpu', 'metrics').then(function () {
|
||||
return { status: "success", message: "Data source is working" };
|
||||
});
|
||||
};
|
||||
|
||||
var aggregatorsPromise = null;
|
||||
this.getAggregators = function() {
|
||||
if (aggregatorsPromise) { return aggregatorsPromise; }
|
||||
|
||||
aggregatorsPromise = this._get('/api/aggregators').then(function(result) {
|
||||
if (result.data && _.isArray(result.data)) {
|
||||
return result.data.sort();
|
||||
}
|
||||
return [];
|
||||
});
|
||||
return aggregatorsPromise;
|
||||
};
|
||||
|
||||
var filterTypesPromise = null;
|
||||
this.getFilterTypes = function() {
|
||||
if (filterTypesPromise) { return filterTypesPromise; }
|
||||
|
||||
filterTypesPromise = this._get('/api/config/filters').then(function(result) {
|
||||
if (result.data) {
|
||||
return Object.keys(result.data).sort();
|
||||
}
|
||||
return [];
|
||||
});
|
||||
return filterTypesPromise;
|
||||
};
|
||||
|
||||
function transformMetricData(md, groupByTags, target, options, tsdbResolution) {
|
||||
var metricLabel = createMetricLabel(md, target, groupByTags, options);
|
||||
var dps = [];
|
||||
|
||||
// TSDB returns datapoints as a hash of ts => value.
|
||||
// Can't use _.pairs(invert()) because it stringifies keys/values
|
||||
_.each(md.dps, function (v, k) {
|
||||
if (tsdbResolution === 2) {
|
||||
dps.push([v, k * 1]);
|
||||
} else {
|
||||
dps.push([v, k * 1000]);
|
||||
}
|
||||
});
|
||||
|
||||
return { target: metricLabel, datapoints: dps };
|
||||
}
|
||||
|
||||
function createMetricLabel(md, target, groupByTags, options) {
|
||||
if (target.alias) {
|
||||
var scopedVars = _.clone(options.scopedVars || {});
|
||||
_.each(md.tags, function(value, key) {
|
||||
scopedVars['tag_' + key] = {value: value};
|
||||
});
|
||||
return templateSrv.replace(target.alias, scopedVars);
|
||||
}
|
||||
|
||||
var label = md.metric;
|
||||
var tagData = [];
|
||||
|
||||
if (!_.isEmpty(md.tags)) {
|
||||
_.each(_.toPairs(md.tags), function(tag) {
|
||||
if (_.has(groupByTags, tag[0])) {
|
||||
tagData.push(tag[0] + "=" + tag[1]);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (!_.isEmpty(tagData)) {
|
||||
label += "{" + tagData.join(", ") + "}";
|
||||
}
|
||||
|
||||
return label;
|
||||
}
|
||||
|
||||
function convertTargetToQuery(target, options, tsdbVersion) {
|
||||
if (!target.metric || target.hide) {
|
||||
return null;
|
||||
}
|
||||
|
||||
var query = {
|
||||
metric: templateSrv.replace(target.metric, options.scopedVars, 'pipe'),
|
||||
aggregator: "avg"
|
||||
};
|
||||
|
||||
if (target.aggregator) {
|
||||
query.aggregator = templateSrv.replace(target.aggregator);
|
||||
}
|
||||
|
||||
if (target.shouldComputeRate) {
|
||||
query.rate = true;
|
||||
query.rateOptions = {
|
||||
counter: !!target.isCounter
|
||||
};
|
||||
|
||||
if (target.counterMax && target.counterMax.length) {
|
||||
query.rateOptions.counterMax = parseInt(target.counterMax);
|
||||
}
|
||||
|
||||
if (target.counterResetValue && target.counterResetValue.length) {
|
||||
query.rateOptions.resetValue = parseInt(target.counterResetValue);
|
||||
}
|
||||
|
||||
if(tsdbVersion >= 2) {
|
||||
query.rateOptions.dropResets = !query.rateOptions.counterMax &&
|
||||
(!query.rateOptions.ResetValue || query.rateOptions.ResetValue === 0);
|
||||
}
|
||||
}
|
||||
|
||||
if (!target.disableDownsampling) {
|
||||
var interval = templateSrv.replace(target.downsampleInterval || options.interval);
|
||||
|
||||
if (interval.match(/\.[0-9]+s/)) {
|
||||
interval = parseFloat(interval)*1000 + "ms";
|
||||
}
|
||||
|
||||
query.downsample = interval + "-" + target.downsampleAggregator;
|
||||
|
||||
if (target.downsampleFillPolicy && target.downsampleFillPolicy !== "none") {
|
||||
query.downsample += "-" + target.downsampleFillPolicy;
|
||||
}
|
||||
}
|
||||
|
||||
if (target.filters && target.filters.length > 0) {
|
||||
query.filters = angular.copy(target.filters);
|
||||
if (query.filters){
|
||||
for (var filter_key in query.filters) {
|
||||
query.filters[filter_key].filter = templateSrv.replace(query.filters[filter_key].filter, options.scopedVars, 'pipe');
|
||||
}
|
||||
}
|
||||
} else {
|
||||
query.tags = angular.copy(target.tags);
|
||||
if (query.tags){
|
||||
for (var tag_key in query.tags) {
|
||||
query.tags[tag_key] = templateSrv.replace(query.tags[tag_key], options.scopedVars, 'pipe');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (target.explicitTags) {
|
||||
query.explicitTags = true;
|
||||
}
|
||||
|
||||
return query;
|
||||
}
|
||||
|
||||
function mapMetricsToTargets(metrics, options, tsdbVersion) {
|
||||
var interpolatedTagValue, arrTagV;
|
||||
return _.map(metrics, function(metricData) {
|
||||
if (tsdbVersion === 3) {
|
||||
return metricData.query.index;
|
||||
} else {
|
||||
return _.findIndex(options.targets, function(target) {
|
||||
if (target.filters && target.filters.length > 0) {
|
||||
return target.metric === metricData.metric;
|
||||
} else {
|
||||
return target.metric === metricData.metric &&
|
||||
_.every(target.tags, function(tagV, tagK) {
|
||||
interpolatedTagValue = templateSrv.replace(tagV, options.scopedVars, 'pipe');
|
||||
arrTagV = interpolatedTagValue.split('|');
|
||||
return _.includes(arrTagV, metricData.tags[tagK]) || interpolatedTagValue === "*";
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function convertToTSDBTime(date, roundUp) {
|
||||
if (date === 'now') {
|
||||
return null;
|
||||
}
|
||||
|
||||
date = dateMath.parse(date, roundUp);
|
||||
return date.valueOf();
|
||||
}
|
||||
}
|
||||
|
||||
return OpenTsDatasource;
|
||||
});
|
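The template variable helper functions matched above (metrics(), tag_names(), tag_values(), suggest_tagk(), suggest_tagv()) are easiest to see in use. A minimal TypeScript sketch, assuming a datasource instance ds obtained from datasourceSrv (the instance name and the metric/tag names here are illustrative, not part of this change):

// Each call resolves to an array of { text: value } objects via responseTransform.
ds.metricFindQuery('metrics(cpu)').then(res => console.log(res));               // metric name suggestions
ds.metricFindQuery('tag_names(cpu.load)').then(res => console.log(res));        // tag keys seen on a metric
ds.metricFindQuery('tag_values(cpu.load, host)').then(res => console.log(res)); // values for one tag key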
513
public/app/plugins/datasource/opentsdb/datasource.ts
Normal file
@ -0,0 +1,513 @@
|
||||
import angular from 'angular';
|
||||
import _ from 'lodash';
|
||||
import * as dateMath from 'app/core/utils/datemath';
|
||||
|
||||
export default class OpenTsDatasource {
|
||||
type: any;
|
||||
url: any;
|
||||
name: any;
|
||||
withCredentials: any;
|
||||
basicAuth: any;
|
||||
tsdbVersion: any;
|
||||
tsdbResolution: any;
|
||||
supportMetrics: any;
|
||||
tagKeys: any;
|
||||
|
||||
aggregatorsPromise: any;
|
||||
filterTypesPromise: any;
|
||||
|
||||
/** @ngInject */
|
||||
constructor(instanceSettings, private $q, private backendSrv, private templateSrv) {
|
||||
this.type = 'opentsdb';
|
||||
this.url = instanceSettings.url;
|
||||
this.name = instanceSettings.name;
|
||||
this.withCredentials = instanceSettings.withCredentials;
|
||||
this.basicAuth = instanceSettings.basicAuth;
|
||||
instanceSettings.jsonData = instanceSettings.jsonData || {};
|
||||
this.tsdbVersion = instanceSettings.jsonData.tsdbVersion || 1;
|
||||
this.tsdbResolution = instanceSettings.jsonData.tsdbResolution || 1;
|
||||
this.supportMetrics = true;
|
||||
this.tagKeys = {};
|
||||
|
||||
this.aggregatorsPromise = null;
|
||||
this.filterTypesPromise = null;
|
||||
}
|
||||
|
||||
// Called once per panel (graph)
|
||||
query(options) {
|
||||
var start = this.convertToTSDBTime(options.rangeRaw.from, false);
|
||||
var end = this.convertToTSDBTime(options.rangeRaw.to, true);
|
||||
var qs = [];
|
||||
|
||||
_.each(
|
||||
options.targets,
|
||||
function(target) {
|
||||
if (!target.metric) {
|
||||
return;
|
||||
}
|
||||
qs.push(this.convertTargetToQuery(target, options, this.tsdbVersion));
|
||||
}.bind(this)
|
||||
);
|
||||
|
||||
var queries = _.compact(qs);
|
||||
|
||||
// No valid targets, return the empty result to save a round trip.
|
||||
if (_.isEmpty(queries)) {
|
||||
var d = this.$q.defer();
|
||||
d.resolve({ data: [] });
|
||||
return d.promise;
|
||||
}
|
||||
|
||||
var groupByTags = {};
|
||||
_.each(queries, function(query) {
|
||||
if (query.filters && query.filters.length > 0) {
|
||||
_.each(query.filters, function(val) {
|
||||
groupByTags[val.tagk] = true;
|
||||
});
|
||||
} else {
|
||||
_.each(query.tags, function(val, key) {
|
||||
groupByTags[key] = true;
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
options.targets = _.filter(options.targets, function(query) {
|
||||
return query.hide !== true;
|
||||
});
|
||||
|
||||
return this.performTimeSeriesQuery(queries, start, end).then(
|
||||
function(response) {
|
||||
var metricToTargetMapping = this.mapMetricsToTargets(response.data, options, this.tsdbVersion);
|
||||
var result = _.map(
|
||||
response.data,
|
||||
function(metricData, index) {
|
||||
index = metricToTargetMapping[index];
|
||||
if (index === -1) {
|
||||
index = 0;
|
||||
}
|
||||
this._saveTagKeys(metricData);
|
||||
|
||||
return this.transformMetricData(
|
||||
metricData,
|
||||
groupByTags,
|
||||
options.targets[index],
|
||||
options,
|
||||
this.tsdbResolution
|
||||
);
|
||||
}.bind(this)
|
||||
);
|
||||
return { data: result };
|
||||
}.bind(this)
|
||||
);
|
||||
}
|
||||
|
||||
annotationQuery(options) {
|
||||
var start = this.convertToTSDBTime(options.rangeRaw.from, false);
|
||||
var end = this.convertToTSDBTime(options.rangeRaw.to, true);
|
||||
var qs = [];
|
||||
var eventList = [];
|
||||
|
||||
qs.push({ aggregator: 'sum', metric: options.annotation.target });
|
||||
|
||||
var queries = _.compact(qs);
|
||||
|
||||
return this.performTimeSeriesQuery(queries, start, end).then(
|
||||
function(results) {
|
||||
if (results.data[0]) {
|
||||
var annotationObject = results.data[0].annotations;
|
||||
if (options.annotation.isGlobal) {
|
||||
annotationObject = results.data[0].globalAnnotations;
|
||||
}
|
||||
if (annotationObject) {
|
||||
_.each(annotationObject, function(annotation) {
|
||||
var event = {
|
||||
text: annotation.description,
|
||||
time: Math.floor(annotation.startTime) * 1000,
|
||||
annotation: options.annotation,
|
||||
};
|
||||
|
||||
eventList.push(event);
|
||||
});
|
||||
}
|
||||
}
|
||||
return eventList;
|
||||
}.bind(this)
|
||||
);
|
||||
}
|
||||
|
||||
targetContainsTemplate(target) {
|
||||
if (target.filters && target.filters.length > 0) {
|
||||
for (var i = 0; i < target.filters.length; i++) {
|
||||
if (this.templateSrv.variableExists(target.filters[i].filter)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (target.tags && Object.keys(target.tags).length > 0) {
|
||||
for (var tagKey in target.tags) {
|
||||
if (this.templateSrv.variableExists(target.tags[tagKey])) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
performTimeSeriesQuery(queries, start, end) {
|
||||
var msResolution = false;
|
||||
if (this.tsdbResolution === 2) {
|
||||
msResolution = true;
|
||||
}
|
||||
var reqBody: any = {
|
||||
start: start,
|
||||
queries: queries,
|
||||
msResolution: msResolution,
|
||||
globalAnnotations: true,
|
||||
};
|
||||
if (this.tsdbVersion === 3) {
|
||||
reqBody.showQuery = true;
|
||||
}
|
||||
|
||||
// Relative queries (e.g. last hour) don't include an end time
|
||||
if (end) {
|
||||
reqBody.end = end;
|
||||
}
|
||||
|
||||
var options = {
|
||||
method: 'POST',
|
||||
url: this.url + '/api/query',
|
||||
data: reqBody,
|
||||
};
|
||||
|
||||
this._addCredentialOptions(options);
|
||||
return this.backendSrv.datasourceRequest(options);
|
||||
}
|
||||
|
||||
suggestTagKeys(metric) {
|
||||
return this.$q.when(this.tagKeys[metric] || []);
|
||||
}
|
||||
|
||||
_saveTagKeys(metricData) {
|
||||
var tagKeys = Object.keys(metricData.tags);
|
||||
_.each(metricData.aggregateTags, function(tag) {
|
||||
tagKeys.push(tag);
|
||||
});
|
||||
|
||||
this.tagKeys[metricData.metric] = tagKeys;
|
||||
}
|
||||
|
||||
_performSuggestQuery(query, type) {
|
||||
return this._get('/api/suggest', { type: type, q: query, max: 1000 }).then(function(result) {
|
||||
return result.data;
|
||||
});
|
||||
}
|
||||
|
||||
_performMetricKeyValueLookup(metric, keys) {
|
||||
if (!metric || !keys) {
|
||||
return this.$q.when([]);
|
||||
}
|
||||
|
||||
var keysArray = keys.split(',').map(function(key) {
|
||||
return key.trim();
|
||||
});
|
||||
var key = keysArray[0];
|
||||
var keysQuery = key + '=*';
|
||||
|
||||
if (keysArray.length > 1) {
|
||||
keysQuery += ',' + keysArray.splice(1).join(',');
|
||||
}
|
||||
|
||||
var m = metric + '{' + keysQuery + '}';
|
||||
|
||||
return this._get('/api/search/lookup', { m: m, limit: 3000 }).then(function(result) {
|
||||
result = result.data.results;
|
||||
var tagvs = [];
|
||||
_.each(result, function(r) {
|
||||
if (tagvs.indexOf(r.tags[key]) === -1) {
|
||||
tagvs.push(r.tags[key]);
|
||||
}
|
||||
});
|
||||
return tagvs;
|
||||
});
|
||||
}
|
||||
|
||||
_performMetricKeyLookup(metric) {
|
||||
if (!metric) {
|
||||
return this.$q.when([]);
|
||||
}
|
||||
|
||||
return this._get('/api/search/lookup', { m: metric, limit: 1000 }).then(function(result) {
|
||||
result = result.data.results;
|
||||
var tagks = [];
|
||||
_.each(result, function(r) {
|
||||
_.each(r.tags, function(tagv, tagk) {
|
||||
if (tagks.indexOf(tagk) === -1) {
|
||||
tagks.push(tagk);
|
||||
}
|
||||
});
|
||||
});
|
||||
return tagks;
|
||||
});
|
||||
}
|
||||
|
||||
_get(relativeUrl, params?) {
|
||||
var options = {
|
||||
method: 'GET',
|
||||
url: this.url + relativeUrl,
|
||||
params: params,
|
||||
};
|
||||
|
||||
this._addCredentialOptions(options);
|
||||
|
||||
return this.backendSrv.datasourceRequest(options);
|
||||
}
|
||||
|
||||
_addCredentialOptions(options) {
|
||||
if (this.basicAuth || this.withCredentials) {
|
||||
options.withCredentials = true;
|
||||
}
|
||||
if (this.basicAuth) {
|
||||
options.headers = { Authorization: this.basicAuth };
|
||||
}
|
||||
}
|
||||
|
||||
metricFindQuery(query) {
|
||||
if (!query) {
|
||||
return this.$q.when([]);
|
||||
}
|
||||
|
||||
var interpolated;
|
||||
try {
|
||||
interpolated = this.templateSrv.replace(query, {}, 'distributed');
|
||||
} catch (err) {
|
||||
return this.$q.reject(err);
|
||||
}
|
||||
|
||||
var responseTransform = function(result) {
|
||||
return _.map(result, function(value) {
|
||||
return { text: value };
|
||||
});
|
||||
};
|
||||
|
||||
var metrics_regex = /metrics\((.*)\)/;
|
||||
var tag_names_regex = /tag_names\((.*)\)/;
|
||||
var tag_values_regex = /tag_values\((.*?),\s?(.*)\)/;
|
||||
var tag_names_suggest_regex = /suggest_tagk\((.*)\)/;
|
||||
var tag_values_suggest_regex = /suggest_tagv\((.*)\)/;
|
||||
|
||||
var metrics_query = interpolated.match(metrics_regex);
|
||||
if (metrics_query) {
|
||||
return this._performSuggestQuery(metrics_query[1], 'metrics').then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_names_query = interpolated.match(tag_names_regex);
|
||||
if (tag_names_query) {
|
||||
return this._performMetricKeyLookup(tag_names_query[1]).then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_values_query = interpolated.match(tag_values_regex);
|
||||
if (tag_values_query) {
|
||||
return this._performMetricKeyValueLookup(tag_values_query[1], tag_values_query[2]).then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_names_suggest_query = interpolated.match(tag_names_suggest_regex);
|
||||
if (tag_names_suggest_query) {
|
||||
return this._performSuggestQuery(tag_names_suggest_query[1], 'tagk').then(responseTransform);
|
||||
}
|
||||
|
||||
var tag_values_suggest_query = interpolated.match(tag_values_suggest_regex);
|
||||
if (tag_values_suggest_query) {
|
||||
return this._performSuggestQuery(tag_values_suggest_query[1], 'tagv').then(responseTransform);
|
||||
}
|
||||
|
||||
return this.$q.when([]);
|
||||
}
|
||||
|
||||
testDatasource() {
|
||||
return this._performSuggestQuery('cpu', 'metrics').then(function() {
|
||||
return { status: 'success', message: 'Data source is working' };
|
||||
});
|
||||
}
|
||||
|
||||
getAggregators() {
|
||||
if (this.aggregatorsPromise) {
|
||||
return this.aggregatorsPromise;
|
||||
}
|
||||
|
||||
this.aggregatorsPromise = this._get('/api/aggregators').then(function(result) {
|
||||
if (result.data && _.isArray(result.data)) {
|
||||
return result.data.sort();
|
||||
}
|
||||
return [];
|
||||
});
|
||||
return this.aggregatorsPromise;
|
||||
}
|
||||
|
||||
getFilterTypes() {
|
||||
if (this.filterTypesPromise) {
|
||||
return this.filterTypesPromise;
|
||||
}
|
||||
|
||||
this.filterTypesPromise = this._get('/api/config/filters').then(function(result) {
|
||||
if (result.data) {
|
||||
return Object.keys(result.data).sort();
|
||||
}
|
||||
return [];
|
||||
});
|
||||
return this.filterTypesPromise;
|
||||
}
|
||||
|
||||
transformMetricData(md, groupByTags, target, options, tsdbResolution) {
|
||||
var metricLabel = this.createMetricLabel(md, target, groupByTags, options);
|
||||
var dps = [];
|
||||
|
||||
// TSDB returns datapoints as a hash of ts => value.
|
||||
// Can't use _.pairs(invert()) because it stringifies keys/values
|
||||
_.each(md.dps, function(v, k) {
|
||||
if (tsdbResolution === 2) {
|
||||
dps.push([v, k * 1]);
|
||||
} else {
|
||||
dps.push([v, k * 1000]);
|
||||
}
|
||||
});
|
||||
|
||||
return { target: metricLabel, datapoints: dps };
|
||||
}
|
||||
|
||||
createMetricLabel(md, target, groupByTags, options) {
|
||||
if (target.alias) {
|
||||
var scopedVars = _.clone(options.scopedVars || {});
|
||||
_.each(md.tags, function(value, key) {
|
||||
scopedVars['tag_' + key] = { value: value };
|
||||
});
|
||||
return this.templateSrv.replace(target.alias, scopedVars);
|
||||
}
|
||||
|
||||
var label = md.metric;
|
||||
var tagData = [];
|
||||
|
||||
if (!_.isEmpty(md.tags)) {
|
||||
_.each(_.toPairs(md.tags), function(tag) {
|
||||
if (_.has(groupByTags, tag[0])) {
|
||||
tagData.push(tag[0] + '=' + tag[1]);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (!_.isEmpty(tagData)) {
|
||||
label += '{' + tagData.join(', ') + '}';
|
||||
}
|
||||
|
||||
return label;
|
||||
}
|
||||
|
||||
convertTargetToQuery(target, options, tsdbVersion) {
|
||||
if (!target.metric || target.hide) {
|
||||
return null;
|
||||
}
|
||||
|
||||
var query: any = {
|
||||
metric: this.templateSrv.replace(target.metric, options.scopedVars, 'pipe'),
|
||||
aggregator: 'avg',
|
||||
};
|
||||
|
||||
if (target.aggregator) {
|
||||
query.aggregator = this.templateSrv.replace(target.aggregator);
|
||||
}
|
||||
|
||||
if (target.shouldComputeRate) {
|
||||
query.rate = true;
|
||||
query.rateOptions = {
|
||||
counter: !!target.isCounter,
|
||||
};
|
||||
|
||||
if (target.counterMax && target.counterMax.length) {
|
||||
query.rateOptions.counterMax = parseInt(target.counterMax);
|
||||
}
|
||||
|
||||
if (target.counterResetValue && target.counterResetValue.length) {
|
||||
query.rateOptions.resetValue = parseInt(target.counterResetValue);
|
||||
}
|
||||
|
||||
if (tsdbVersion >= 2) {
|
||||
query.rateOptions.dropResets =
|
||||
!query.rateOptions.counterMax && (!query.rateOptions.ResetValue || query.rateOptions.ResetValue === 0);
|
||||
}
|
||||
}
|
||||
|
||||
if (!target.disableDownsampling) {
|
||||
var interval = this.templateSrv.replace(target.downsampleInterval || options.interval);
|
||||
|
||||
if (interval.match(/\.[0-9]+s/)) {
|
||||
interval = parseFloat(interval) * 1000 + 'ms';
|
||||
}
|
||||
|
||||
query.downsample = interval + '-' + target.downsampleAggregator;
|
||||
|
||||
if (target.downsampleFillPolicy && target.downsampleFillPolicy !== 'none') {
|
||||
query.downsample += '-' + target.downsampleFillPolicy;
|
||||
}
|
||||
}
|
||||
|
||||
if (target.filters && target.filters.length > 0) {
|
||||
query.filters = angular.copy(target.filters);
|
||||
if (query.filters) {
|
||||
for (var filter_key in query.filters) {
|
||||
query.filters[filter_key].filter = this.templateSrv.replace(
|
||||
query.filters[filter_key].filter,
|
||||
options.scopedVars,
|
||||
'pipe'
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
query.tags = angular.copy(target.tags);
|
||||
if (query.tags) {
|
||||
for (var tag_key in query.tags) {
|
||||
query.tags[tag_key] = this.templateSrv.replace(query.tags[tag_key], options.scopedVars, 'pipe');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (target.explicitTags) {
|
||||
query.explicitTags = true;
|
||||
}
|
||||
|
||||
return query;
|
||||
}
|
||||
|
||||
mapMetricsToTargets(metrics, options, tsdbVersion) {
|
||||
var interpolatedTagValue, arrTagV;
|
||||
return _.map(metrics, (metricData) => {
|
||||
if (tsdbVersion === 3) {
|
||||
return metricData.query.index;
|
||||
} else {
|
||||
return _.findIndex(options.targets, target => {
|
||||
if (target.filters && target.filters.length > 0) {
|
||||
return target.metric === metricData.metric;
|
||||
} else {
|
||||
return (
|
||||
target.metric === metricData.metric &&
|
||||
_.every(target.tags, (tagV, tagK) => {
|
||||
interpolatedTagValue = this.templateSrv.replace(tagV, options.scopedVars, 'pipe');
|
||||
arrTagV = interpolatedTagValue.split('|');
|
||||
return _.includes(arrTagV, metricData.tags[tagK]) || interpolatedTagValue === '*';
|
||||
})
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
convertToTSDBTime(date, roundUp) {
|
||||
if (date === 'now') {
|
||||
return null;
|
||||
}
|
||||
|
||||
date = dateMath.parse(date, roundUp);
|
||||
return date.valueOf();
|
||||
}
|
||||
}
|
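To make the query path in datasource.ts concrete, this is roughly the body that performTimeSeriesQuery POSTs to this.url + '/api/query' after convertTargetToQuery has run over a single non-hidden target. The values are a hand-written illustration, not output captured from the code:

const reqBody = {
  start: 1521010800000,                // convertToTSDBTime(rangeRaw.from)
  end: 1521014400000,                  // omitted entirely for relative ranges ending at "now"
  msResolution: false,                 // true only when tsdbResolution === 2
  globalAnnotations: true,
  queries: [
    {
      metric: 'cpu.load',
      aggregator: 'sum',
      downsample: '1m-avg',
      tags: { host: 'web01|web02' },   // pipe-joined by templateSrv.replace(..., 'pipe')
    },
  ],
};

With tsdbVersion === 3 a showQuery: true flag is added as well, so responses can be mapped back to targets by query index.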
@ -5,32 +5,46 @@ export class PromCompleter {
|
||||
labelQueryCache: any;
|
||||
labelNameCache: any;
|
||||
labelValueCache: any;
|
||||
templateVariableCompletions: any;
|
||||
|
||||
identifierRegexps = [/\[/, /[a-zA-Z0-9_:]/];
|
||||
|
||||
constructor(private datasource: PrometheusDatasource) {
|
||||
constructor(private datasource: PrometheusDatasource, private templateSrv) {
|
||||
this.labelQueryCache = {};
|
||||
this.labelNameCache = {};
|
||||
this.labelValueCache = {};
|
||||
this.templateVariableCompletions = this.templateSrv.variables.map(variable => {
|
||||
return {
|
||||
caption: '$' + variable.name,
|
||||
value: '$' + variable.name,
|
||||
meta: 'variable',
|
||||
score: Number.MAX_VALUE,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
getCompletions(editor, session, pos, prefix, callback) {
|
||||
let wrappedCallback = (err, completions) => {
|
||||
completions = completions.concat(this.templateVariableCompletions);
|
||||
return callback(err, completions);
|
||||
};
|
||||
|
||||
let token = session.getTokenAt(pos.row, pos.column);
|
||||
|
||||
switch (token.type) {
|
||||
case 'entity.name.tag.label-matcher':
|
||||
this.getCompletionsForLabelMatcherName(session, pos).then(completions => {
|
||||
callback(null, completions);
|
||||
wrappedCallback(null, completions);
|
||||
});
|
||||
return;
|
||||
case 'string.quoted.label-matcher':
|
||||
this.getCompletionsForLabelMatcherValue(session, pos).then(completions => {
|
||||
callback(null, completions);
|
||||
wrappedCallback(null, completions);
|
||||
});
|
||||
return;
|
||||
case 'entity.name.tag.label-list-matcher':
|
||||
this.getCompletionsForBinaryOperator(session, pos).then(completions => {
|
||||
callback(null, completions);
|
||||
wrappedCallback(null, completions);
|
||||
});
|
||||
return;
|
||||
}
|
||||
@ -59,14 +73,14 @@ export class PromCompleter {
|
||||
meta: 'range vector',
|
||||
});
|
||||
|
||||
callback(null, vectors);
|
||||
wrappedCallback(null, vectors);
|
||||
return;
|
||||
}
|
||||
|
||||
var query = prefix;
|
||||
|
||||
return this.datasource.performSuggestQuery(query, true).then(metricNames => {
|
||||
callback(
|
||||
wrappedCallback(
|
||||
null,
|
||||
metricNames.map(name => {
|
||||
let value = name;
|
||||
|
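The wrappedCallback change above simply appends precomputed template variable entries to whatever the metric or label lookup returned. A rough sketch of what those entries look like for a dashboard with a single variable named node (illustrative only):

// Built once in the constructor from templateSrv.variables:
const templateVariableCompletions = [
  { caption: '$node', value: '$node', meta: 'variable', score: Number.MAX_VALUE },
];

// wrappedCallback then does completions.concat(templateVariableCompletions) before
// invoking the original ace callback, so variables are offered alongside metrics and labels.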
@ -50,6 +50,9 @@ var PrometheusHighlightRules = function() {
|
||||
token : "keyword.control",
|
||||
regex : "by|without|on|ignoring|group_left|group_right",
|
||||
next : "start-label-list-matcher"
|
||||
}, {
|
||||
token : "variable",
|
||||
regex : "\\$[A-Za-z0-9_]+"
|
||||
}, {
|
||||
token : keywordMapper,
|
||||
regex : "[a-zA-Z_:][a-zA-Z0-9_:]*"
|
||||
|
@ -43,7 +43,7 @@ class PrometheusQueryCtrl extends QueryCtrl {
|
||||
}
|
||||
|
||||
getCompleter(query) {
|
||||
return new PromCompleter(this.datasource);
|
||||
return new PromCompleter(this.datasource, this.templateSrv);
|
||||
}
|
||||
|
||||
getDefaultFormat() {
|
||||
|
@ -1,9 +1,13 @@
|
||||
import { describe, it, sinon, expect } from 'test/lib/common';
|
||||
import helpers from 'test/specs/helpers';
|
||||
|
||||
import { PromCompleter } from '../completer';
|
||||
import { PrometheusDatasource } from '../datasource';
|
||||
|
||||
describe('Prometheus editor completer', function() {
|
||||
var ctx = new helpers.ServiceTestContext();
|
||||
beforeEach(ctx.providePhase(['templateSrv']));
|
||||
|
||||
function getSessionStub(data) {
|
||||
return {
|
||||
getTokenAt: sinon.stub().returns(data.currentToken),
|
||||
@ -39,7 +43,15 @@ describe('Prometheus editor completer', function() {
|
||||
.returns(Promise.resolve(['node_cpu'])),
|
||||
};
|
||||
|
||||
let completer = new PromCompleter(datasourceStub);
|
||||
let templateSrv = {
|
||||
variables: [
|
||||
{
|
||||
name: 'var_name',
|
||||
options: [{ text: 'foo', value: 'foo', selected: false }, { text: 'bar', value: 'bar', selected: true }],
|
||||
},
|
||||
],
|
||||
};
|
||||
let completer = new PromCompleter(datasourceStub, templateSrv);
|
||||
|
||||
describe('When inside brackets', () => {
|
||||
it('Should return range vectors', () => {
|
||||
|
@ -22,6 +22,12 @@ export function setupAngularRoutes($routeProvider, $locationProvider) {
|
||||
reloadOnSearch: false,
|
||||
pageClass: 'page-dashboard',
|
||||
})
|
||||
.when('/d/:uid', {
|
||||
templateUrl: 'public/app/partials/dashboard.html',
|
||||
controller: 'LoadDashboardCtrl',
|
||||
reloadOnSearch: false,
|
||||
pageClass: 'page-dashboard',
|
||||
})
|
||||
.when('/dashboard/:type/:slug', {
|
||||
templateUrl: 'public/app/partials/dashboard.html',
|
||||
controller: 'LoadDashboardCtrl',
|
||||
@ -98,6 +104,11 @@ export function setupAngularRoutes($routeProvider, $locationProvider) {
|
||||
controller: 'FolderDashboardsCtrl',
|
||||
controllerAs: 'ctrl',
|
||||
})
|
||||
.when('/dashboards/f/:uid', {
|
||||
templateUrl: 'public/app/features/dashboard/partials/folder_dashboards.html',
|
||||
controller: 'FolderDashboardsCtrl',
|
||||
controllerAs: 'ctrl',
|
||||
})
|
||||
.when('/org', {
|
||||
templateUrl: 'public/app/features/org/partials/orgDetails.html',
|
||||
controller: 'OrgDetailsCtrl',
|
||||
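With the two uid routes registered above, dashboards and folders can be addressed without a slug. A small illustrative sketch of the URL forms that now resolve (the uids are made up):

// Dashboard by uid -> LoadDashboardCtrl
window.location.href = '/d/Xk7PqZ3mk';
// Folder dashboard list by uid -> FolderDashboardsCtrl
window.location.href = '/dashboards/f/aB9cD1efg';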
|
@ -26,7 +26,9 @@ export const ViewStore = types
|
||||
function updateQuery(query: any) {
|
||||
self.query.clear();
|
||||
for (let key of Object.keys(query)) {
|
||||
self.query.set(key, query[key]);
|
||||
if (query[key]) {
|
||||
self.query.set(key, query[key]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -34,7 +36,9 @@ export const ViewStore = types
|
||||
function updateRouteParams(routeParams: any) {
|
||||
self.routeParams.clear();
|
||||
for (let key of Object.keys(routeParams)) {
|
||||
self.routeParams.set(key, routeParams[key]);
|
||||
if (routeParams[key]) {
|
||||
self.routeParams.set(key, routeParams[key]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
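The guard added to both update functions means falsy values are now skipped instead of being written into the observable maps. A quick illustration of the effect, using a hypothetical store instance and input (not taken from the diff):

viewStore.updateQuery({ folder: 'current', tag: '' });
// The map is cleared first, then only truthy values are set:
// viewStore.query now contains { folder: 'current' } and no 'tag' entry.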
|
@ -15,7 +15,8 @@
|
||||
|
||||
<link rel="icon" type="image/png" href="public/img/fav32.png">
|
||||
<link rel="mask-icon" href="public/img/grafana_mask_icon.svg" color="#F05A28">
|
||||
|
||||
<link rel="apple-touch-icon" href="public/img/fav32.png">
|
||||
|
||||
</head>
|
||||
|
||||
<body ng-cloak class="theme-[[ .Theme ]]">
|
||||
|
27
vendor/github.com/denisenkom/go-mssqldb/LICENSE.txt
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
259
vendor/github.com/denisenkom/go-mssqldb/buf.go
generated
vendored
Normal file
@ -0,0 +1,259 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
type packetType uint8
|
||||
|
||||
type header struct {
|
||||
PacketType packetType
|
||||
Status uint8
|
||||
Size uint16
|
||||
Spid uint16
|
||||
PacketNo uint8
|
||||
Pad uint8
|
||||
}
|
||||
|
||||
// tdsBuffer reads and writes TDS packets of data to the transport.
|
||||
// The write and read buffers are separate to make sending attn signals
|
||||
// possible without locks. Currently attn signals are only sent during
|
||||
// reads, not writes.
|
||||
type tdsBuffer struct {
|
||||
transport io.ReadWriteCloser
|
||||
|
||||
packetSize int
|
||||
|
||||
// Write fields.
|
||||
wbuf []byte
|
||||
wpos int
|
||||
wPacketSeq byte
|
||||
wPacketType packetType
|
||||
|
||||
// Read fields.
|
||||
rbuf []byte
|
||||
rpos int
|
||||
rsize int
|
||||
final bool
|
||||
rPacketType packetType
|
||||
|
||||
// afterFirst is assigned to right after tdsBuffer is created and
|
||||
// before the first use. It is executed after the first packet is
|
||||
// written and then removed.
|
||||
afterFirst func()
|
||||
}
|
||||
|
||||
func newTdsBuffer(bufsize uint16, transport io.ReadWriteCloser) *tdsBuffer {
|
||||
return &tdsBuffer{
|
||||
packetSize: int(bufsize),
|
||||
wbuf: make([]byte, 1<<16),
|
||||
rbuf: make([]byte, 1<<16),
|
||||
rpos: 8,
|
||||
transport: transport,
|
||||
}
|
||||
}
|
||||
|
||||
func (rw *tdsBuffer) ResizeBuffer(packetSize int) {
|
||||
rw.packetSize = packetSize
|
||||
}
|
||||
|
||||
func (w *tdsBuffer) PackageSize() int {
|
||||
return w.packetSize
|
||||
}
|
||||
|
||||
func (w *tdsBuffer) flush() (err error) {
|
||||
// Write packet size.
|
||||
w.wbuf[0] = byte(w.wPacketType)
|
||||
binary.BigEndian.PutUint16(w.wbuf[2:], uint16(w.wpos))
|
||||
w.wbuf[6] = w.wPacketSeq
|
||||
|
||||
// Write packet into underlying transport.
|
||||
if _, err = w.transport.Write(w.wbuf[:w.wpos]); err != nil {
|
||||
return err
|
||||
}
|
||||
// It is possible to create a whole new buffer after a flush.
|
||||
// Useful for debugging. Normally reuse the buffer.
|
||||
// w.wbuf = make([]byte, 1<<16)
|
||||
|
||||
// Execute afterFirst hook if it is set.
|
||||
if w.afterFirst != nil {
|
||||
w.afterFirst()
|
||||
w.afterFirst = nil
|
||||
}
|
||||
|
||||
w.wpos = 8
|
||||
w.wPacketSeq++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *tdsBuffer) Write(p []byte) (total int, err error) {
|
||||
for {
|
||||
copied := copy(w.wbuf[w.wpos:w.packetSize], p)
|
||||
w.wpos += copied
|
||||
total += copied
|
||||
if copied == len(p) {
|
||||
return
|
||||
}
|
||||
if err = w.flush(); err != nil {
|
||||
return
|
||||
}
|
||||
p = p[copied:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *tdsBuffer) WriteByte(b byte) error {
|
||||
if int(w.wpos) == len(w.wbuf) {
|
||||
if err := w.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.wbuf[w.wpos] = b
|
||||
w.wpos += 1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *tdsBuffer) BeginPacket(packetType packetType, resetSession bool) {
|
||||
status := byte(0)
|
||||
if resetSession {
|
||||
switch packetType {
|
||||
// Reset session can only be set on the following packet types.
|
||||
case packSQLBatch, packRPCRequest, packTransMgrReq:
|
||||
status = 0x8
|
||||
}
|
||||
}
|
||||
w.wbuf[1] = status // Packet is incomplete. This byte is set again in FinishPacket.
|
||||
w.wpos = 8
|
||||
w.wPacketSeq = 1
|
||||
w.wPacketType = packetType
|
||||
}
|
||||
|
||||
func (w *tdsBuffer) FinishPacket() error {
|
||||
w.wbuf[1] |= 1 // Mark this as the last packet in the message.
|
||||
return w.flush()
|
||||
}
|
||||
|
||||
var headerSize = binary.Size(header{})
|
||||
|
||||
func (r *tdsBuffer) readNextPacket() error {
|
||||
h := header{}
|
||||
var err error
|
||||
err = binary.Read(r.transport, binary.BigEndian, &h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if int(h.Size) > len(r.rbuf) {
|
||||
return errors.New("Invalid packet size, it is longer than buffer size")
|
||||
}
|
||||
if headerSize > int(h.Size) {
|
||||
return errors.New("Invalid packet size, it is shorter than header size")
|
||||
}
|
||||
_, err = io.ReadFull(r.transport, r.rbuf[headerSize:h.Size])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.rpos = headerSize
|
||||
r.rsize = int(h.Size)
|
||||
r.final = h.Status != 0
|
||||
r.rPacketType = h.PacketType
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) BeginRead() (packetType, error) {
|
||||
err := r.readNextPacket()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return r.rPacketType, nil
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) ReadByte() (res byte, err error) {
|
||||
if r.rpos == r.rsize {
|
||||
if r.final {
|
||||
return 0, io.EOF
|
||||
}
|
||||
err = r.readNextPacket()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
res = r.rbuf[r.rpos]
|
||||
r.rpos++
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) byte() byte {
|
||||
b, err := r.ReadByte()
|
||||
if err != nil {
|
||||
badStreamPanic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) ReadFull(buf []byte) {
|
||||
_, err := io.ReadFull(r, buf[:])
|
||||
if err != nil {
|
||||
badStreamPanic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) uint64() uint64 {
|
||||
var buf [8]byte
|
||||
r.ReadFull(buf[:])
|
||||
return binary.LittleEndian.Uint64(buf[:])
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) int32() int32 {
|
||||
return int32(r.uint32())
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) uint32() uint32 {
|
||||
var buf [4]byte
|
||||
r.ReadFull(buf[:])
|
||||
return binary.LittleEndian.Uint32(buf[:])
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) uint16() uint16 {
|
||||
var buf [2]byte
|
||||
r.ReadFull(buf[:])
|
||||
return binary.LittleEndian.Uint16(buf[:])
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) BVarChar() string {
|
||||
l := int(r.byte())
|
||||
return r.readUcs2(l)
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) UsVarChar() string {
|
||||
l := int(r.uint16())
|
||||
return r.readUcs2(l)
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) readUcs2(numchars int) string {
|
||||
b := make([]byte, numchars*2)
|
||||
r.ReadFull(b)
|
||||
res, err := ucs22str(b)
|
||||
if err != nil {
|
||||
badStreamPanic(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (r *tdsBuffer) Read(buf []byte) (copied int, err error) {
|
||||
copied = 0
|
||||
err = nil
|
||||
if r.rpos == r.rsize {
|
||||
if r.final {
|
||||
return 0, io.EOF
|
||||
}
|
||||
err = r.readNextPacket()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
copied = copy(buf, r.rbuf[r.rpos:r.rsize])
|
||||
r.rpos += copied
|
||||
return
|
||||
}
|
604
vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go
generated
vendored
Normal file
@ -0,0 +1,604 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Bulk struct {
|
||||
// ctx is used only for AddRow and Done methods.
|
||||
// This could be removed if AddRow and Done accepted
|
||||
// a ctx field as well, which is available with the
|
||||
// database/sql call.
|
||||
ctx context.Context
|
||||
|
||||
cn *Conn
|
||||
metadata []columnStruct
|
||||
bulkColumns []columnStruct
|
||||
columnsName []string
|
||||
tablename string
|
||||
numRows int
|
||||
|
||||
headerSent bool
|
||||
Options BulkOptions
|
||||
Debug bool
|
||||
}
|
||||
type BulkOptions struct {
|
||||
CheckConstraints bool
|
||||
FireTriggers bool
|
||||
KeepNulls bool
|
||||
KilobytesPerBatch int
|
||||
RowsPerBatch int
|
||||
Order []string
|
||||
Tablock bool
|
||||
}
|
||||
|
||||
type DataValue interface{}
|
||||
|
||||
func (cn *Conn) CreateBulk(table string, columns []string) (_ *Bulk) {
|
||||
b := Bulk{ctx: context.Background(), cn: cn, tablename: table, headerSent: false, columnsName: columns}
|
||||
b.Debug = false
|
||||
return &b
|
||||
}
|
||||
|
||||
func (cn *Conn) CreateBulkContext(ctx context.Context, table string, columns []string) (_ *Bulk) {
|
||||
b := Bulk{ctx: ctx, cn: cn, tablename: table, headerSent: false, columnsName: columns}
|
||||
b.Debug = false
|
||||
return &b
|
||||
}
|
||||
|
||||
func (b *Bulk) sendBulkCommand(ctx context.Context) (err error) {
|
||||
//get table columns info
|
||||
err = b.getMetadata(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//match the columns
|
||||
for _, colname := range b.columnsName {
|
||||
var bulkCol *columnStruct
|
||||
|
||||
for _, m := range b.metadata {
|
||||
if m.ColName == colname {
|
||||
bulkCol = &m
|
||||
break
|
||||
}
|
||||
}
|
||||
if bulkCol != nil {
|
||||
|
||||
if bulkCol.ti.TypeId == typeUdt {
|
||||
//send udt as binary
|
||||
bulkCol.ti.TypeId = typeBigVarBin
|
||||
}
|
||||
b.bulkColumns = append(b.bulkColumns, *bulkCol)
|
||||
b.dlogf("Adding column %s %s %#x", colname, bulkCol.ColName, bulkCol.ti.TypeId)
|
||||
} else {
|
||||
return fmt.Errorf("Column %s does not exist in destination table %s", colname, b.tablename)
|
||||
}
|
||||
}
|
||||
|
||||
//create the bulk command
|
||||
|
||||
//columns definitions
|
||||
var col_defs bytes.Buffer
|
||||
for i, col := range b.bulkColumns {
|
||||
if i != 0 {
|
||||
col_defs.WriteString(", ")
|
||||
}
|
||||
col_defs.WriteString("[" + col.ColName + "] " + makeDecl(col.ti))
|
||||
}
|
||||
|
||||
//options
|
||||
var with_opts []string
|
||||
|
||||
if b.Options.CheckConstraints {
|
||||
with_opts = append(with_opts, "CHECK_CONSTRAINTS")
|
||||
}
|
||||
if b.Options.FireTriggers {
|
||||
with_opts = append(with_opts, "FIRE_TRIGGERS")
|
||||
}
|
||||
if b.Options.KeepNulls {
|
||||
with_opts = append(with_opts, "KEEP_NULLS")
|
||||
}
|
||||
if b.Options.KilobytesPerBatch > 0 {
|
||||
with_opts = append(with_opts, fmt.Sprintf("KILOBYTES_PER_BATCH = %d", b.Options.KilobytesPerBatch))
|
||||
}
|
||||
if b.Options.RowsPerBatch > 0 {
|
||||
with_opts = append(with_opts, fmt.Sprintf("ROWS_PER_BATCH = %d", b.Options.RowsPerBatch))
|
||||
}
|
||||
if len(b.Options.Order) > 0 {
|
||||
with_opts = append(with_opts, fmt.Sprintf("ORDER(%s)", strings.Join(b.Options.Order, ",")))
|
||||
}
|
||||
if b.Options.Tablock {
|
||||
with_opts = append(with_opts, "TABLOCK")
|
||||
}
|
||||
var with_part string
|
||||
if len(with_opts) > 0 {
|
||||
with_part = fmt.Sprintf("WITH (%s)", strings.Join(with_opts, ","))
|
||||
}
|
||||
|
||||
query := fmt.Sprintf("INSERT BULK %s (%s) %s", b.tablename, col_defs.String(), with_part)
|
||||
|
||||
stmt, err := b.cn.PrepareContext(ctx, query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Prepare failed: %s", err.Error())
|
||||
}
|
||||
b.dlogf(query)
|
||||
|
||||
_, err = stmt.(*Stmt).ExecContext(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.headerSent = true
|
||||
|
||||
var buf = b.cn.sess.buf
|
||||
buf.BeginPacket(packBulkLoadBCP, false)
|
||||
|
||||
// Send the columns metadata.
|
||||
columnMetadata := b.createColMetadata()
|
||||
_, err = buf.Write(columnMetadata)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// AddRow immediately writes the row to the destination table.
|
||||
// The arguments are the row values in the order they were specified.
|
||||
func (b *Bulk) AddRow(row []interface{}) (err error) {
|
||||
if !b.headerSent {
|
||||
err = b.sendBulkCommand(b.ctx)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(row) != len(b.bulkColumns) {
|
||||
return fmt.Errorf("Row does not have the same number of columns than the destination table %d %d",
|
||||
len(row), len(b.bulkColumns))
|
||||
}
|
||||
|
||||
bytes, err := b.makeRowData(row)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = b.cn.sess.buf.Write(bytes)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b.numRows = b.numRows + 1
|
||||
return
|
||||
}
|
||||
|
||||
func (b *Bulk) makeRowData(row []interface{}) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.WriteByte(byte(tokenRow))
|
||||
|
||||
var logcol bytes.Buffer
|
||||
for i, col := range b.bulkColumns {
|
||||
|
||||
if b.Debug {
|
||||
logcol.WriteString(fmt.Sprintf(" col[%d]='%v' ", i, row[i]))
|
||||
}
|
||||
param, err := b.makeParam(row[i], col)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bulkcopy: %s", err.Error())
|
||||
}
|
||||
|
||||
if col.ti.Writer == nil {
|
||||
return nil, fmt.Errorf("no writer for column: %s, TypeId: %#x",
|
||||
col.ColName, col.ti.TypeId)
|
||||
}
|
||||
err = col.ti.Writer(buf, param.ti, param.buffer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bulkcopy: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
b.dlogf("row[%d] %s\n", b.numRows, logcol.String())
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (b *Bulk) Done() (rowcount int64, err error) {
|
||||
if b.headerSent == false {
|
||||
//no rows had been sent
|
||||
return 0, nil
|
||||
}
|
||||
var buf = b.cn.sess.buf
|
||||
buf.WriteByte(byte(tokenDone))
|
||||
|
||||
binary.Write(buf, binary.LittleEndian, uint16(doneFinal))
|
||||
binary.Write(buf, binary.LittleEndian, uint16(0)) // curcmd
|
||||
|
||||
if b.cn.sess.loginAck.TDSVersion >= verTDS72 {
|
||||
binary.Write(buf, binary.LittleEndian, uint64(0)) //rowcount 0
|
||||
} else {
|
||||
binary.Write(buf, binary.LittleEndian, uint32(0)) //rowcount 0
|
||||
}
|
||||
|
||||
buf.FinishPacket()
|
||||
|
||||
tokchan := make(chan tokenStruct, 5)
|
||||
go processResponse(b.ctx, b.cn.sess, tokchan, nil)
|
||||
|
||||
var rowCount int64
|
||||
for token := range tokchan {
|
||||
switch token := token.(type) {
|
||||
case doneStruct:
|
||||
if token.Status&doneCount != 0 {
|
||||
rowCount = int64(token.RowCount)
|
||||
}
|
||||
if token.isError() {
|
||||
return 0, token.getError()
|
||||
}
|
||||
case error:
|
||||
return 0, b.cn.checkBadConn(token)
|
||||
}
|
||||
}
|
||||
return rowCount, nil
|
||||
}
|
||||
|
||||
func (b *Bulk) createColMetadata() []byte {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.WriteByte(byte(tokenColMetadata)) // token
|
||||
binary.Write(buf, binary.LittleEndian, uint16(len(b.bulkColumns))) // column count
|
||||
|
||||
for i, col := range b.bulkColumns {
|
||||
|
||||
if b.cn.sess.loginAck.TDSVersion >= verTDS72 {
|
||||
binary.Write(buf, binary.LittleEndian, uint32(col.UserType)) // usertype, always 0?
|
||||
} else {
|
||||
binary.Write(buf, binary.LittleEndian, uint16(col.UserType))
|
||||
}
|
||||
binary.Write(buf, binary.LittleEndian, uint16(col.Flags))
|
||||
|
||||
writeTypeInfo(buf, &b.bulkColumns[i].ti)
|
||||
|
||||
if col.ti.TypeId == typeNText ||
|
||||
col.ti.TypeId == typeText ||
|
||||
col.ti.TypeId == typeImage {
|
||||
|
||||
tablename_ucs2 := str2ucs2(b.tablename)
|
||||
binary.Write(buf, binary.LittleEndian, uint16(len(tablename_ucs2)/2))
|
||||
buf.Write(tablename_ucs2)
|
||||
}
|
||||
colname_ucs2 := str2ucs2(col.ColName)
|
||||
buf.WriteByte(uint8(len(colname_ucs2) / 2))
|
||||
buf.Write(colname_ucs2)
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (b *Bulk) getMetadata(ctx context.Context) (err error) {
|
||||
stmt, err := b.cn.prepareContext(ctx, "SET FMTONLY ON")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = stmt.ExecContext(ctx, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Get columns info.
|
||||
stmt, err = b.cn.prepareContext(ctx, fmt.Sprintf("select * from %s SET FMTONLY OFF", b.tablename))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
rows, err := stmt.QueryContext(ctx, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get columns info failed: %v", err)
|
||||
}
|
||||
b.metadata = rows.(*Rows).cols
|
||||
|
||||
if b.Debug {
|
||||
for _, col := range b.metadata {
|
||||
b.dlogf("col: %s typeId: %#x size: %d scale: %d prec: %d flags: %d lcid: %#x\n",
|
||||
col.ColName, col.ti.TypeId, col.ti.Size, col.ti.Scale, col.ti.Prec,
|
||||
col.Flags, col.ti.Collation.LcidAndFlags)
|
||||
}
|
||||
}
|
||||
|
||||
return rows.Close()
|
||||
}
|
||||
|
||||
func (b *Bulk) makeParam(val DataValue, col columnStruct) (res Param, err error) {
|
||||
res.ti.Size = col.ti.Size
|
||||
res.ti.TypeId = col.ti.TypeId
|
||||
|
||||
if val == nil {
|
||||
res.ti.Size = 0
|
||||
return
|
||||
}
|
||||
|
||||
switch col.ti.TypeId {
|
||||
|
||||
case typeInt1, typeInt2, typeInt4, typeInt8, typeIntN:
|
||||
var intvalue int64
|
||||
|
||||
switch val := val.(type) {
|
||||
case int:
|
||||
intvalue = int64(val)
|
||||
case int32:
|
||||
intvalue = int64(val)
|
||||
case int64:
|
||||
intvalue = val
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for int column")
|
||||
return
|
||||
}
|
||||
|
||||
res.buffer = make([]byte, res.ti.Size)
|
||||
if col.ti.Size == 1 {
|
||||
res.buffer[0] = byte(intvalue)
|
||||
} else if col.ti.Size == 2 {
|
||||
binary.LittleEndian.PutUint16(res.buffer, uint16(intvalue))
|
||||
} else if col.ti.Size == 4 {
|
||||
binary.LittleEndian.PutUint32(res.buffer, uint32(intvalue))
|
||||
} else if col.ti.Size == 8 {
|
||||
binary.LittleEndian.PutUint64(res.buffer, uint64(intvalue))
|
||||
}
|
||||
case typeFlt4, typeFlt8, typeFltN:
|
||||
var floatvalue float64
|
||||
|
||||
switch val := val.(type) {
|
||||
case float32:
|
||||
floatvalue = float64(val)
|
||||
case float64:
|
||||
floatvalue = val
|
||||
case int:
|
||||
floatvalue = float64(val)
|
||||
case int64:
|
||||
floatvalue = float64(val)
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for float column: %s", val)
|
||||
return
|
||||
}
|
||||
|
||||
if col.ti.Size == 4 {
|
||||
res.buffer = make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(res.buffer, math.Float32bits(float32(floatvalue)))
|
||||
} else if col.ti.Size == 8 {
|
||||
res.buffer = make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(res.buffer, math.Float64bits(floatvalue))
|
||||
}
|
||||
case typeNVarChar, typeNText, typeNChar:
|
||||
|
||||
switch val := val.(type) {
|
||||
case string:
|
||||
res.buffer = str2ucs2(val)
|
||||
case []byte:
|
||||
res.buffer = val
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for nvarchar column: %s", val)
|
||||
return
|
||||
}
|
||||
res.ti.Size = len(res.buffer)
|
||||
|
||||
case typeVarChar, typeBigVarChar, typeText, typeChar, typeBigChar:
|
||||
switch val := val.(type) {
|
||||
case string:
|
||||
res.buffer = []byte(val)
|
||||
case []byte:
|
||||
res.buffer = val
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for varchar column: %s", val)
|
||||
return
|
||||
}
|
||||
res.ti.Size = len(res.buffer)
|
||||
|
||||
case typeBit, typeBitN:
|
||||
if reflect.TypeOf(val).Kind() != reflect.Bool {
|
||||
err = fmt.Errorf("mssql: invalid type for bit column: %s", val)
|
||||
return
|
||||
}
|
||||
res.ti.TypeId = typeBitN
|
||||
res.ti.Size = 1
|
||||
res.buffer = make([]byte, 1)
|
||||
if val.(bool) {
|
||||
res.buffer[0] = 1
|
||||
}
|
||||
|
||||
case typeDateTime2N, typeDateTimeOffsetN:
|
||||
switch val := val.(type) {
|
||||
case time.Time:
|
||||
days, ns := dateTime2(val)
|
||||
ns /= int64(math.Pow10(int(col.ti.Scale)*-1) * 1000000000)
|
||||
|
||||
var data = make([]byte, 5)
|
||||
|
||||
data[0] = byte(ns)
|
||||
data[1] = byte(ns >> 8)
|
||||
data[2] = byte(ns >> 16)
|
||||
data[3] = byte(ns >> 24)
|
||||
data[4] = byte(ns >> 32)
|
||||
|
||||
if col.ti.Scale <= 2 {
|
||||
res.ti.Size = 6
|
||||
} else if col.ti.Scale <= 4 {
|
||||
res.ti.Size = 7
|
||||
} else {
|
||||
res.ti.Size = 8
|
||||
}
|
||||
var buf []byte
|
||||
buf = make([]byte, res.ti.Size)
|
||||
copy(buf, data[0:res.ti.Size-3])
|
||||
|
||||
buf[res.ti.Size-3] = byte(days)
|
||||
buf[res.ti.Size-2] = byte(days >> 8)
|
||||
buf[res.ti.Size-1] = byte(days >> 16)
|
||||
|
||||
if col.ti.TypeId == typeDateTimeOffsetN {
|
||||
_, offset := val.Zone()
|
||||
var offsetMinute = uint16(offset / 60)
|
||||
buf = append(buf, byte(offsetMinute))
|
||||
buf = append(buf, byte(offsetMinute>>8))
|
||||
res.ti.Size = res.ti.Size + 2
|
||||
}
|
||||
|
||||
res.buffer = buf
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for datetime2 column: %s", val)
|
||||
return
|
||||
}
|
||||
case typeDateN:
|
||||
switch val := val.(type) {
|
||||
case time.Time:
|
||||
days, _ := dateTime2(val)
|
||||
|
||||
res.ti.Size = 3
|
||||
res.buffer = make([]byte, 3)
|
||||
res.buffer[0] = byte(days)
|
||||
res.buffer[1] = byte(days >> 8)
|
||||
res.buffer[2] = byte(days >> 16)
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for date column: %s", val)
|
||||
return
|
||||
}
|
||||
case typeDateTime, typeDateTimeN, typeDateTim4:
|
||||
switch val := val.(type) {
|
||||
case time.Time:
|
||||
if col.ti.Size == 4 {
|
||||
res.ti.Size = 4
|
||||
res.buffer = make([]byte, 4)
|
||||
|
||||
ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
dur := val.Sub(ref)
|
||||
days := dur / (24 * time.Hour)
|
||||
if days < 0 {
|
||||
err = fmt.Errorf("mssql: Date %s is out of range", val)
|
||||
return
|
||||
}
|
||||
mins := val.Hour()*60 + val.Minute()
|
||||
|
||||
binary.LittleEndian.PutUint16(res.buffer[0:2], uint16(days))
|
||||
binary.LittleEndian.PutUint16(res.buffer[2:4], uint16(mins))
|
||||
} else if col.ti.Size == 8 {
|
||||
res.ti.Size = 8
|
||||
res.buffer = make([]byte, 8)
|
||||
|
||||
days := divFloor(val.Unix(), 24*60*60)
|
||||
// 25567 - number of days from Jan 1 1900 UTC to Jan 1 1970
|
||||
days = days + 25567
|
||||
tm := (val.Hour()*60*60+val.Minute()*60+val.Second())*300 + int(val.Nanosecond()/10000000*3)
|
||||
|
||||
binary.LittleEndian.PutUint32(res.buffer[0:4], uint32(days))
|
||||
binary.LittleEndian.PutUint32(res.buffer[4:8], uint32(tm))
|
||||
} else {
|
||||
err = fmt.Errorf("mssql: invalid size of column")
|
||||
}
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for datetime column: %s", val)
|
||||
}
|
||||
|
||||
// case typeMoney, typeMoney4, typeMoneyN:
|
||||
case typeDecimal, typeDecimalN, typeNumeric, typeNumericN:
|
||||
var value float64
|
||||
switch v := val.(type) {
|
||||
case int:
|
||||
value = float64(v)
|
||||
case int8:
|
||||
value = float64(v)
|
||||
case int16:
|
||||
value = float64(v)
|
||||
case int32:
|
||||
value = float64(v)
|
||||
case int64:
|
||||
value = float64(v)
|
||||
case float32:
|
||||
value = float64(v)
|
||||
case float64:
|
||||
value = v
|
||||
case string:
|
||||
if value, err = strconv.ParseFloat(v, 64); err != nil {
|
||||
return res, fmt.Errorf("bulk: unable to convert string to float: %v", err)
|
||||
}
|
||||
default:
|
||||
return res, fmt.Errorf("unknown value for decimal: %#v", v)
|
||||
}
|
||||
|
||||
perc := col.ti.Prec
|
||||
scale := col.ti.Scale
|
||||
var dec Decimal
|
||||
dec, err = Float64ToDecimalScale(value, scale)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
dec.prec = perc
|
||||
|
||||
var length byte
|
||||
switch {
|
||||
case perc <= 9:
|
||||
length = 4
|
||||
case perc <= 19:
|
||||
length = 8
|
||||
case perc <= 28:
|
||||
length = 12
|
||||
default:
|
||||
length = 16
|
||||
}
|
||||
|
||||
buf := make([]byte, length+1)
|
||||
// first byte length written by typeInfo.writer
|
||||
res.ti.Size = int(length) + 1
|
||||
// second byte sign
|
||||
if value < 0 {
|
||||
buf[0] = 0
|
||||
} else {
|
||||
buf[0] = 1
|
||||
}
|
||||
|
||||
ub := dec.UnscaledBytes()
|
||||
l := len(ub)
|
||||
if l > int(length) {
|
||||
err = fmt.Errorf("decimal out of range: %s", dec)
|
||||
return res, err
|
||||
}
|
||||
// reverse the bytes
|
||||
for i, j := 1, l-1; j >= 0; i, j = i+1, j-1 {
|
||||
buf[i] = ub[j]
|
||||
}
|
||||
res.buffer = buf
|
||||
case typeBigVarBin:
|
||||
switch val := val.(type) {
|
||||
case []byte:
|
||||
res.ti.Size = len(val)
|
||||
res.buffer = val
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for Binary column: %s", val)
|
||||
return
|
||||
}
|
||||
case typeGuid:
|
||||
switch val := val.(type) {
|
||||
case []byte:
|
||||
res.ti.Size = len(val)
|
||||
res.buffer = val
|
||||
default:
|
||||
err = fmt.Errorf("mssql: invalid type for Guid column: %s", val)
|
||||
return
|
||||
}
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("mssql: type %x not implemented", col.ti.TypeId)
|
||||
}
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
func (b *Bulk) dlogf(format string, v ...interface{}) {
|
||||
if b.Debug {
|
||||
b.cn.sess.log.Printf(format, v...)
|
||||
}
|
||||
}
|
93
vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
package mssql

import (
	"context"
	"database/sql/driver"
	"encoding/json"
	"errors"
)

type copyin struct {
	cn       *Conn
	bulkcopy *Bulk
	closed   bool
}

type serializableBulkConfig struct {
	TableName   string
	ColumnsName []string
	Options     BulkOptions
}

func (d *Driver) OpenConnection(dsn string) (*Conn, error) {
	return d.open(context.Background(), dsn)
}

func (c *Conn) prepareCopyIn(ctx context.Context, query string) (_ driver.Stmt, err error) {
	config_json := query[11:]

	bulkconfig := serializableBulkConfig{}
	err = json.Unmarshal([]byte(config_json), &bulkconfig)
	if err != nil {
		return
	}

	bulkcopy := c.CreateBulkContext(ctx, bulkconfig.TableName, bulkconfig.ColumnsName)
	bulkcopy.Options = bulkconfig.Options

	ci := &copyin{
		cn:       c,
		bulkcopy: bulkcopy,
	}

	return ci, nil
}

func CopyIn(table string, options BulkOptions, columns ...string) string {
	bulkconfig := &serializableBulkConfig{TableName: table, Options: options, ColumnsName: columns}

	config_json, err := json.Marshal(bulkconfig)
	if err != nil {
		panic(err)
	}

	stmt := "INSERTBULK " + string(config_json)

	return stmt
}

func (ci *copyin) NumInput() int {
	return -1
}

func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
	panic("should never be called")
}

func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
	if ci.closed {
		return nil, errors.New("errCopyInClosed")
	}

	if len(v) == 0 {
		rowCount, err := ci.bulkcopy.Done()
		ci.closed = true
		return driver.RowsAffected(rowCount), err
	}

	t := make([]interface{}, len(v))
	for i, val := range v {
		t[i] = val
	}

	err = ci.bulkcopy.AddRow(t)
	if err != nil {
		return
	}

	return driver.RowsAffected(0), nil
}

func (ci *copyin) Close() (err error) {
	return nil
}
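A rough usage sketch for the CopyIn helper above, driven through database/sql: prepare the generated "INSERTBULK ..." statement, call Exec once per row, then a final Exec with no arguments to flush the batch. The table and column names are illustrative:

package example

import (
	"database/sql"

	mssql "github.com/denisenkom/go-mssqldb"
)

// bulkInsert streams rows into an illustrative test_table(id, name) table.
func bulkInsert(db *sql.DB) error {
	txn, err := db.Begin()
	if err != nil {
		return err
	}
	defer txn.Rollback()

	// CopyIn serializes the table, columns and options into the INSERTBULK
	// statement that prepareCopyIn decodes on the driver side.
	stmt, err := txn.Prepare(mssql.CopyIn("test_table", mssql.BulkOptions{}, "id", "name"))
	if err != nil {
		return err
	}
	for i := 0; i < 10; i++ {
		// Each Exec with arguments becomes one Bulk.AddRow call.
		if _, err := stmt.Exec(i, "row"); err != nil {
			return err
		}
	}
	// A final Exec with no arguments calls Bulk.Done and flushes the batch.
	if _, err := stmt.Exec(); err != nil {
		return err
	}
	if err := stmt.Close(); err != nil {
		return err
	}
	return txn.Commit()
}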
131 vendor/github.com/denisenkom/go-mssqldb/decimal.go (generated, vendored, normal file)
@@ -0,0 +1,131 @@
package mssql

import (
	"encoding/binary"
	"errors"
	"math"
	"math/big"
)

// http://msdn.microsoft.com/en-us/library/ee780893.aspx
type Decimal struct {
	integer  [4]uint32
	positive bool
	prec     uint8
	scale    uint8
}

var scaletblflt64 [39]float64

func (d Decimal) ToFloat64() float64 {
	val := float64(0)
	for i := 3; i >= 0; i-- {
		val *= 0x100000000
		val += float64(d.integer[i])
	}
	if !d.positive {
		val = -val
	}
	if d.scale != 0 {
		val /= scaletblflt64[d.scale]
	}
	return val
}

const autoScale = 100

func Float64ToDecimal(f float64) (Decimal, error) {
	return Float64ToDecimalScale(f, autoScale)
}

func Float64ToDecimalScale(f float64, scale uint8) (Decimal, error) {
	var dec Decimal
	if math.IsNaN(f) {
		return dec, errors.New("NaN")
	}
	if math.IsInf(f, 0) {
		return dec, errors.New("Infinity can't be converted to decimal")
	}
	dec.positive = f >= 0
	if !dec.positive {
		f = math.Abs(f)
	}
	if f > 3.402823669209385e+38 {
		return dec, errors.New("Float value is out of range")
	}
	dec.prec = 20
	var integer float64
	for dec.scale = 0; dec.scale <= scale; dec.scale++ {
		integer = f * scaletblflt64[dec.scale]
		_, frac := math.Modf(integer)
		if frac == 0 && scale == autoScale {
			break
		}
	}
	for i := 0; i < 4; i++ {
		mod := math.Mod(integer, 0x100000000)
		integer -= mod
		integer /= 0x100000000
		dec.integer[i] = uint32(mod)
	}
	return dec, nil
}

func init() {
	var acc float64 = 1
	for i := 0; i <= 38; i++ {
		scaletblflt64[i] = acc
		acc *= 10
	}
}

func (d Decimal) BigInt() big.Int {
	bytes := make([]byte, 16)
	binary.BigEndian.PutUint32(bytes[0:4], d.integer[3])
	binary.BigEndian.PutUint32(bytes[4:8], d.integer[2])
	binary.BigEndian.PutUint32(bytes[8:12], d.integer[1])
	binary.BigEndian.PutUint32(bytes[12:16], d.integer[0])
	var x big.Int
	x.SetBytes(bytes)
	if !d.positive {
		x.Neg(&x)
	}
	return x
}

func (d Decimal) Bytes() []byte {
	x := d.BigInt()
	return scaleBytes(x.String(), d.scale)
}

func (d Decimal) UnscaledBytes() []byte {
	x := d.BigInt()
	return x.Bytes()
}

func scaleBytes(s string, scale uint8) []byte {
	z := make([]byte, 0, len(s)+1)
	if s[0] == '-' || s[0] == '+' {
		z = append(z, byte(s[0]))
		s = s[1:]
	}
	pos := len(s) - int(scale)
	if pos <= 0 {
		z = append(z, byte('0'))
	} else if pos > 0 {
		z = append(z, []byte(s[:pos])...)
	}
	if scale > 0 {
		z = append(z, byte('.'))
		for pos < 0 {
			z = append(z, byte('0'))
			pos++
		}
		z = append(z, []byte(s[pos:])...)
	}
	return z
}

func (d Decimal) String() string {
	return string(d.Bytes())
}
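scaleBytes above renders the unscaled digits with the decimal point re-inserted scale places from the right, padding with zeros when the value has fewer digits than the scale. A small standalone illustration of the same rule (insertScale is a hypothetical stand-in, not part of the package):

package main

import "fmt"

// insertScale re-inserts a decimal point `scale` digits from the right of an
// unscaled integer string, the same rule scaleBytes applies.
func insertScale(s string, scale int) string {
	sign := ""
	if len(s) > 0 && (s[0] == '-' || s[0] == '+') {
		sign, s = string(s[0]), s[1:]
	}
	if scale == 0 {
		return sign + s
	}
	for len(s) < scale {
		s = "0" + s // pad so there are enough digits after the point
	}
	intPart := s[:len(s)-scale]
	if intPart == "" {
		intPart = "0"
	}
	return sign + intPart + "." + s[len(s)-scale:]
}

func main() {
	fmt.Println(insertScale("123456", 3)) // 123.456
	fmt.Println(insertScale("-5", 3))     // -0.005
}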
12 vendor/github.com/denisenkom/go-mssqldb/doc.go (generated, vendored, normal file)
@@ -0,0 +1,12 @@
// package mssql implements the TDS protocol used to connect to MS SQL Server (sqlserver)
// database servers.
//
// This package registers two drivers:
//  sqlserver: uses native "@" parameter placeholder names and does no pre-processing.
//  mssql: expects identifiers to be prefixed with ":" and pre-processes queries.
//
// If the ordinal position is used for query parameters, identifiers will be named
// "@p1", "@p2", ... "@pN".
//
// Please refer to the README for the format of the DSN.
package mssql
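A short sketch of the placeholder styles this comment describes, assuming plain database/sql usage; the DSN, credentials, and queries are illustrative:

package main

import (
	"database/sql"
	"log"

	_ "github.com/denisenkom/go-mssqldb" // registers the drivers described above
)

func main() {
	// The URL form is one of the DSN variants described in the README
	// (illustrative host and credentials).
	db, err := sql.Open("sqlserver", "sqlserver://user:pass@localhost:1433?database=master")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var n int
	// Ordinal parameters are exposed as @p1, @p2, ...
	if err := db.QueryRow("SELECT @p1 + @p2", 1, 2).Scan(&n); err != nil {
		log.Fatal(err)
	}
	// Named parameters use sql.Named with the native @ names.
	if err := db.QueryRow("SELECT @a + @b", sql.Named("a", 3), sql.Named("b", 4)).Scan(&n); err != nil {
		log.Fatal(err)
	}
	log.Println(n)
}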
73 vendor/github.com/denisenkom/go-mssqldb/error.go (generated, vendored, normal file)
@@ -0,0 +1,73 @@
package mssql

import (
	"fmt"
)

// Error represents an SQL Server error. This
// type includes methods for reading the contents
// of the struct, which allows calling programs
// to check for specific error conditions without
// having to import this package directly.
type Error struct {
	Number     int32
	State      uint8
	Class      uint8
	Message    string
	ServerName string
	ProcName   string
	LineNo     int32
}

func (e Error) Error() string {
	return "mssql: " + e.Message
}

// SQLErrorNumber returns the SQL Server error number.
func (e Error) SQLErrorNumber() int32 {
	return e.Number
}

func (e Error) SQLErrorState() uint8 {
	return e.State
}

func (e Error) SQLErrorClass() uint8 {
	return e.Class
}

func (e Error) SQLErrorMessage() string {
	return e.Message
}

func (e Error) SQLErrorServerName() string {
	return e.ServerName
}

func (e Error) SQLErrorProcName() string {
	return e.ProcName
}

func (e Error) SQLErrorLineNo() int32 {
	return e.LineNo
}

type StreamError struct {
	Message string
}

func (e StreamError) Error() string {
	return e.Message
}

func streamErrorf(format string, v ...interface{}) StreamError {
	return StreamError{"Invalid TDS stream: " + fmt.Sprintf(format, v...)}
}

func badStreamPanic(err error) {
	panic(err)
}

func badStreamPanicf(format string, v ...interface{}) {
	panic(streamErrorf(format, v...))
}
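The doc comment on Error above notes that callers can check specific error conditions without importing this package; a minimal sketch of that pattern, declaring the interface locally (error number 2627, a unique-key violation, is just an illustrative check):

package example

// sqlServerError is declared by the caller, so the driver package does not
// have to be imported just to read the error number.
type sqlServerError interface {
	SQLErrorNumber() int32
	SQLErrorMessage() string
}

// isUniqueKeyViolation reports whether err carries SQL Server error 2627
// (violation of a unique constraint).
func isUniqueKeyViolation(err error) bool {
	if se, ok := err.(sqlServerError); ok {
		return se.SQLErrorNumber() == 2627
	}
	return false
}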
Some files were not shown because too many files have changed in this diff.