Merge branch 'master' of github.com:grafana/grafana

Torkel Ödegaard 2018-09-19 17:45:58 +02:00
commit c67327d768
33 changed files with 397 additions and 41 deletions

.gitignore vendored
View File

@@ -40,8 +40,8 @@ public/css/*.min.css
conf/custom.ini
fig.yml
docker-compose.yml
docker-compose.yaml
devenv/docker-compose.yml
devenv/docker-compose.yaml
/conf/provisioning/**/custom.yaml
/conf/provisioning/**/dev.yaml
/conf/ldap_dev.toml
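With this change the generated compose file is ignored under devenv/ rather than at the repository root. A minimal usage sketch, assuming the blocks below get assembled into devenv/docker-compose.yaml (path taken from the new ignore entry above):

# Hedged sketch: bring up the assembled development services in the background.
docker-compose -f devenv/docker-compose.yaml up -d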

View File

@@ -0,0 +1,9 @@
# This will proxy all requests for http://localhost:10081/grafana/ to
# http://localhost:3000 (Grafana running locally)
#
# Please note that you'll need to change the root_url in the Grafana configuration:
# root_url = %(protocol)s://%(domain)s:10081/grafana/
apacheproxy:
build: blocks/apache_proxy
network_mode: host
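The matching Grafana server setting referenced in the comment above, as a minimal sketch (the [server] section and the conf/custom.ini location are assumptions; the value is the one given in the comment):

; conf/custom.ini (assumed location)
[server]
root_url = %(protocol)s://%(domain)s:10081/grafana/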

View File

@@ -0,0 +1,11 @@
collectd:
build: blocks/collectd
environment:
HOST_NAME: myserver
GRAPHITE_HOST: graphite
GRAPHITE_PORT: 2003
GRAPHITE_PREFIX: collectd.
REPORT_BY_CPU: 'false'
COLLECT_INTERVAL: 10
links:
- graphite

View File

@@ -0,0 +1,15 @@
elasticsearch:
image: elasticsearch:2.4.1
command: elasticsearch -Des.network.host=0.0.0.0
ports:
- "9200:9200"
- "9300:9300"
volumes:
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
fake-elastic-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: elasticsearch
FD_PORT: 9200

View File

@@ -0,0 +1,8 @@
elasticsearch1:
image: elasticsearch:1.7.6
command: elasticsearch -Des.network.host=0.0.0.0
ports:
- "11200:9200"
- "11300:9300"
volumes:
- ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml

View File

@@ -0,0 +1,15 @@
# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine
elasticsearch5:
image: elasticsearch:5
command: elasticsearch
ports:
- "10200:9200"
- "10300:9300"
fake-elastic5-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: elasticsearch
FD_PORT: 10200
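The sysctl command in the comment above only lasts until the host reboots; a hedged sketch for making the setting persistent (standard sysctl behaviour, not part of this change):

# Persist vm.max_map_count across reboots, then reload the settings.
echo 'vm.max_map_count=262144' | sudo tee -a /etc/sysctl.conf
sudo sysctl -p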

View File

@@ -0,0 +1,15 @@
# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine
elasticsearch6:
image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.4
command: elasticsearch
ports:
- "11200:9200"
- "11300:9300"
fake-elastic6-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: elasticsearch6
FD_PORT: 11200

View File

@@ -0,0 +1,16 @@
graphite09:
build: blocks/graphite
ports:
- "8080:80"
- "2003:2003"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
fake-graphite-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2003

View File

@@ -0,0 +1,21 @@
graphite:
build:
context: blocks/graphite1
args:
version: master
ports:
- "8080:80"
- "2003:2003"
- "8125:8125/udp"
- "8126:8126"
volumes:
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
fake-graphite-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2003

View File

@@ -0,0 +1,18 @@
graphite11:
image: graphiteapp/graphite-statsd
ports:
- "8180:80"
- "2103-2104:2003-2004"
- "2123-2124:2023-2024"
- "8225:8125/udp"
- "8226:8126"
fake-graphite11-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: graphite
FD_PORT: 2103
FD_GRAPHITE_VERSION: 1.1
depends_on:
- graphite11

View File

@@ -0,0 +1,17 @@
influxdb:
image: influxdb:latest
container_name: influxdb
ports:
- "2004:2004"
- "8083:8083"
- "8086:8086"
volumes:
- ./blocks/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf
fake-influxdb-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: influxdb
FD_PORT: 8086

View File

@@ -0,0 +1,6 @@
jaeger:
image: jaegertracing/all-in-one:latest
ports:
- "127.0.0.1:6831:6831/udp"
- "16686:16686"

View File

@@ -0,0 +1,5 @@
memcached:
image: memcached:latest
ports:
- "11211:11211"

View File

@@ -0,0 +1,19 @@
mssql:
build:
context: blocks/mssql/build
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: Password!
MSSQL_PID: Developer
MSSQL_DATABASE: grafana
MSSQL_USER: grafana
MSSQL_PASSWORD: Password!
ports:
- "1433:1433"
fake-mssql-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: mssql
FD_PORT: 1433

View File

@@ -0,0 +1,12 @@
mssqltests:
build:
context: blocks/mssql/build
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: Password!
MSSQL_PID: Express
MSSQL_DATABASE: grafanatest
MSSQL_USER: grafana
MSSQL_PASSWORD: Password!
ports:
- "1433:1433"

View File

@@ -0,0 +1,18 @@
mysql:
image: mysql:5.6
environment:
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_DATABASE: grafana
MYSQL_USER: grafana
MYSQL_PASSWORD: password
ports:
- "3306:3306"
command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
fake-mysql-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: mysql
FD_PORT: 3306

View File

@@ -0,0 +1,9 @@
mysql_opendata:
build: blocks/mysql_opendata
environment:
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_DATABASE: testdata
MYSQL_USER: grafana
MYSQL_PASSWORD: password
ports:
- "3307:3306"

View File

@@ -0,0 +1,11 @@
mysqltests:
build:
context: blocks/mysql_tests
environment:
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_DATABASE: grafana_tests
MYSQL_USER: grafana
MYSQL_PASSWORD: password
ports:
- "3306:3306"
tmpfs: /var/lib/mysql:rw

View File

@@ -0,0 +1,9 @@
# This will proxy all requests for http://localhost:10080/grafana/ to
# http://localhost:3000 (Grafana running locally)
#
# Please note that you'll need to change the root_url in the Grafana configuration:
# root_url = %(protocol)s://%(domain)s:10080/grafana/
nginxproxy:
build: blocks/nginx_proxy
network_mode: host

View File

@@ -0,0 +1,10 @@
openldap:
build: blocks/openldap
environment:
SLAPD_PASSWORD: grafana
SLAPD_DOMAIN: grafana.org
SLAPD_ADDITIONAL_MODULES: memberof
ports:
- "389:389"

View File

@@ -0,0 +1,11 @@
opentsdb:
image: opower/opentsdb:latest
ports:
- "4242:4242"
fake-opentsdb-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: opentsdb

View File

@@ -0,0 +1,16 @@
postgrestest:
image: postgres:9.3
environment:
POSTGRES_USER: grafana
POSTGRES_PASSWORD: password
POSTGRES_DATABASE: grafana
ports:
- "5432:5432"
command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql
fake-postgres-data:
image: grafana/fake-data-gen
network_mode: bridge
environment:
FD_DATASOURCE: postgres
FD_PORT: 5432

View File

@@ -0,0 +1,9 @@
postgrestest:
build:
context: blocks/postgres_tests
environment:
POSTGRES_USER: grafanatest
POSTGRES_PASSWORD: grafanatest
ports:
- "5432:5432"
tmpfs: /var/lib/postgresql/data:rw

View File

@@ -0,0 +1,31 @@
prometheus:
build: blocks/prometheus
network_mode: host
ports:
- "9090:9090"
node_exporter:
image: prom/node-exporter
network_mode: host
ports:
- "9100:9100"
fake-prometheus-data:
image: grafana/fake-data-gen
network_mode: host
ports:
- "9091:9091"
environment:
FD_DATASOURCE: prom
alertmanager:
image: quay.io/prometheus/alertmanager
network_mode: host
ports:
- "9093:9093"
prometheus-random-data:
build: blocks/prometheus_random_data
network_mode: host
ports:
- "8081:8080"

View File

@@ -0,0 +1,31 @@
prometheus:
build: blocks/prometheus2
network_mode: host
ports:
- "9090:9090"
node_exporter:
image: prom/node-exporter
network_mode: host
ports:
- "9100:9100"
fake-prometheus-data:
image: grafana/fake-data-gen
network_mode: host
ports:
- "9091:9091"
environment:
FD_DATASOURCE: prom
alertmanager:
image: quay.io/prometheus/alertmanager
network_mode: host
ports:
- "9093:9093"
prometheus-random-data:
build: blocks/prometheus_random_data
network_mode: host
ports:
- "8081:8080"

View File

@@ -0,0 +1,26 @@
prometheus:
build: blocks/prometheus_mac
ports:
- "9090:9090"
node_exporter:
image: prom/node-exporter
ports:
- "9100:9100"
fake-prometheus-data:
image: grafana/fake-data-gen
ports:
- "9091:9091"
environment:
FD_DATASOURCE: prom
alertmanager:
image: quay.io/prometheus/alertmanager
ports:
- "9093:9093"
prometheus-random-data:
build: blocks/prometheus_random_data
ports:
- "8081:8080"

View File

@@ -0,0 +1,4 @@
snmpd:
image: namshi/smtp
ports:
- "25:25"

View File

@@ -48,11 +48,7 @@ func autoUpdateAppDashboard(pluginDashInfo *PluginDashboardInfoDTO, orgId int64)
Path: pluginDashInfo.Path,
}
if err := bus.Dispatch(&updateCmd); err != nil {
return err
}
return nil
return bus.Dispatch(&updateCmd)
}
func syncPluginDashboards(pluginDef *PluginBase, orgId int64) {
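The hunk above folds an "if err != nil { return err } / return nil" wrapper into a single return statement; the same simplification is applied in the notification-journal hunk below. A minimal Go sketch of the pattern (dispatch, updateVerbose and updateConcise are hypothetical names, not Grafana code):

package example

import "errors"

// dispatch is a hypothetical stand-in for bus.Dispatch, used only for illustration.
func dispatch(cmd interface{}) error { return errors.New("not implemented") }

// Before: check the error, return it, and return nil on success.
func updateVerbose(cmd interface{}) error {
	if err := dispatch(cmd); err != nil {
		return err
	}
	return nil
}

// After: return the call's error (nil or not) directly; behaviour is identical.
func updateConcise(cmd interface{}) error {
	return dispatch(cmd)
}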

View File

@@ -239,11 +239,8 @@ func RecordNotificationJournal(ctx context.Context, cmd *m.RecordNotificationJou
Success: cmd.Success,
}
if _, err := sess.Insert(journalEntry); err != nil {
return err
}
return nil
_, err := sess.Insert(journalEntry)
return err
})
}

View File

@@ -144,7 +144,7 @@ func (c *baseClientImpl) encodeBatchRequests(requests []*multiRequest) ([]byte,
payload.WriteString(body + "\n")
}
elapsed := time.Now().Sub(start)
elapsed := time.Since(start)
clientLog.Debug("Encoded batch requests to json", "took", elapsed)
return payload.Bytes(), nil
@@ -187,7 +187,7 @@ func (c *baseClientImpl) executeRequest(method, uriPath string, body []byte) (*h
start := time.Now()
defer func() {
elapsed := time.Now().Sub(start)
elapsed := time.Since(start)
clientLog.Debug("Executed request", "took", elapsed)
}()
return ctxhttp.Do(c.ctx, httpClient, req)
@@ -215,7 +215,7 @@ func (c *baseClientImpl) ExecuteMultisearch(r *MultiSearchRequest) (*MultiSearch
return nil, err
}
elapsed := time.Now().Sub(start)
elapsed := time.Since(start)
clientLog.Debug("Decoded multisearch json response", "took", elapsed)
msr.Status = res.StatusCode
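time.Since(start) in the hunks above is the standard-library shorthand for time.Now().Sub(start). A small Go sketch of the timing idiom (timeIt and the log message are illustrative, not Grafana code):

package example

import (
	"log"
	"time"
)

// timeIt runs work and logs how long it took, using the time.Since form
// that the diff above switches to.
func timeIt(work func()) {
	start := time.Now()
	work()
	elapsed := time.Since(start) // equivalent to time.Now().Sub(start)
	log.Println("took", elapsed)
}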

View File

@@ -25,7 +25,7 @@ export class PostgresMetaQuery {
findMetricTable() {
// query that returns first table found that has a timestamp(tz) column and a float column
const query = `
let query = `
SELECT
quote_ident(table_name) as table_name,
( SELECT
@@ -47,11 +47,9 @@ SELECT
ORDER BY ordinal_position LIMIT 1
) AS value_column
FROM information_schema.tables t
WHERE
table_schema IN (
SELECT CASE WHEN trim(unnest) = '"$user"' THEN user ELSE trim(unnest) END
FROM unnest(string_to_array(current_setting('search_path'),','))
) AND
WHERE `;
query += this.buildSchemaConstraint();
query += ` AND
EXISTS
( SELECT 1
FROM information_schema.columns c
@@ -76,8 +74,14 @@
buildSchemaConstraint() {
const query = `
table_schema IN (
SELECT CASE WHEN trim(unnest) = \'"$user"\' THEN user ELSE trim(unnest) END
FROM unnest(string_to_array(current_setting(\'search_path\'),\',\'))
SELECT
CASE WHEN trim(s[i]) = '"$user"' THEN user ELSE trim(s[i]) END
FROM
generate_series(
array_lower(string_to_array(current_setting('search_path'),','),1),
array_upper(string_to_array(current_setting('search_path'),','),1)
) as i,
string_to_array(current_setting('search_path'),',') s
)`;
return query;
}
@@ -92,11 +96,7 @@ table_schema IN (
query += ' AND table_name = ' + this.quoteIdentAsLiteral(parts[1]);
return query;
} else {
query = `
table_schema IN (
SELECT CASE WHEN trim(unnest) = \'"$user"\' THEN user ELSE trim(unnest) END
FROM unnest(string_to_array(current_setting(\'search_path\'),\',\'))
)`;
query = this.buildSchemaConstraint();
query += ' AND table_name = ' + this.quoteIdentAsLiteral(table);
return query;
@@ -149,18 +149,8 @@
}
buildDatatypeQuery(column: string) {
let query = `
SELECT udt_name
FROM information_schema.columns
WHERE
table_schema IN (
SELECT schema FROM (
SELECT CASE WHEN trim(unnest) = \'"$user"\' THEN user ELSE trim(unnest) END as schema
FROM unnest(string_to_array(current_setting(\'search_path\'),\',\'))
) s
WHERE EXISTS (SELECT 1 FROM information_schema.schemata WHERE schema_name = s.schema)
)
`;
let query = 'SELECT udt_name FROM information_schema.columns WHERE ';
query += this.buildSchemaConstraint();
query += ' AND table_name = ' + this.quoteIdentAsLiteral(this.target.table);
query += ' AND column_name = ' + this.quoteIdentAsLiteral(column);
return query;
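To show what the rewritten buildSchemaConstraint resolves to, a sketch of its subquery run on its own; the search_path value and user name are assumed for illustration:

-- With search_path = '"$user", public' and session user grafana, this yields:
--   grafana
--   public
SELECT
  CASE WHEN trim(s[i]) = '"$user"' THEN user ELSE trim(s[i]) END
FROM
  generate_series(
    array_lower(string_to_array(current_setting('search_path'), ','), 1),
    array_upper(string_to_array(current_setting('search_path'), ','), 1)
  ) AS i,
  string_to_array(current_setting('search_path'), ',') s;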

View File

@@ -15,7 +15,7 @@ const builtInWords = [
.join('|')
.split('|');
const metricNameRegexp = /([A-Za-z]\w*)\b(?![\(\]{=!",])/g;
const metricNameRegexp = /([A-Za-z:][\w:]*)\b(?![\(\]{=!",])/g;
const selectorRegexp = /{([^{]*)}/g;
// addLabelToQuery('foo', 'bar', 'baz') => 'foo{bar="baz"}'

View File

@@ -28,6 +28,7 @@ describe('addLabelToQuery()', () => {
expect(addLabelToQuery('foo{instance="my-host.com:9100"}', 'bar', 'baz')).toBe(
'foo{bar="baz",instance="my-host.com:9100"}'
);
expect(addLabelToQuery('foo:metric:rate1m', 'bar', 'baz')).toBe('foo:metric:rate1m{bar="baz"}');
expect(addLabelToQuery('foo{list="a,b,c"}', 'bar', 'baz')).toBe('foo{bar="baz",list="a,b,c"}');
});