Mirror of https://github.com/grafana/grafana.git (synced 2025-02-25 18:55:37 -06:00)

Commit 4f8ee9f5a7: merge with master
@@ -7,6 +7,7 @@ indent_size = 2
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
max_line_length = 120

[*.go]
indent_style = tab
18 .github/ISSUE_TEMPLATE.md (vendored)
@@ -4,15 +4,13 @@ Read before posting:
- Checkout FAQ: https://community.grafana.com/c/howto/faq
- Checkout How to troubleshoot metric query issues: https://community.grafana.com/t/how-to-troubleshoot-metric-query-issues/50

Please prefix your title with [Bug] or [Feature request].

Please include this information:
- What Grafana version are you using?
- What datasource are you using?
- What OS are you running grafana on?
- What did you do?
- What was the expected result?
- What happened instead?
- If related to metric query / data viz:
- Include raw network request & response: get by opening Chrome Dev Tools (F12, Ctrl+Shift+I on windows, Cmd+Opt+I on Mac), go the network tab.
### What Grafana version are you using?
### What datasource are you using?
### What OS are you running grafana on?
### What did you do?
### What was the expected result?
### What happened instead?
### If related to metric query / data viz:
### Include raw network request & response: get by opening Chrome Dev Tools (F12, Ctrl+Shift+I on windows, Cmd+Opt+I on Mac), go the network tab.
13 .gitignore (vendored)
@@ -10,8 +10,10 @@ awsconfig
/public_gen
/public/vendor/npm
/tmp
vendor/phantomjs/phantomjs
vendor/phantomjs/phantomjs.exe
tools/phantomjs/phantomjs
tools/phantomjs/phantomjs.exe
profile.out
coverage.txt

docs/AWS_S3_BUCKET
docs/GIT_BRANCH
@@ -36,19 +38,26 @@ public/css/*.min.css
conf/custom.ini
fig.yml
docker-compose.yml
docker-compose.yaml
/conf/provisioning/**/custom.yaml
profile.cov
/grafana
.notouch
/pkg/cmd/grafana-cli/grafana-cli
/pkg/cmd/grafana-server/grafana-server
/pkg/cmd/grafana-server/debug
debug.test
/examples/*/dist
/packaging/**/*.rpm
/packaging/**/*.deb

# Ignore OSX indexing
.DS_Store

/vendor/**/*.py
/vendor/**/*.xml
/vendor/**/*.yml
/vendor/**/*_test.go
/vendor/**/.editorconfig
/vendor/**/appengine*
*.orig
@@ -1,10 +0,0 @@
#!/usr/bin/env bash

test -z "$(gofmt -s -l . | grep -v vendor/src/ | tee /dev/stderr)"
if [ $? -gt 0 ]; then
  echo "Some files aren't formatted, please run 'go fmt ./pkg/...' to format your source code before committing"
  exit 1
fi


grunt test
@@ -4,7 +4,7 @@
"bitwise":false,
"curly": true,
"eqnull": true,
"strict": true,
"strict": false,
"devel": true,
"eqeqeq": true,
"forin": false,
216 CHANGELOG.md
@@ -1,23 +1,213 @@
|
||||
# 5.0.0 (unreleased)
|
||||
# 5.1.0 (unreleased)
|
||||
|
||||
### WIP (in develop branch currently as it's unstable or unfinished)
|
||||
- Dashboard folders
|
||||
- User groups
|
||||
- Dashboard permissions (on folder & dashboard level), permissions can be assigned to groups or individual users
|
||||
- UX changes to nav & side menu
|
||||
- New dashboard grid layout system
|
||||
* **Prometheus**: The heatmap panel now supports Prometheus histograms [#10009](https://github.com/grafana/grafana/issues/10009)
|
||||
* **Postgres/MySQL**: Ability to insert 0s or nulls for missing intervals [#9487](https://github.com/grafana/grafana/issues/9487), thanks [@svenklemm](https://github.com/svenklemm)
|
||||
* **Graph**: Thresholds for Right Y axis [#7107](https://github.com/grafana/grafana/issues/7107), thx [@ilgizar](https://github.com/ilgizar)
|
||||
* **Graph**: Support multiple series stacking in histogram mode [#8151](https://github.com/grafana/grafana/issues/8151), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Alerting**: Pausing/unpausing alerts now updates new_state_date [#10942](https://github.com/grafana/grafana/pull/10942)
|
||||
* **Templating**: Add comma templating format [#10632](https://github.com/grafana/grafana/issues/10632), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Prometheus**: Support POST for query and query_range [#9859](https://github.com/grafana/grafana/pull/9859), thx [@mtanda](https://github.com/mtanda)
|
||||
|
||||
# 4.7.0 (unreleased)
|
||||
### Minor
|
||||
* **OpsGenie**: Add triggered alerts as description [#11046](https://github.com/grafana/grafana/pull/11046), thx [@llamashoes](https://github.com/llamashoes)
|
||||
* **Cloudwatch**: Support high resolution metrics [#10925](https://github.com/grafana/grafana/pull/10925), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Cloudwatch**: Add dimension filtering to CloudWatch `dimension_values()` [#10029](https://github.com/grafana/grafana/issues/10029), thx [@willyhutw](https://github.com/willyhutw)
|
||||
* **Units**: Second to HH:mm:ss formatter [#11107](https://github.com/grafana/grafana/issues/11107), thx [@gladdiologist](https://github.com/gladdiologist)
|
||||
* **Singlestat**: Add color to prefix and postfix in singlestat panel [#11143](https://github.com/grafana/grafana/pull/11143), thx [@ApsOps](https://github.com/ApsOps)
|
||||
|
||||
# 5.0.2 (2018-03-14)
|
||||
* **Mysql**: Mysql panic occurring occasionally upon Grafana dashboard access [#11155](https://github.com/grafana/grafana/issues/11155)
|
||||
* **Dashboards**: Should be possible to browse dashboard using only uid [#11231](https://github.com/grafana/grafana/issues/11231)
|
||||
* **Alerting**: Fixes bug where alerts from hidden panels were deleted [#11222](https://github.com/grafana/grafana/issues/11222)
|
||||
* **Import**: Fixes bug where dashboards with alerts couldn't be imported [#11227](https://github.com/grafana/grafana/issues/11227)
|
||||
* **Teams**: Remove quota restrictions from teams [#11220](https://github.com/grafana/grafana/issues/11220)
|
||||
* **Render**: Fixes bug with legacy url redirection for panel rendering [#11180](https://github.com/grafana/grafana/issues/11180)
|
||||
|
||||
# 5.0.1 (2018-03-08)
|
||||
|
||||
* **Postgres**: PostgreSQL error when using ipv6 address as hostname in connection string [#11055](https://github.com/grafana/grafana/issues/11055), thanks [@svenklemm](https://github.com/svenklemm)
|
||||
* **Dashboards**: Changing templated value from dropdown is causing unsaved changes [#11063](https://github.com/grafana/grafana/issues/11063)
|
||||
* **Prometheus**: Fixes bundled Prometheus 2.0 dashboard [#11016](https://github.com/grafana/grafana/issues/11016), thx [@roidelapluie](https://github.com/roidelapluie)
|
||||
* **Sidemenu**: Profile menu "invisible" when gravatar is disabled [#11097](https://github.com/grafana/grafana/issues/11097)
|
||||
* **Dashboard**: Fixes a bug with resizeable handles for panels [#11103](https://github.com/grafana/grafana/issues/11103)
|
||||
* **Alerting**: Telegram inline image mode fails when caption too long [#10975](https://github.com/grafana/grafana/issues/10975)
|
||||
* **Alerting**: Fixes silent failing validation [#11145](https://github.com/grafana/grafana/pull/11145)
|
||||
* **OAuth**: Only use jwt token if it contains an email address [#11127](https://github.com/grafana/grafana/pull/11127)
|
||||
|
||||
# 5.0.0-stable (2018-03-01)
|
||||
|
||||
### Fixes
|
||||
|
||||
- **oauth** Fix Github OAuth not working with private Organizations [#11028](https://github.com/grafana/grafana/pull/11028) [@lostick](https://github.com/lostick)
|
||||
- **kiosk** white area over bottom panels in kiosk mode [#11010](https://github.com/grafana/grafana/issues/11010)
|
||||
- **alerting** Fix OK state doesn't show up in Microsoft Teams [#11032](https://github.com/grafana/grafana/pull/11032), thx [@manacker](https://github.com/manacker)
|
||||
|
||||
# 5.0.0-beta5 (2018-02-26)
|
||||
|
||||
### Fixes
|
||||
|
||||
- **Orgs** Unable to switch org when too many orgs listed [#10774](https://github.com/grafana/grafana/issues/10774)
|
||||
- **Folders** Make it easier/explicit to access/modify folders using the API [#10630](https://github.com/grafana/grafana/issues/10630)
|
||||
- **Dashboard** Scrollbar works incorrectly in Grafana 5.0 Beta4 in some cases [#10982](https://github.com/grafana/grafana/issues/10982)
|
||||
- **ElasticSearch** Custom aggregation sizes no longer allowed for Elasticsearch [#10124](https://github.com/grafana/grafana/issues/10124)
|
||||
- **oauth** Github OAuth with allowed organizations fails to login [#10964](https://github.com/grafana/grafana/issues/10964)
|
||||
- **heatmap** Heatmap panel has partially hidden legend [#10793](https://github.com/grafana/grafana/issues/10793)
|
||||
- **snapshots** Expired snapshots not being cleaned up [#10996](https://github.com/grafana/grafana/pull/10996)
|
||||
|
||||
# 5.0.0-beta4 (2018-02-19)
|
||||
|
||||
### Fixes
|
||||
|
||||
- **Dashboard** Fixed dashboard overwrite permission issue [#10814](https://github.com/grafana/grafana/issues/10814)
|
||||
- **Keyboard shortcuts** Fixed Esc key when in panel edit/view mode [#10945](https://github.com/grafana/grafana/issues/10945)
|
||||
- **Save dashboard** Fixed issue with time range & variable reset after saving [#10946](https://github.com/grafana/grafana/issues/10946)
|
||||
|
||||
# 5.0.0-beta3 (2018-02-16)
|
||||
|
||||
### Fixes
|
||||
|
||||
- **MySQL** Fixed new migration issue with index length [#10931](https://github.com/grafana/grafana/issues/10931)
|
||||
- **Modal** Escape key now closes modals everywhere, fixes [#10887](https://github.com/grafana/grafana/issues/10887)
|
||||
- **Row repeats** Fix for repeating rows issue, fixes [#10932](https://github.com/grafana/grafana/issues/10932)
|
||||
- **Docs** Team API documented, fixes [#10832](https://github.com/grafana/grafana/issues/10832)
|
||||
- **Plugins** Plugin info page broken, fixes [#10943](https://github.com/grafana/grafana/issues/10943)
|
||||
|
||||
# 5.0.0-beta2 (2018-02-15)
|
||||
|
||||
### Fixes
|
||||
|
||||
- **Permissions** Fixed search permissions issues [#10822](https://github.com/grafana/grafana/issues/10822)
|
||||
- **Permissions** Fixed issues displaying permissions lists [#10864](https://github.com/grafana/grafana/issues/10864)
|
||||
- **PNG-Rendering** Fixed problem rendering legend to the right [#10526](https://github.com/grafana/grafana/issues/10526)
|
||||
- **Reset password** Fixed problem with reset password form [#10870](https://github.com/grafana/grafana/issues/10870)
|
||||
- **Light theme** Fixed problem with light theme in safari, [#10869](https://github.com/grafana/grafana/issues/10869)
|
||||
- **Provisioning** Now handles deletes when dashboard json files are removed from disk [#10865](https://github.com/grafana/grafana/issues/10865)
|
||||
- **MySQL** Fixed issue with schema migration on old mysql (index too long) [#10779](https://github.com/grafana/grafana/issues/10779)
|
||||
- **Github OAuth** Fixed fetching github orgs from private github org [#10823](https://github.com/grafana/grafana/issues/10823)
|
||||
- **Embedding** Fixed issues embedding panel [#10787](https://github.com/grafana/grafana/issues/10787)
|
||||
|
||||
# 5.0.0-beta1 (2018-02-05)
|
||||
|
||||
Grafana v5.0 is going to be the biggest and most foundational release Grafana has ever had, coming with a ton of UX improvements, a new dashboard grid engine, dashboard folders, user teams and permissions. Check out this [video preview](https://www.youtube.com/watch?v=Izr0IBgoTZQ) of Grafana v5.
|
||||
|
||||
### New Major Features
|
||||
- **Dashboards** Dashboard folders, [#1611](https://github.com/grafana/grafana/issues/1611)
|
||||
- **Teams** User groups (teams) implemented. Can be used in folder & dashboard permission list.
|
||||
- **Dashboard grid**: Panels are now laid out in a two-dimensional grid (with x, y, w, h). [#9093](https://github.com/grafana/grafana/issues/9093).
|
||||
- **Templating**: Vertical repeat direction for panel repeats.
|
||||
- **UX**: Major update to page header and navigation
|
||||
- **Dashboard settings**: Combine dashboard settings views into one with side menu, [#9750](https://github.com/grafana/grafana/issues/9750)
|
||||
- **Persistent dashboard URLs**: New URLs for dashboards that allow renaming dashboards without breaking links. [#7883](https://github.com/grafana/grafana/issues/7883)
|
||||
|
||||
## Breaking changes
|
||||
|
||||
* **[dashboard.json]** has been replaced with [dashboard provisioning](http://docs.grafana.org/administration/provisioning/).
|
||||
Config files for provisioning datasources have moved from `/conf/datasources` to `/conf/provisioning/datasources` (see the sketch after this list).
|
||||
From `/etc/grafana/datasources` to `/etc/grafana/provisioning/datasources` when installed with deb/rpm packages.
|
||||
|
||||
* **Pagerduty** The notifier now defaults to not auto resolve incidents. More details at [#10222](https://github.com/grafana/grafana/issues/10222)
|
||||
|
||||
* **HTTP API**
|
||||
- `GET /api/alerts`: the dashboardUri property has been renamed to url and is now the full URL (that is, including the app sub url).
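As a rough sketch only (the field names below follow the provisioning documentation linked above and are not part of this commit), a datasource file placed under `/conf/provisioning/datasources` (or `/etc/grafana/provisioning/datasources` for deb/rpm installs) might look like this:

```yaml
# Illustrative provisioned datasource file; names and URLs are placeholders.
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://localhost:9090
    isDefault: true
```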
|
||||
|
||||
## New Dashboard Grid
|
||||
|
||||
The new grid engine is a major upgrade for how you can position and move panels. It enables new layouts and a much easier dashboard building experience. The change is backward compatible. So you can upgrade your current version to 5.0 without breaking dashboards, but you cannot downgrade from 5.0 to previous versions. Grafana will automatically upgrade your dashboards to the new schema and position panels to match your existing layout. There might be minor differences in panel height. If you upgrade to 5.0 and for some reason want to rollback to the previous version you can restore dashboards to previous versions using dashboard history. But that should only be seen as an emergency solution.
|
||||
|
||||
Dashboard panels and rows are positioned using a gridPos object `{x: 0, y: 0, w: 24, h: 5}`. Units are in grid dimensions (24 columns; 1 height unit = 30px). Row and panel objects exist together in a flat array directly on the dashboard root object. Rows are no longer needed for layouts and are mainly there for backward compatibility. Some panel plugins that do not respect their panel height might require an update.
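For illustration only, here is a minimal sketch (not taken from this release) of what a flat panel array with gridPos coordinates can look like; the panel ids, types and titles are invented.

```typescript
// Sketch of the v5 dashboard layout model described above: panels live in one
// flat array on the dashboard root, each carrying a gridPos in 24-column grid
// units (1 height unit = 30px). All concrete values below are illustrative.
interface GridPos {
  x: number; // column offset (0-23)
  y: number; // vertical offset in grid units
  w: number; // width in columns (max 24)
  h: number; // height in grid units (1 unit = 30px)
}

interface PanelSketch {
  id: number;
  type: string;
  title: string;
  gridPos: GridPos;
}

// Two half-width panels on the first row, one full-width panel below them.
const panels: PanelSketch[] = [
  { id: 1, type: "graph", title: "Requests", gridPos: { x: 0, y: 0, w: 12, h: 8 } },
  { id: 2, type: "singlestat", title: "Errors", gridPos: { x: 12, y: 0, w: 12, h: 8 } },
  { id: 3, type: "table", title: "Top endpoints", gridPos: { x: 0, y: 8, w: 24, h: 6 } },
];
```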
|
||||
|
||||
## New Features
|
||||
* **Alerting**: Add support for internal image store [#6922](https://github.com/grafana/grafana/issues/6922), thx [@FunkyM](https://github.com/FunkyM)
|
||||
* **Data Source Proxy**: Add support for whitelisting specified cookies that will be passed through to the data source when proxying data source requests [#5457](https://github.com/grafana/grafana/issues/5457), thanks [@robingustafsson](https://github.com/robingustafsson)
|
||||
* **Postgres/MySQL**: Add `__timeGroup` macro for MySQL [#9596](https://github.com/grafana/grafana/pull/9596), thanks [@svenklemm](https://github.com/svenklemm)
|
||||
* **Text**: Text panels are now edited in the Ace editor. [#9698](https://github.com/grafana/grafana/pull/9698), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Teams**: Add Microsoft Teams notifier [#8523](https://github.com/grafana/grafana/issues/8523), thx [@anthu](https://github.com/anthu)
|
||||
* **Datasources**: It's now possible to configure datasources with config files [#1789](https://github.com/grafana/grafana/issues/1789)
|
||||
* **Graphite**: Query editor updated to support new query by tag features [#9230](https://github.com/grafana/grafana/issues/9230)
|
||||
* **Dashboard history**: New config file option versions_to_keep sets how many versions per dashboard to store, [#9671](https://github.com/grafana/grafana/issues/9671)
|
||||
* **Dashboard as cfg**: Load dashboards from file into Grafana on startup/change [#9654](https://github.com/grafana/grafana/issues/9654) [#5269](https://github.com/grafana/grafana/issues/5269)
|
||||
* **Prometheus**: Grafana can now send alerts to Prometheus Alertmanager while firing [#7481](https://github.com/grafana/grafana/issues/7481), thx [@Thib17](https://github.com/Thib17) and [@mtanda](https://github.com/mtanda)
|
||||
* **Table**: Support multiple table-formatted queries in the table panel [#9170](https://github.com/grafana/grafana/issues/9170), thx [@davkal](https://github.com/davkal)
|
||||
* **Security**: Protect against brute force (frequent) login attempts [#7616](https://github.com/grafana/grafana/issues/7616)
|
||||
|
||||
## Fixes
|
||||
## Minor
|
||||
* **Graph**: Don't hide graph display options (Lines/Points) when draw mode is unchecked [#9770](https://github.com/grafana/grafana/issues/9770), thx [@Jonnymcc](https://github.com/Jonnymcc)
|
||||
* **Prometheus**: Show label name in paren after by/without/on/ignoring/group_left/group_right [#9664](https://github.com/grafana/grafana/pull/9664), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Alert panel**: Adds placeholder text when no alerts are within the time range [#9624](https://github.com/grafana/grafana/issues/9624), thx [@straend](https://github.com/straend)
|
||||
* **Mysql**: Enable MaxOpenCon and MaxIdleCon depending on how the connection string is configured. [#9784](https://github.com/grafana/grafana/issues/9784), thx [@dfredell](https://github.com/dfredell)
|
||||
* **Cloudwatch**: Fixes broken query inspector for cloudwatch [#9661](https://github.com/grafana/grafana/issues/9661), thx [@mtanda](https://github.com/mtanda)
|
||||
* **Dashboard**: Make it possible to start dashboards from search and dashboard list panel [#1871](https://github.com/grafana/grafana/issues/1871)
|
||||
* **Annotations**: Posting annotations now return the id of the annotation [#9798](https://github.com/grafana/grafana/issues/9798)
|
||||
* **Systemd**: Use systemd notification ready flag [#10024](https://github.com/grafana/grafana/issues/10024), thx [@jgrassler](https://github.com/jgrassler)
|
||||
* **Github**: Use organizations_url provided from github to verify user belongs in org. [#10111](https://github.com/grafana/grafana/issues/10111), thx [@adiletmaratov](https://github.com/adiletmaratov)
|
||||
* **Backend**: Fixed bug where Grafana exited before all subroutines were finished [#10131](https://github.com/grafana/grafana/issues/10131)
|
||||
* **Azure**: Adds support for Azure blob storage as external image store [#8955](https://github.com/grafana/grafana/issues/8955), thx [@saada](https://github.com/saada)
|
||||
* **Telegram**: Add support for inline image uploads to telegram notifier plugin [#9967](https://github.com/grafana/grafana/pull/9967), thx [@rburchell](https://github.com/rburchell)
|
||||
|
||||
## Fixes
|
||||
* **Sensu**: Send alert message to sensu output [#9551](https://github.com/grafana/grafana/issues/9551), thx [@cjchand](https://github.com/cjchand)
|
||||
* **Singlestat**: suppress error when result contains no datapoints [#9636](https://github.com/grafana/grafana/issues/9636), thx [@utkarshcmu](https://github.com/utkarshcmu)
|
||||
* **Postgres/MySQL**: Control quoting in SQL-queries when using template variables [#9030](https://github.com/grafana/grafana/issues/9030), thanks [@svenklemm](https://github.com/svenklemm)
|
||||
* **Pagerduty**: Pagerduty doesn't auto-resolve incidents by default anymore. [#10222](https://github.com/grafana/grafana/issues/10222)
|
||||
* **Cloudwatch**: Fix for multi-valued templated queries. [#9903](https://github.com/grafana/grafana/issues/9903)
|
||||
|
||||
# 4.6.0-beta3 (unreleased)
|
||||
## Tech
|
||||
* **RabbitMq**: Remove support for publishing events to RabbitMQ [#9645](https://github.com/grafana/grafana/issues/9645)
|
||||
|
||||
## Deprecation notes
|
||||
|
||||
### HTTP API
|
||||
The following operations have been deprecated and will be removed in a future release:
|
||||
- `GET /api/dashboards/db/:slug` -> Use `GET /api/dashboards/uid/:uid` instead
|
||||
- `DELETE /api/dashboards/db/:slug` -> Use `DELETE /api/dashboards/uid/:uid` instead
|
||||
|
||||
The following properties have been deprecated and will be removed in a future release:
|
||||
- `uri` property in `GET /api/search` -> Use new `url` or `uid` property instead
|
||||
- `meta.slug` property in `GET /api/dashboards/uid/:uid` and `GET /api/dashboards/db/:slug` -> Use new `meta.url` or `dashboard.uid` property instead
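As a hedged illustration of the migration (not part of this release's code), the sketch below contrasts the deprecated slug route with the uid route; the base URL, API key and uid value are placeholders.

```typescript
// Illustrative only: look up a dashboard by uid rather than the deprecated slug route.
// GRAFANA_URL, API_KEY and the uid argument are placeholders, not values from this commit.
const GRAFANA_URL = "http://localhost:3000";
const API_KEY = "<api key>";

async function getDashboardByUid(uid: string): Promise<unknown> {
  // Deprecated: GET /api/dashboards/db/:slug
  // Preferred:  GET /api/dashboards/uid/:uid
  const res = await fetch(`${GRAFANA_URL}/api/dashboards/uid/${uid}`, {
    headers: { Authorization: `Bearer ${API_KEY}` },
  });
  if (!res.ok) {
    throw new Error(`Dashboard lookup failed: ${res.status}`);
  }
  return res.json();
}
```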
|
||||
|
||||
# 4.6.3 (2017-12-14)
|
||||
|
||||
## Fixes
|
||||
* **Gzip**: Fixes bug with gravatar images when gzip was enabled [#5952](https://github.com/grafana/grafana/issues/5952)
|
||||
* **Alert list**: Now shows alert state changes even after adding manual annotations on dashboard [#9951](https://github.com/grafana/grafana/issues/9951)
|
||||
* **Alerting**: Fixes bug where rules evaluated as firing when all conditions were false and the OR operator was used. [#9318](https://github.com/grafana/grafana/issues/9318)
|
||||
* **Cloudwatch**: CloudWatch no longer displays metrics' default alias [#10151](https://github.com/grafana/grafana/issues/10151), thx [@mtanda](https://github.com/mtanda)
|
||||
|
||||
# 4.6.2 (2017-11-16)
|
||||
|
||||
## Important
|
||||
* **Prometheus**: Fixes bug with new prometheus alerts in Grafana. Make sure to download this version if you're using Prometheus for alerting. More details in the issue. [#9777](https://github.com/grafana/grafana/issues/9777)
|
||||
|
||||
## Fixes
|
||||
* **Color picker**: Bug after using textbox input field to change/paste color string [#9769](https://github.com/grafana/grafana/issues/9769)
|
||||
* **Cloudwatch**: Fix for cloudwatch templating query `ec2_instance_attribute` [#9667](https://github.com/grafana/grafana/issues/9667), thanks [@mtanda](https://github.com/mtanda)
|
||||
* **Heatmap**: Fixed tooltip for "time series buckets" mode [#9332](https://github.com/grafana/grafana/issues/9332)
|
||||
* **InfluxDB**: Fixed query editor issue when using `>` or `<` operators in WHERE clause [#9871](https://github.com/grafana/grafana/issues/9871)
|
||||
|
||||
|
||||
# 4.6.1 (2017-11-01)
|
||||
|
||||
* **Singlestat**: Lost thresholds when using save dashboard as [#9681](https://github.com/grafana/grafana/issues/9681)
|
||||
* **Graph**: Fix for series override color picker [#9715](https://github.com/grafana/grafana/issues/9715)
|
||||
* **Go**: build using golang 1.9.2 [#9713](https://github.com/grafana/grafana/issues/9713)
|
||||
* **Plugins**: Fixed problem with loading plugin js files behind auth proxy [#9509](https://github.com/grafana/grafana/issues/9509)
|
||||
* **Graphite**: Annotation tooltip should render empty string when undefined [#9707](https://github.com/grafana/grafana/issues/9707)
|
||||
|
||||
# 4.6.0 (2017-10-26)
|
||||
|
||||
## Fixes
|
||||
* **Alerting**: Viewer can no longer pause alert rules [#9640](https://github.com/grafana/grafana/issues/9640)
|
||||
* **Playlist**: Bug where playlist controls were missing [#9639](https://github.com/grafana/grafana/issues/9639)
|
||||
* **Firefox**: Creating region annotations now works in Firefox [#9638](https://github.com/grafana/grafana/issues/9638)
|
||||
|
||||
# 4.6.0-beta3 (2017-10-23)
|
||||
|
||||
## Fixes
|
||||
* **Prometheus**: Fix for browser crash for short time ranges. [#9575](https://github.com/grafana/grafana/issues/9575)
|
||||
* **Heatmap**: Fix for y-axis not showing. [#9576](https://github.com/grafana/grafana/issues/9576)
|
||||
* **Save to file**: Fix for save to file in export modal. [#9586](https://github.com/grafana/grafana/issues/9586)
|
||||
* **Postgres**: modify group by time macro so it can be used in select clause [#9527](https://github.com/grafana/grafana/pull/9527), thanks [@svenklemm](https://github.com/svenklemm)
|
||||
|
||||
# 4.6.0-beta2 (2017-10-17)
|
||||
|
||||
@ -52,11 +242,11 @@
|
||||
* **OAuth**: Verify TLS during OAuth callback [#9373](https://github.com/grafana/grafana/issues/9373), thx [@mattbostock](https://github.com/mattbostock)
|
||||
|
||||
## Minor
|
||||
* **SMTP**: Make it possible to set specific EHLO for smtp client. [#9319](https://github.com/grafana/grafana/issues/9319)
|
||||
* **Dataproxy**: Allow grafan to renegotiate tls connection [#9250](https://github.com/grafana/grafana/issues/9250)
|
||||
* **SMTP**: Make it possible to set specific HELO for smtp client. [#9319](https://github.com/grafana/grafana/issues/9319)
|
||||
* **Dataproxy**: Allow grafana to renegotiate tls connection [#9250](https://github.com/grafana/grafana/issues/9250)
|
||||
* **HTTP**: set net.Dialer.DualStack to true for all http clients [#9367](https://github.com/grafana/grafana/pull/9367)
|
||||
* **Alerting**: Add diff and percent diff as series reducers [#9386](https://github.com/grafana/grafana/pull/9386), thx [@shanhuhai5739](https://github.com/shanhuhai5739)
|
||||
* **Slack**: Allow images to be uploaded to slack when Token is precent [#7175](https://github.com/grafana/grafana/issues/7175), thx [@xginn8](https://github.com/xginn8)
|
||||
* **Slack**: Allow images to be uploaded to slack when Token is present [#7175](https://github.com/grafana/grafana/issues/7175), thx [@xginn8](https://github.com/xginn8)
|
||||
* **Opsgenie**: Use their latest API instead of old version [#9399](https://github.com/grafana/grafana/pull/9399), thx [@cglrkn](https://github.com/cglrkn)
|
||||
* **Table**: Add support for displaying the timestamp with milliseconds [#9429](https://github.com/grafana/grafana/pull/9429), thx [@s1061123](https://github.com/s1061123)
|
||||
* **Hipchat**: Add metrics, message and image to hipchat notifications [#9110](https://github.com/grafana/grafana/issues/9110), thx [@eloo](https://github.com/eloo)
|
||||
|
636 Gopkg.lock (generated, Normal file)
@@ -0,0 +1,636 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
revision = "767c40d6a2e058483c25fa193e963a22da17236d"
|
||||
version = "v0.18.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/BurntSushi/toml"
|
||||
packages = ["."]
|
||||
revision = "b26d9c308763d68093482582cea63d69be07a0f0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/Unknwon/com"
|
||||
packages = ["."]
|
||||
revision = "7677a1d7c1137cd3dd5ba7a076d0c898a1ef4520"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/apache/thrift"
|
||||
packages = ["lib/go/thrift"]
|
||||
revision = "b2a4d4ae21c789b689dd162deb819665567f481c"
|
||||
version = "0.10.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
"aws/awserr",
|
||||
"aws/awsutil",
|
||||
"aws/client",
|
||||
"aws/client/metadata",
|
||||
"aws/corehandlers",
|
||||
"aws/credentials",
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
"aws/request",
|
||||
"aws/session",
|
||||
"aws/signer/v4",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/ec2query",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
"private/protocol/restxml",
|
||||
"private/protocol/xml/xmlutil",
|
||||
"service/cloudwatch",
|
||||
"service/ec2",
|
||||
"service/ec2/ec2iface",
|
||||
"service/s3",
|
||||
"service/sts"
|
||||
]
|
||||
revision = "decd990ddc5dcdf2f73309cbcab90d06b996ca28"
|
||||
version = "v1.12.67"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/benbjohnson/clock"
|
||||
packages = ["."]
|
||||
revision = "7dc76406b6d3c05b5f71a86293cbcf3c4ea03b19"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/bmizerany/assert"
|
||||
packages = ["."]
|
||||
revision = "b7ed37b82869576c289d7d97fb2bbd8b64a0cb28"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/bradfitz/gomemcache"
|
||||
packages = ["memcache"]
|
||||
revision = "1952afaa557dc08e8e0d89eafab110fb501c1a2b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/codahale/hdrhistogram"
|
||||
packages = ["."]
|
||||
revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/codegangsta/cli"
|
||||
packages = ["."]
|
||||
revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1"
|
||||
version = "v1.20.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/fatih/color"
|
||||
packages = ["."]
|
||||
revision = "570b54cabe6b8eb0bc2dfce68d964677d63b5260"
|
||||
version = "v1.5.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
|
||||
version = "v1.32.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ldap/ldap"
|
||||
packages = ["."]
|
||||
revision = "bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9"
|
||||
version = "v2.5.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-macaron/binding"
|
||||
packages = ["."]
|
||||
revision = "ac54ee249c27dca7e76fad851a4a04b73bd1b183"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-macaron/gzip"
|
||||
packages = ["."]
|
||||
revision = "cad1c6580a07c56f5f6bc52d66002a05985c5854"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-macaron/inject"
|
||||
packages = ["."]
|
||||
revision = "d8a0b8677191f4380287cfebd08e462217bac7ad"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-macaron/session"
|
||||
packages = [
|
||||
".",
|
||||
"memcache",
|
||||
"mysql",
|
||||
"postgres",
|
||||
"redis"
|
||||
]
|
||||
revision = "b8e286a0dba8f4999042d6b258daf51b31d08938"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-sql-driver/mysql"
|
||||
packages = ["."]
|
||||
revision = "2cc627ac8defc45d65066ae98f898166f580f9a4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-stack/stack"
|
||||
packages = ["."]
|
||||
revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc"
|
||||
version = "v1.7.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-xorm/builder"
|
||||
packages = ["."]
|
||||
revision = "488224409dd8aa2ce7a5baf8d10d55764a913738"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-xorm/core"
|
||||
packages = ["."]
|
||||
revision = "e8409d73255791843585964791443dbad877058c"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-xorm/xorm"
|
||||
packages = ["."]
|
||||
revision = "6687a2b4e824f4d87f2d65060ec5cb0d896dff1e"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp"
|
||||
]
|
||||
revision = "c65a0412e71e8b9b3bfd22925720d23c0f054237"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/gopherjs/gopherjs"
|
||||
packages = ["js"]
|
||||
revision = "178c176a91fe05e3e6c58fa5c989bad19e6cdcb3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gorilla/websocket"
|
||||
packages = ["."]
|
||||
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gosimple/slug"
|
||||
packages = ["."]
|
||||
revision = "e9f42fa127660e552d0ad2b589868d403a9be7c6"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/grafana/grafana_plugin_model"
|
||||
packages = ["go/datasource"]
|
||||
revision = "dfe5dc0a6ce05825ba7fe2d0323d92e631bffa89"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-hclog"
|
||||
packages = ["."]
|
||||
revision = "5bcb0f17e36442247290887cc914a6e507afa5c4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/hashicorp/go-plugin"
|
||||
packages = ["."]
|
||||
revision = "3e6d191694b5a3a2b99755f31b47fa209e4bcd09"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-version"
|
||||
packages = ["."]
|
||||
revision = "4fe82ae3040f80a03d04d2cccb5606a626b8e1ee"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/yamux"
|
||||
packages = ["."]
|
||||
revision = "683f49123a33db61abfb241b7ac5e4af4dc54d55"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/inconshreveable/log15"
|
||||
packages = ["."]
|
||||
revision = "0decfc6c20d9ca0ad143b0e89dcaa20f810b4fb3"
|
||||
version = "v2.13"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jtolds/gls"
|
||||
packages = ["."]
|
||||
revision = "77f18212c9c7edc9bd6a33d383a7b545ce62f064"
|
||||
version = "v4.2.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/klauspost/compress"
|
||||
packages = [
|
||||
"flate",
|
||||
"gzip"
|
||||
]
|
||||
revision = "6c8db69c4b49dd4df1fff66996cf556176d0b9bf"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/klauspost/cpuid"
|
||||
packages = ["."]
|
||||
revision = "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da"
|
||||
version = "v1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/klauspost/crc32"
|
||||
packages = ["."]
|
||||
revision = "cb6bfca970f6908083f26f39a79009d608efd5cd"
|
||||
version = "v1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/pretty"
|
||||
packages = ["."]
|
||||
revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/text"
|
||||
packages = ["."]
|
||||
revision = "7cafcd837844e784b526369c9bce262804aebc60"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/lib/pq"
|
||||
packages = [
|
||||
".",
|
||||
"oid"
|
||||
]
|
||||
revision = "61fe37aa2ee24fabcdbe5c4ac1d4ac566f88f345"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-colorable"
|
||||
packages = ["."]
|
||||
revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
|
||||
version = "v0.0.9"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-isatty"
|
||||
packages = ["."]
|
||||
revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-sqlite3"
|
||||
packages = ["."]
|
||||
revision = "6c771bb9887719704b210e87e934f08be014bdb1"
|
||||
version = "v1.6.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/go-testing-interface"
|
||||
packages = ["."]
|
||||
revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
packages = [
|
||||
".",
|
||||
"ext",
|
||||
"log"
|
||||
]
|
||||
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/patrickmn/go-cache"
|
||||
packages = ["."]
|
||||
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
|
||||
version = "v2.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"api",
|
||||
"api/prometheus/v1",
|
||||
"prometheus",
|
||||
"prometheus/promhttp"
|
||||
]
|
||||
revision = "967789050ba94deca04a5e84cce8ad472ce313c1"
|
||||
version = "v0.9.0-pre1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model"
|
||||
]
|
||||
revision = "89604d197083d4781071d3c65855d24ecfb0a563"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfsd",
|
||||
"xfs"
|
||||
]
|
||||
revision = "85fadb6e89903ef7cca6f6a804474cd5ea85b6e1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/rainycape/unidecode"
|
||||
packages = ["."]
|
||||
revision = "cb7f23ec59bec0d61b19c56cd88cee3d0cc1870c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/sergi/go-diff"
|
||||
packages = ["diffmatchpatch"]
|
||||
revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/smartystreets/assertions"
|
||||
packages = [
|
||||
".",
|
||||
"internal/go-render/render",
|
||||
"internal/oglematchers"
|
||||
]
|
||||
revision = "0b37b35ec7434b77e77a4bb29b79677cced992ea"
|
||||
version = "1.8.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/smartystreets/goconvey"
|
||||
packages = [
|
||||
"convey",
|
||||
"convey/gotest",
|
||||
"convey/reporting"
|
||||
]
|
||||
revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857"
|
||||
version = "1.6.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/teris-io/shortid"
|
||||
packages = ["."]
|
||||
revision = "771a37caa5cf0c81f585d7b6df4dfc77e0615b5c"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/uber/jaeger-client-go"
|
||||
packages = [
|
||||
".",
|
||||
"config",
|
||||
"internal/baggage",
|
||||
"internal/baggage/remote",
|
||||
"internal/spanlog",
|
||||
"log",
|
||||
"rpcmetrics",
|
||||
"thrift-gen/agent",
|
||||
"thrift-gen/baggage",
|
||||
"thrift-gen/jaeger",
|
||||
"thrift-gen/sampling",
|
||||
"thrift-gen/zipkincore",
|
||||
"utils"
|
||||
]
|
||||
revision = "3ac96c6e679cb60a74589b0d0aa7c70a906183f7"
|
||||
version = "v2.11.2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/uber/jaeger-lib"
|
||||
packages = ["metrics"]
|
||||
revision = "7f95f4f7e80028096410abddaae2556e4c61b59f"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/yudai/gojsondiff"
|
||||
packages = [
|
||||
".",
|
||||
"formatter"
|
||||
]
|
||||
revision = "7b1b7adf999dab73a6eb02669c3d82dbb27a3dd6"
|
||||
version = "1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/yudai/golcs"
|
||||
packages = ["."]
|
||||
revision = "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["pbkdf2"]
|
||||
revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/timeseries",
|
||||
"lex/httplex",
|
||||
"trace"
|
||||
]
|
||||
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [
|
||||
".",
|
||||
"google",
|
||||
"internal",
|
||||
"jws",
|
||||
"jwt"
|
||||
]
|
||||
revision = "b28fcf2b08a19742b43084fb40ab78ac6c3d8067"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
revision = "af50095a40f9041b3b38960738837185c26e9419"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [
|
||||
".",
|
||||
"cloudsql",
|
||||
"internal",
|
||||
"internal/app_identity",
|
||||
"internal/base",
|
||||
"internal/datastore",
|
||||
"internal/log",
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"urlfetch"
|
||||
]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
revision = "a8101f21cf983e773d0c1133ebc5424792003214"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [
|
||||
".",
|
||||
"balancer",
|
||||
"balancer/base",
|
||||
"balancer/roundrobin",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"encoding",
|
||||
"grpclb/grpc_lb_v1/messages",
|
||||
"grpclog",
|
||||
"health",
|
||||
"health/grpc_health_v1",
|
||||
"internal",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
"naming",
|
||||
"peer",
|
||||
"resolver",
|
||||
"resolver/dns",
|
||||
"resolver/passthrough",
|
||||
"stats",
|
||||
"status",
|
||||
"tap",
|
||||
"transport"
|
||||
]
|
||||
revision = "6b51017f791ae1cfbec89c52efdf444b13b550ef"
|
||||
version = "v1.9.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "v3"
|
||||
name = "gopkg.in/alexcesaro/quotedprintable.v3"
|
||||
packages = ["."]
|
||||
revision = "2caba252f4dc53eaf6b553000885530023f54623"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/asn1-ber.v1"
|
||||
packages = ["."]
|
||||
revision = "379148ca0225df7a432012b8df0355c2a2063ac0"
|
||||
version = "v1.2"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/bufio.v1"
|
||||
packages = ["."]
|
||||
revision = "567b2bfa514e796916c4747494d6ff5132a1dfce"
|
||||
version = "v1"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/gomail.v2"
|
||||
packages = ["."]
|
||||
revision = "81ebce5c23dfd25c6c67194b37d3dd3f338c98b1"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/ini.v1"
|
||||
packages = ["."]
|
||||
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
|
||||
version = "v1.32.0"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/macaron.v1"
|
||||
packages = ["."]
|
||||
revision = "75f2e9b42e99652f0d82b28ccb73648f44615faa"
|
||||
version = "v1.2.4"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/redis.v2"
|
||||
packages = ["."]
|
||||
revision = "e6179049628164864e6e84e973cfb56335748dea"
|
||||
version = "v2.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "4de68f1342ba98a637ec8ca7496aeeae2021bf9e4c7c80db7924e14709151a62"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
199 Gopkg.toml (Normal file)
@@ -0,0 +1,199 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
ignored = [
|
||||
"github.com/grafana/grafana/data/*",
|
||||
"github.com/grafana/grafana/public/*",
|
||||
"github.com/grafana/grafana/node_modules/*"
|
||||
]
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/BurntSushi/toml"
|
||||
version = "0.3.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/Unknwon/com"
|
||||
#version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
version = "1.12.65"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/benbjohnson/clock"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/bmizerany/assert"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/codegangsta/cli"
|
||||
version = "1.20.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
version = "1.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/fatih/color"
|
||||
version = "1.5.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-ldap/ldap"
|
||||
version = "2.5.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/go-macaron/binding"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/go-macaron/gzip"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/go-macaron/session"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-sql-driver/mysql"
|
||||
revision = "2cc627ac8defc45d65066ae98f898166f580f9a4"
|
||||
#version = "1.3.0" //keeping this since we would rather depend on version then commit
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-stack/stack"
|
||||
version = "1.7.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-xorm/core"
|
||||
revision = "e8409d73255791843585964791443dbad877058c"
|
||||
#version = "0.5.7" //keeping this since we would rather depend on version then commit
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/go-xorm/xorm"
|
||||
revision = "6687a2b4e824f4d87f2d65060ec5cb0d896dff1e"
|
||||
#version = "0.6.4" //keeping this since we would rather depend on version then commit
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gorilla/websocket"
|
||||
version = "1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gosimple/slug"
|
||||
version = "1.1.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/grafana/grafana_plugin_model"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-hclog"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-version"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/inconshreveable/log15"
|
||||
version = "2.13.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/lib/pq"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/mattn/go-isatty"
|
||||
version = "0.0.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/mattn/go-sqlite3"
|
||||
version = "1.6.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/opentracing/opentracing-go"
|
||||
version = "1.0.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/patrickmn/go-cache"
|
||||
version = "2.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.9.0-pre1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/smartystreets/goconvey"
|
||||
version = "1.6.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/uber/jaeger-client-go"
|
||||
version = "2.11.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/yudai/gojsondiff"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sync"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/gomail.v2"
|
||||
branch = "v2"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/ini.v1"
|
||||
version = "1.32.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/macaron.v1"
|
||||
version = "1.2.4"
|
||||
|
||||
[[constraint]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
|
||||
[prune]
|
||||
non-go = true
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/teris-io/shortid"
|
@@ -22,9 +22,10 @@ module.exports = function (grunt) {
    }
  }

  config.coverage = grunt.option('coverage');
  config.phjs = grunt.option('phjsToRelease');

  config.pkg.version = grunt.option('pkgVer') || config.pkg.version;

  console.log('Version', config.pkg.version);

  // load plugins
208 LICENSE.md
@@ -1,14 +1,202 @@
|
||||
Copyright 2014-2017 Torkel Ödegaard, Raintank Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you
|
||||
may not use this file except in compliance with the License. You may
|
||||
obtain a copy of the License at
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied. See the License for the specific language governing
|
||||
permissions and limitations under the License.
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

13 Makefile
@@ -11,8 +11,14 @@ deps: deps-js
build-go:
	go run build.go build

build-server:
	go run build.go build-server

build-cli:
	go run build.go build-cli

build-js:
	npm run build
	yarn run build

build: build-go build-js

@@ -20,9 +26,12 @@ test-go:
	go test -v ./pkg/...

test-js:
	npm test
	yarn test

test: test-go test-js

run:
	./bin/grafana-server

protoc:
	protoc -I pkg/tsdb/models pkg/tsdb/models/*.proto --go_out=plugins=grpc:pkg/tsdb/models/.
16 NOTICE.md
@@ -1,16 +1,6 @@

This software is based on Kibana:
========================================
Copyright 2014-2017 Grafana Labs

This software is based on Kibana:
Copyright 2012-2013 Elasticsearch BV

Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You may
obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
57 README.md
@@ -1,4 +1,4 @@
[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana) [](https://goreportcard.com/report/github.com/grafana/grafana)
[Grafana](https://grafana.com) [](https://circleci.com/gh/grafana/grafana) [](https://goreportcard.com/report/github.com/grafana/grafana) [](https://codecov.io/gh/grafana/grafana)
================
[Website](https://grafana.com) |
[Twitter](https://twitter.com/grafana) |
@@ -19,18 +19,18 @@ If you have any problems please read the [troubleshooting guide](http://docs.gra
Be sure to read the [getting started guide](http://docs.grafana.org/guides/gettingstarted/) and the other feature guides.

## Run from master
If you want to build a package yourself, or contribute. Here is a guide for how to do that. You can always find
If you want to build a package yourself, or contribute - Here is a guide for how to do that. You can always find
the latest master builds [here](https://grafana.com/grafana/download)

### Dependencies

- Go 1.9
- Go 1.10
- NodeJS LTS

### Building the backend
```bash
go get github.com/grafana/grafana
cd ~/go/src/github.com/grafana/grafana
cd $GOPATH/src/github.com/grafana/grafana
go run build.go setup
go run build.go build
```
@@ -42,23 +42,17 @@ For this you need nodejs (v.6+).
```bash
npm install -g yarn
yarn install --pure-lockfile
npm run build
```

To rebuild frontend assets (typescript, sass etc) as you change them start the watcher via.

```bash
npm run watch
```

Run tests
Run tests
```bash
npm run test
npm run jest
```

Run tests in watch mode
Run karma tests
```bash
npm run watch-test
npm run karma
```

### Recompile backend on source change
@@ -81,16 +75,45 @@ You only need to add the options you want to override. Config files are applied

In your custom.ini uncomment (remove the leading `;`) sign. And set `app_mode = development`.

### Running tests

#### Frontend
Execute all frontend tests
```bash
npm run test
```

Writing & watching frontend tests (we have two test runners)

- jest for all new tests that do not require browser context (React+more)
  - Start watcher: `npm run jest`
  - Jest will run all test files that end with the name ".jest.ts"
- karma + mocha is used for testing angularjs components. We do want to migrate these test to jest over time (if possible).
  - Start watcher: `npm run karma`
  - Karma+Mocha runs all files that end with the name "_specs.ts".

#### Backend
```bash
# Run Golang tests using sqlite3 as database (default)
go test ./pkg/...

# Run Golang tests using mysql as database - convenient to use /docker/blocks/mysql_tests
GRAFANA_TEST_DB=mysql go test ./pkg/...

# Run Golang tests using postgres as database - convenient to use /docker/blocks/postgres_tests
GRAFANA_TEST_DB=postgres go test ./pkg/...
```

## Contribute

If you have any idea for an improvement or found a bug do not hesitate to open an issue.
If you have any idea for an improvement or found a bug, do not hesitate to open an issue.
And if you have time clone this repo and submit a pull request and help me make Grafana
the kickass metrics & devops dashboard we all dream about!

## Plugin development

Checkout the [Plugin Development Guide](http://docs.grafana.org/plugins/developing/development/) and checkout the [PLUGIN_DEV.md](https://github.com/grafana/grafana/blob/master/PLUGIN_DEV.md) file for changes in Grafana that relate to
plugin development.
Checkout the [Plugin Development Guide](http://docs.grafana.org/plugins/developing/development/) and checkout the [PLUGIN_DEV.md](https://github.com/grafana/grafana/blob/master/PLUGIN_DEV.md) file for changes in Grafana that relate to
plugin development.

## License

43 ROADMAP.md
@@ -1,29 +1,38 @@
# Roadmap (2017-08-29)
# Roadmap (2018-02-22)

This roadmap is a tentative plan for the core development team. Things change constantly as PRs come in and priorities change.
But it will give you an idea of our current vision and plan.

### Short term (1-4 months)
### Short term (1-2 months)

- Release Grafana v4.5 with fixes and minor enhancements
- Release Grafana v5
- User groups
- Dashboard folders
- Dashboard permissions (on folders as well), permissions on groups or users
- New Dashboard layout engine
- New sidemenu & nav UX
- Elasticsearch alerting
- v5.1
- Crossplatform builds & build speed improvements
- Enterprise LDAP
- Provisioning workflow
- First login registration view
- IFQL Initial support

### Long term
### Mid term (2-4 months)

- Backend plugins to support more Auth options, Alerting data sources & notifications
- Universal time series transformations for any data source (meta queries)
- Reporting
- Web socket & live data streams
- Migrate to Angular2 or react
- v5.2
- Azure monitor backend rewrite
- Elasticsearch alerting
- Backend plugins? (alert notifiers, auth)

### Long term (4 - 8 months)

- Alerting improvements (silence, per series tracking, etc)
- Progress on React migration
- Change visualization (panel type) on the fly.
- Multi stat panel (vertical version of singlestat with bars/graph mode with big number etc)
- Repeat panel by query results

### In a distant future far far away

- Meta queries
- Integrated light weight TSDB
- Web socket & live data sources

### Outside contributions
We know this is being worked on right now by contributors (and we hope to merge it when it's ready).

- Clustering for alert engine (load distribution)
@@ -7,7 +7,7 @@ clone_folder: c:\gopath\src\github.com\grafana\grafana
environment:
  nodejs_version: "6"
  GOPATH: c:\gopath
  GOVERSION: 1.9.1
  GOVERSION: 1.9.2

install:
  - rmdir c:\go /s /q
22 build.go
@@ -83,6 +83,10 @@ func main() {
		clean()
		build("grafana-cli", "./pkg/cmd/grafana-cli", []string{})

	case "build-server":
		clean()
		build("grafana-server", "./pkg/cmd/grafana-server", []string{})

	case "build":
		clean()
		for _, binary := range binaries {
@@ -95,9 +99,9 @@ func main() {

	case "package":
		grunt(gruntBuildArg("release")...)
		if runtime.GOOS != "windows" {
			createLinuxPackages()
		}
		if runtime.GOOS != "windows" {
			createLinuxPackages()
		}

	case "pkg-rpm":
		grunt(gruntBuildArg("release")...)
@@ -347,11 +351,11 @@ func ChangeWorkingDir(dir string) {
}

func grunt(params ...string) {
	if runtime.GOOS == "windows" {
		runPrint(`.\node_modules\.bin\grunt`, params...)
	} else {
		runPrint("./node_modules/.bin/grunt", params...)
	}
	if runtime.GOOS == "windows" {
		runPrint(`.\node_modules\.bin\grunt`, params...)
	} else {
		runPrint("./node_modules/.bin/grunt", params...)
	}
}

func gruntBuildArg(task string) []string {
@@ -371,7 +375,7 @@ func gruntBuildArg(task string) []string {
}

func setup() {
	runPrint("go", "get", "-v", "github.com/kardianos/govendor")
	runPrint("go", "get", "-v", "github.com/golang/dep")
	runPrint("go", "install", "-v", "./pkg/cmd/grafana-server")
}

184 circle.yml
@@ -1,57 +1,135 @@
machine:
  node:
    version: 6.9.2
  python:
    version: 2.7.3
  services:
    - docker
  environment:
    GOPATH: "/home/ubuntu/.go_workspace"
    ORG_PATH: "github.com/grafana"
    REPO_PATH: "${ORG_PATH}/grafana"
    GODIST: "go1.9.1.linux-amd64.tar.gz"
  post:
    - mkdir -p ~/download
    - mkdir -p ~/docker
    - test -e download/$GODIST || curl -o download/$GODIST https://storage.googleapis.com/golang/$GODIST
    - sudo rm -rf /usr/local/go
    - sudo tar -C /usr/local -xzf download/$GODIST
version: 2

dependencies:
  cache_directories:
    - "~/docker"
    - "~/download"
  override:
    - rm -rf ${GOPATH}/src/${REPO_PATH}
    - mkdir -p ${GOPATH}/src/${ORG_PATH}
    - cp -r ~/grafana ${GOPATH}/src/${ORG_PATH}
  pre:
    - pip install awscli
    - sudo apt-get update; sudo apt-get install rpm; sudo apt-get install expect
    - ./scripts/build/build_container.sh
jobs:
  test-frontend:
    docker:
      - image: circleci/node:6.11.4
    steps:
      - checkout
      - run:
          name: install yarn
          command: 'sudo npm install -g yarn --quiet'
      - restore_cache:
          key: dependency-cache-{{ checksum "yarn.lock" }}
      # Could we skip this step if the cache has been restored? `[ -d node_modules ] || yarn install ...` should be able to apply to build step as well
      - run:
          name: yarn install
          command: 'yarn install --pure-lockfile --no-progress'
      - save_cache:
          key: dependency-cache-{{ checksum "yarn.lock" }}
          paths:
            - node_modules
      - run:
          name: frontend tests
          command: './scripts/circle-test-frontend.sh'

  test-backend:
    docker:
      - image: circleci/golang:1.10
    working_directory: /go/src/github.com/grafana/grafana
    steps:
      - checkout
      - run:
          name: build backend and run go tests
          command: './scripts/circle-test-backend.sh'

test:
  override:
    - bash scripts/circle-test.sh
  build:
    docker:
      - image: grafana/build-container:v0.1
    working_directory: /go/src/github.com/grafana/grafana
    steps:
      - checkout
      - run:
          name: build and package grafana
          command: './scripts/build/build.sh'
      - run:
          name: sign packages
          command: './scripts/build/sign_packages.sh'
      - run:
          name: sha-sum packages
          command: 'go run build.go sha-dist'
      - run:
          name: Build Grafana.com publisher
          command: 'go build -o scripts/publish scripts/build/publish.go'
      - persist_to_workspace:
          root: .
          paths:
            - dist/grafana*
            - scripts/*.sh
            - scripts/publish

deployment:
  gh_branch:
    branch: master
    commands:
      - ./scripts/build/deploy.sh
      - ./scripts/build/sign_packages.sh
      - go run build.go sha-dist
      - aws s3 sync ./dist s3://$BUCKET_NAME/master
      - ./scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} master
      - ./scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN}
      - go run ./scripts/build/publish.go -apiKey ${GRAFANA_COM_API_KEY}
  gh_tag:
    tag: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
    commands:
      - ./scripts/build/deploy.sh
      - ./scripts/build/sign_packages.sh
      - go run build.go sha-dist
      - aws s3 sync ./dist s3://$BUCKET_NAME/release
      - ./scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} release
      - ./scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN} ${CIRCLE_TAG}
  deploy-master:
    docker:
      - image: circleci/python:2.7-stretch
    steps:
      - attach_workspace:
          at: .
      - run:
          name: install awscli
          command: 'sudo pip install awscli'
      - run:
          name: deploy to s3
          command: 'aws s3 sync ./dist s3://$BUCKET_NAME/master'
      - run:
          name: Trigger Windows build
          command: './scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} master'
      - run:
          name: Trigger Docker build
          command: './scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN}'
      - run:
          name: Publish to Grafana.com
          command: './scripts/publish -apiKey ${GRAFANA_COM_API_KEY}'

  deploy-release:
    docker:
      - image: circleci/python:2.7-stretch
    steps:
      - attach_workspace:
          at: dist
      - run:
          name: install awscli
          command: 'sudo pip install awscli'
      - run:
          name: deploy to s3
          command: 'aws s3 sync ./dist s3://$BUCKET_NAME/release'
      - run:
          name: Trigger Windows build
          command: './scripts/trigger_windows_build.sh ${APPVEYOR_TOKEN} ${CIRCLE_SHA1} release'
      - run:
          name: Trigger Docker build
          command: './scripts/trigger_docker_build.sh ${TRIGGER_GRAFANA_PACKER_CIRCLECI_TOKEN} ${CIRCLE_TAG}'

workflows:
  version: 2
  test-and-build:
    jobs:
      - build:
          filters:
            tags:
              only: /.*/
      - test-frontend:
          filters:
            tags:
              only: /.*/
      - test-backend:
          filters:
            tags:
              only: /.*/
      - deploy-master:
          requires:
            - test-backend
            - test-frontend
            - build
          filters:
            branches:
              only: master
      - deploy-release:
          requires:
            - test-backend
            - test-frontend
            - build
          filters:
            branches:
              ignore: /.*/
            tags:
              only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
13 codecov.yml Normal file
@@ -0,0 +1,13 @@
coverage:
  precision: 2
  round: down
  range: "50...100"

  status:
    project: yes
    patch: yes
    changes: no

comment:
  layout: "diff"
  behavior: "once"
@@ -12,17 +12,17 @@ instance_name = ${HOSTNAME}
#################################### Paths ###############################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#
data = data
#

# Directory where grafana can store logs
#
logs = data/log
#

# Directory where grafana will automatically scan and look for plugins
#
plugins = data/plugins

# folder that contains provisioning config files that grafana will apply on startup and while running.
provisioning = conf/provisioning

#################################### Server ##############################
[server]
# Protocol (http, https, socket)
@@ -82,6 +82,9 @@ max_idle_conn = 2
# Max conn setting default is 0 (mean not set)
max_open_conn =

# Set to true to log the sql calls and execution times.
log_queries =

# For "postgres", use either "disable", "require" or "verify-full"
# For "mysql", use either "true", "false", or "skip-verify".
ssl_mode = disable
@@ -171,6 +174,10 @@ disable_gravatar = false
# data source proxy whitelist (ip_or_domain:port separated by spaces)
data_source_proxy_whitelist =

# disable protection against brute force login attempts
disable_brute_force_login_protection = false

#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
external_enabled = true
@@ -180,10 +187,13 @@ external_snapshot_name = Publish to snapshot.raintank.io
# remove expired snapshot
snapshot_remove_expired = true

# remove snapshots after 90 days
snapshot_TTL_days = 90
#################################### Dashboards ##################

#################################### Users ####################################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
versions_to_keep = 20

#################################### Users ###############################
[users]
# disable user signup / registration
allow_sign_up = false
@@ -211,6 +221,9 @@ external_manage_link_url =
external_manage_link_name =
external_manage_info =

# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
viewers_can_edit = false

[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth
disable_login_form = false
@@ -235,7 +248,7 @@ enabled = false
allow_sign_up = true
client_id = some_id
client_secret = some_secret
scopes = user:email
scopes = user:email,read:org
auth_url = https://github.com/login/oauth/authorize
token_url = https://github.com/login/oauth/access_token
api_url = https://api.github.com/user
@@ -311,7 +324,7 @@ allow_sign_up = true
enabled = false
host = localhost:25
user =
# If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;"""
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
password =
cert_file =
key_file =
@@ -381,18 +394,6 @@ facility =
# Syslog tag. By default, the process' argv[0] is used.
tag =


#################################### AMQP Event Publisher ################
[event_publisher]
enabled = false
rabbitmq_url = amqp://localhost/
exchange = grafana_events

#################################### Dashboard JSON files ################
[dashboards.json]
enabled = false
path = /var/lib/grafana/dashboards

#################################### Usage Quotas ########################
[quota]
enabled = false
@@ -436,7 +437,7 @@ enabled = true
execute_alerts = true

#################################### Internal Grafana Metrics ############
# Metrics available at HTTP API Url /api/metrics
# Metrics available at HTTP API Url /metrics
[metrics]
enabled = true
interval_seconds = 10
@@ -472,7 +473,7 @@ sampler_param = 1

#################################### External Image Storage ##############
[external_image_storage]
# You can choose between (s3, webdav, gcs)
# You can choose between (s3, webdav, gcs, azure_blob, local)
provider =

[external_image_storage.s3]
@@ -492,3 +493,12 @@ public_url =
[external_image_storage.gcs]
key_file =
bucket =
path =

[external_image_storage.azure_blob]
account_name =
account_key =
container_name =

[external_image_storage.local]
# does not require any configuration
@@ -19,7 +19,7 @@ ssl_skip_verify = false
# Search user bind dn
bind_dn = "cn=admin,dc=grafana,dc=org"
# Search user bind password
# If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;"""
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
bind_password = 'grafana'

# User search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)"
10 conf/provisioning/dashboards/sample.yaml Normal file
@@ -0,0 +1,10 @@
# # config file version
apiVersion: 1

#providers:
# - name: 'default'
#   orgId: 1
#   folder: ''
#   type: file
#   options:
#     path: /var/lib/grafana/dashboards
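The provider list in this sample ships fully commented out. Purely as a rough sketch (not part of this commit), uncommenting the keys shown above gives a minimal file-based dashboard provider; every field and value below is taken from the sample itself:

```yaml
# minimal sketch assembled from the commented-out sample above
apiVersion: 1

providers:
  - name: 'default'
    orgId: 1
    folder: ''
    type: file
    options:
      path: /var/lib/grafana/dashboards
```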
51 conf/provisioning/datasources/sample.yaml Normal file
@@ -0,0 +1,51 @@
# # config file version
apiVersion: 1

# # list of datasources that should be deleted from the database
#deleteDatasources:
#  - name: Graphite
#    orgId: 1

# # list of datasources to insert/update depending
# # on what's available in the datbase
#datasources:
#  # <string, required> name of the datasource. Required
#  - name: Graphite
#    # <string, required> datasource type. Required
#    type: graphite
#    # <string, required> access mode. direct or proxy. Required
#    access: proxy
#    # <int> org id. will default to orgId 1 if not specified
#    orgId: 1
#    # <string> url
#    url: http://localhost:8080
#    # <string> database password, if used
#    password:
#    # <string> database user, if used
#    user:
#    # <string> database name, if used
#    database:
#    # <bool> enable/disable basic auth
#    basicAuth:
#    # <string> basic auth username
#    basicAuthUser:
#    # <string> basic auth password
#    basicAuthPassword:
#    # <bool> enable/disable with credentials headers
#    withCredentials:
#    # <bool> mark as default datasource. Max one per org
#    isDefault:
#    # <map> fields that will be converted to json and stored in json_data
#    jsonData:
#       graphiteVersion: "1.1"
#       tlsAuth: true
#       tlsAuthWithCACert: true
#    # <string> json object of data that will be encrypted.
#    secureJsonData:
#      tlsCACert: "..."
#      tlsClientCert: "..."
#      tlsClientKey: "..."
#    version: 1
#    # <bool> allow users to edit datasources from the UI.
#    editable: false
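Likewise, purely as a hypothetical sketch (not part of this commit), uncommenting a subset of the fields above yields a minimal provisioned Graphite datasource. Every key comes from the sample; `isDefault: true` is filled in here only for illustration:

```yaml
# minimal sketch assembled from the commented-out sample above
apiVersion: 1

datasources:
  - name: Graphite
    type: graphite
    access: proxy
    orgId: 1
    url: http://localhost:8080
    isDefault: true
    jsonData:
      graphiteVersion: "1.1"
    editable: false
```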
@@ -12,18 +12,17 @@
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
#
;data = /var/lib/grafana
#

# Directory where grafana can store logs
#
;logs = /var/log/grafana
#

# Directory where grafana will automatically scan and look for plugins
#
;plugins = /var/lib/grafana/plugins

#
# folder that contains provisioning config files that grafana will apply on startup and while running.
; provisioning = conf/provisioning

#################################### Server ####################################
[server]
# Protocol (http, https, socket)
@@ -72,7 +71,7 @@
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;"""
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =

# Use either URL or the previous fields to configure the database
@@ -91,6 +90,8 @@
# Max conn setting default is 0 (mean not set)
;max_open_conn =

# Set to true to log the sql calls and execution times.
log_queries =

#################################### Session ####################################
[session]
@@ -161,6 +162,10 @@
# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =

# disable protection against brute force login attempts
;disable_brute_force_login_protection = false

#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
@@ -170,10 +175,12 @@
# remove expired snapshot
;snapshot_remove_expired = true

# remove snapshots after 90 days
;snapshot_TTL_days = 90
#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20

#################################### Users ####################################
#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true
@@ -198,6 +205,9 @@
;external_manage_link_name =
;external_manage_info =

# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false

[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
@@ -360,17 +370,6 @@
;tag =


#################################### AMQP Event Publisher ##########################
[event_publisher]
;enabled = false
;rabbitmq_url = amqp://localhost/
;exchange = grafana_events

;#################################### Dashboard JSON files ##########################
[dashboards.json]
;enabled = false
;path = /var/lib/grafana/dashboards

#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
@@ -379,7 +378,7 @@
;execute_alerts = true

#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /api/metrics
# Metrics available at HTTP API Url /metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true
@@ -418,7 +417,7 @@
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav, gcs)
# you can choose between (s3, webdav, gcs, azure_blob, local)
;provider =

[external_image_storage.s3]
@@ -437,3 +436,12 @@
[external_image_storage.gcs]
;key_file =
;bucket =
;path =

[external_image_storage.azure_blob]
;account_name =
;account_key =
;container_name =

[external_image_storage.local]
# does not require any configuration
4 docker/blocks/apache_proxy/Dockerfile Normal file
@@ -0,0 +1,4 @@
FROM jmferrer/apache2-reverse-proxy:latest

COPY ports.conf /etc/apache2/sites-enabled
COPY proxy.conf /etc/apache2/sites-enabled
9 docker/blocks/apache_proxy/docker-compose.yaml Normal file
@@ -0,0 +1,9 @@
# This will proxy all requests for http://localhost:10081/grafana/ to
# http://localhost:3000 (Grafana running locally)
#
# Please note that you'll need to change the root_url in the Grafana configuration:
# root_url = %(protocol)s://%(domain)s:/grafana/

apacheproxy:
  build: blocks/apache_proxy
  network_mode: host
1 docker/blocks/apache_proxy/ports.conf Normal file
@@ -0,0 +1 @@
Listen 10081
4 docker/blocks/apache_proxy/proxy.conf Normal file
@@ -0,0 +1,4 @@
<VirtualHost *:10081>
  ProxyPass /grafana/ http://localhost:3000/
  ProxyPassReverse /grafana/ http://localhost:3000/
</VirtualHost>
11 docker/blocks/collectd/docker-compose.yaml Normal file
@@ -0,0 +1,11 @@
collectd:
  build: blocks/collectd
  environment:
    HOST_NAME: myserver
    GRAPHITE_HOST: graphite
    GRAPHITE_PORT: 2003
    GRAPHITE_PREFIX: collectd.
    REPORT_BY_CPU: 'false'
    COLLECT_INTERVAL: 10
  links:
    - graphite
@@ -1,11 +0,0 @@
collectd:
  build: blocks/collectd
  environment:
    HOST_NAME: myserver
    GRAPHITE_HOST: graphite
    GRAPHITE_PORT: 2003
    GRAPHITE_PREFIX: collectd.
    REPORT_BY_CPU: 'false'
    COLLECT_INTERVAL: 10
  links:
    - graphite
15 docker/blocks/elastic/docker-compose.yaml Normal file
@@ -0,0 +1,15 @@
elasticsearch:
  image: elasticsearch:2.4.1
  command: elasticsearch -Des.network.host=0.0.0.0
  ports:
    - "9200:9200"
    - "9300:9300"
  volumes:
    - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml

fake-elastic-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: elasticsearch
    FD_PORT: 9200
@@ -1,8 +0,0 @@
elasticsearch:
  image: elasticsearch:2.4.1
  command: elasticsearch -Des.network.host=0.0.0.0
  ports:
    - "9200:9200"
    - "9300:9300"
  volumes:
    - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
8 docker/blocks/elastic1/docker-compose.yaml Normal file
@@ -0,0 +1,8 @@
elasticsearch1:
  image: elasticsearch:1.7.6
  command: elasticsearch -Des.network.host=0.0.0.0
  ports:
    - "11200:9200"
    - "11300:9300"
  volumes:
    - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
@@ -1,8 +0,0 @@
elasticsearch1:
  image: elasticsearch:1.7.6
  command: elasticsearch -Des.network.host=0.0.0.0
  ports:
    - "11200:9200"
    - "11300:9300"
  volumes:
    - ./blocks/elastic/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
15 docker/blocks/elastic5/docker-compose.yaml Normal file
@@ -0,0 +1,15 @@
# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine

elasticsearch5:
  image: elasticsearch:5
  command: elasticsearch
  ports:
    - "10200:9200"
    - "10300:9300"

fake-elastic5-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: elasticsearch
    FD_PORT: 10200
@@ -1,8 +0,0 @@
# You need to run 'sysctl -w vm.max_map_count=262144' on the host machine

elasticsearch5:
  image: elasticsearch:5
  command: elasticsearch
  ports:
    - "10200:9200"
    - "10300:9300"
16 docker/blocks/graphite/docker-compose.yaml Normal file
@@ -0,0 +1,16 @@
graphite09:
  build: blocks/graphite
  ports:
    - "8080:80"
    - "2003:2003"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro

fake-graphite-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003

@@ -1,16 +0,0 @@
graphite:
  build: blocks/graphite
  ports:
    - "8080:80"
    - "2003:2003"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro

fake-graphite-data:
  image: grafana/fake-data-gen
  net: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003

@@ -1,9 +1,10 @@
FROM phusion/baseimage:0.9.22
MAINTAINER Denys Zhdanov <denis.zhdanov@gmail.com>


RUN apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y --force-yes install vim \
  && apt-get -y install vim \
  nginx \
  python-dev \
  python-flup \
@@ -22,38 +23,67 @@ RUN apt-get -y update \
  nodejs \
  && rm -rf /var/lib/apt/lists/*

# choose a timezone at build-time
# use `--build-arg CONTAINER_TIMEZONE=Europe/Brussels` in `docker build`
ARG CONTAINER_TIMEZONE
ENV DEBIAN_FRONTEND noninteractive

RUN if [ ! -z "${CONTAINER_TIMEZONE}" ]; \
  then ln -sf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && \
  dpkg-reconfigure -f noninteractive tzdata; \
  fi

# fix python dependencies (LTS Django and newer memcached/txAMQP)
RUN pip install django==1.8.18 \
RUN pip install --upgrade pip && \
  pip install django==1.8.18 \
  python-memcached==1.53 \
  txAMQP==0.6.2 \
  && pip install --upgrade pip
  txAMQP==0.6.2

ARG version=1.0.2
ARG whisper_version=${version}
ARG carbon_version=${version}
ARG graphite_version=${version}

RUN echo "Building Version: $version"

ARG whisper_repo=https://github.com/graphite-project/whisper.git
ARG carbon_repo=https://github.com/graphite-project/carbon.git
ARG graphite_repo=https://github.com/graphite-project/graphite-web.git

ARG statsd_version=v0.8.0

ARG statsd_repo=https://github.com/etsy/statsd.git

# install whisper
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/whisper.git /usr/local/src/whisper
RUN git clone -b ${whisper_version} --depth 1 ${whisper_repo} /usr/local/src/whisper
WORKDIR /usr/local/src/whisper
RUN python ./setup.py install

# install carbon
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/carbon.git /usr/local/src/carbon
RUN git clone -b ${carbon_version} --depth 1 ${carbon_repo} /usr/local/src/carbon
WORKDIR /usr/local/src/carbon
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install graphite
RUN git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
RUN git clone -b ${graphite_version} --depth 1 ${graphite_repo} /usr/local/src/graphite-web
WORKDIR /usr/local/src/graphite-web
RUN pip install -r requirements.txt \
  && python ./setup.py install

# install statsd
RUN git clone -b ${statsd_version} ${statsd_repo} /opt/statsd

# config graphite
ADD conf/opt/graphite/conf/*.conf /opt/graphite/conf/
ADD conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py
ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
# ADD conf/opt/graphite/webapp/graphite/app_settings.py /opt/graphite/webapp/graphite/app_settings.py
WORKDIR /opt/graphite/webapp
RUN mkdir -p /var/log/graphite/ \
  && PYTHONPATH=/opt/graphite/webapp django-admin.py collectstatic --noinput --settings=graphite.settings

# install statsd
RUN git clone -b v0.7.2 https://github.com/etsy/statsd.git /opt/statsd
ADD conf/opt/statsd/config.js /opt/statsd/config.js
# config statsd
ADD conf/opt/statsd/config.js /opt/statsd/

# config nginx
RUN rm /etc/nginx/sites-enabled/default
@@ -63,8 +93,7 @@ ADD conf/etc/nginx/sites-enabled/graphite-statsd.conf /etc/nginx/sites-enabled/g
# init django admin
ADD conf/usr/local/bin/django_admin_init.exp /usr/local/bin/django_admin_init.exp
ADD conf/usr/local/bin/manage.sh /usr/local/bin/manage.sh
RUN chmod +x /usr/local/bin/manage.sh \
  && /usr/local/bin/django_admin_init.exp
RUN chmod +x /usr/local/bin/manage.sh && /usr/local/bin/django_admin_init.exp

# logging support
RUN mkdir -p /var/log/carbon /var/log/graphite /var/log/nginx
@@ -86,8 +115,10 @@ RUN apt-get clean\
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# defaults
EXPOSE 80 2003-2004 2023-2024 8125/udp 8126
EXPOSE 80 2003-2004 2023-2024 8125 8125/udp 8126
VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/etc/nginx", "/opt/statsd", "/etc/logrotate.d", "/var/log"]
WORKDIR /
ENV HOME /root
ENV STATSD_INTERFACE udp

CMD ["/sbin/my_init"]

1161 docker/blocks/graphite1/big-dashboard.json Normal file
File diff suppressed because it is too large
@@ -12,7 +12,7 @@ graphite_conf_dir_contents=$(find /opt/graphite/conf -mindepth 1 -print -quit)
graphite_webapp_dir_contents=$(find /opt/graphite/webapp/graphite -mindepth 1 -print -quit)
graphite_storage_dir_contents=$(find /opt/graphite/storage -mindepth 1 -print -quit)
if [[ -z $graphite_dir_contents ]]; then
  git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  # git clone -b 1.0.2 --depth 1 https://github.com/graphite-project/graphite-web.git /usr/local/src/graphite-web
  cd /usr/local/src/graphite-web && python ./setup.py install
fi
if [[ -z $graphite_storage_dir_contents ]]; then
@ -8,18 +8,18 @@
|
||||
# Defaults to ../
|
||||
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives).
|
||||
# Defaults to $GRAPHITE_ROOT/conf/
|
||||
# GRAPHITE_STORAGE_DIR - Storage directory for whipser/rrd/log/pid files.
|
||||
# GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files.
|
||||
# Defaults to $GRAPHITE_ROOT/storage/
|
||||
#
|
||||
# To change other directory paths, add settings to this file. The following
|
||||
# configuration variables are available with these default values:
|
||||
#
|
||||
# STORAGE_DIR = $GRAPHITE_STORAGE_DIR
|
||||
# LOCAL_DATA_DIR = STORAGE_DIR/whisper/
|
||||
# WHITELISTS_DIR = STORAGE_DIR/lists/
|
||||
# CONF_DIR = STORAGE_DIR/conf/
|
||||
# LOG_DIR = STORAGE_DIR/log/
|
||||
# PID_DIR = STORAGE_DIR/
|
||||
# LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/
|
||||
# WHITELISTS_DIR = %(STORAGE_DIR)s/lists/
|
||||
# CONF_DIR = %(STORAGE_DIR)s/conf/
|
||||
# LOG_DIR = %(STORAGE_DIR)s/log/
|
||||
# PID_DIR = %(STORAGE_DIR)s/
|
||||
#
|
||||
# For FHS style directory structures, use:
|
||||
#
|
||||
@ -30,20 +30,30 @@
|
||||
#
|
||||
#LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
|
||||
|
||||
# Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate
|
||||
# Specify the database library used to store metric data on disk. Each database
|
||||
# may have configurable options to change the behaviour of how it writes to
|
||||
# persistent storage.
|
||||
#
|
||||
# whisper - Fixed-size database, similar in design and purpose to RRD. This is
|
||||
# the default storage backend for carbon and the most rigorously tested.
|
||||
#
|
||||
# ceres - Experimental alternative database that supports storing data in sparse
|
||||
# files of arbitrary fixed-size resolutions.
|
||||
DATABASE = whisper
|
||||
|
||||
# Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no
|
||||
# longer exists (i.e. it is removed or renamed)
|
||||
ENABLE_LOGROTATION = True
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon runs as the user that invokes it
|
||||
# If this is blank carbon-cache runs as the user that invokes it
|
||||
# This user must have write access to the local data directory
|
||||
USER =
|
||||
#
|
||||
# NOTE: The above settings must be set under [relay] and [aggregator]
|
||||
# to take effect for those daemons as well
|
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound.
|
||||
# Sorts and serving cache queries gets more expensive as the cache grows.
|
||||
# Use the value "inf" (infinity) for an unlimited cache size.
|
||||
# value should be an integer number of metric datapoints.
|
||||
MAX_CACHE_SIZE = inf
|
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively
|
||||
@ -60,14 +70,30 @@ MAX_UPDATES_PER_SECOND = 500
|
||||
# MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000
|
||||
|
||||
# Softly limits the number of whisper files that get created each minute.
|
||||
# Setting this value low (like at 50) is a good way to ensure your graphite
|
||||
# Setting this value low (e.g. 50) is a good way to ensure that your carbon
|
||||
# system will not be adversely impacted when a bunch of new metrics are
|
||||
# sent to it. The trade off is that it will take much longer for those metrics'
|
||||
# database files to all get created and thus longer until the data becomes usable.
|
||||
# Setting this value high (like "inf" for infinity) will cause graphite to create
|
||||
# the files quickly but at the risk of slowing I/O down considerably for a while.
|
||||
# sent to it. The trade off is that any metrics received in excess of this
|
||||
# value will be silently dropped, and the whisper file will not be created
|
||||
# until such point as a subsequent metric is received and fits within the
|
||||
# defined rate limit. Setting this value high (like "inf" for infinity) will
|
||||
# cause carbon to create the files quickly but at the risk of increased I/O.
|
||||
MAX_CREATES_PER_MINUTE = 50
|
||||
|
||||
# Set the minimum timestamp resolution supported by this instance. This allows
|
||||
# internal optimisations by overwriting points with equal truncated timestamps
|
||||
# in order to limit the number of updates to the database. It defaults to one
|
||||
# second.
|
||||
MIN_TIMESTAMP_RESOLUTION = 1
|
||||
|
||||
# Set the minimum lag in seconds for a point to be written to the database
|
||||
# in order to optimize batching. This means that each point will wait at least
|
||||
# the duration of this lag before being written. Setting this to 0 disable the feature.
|
||||
# This currently only works when using the timesorted write strategy.
|
||||
# MIN_TIMESTAMP_LAG = 0
|
||||
|
||||
# Set the interface and port for the line (plain text) listener. Setting the
|
||||
# interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to
|
||||
# disable this listener if it is not required.
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
@ -78,11 +104,23 @@ ENABLE_UDP_LISTENER = False
|
||||
UDP_RECEIVER_INTERFACE = 0.0.0.0
|
||||
UDP_RECEIVER_PORT = 2003
|
||||
|
||||
# Set the interface and port for the pickle listener. Setting the interface to
|
||||
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
|
||||
# listener if it is not required.
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
# Set to false to disable logging of successful connections
|
||||
LOG_LISTENER_CONNECTIONS = True
|
||||
# Set the interface and port for the protobuf listener. Setting the interface to
|
||||
# 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this
|
||||
# listener if it is not required.
|
||||
# PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0
|
||||
# PROTOBUF_RECEIVER_PORT = 2005
|
||||
|
||||
# Limit the number of open connections the receiver can handle as any time.
|
||||
# Default is no limit. Setting up a limit for sites handling high volume
|
||||
# traffic may be recommended to avoid running out of TCP memory or having
|
||||
# thousands of TCP connections reduce the throughput of the service.
|
||||
#MAX_RECEIVER_CONNECTIONS = inf
|
||||
|
||||
# Per security concerns outlined in Bug #817247 the pickle receiver
|
||||
# will use a more secure and slightly less efficient unpickler.
|
||||
@ -98,13 +136,19 @@ CACHE_QUERY_PORT = 7002
|
||||
# data until the cache size falls below 95% MAX_CACHE_SIZE.
|
||||
USE_FLOW_CONTROL = True
|
||||
|
||||
# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and
|
||||
# degrade performance if logging on the same volume as the whisper data is stored.
|
||||
LOG_UPDATES = False
|
||||
LOG_CACHE_HITS = False
|
||||
LOG_CACHE_QUEUE_SORTS = True
|
||||
# If enabled this setting is used to timeout metric client connection if no
|
||||
# metrics have been sent in specified time in seconds
|
||||
#METRIC_CLIENT_IDLE_TIMEOUT = None
|
||||
|
||||
# The thread that writes metrics to disk can use on of the following strategies
|
||||
# By default, carbon-cache will log every whisper update and cache hit.
|
||||
# This can be excessive and degrade performance if logging on the same
|
||||
# volume as the whisper data is stored.
|
||||
LOG_UPDATES = False
|
||||
LOG_CREATES = False
|
||||
LOG_CACHE_HITS = False
|
||||
LOG_CACHE_QUEUE_SORTS = False
|
||||
|
||||
# The thread that writes metrics to disk can use one of the following strategies
|
||||
# determining the order in which metrics are removed from cache and flushed to
|
||||
# disk. The default option preserves the same behavior as has been historically
|
||||
# available in version 0.9.10.
|
||||
@ -114,6 +158,12 @@ LOG_CACHE_QUEUE_SORTS = True
|
||||
# moment of the list's creation. Metrics will then be flushed from the cache to
|
||||
# disk in that order.
|
||||
#
|
||||
# timesorted - All metrics in the list will be looked at and sorted according
|
||||
# to the timestamp of there datapoints. The metric that were the least recently
|
||||
# written will be written first. This is an hybrid strategy between max and
|
||||
# sorted which is particularly adapted to sets of metrics with non-uniform
|
||||
# resolutions.
|
||||
#
|
||||
# max - The writer thread will always pop and flush the metric from cache
|
||||
# that has the most datapoints. This will give a strong flush preference to
|
||||
# frequently updated metrics and will also reduce random file-io. Infrequently
|
||||
@ -152,12 +202,61 @@ WHISPER_FALLOCATE_CREATE = True
|
||||
|
||||
# Enabling this option will cause Whisper to lock each Whisper file it writes
|
||||
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
|
||||
# multiple carbon-cache daemons are writing to the same files
|
||||
# multiple carbon-cache daemons are writing to the same files.
# WHISPER_LOCK_WRITES = False

# On systems which has a large number of metrics, an amount of Whisper write(2)'s
# pageback sometimes cause disk thrashing due to memory shortage, so that abnormal
# disk reads occur. Enabling this option makes it possible to decrease useless
# page cache memory by posix_fadvise(2) with POSIX_FADVISE_RANDOM option.
# WHISPER_FADVISE_RANDOM = False

# By default all nodes stored in Ceres are cached in memory to improve the
# throughput of reads and writes to underlying slices. Turning this off will
# greatly reduce memory consumption for databases with millions of metrics, at
# the cost of a steep increase in disk i/o, approximately an extra two os.stat
# calls for every read and write. Reasons to do this are if the underlying
# storage can handle stat() with practically zero cost (SSD, NVMe, zRAM).
# Valid values are:
#   all - all nodes are cached
#   none - node caching is disabled
# CERES_NODE_CACHING_BEHAVIOR = all

# Ceres nodes can have many slices and caching the right ones can improve
# performance dramatically. Note that there are many trade-offs to tinkering
# with this, and unless you are a ceres developer you *really* should not
# mess with this. Valid values are:
#   latest - only the most recent slice is cached
#   all - all slices are cached
#   none - slice caching is disabled
# CERES_SLICE_CACHING_BEHAVIOR = latest

# If a Ceres node accumulates too many slices, performance can suffer.
# This can be caused by intermittently reported data. To mitigate
# slice fragmentation there is a tolerance for how much space can be
# wasted within a slice file to avoid creating a new one. That tolerance
# level is determined by MAX_SLICE_GAP, which is the number of consecutive
# null datapoints allowed in a slice file.
# If you set this very low, you will waste less of the *tiny* bit disk space
# that this feature wastes, and you will be prone to performance problems
# caused by slice fragmentation, which can be pretty severe.
# If you set this really high, you will waste a bit more disk space (each
# null datapoint wastes 8 bytes, but keep in mind your filesystem's block
# size). If you suffer slice fragmentation issues, you should increase this or
# run the ceres-maintenance defrag plugin more often. However you should not
# set it to be huge because then if a large but allowed gap occurs it has to
# get filled in, which means instead of a simple 8-byte write to a new file we
# could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice.
# CERES_MAX_SLICE_GAP = 80

# Enabling this option will cause Ceres to lock each Ceres file it writes to
# to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when
# multiple carbon-cache daemons are writing to the same files.
# CERES_LOCK_WRITES = False

# Set this to True to enable whitelisting and blacklisting of metrics in
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
# empty, all metrics will pass through
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
# missing or empty, all metrics will pass through
# USE_WHITELIST = False

# By default, carbon itself will log statistics (such as a count,
@@ -203,16 +302,25 @@ WHISPER_FALLOCATE_CREATE = True
# Example: store everything
# BIND_PATTERNS = #

# URL of graphite-web instance, this is used to add incoming series to the tag database
GRAPHITE_URL = http://127.0.0.1:80

# Tag update interval, this specifies how frequently updates to existing series will trigger
# an update to the tag index, the default setting is once every 100 updates
# TAG_UPDATE_INTERVAL = 100

# To configure special settings for the carbon-cache instance 'b', uncomment this:
#[cache:b]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from [carbon] section.
# from the [cache] section.
# You can then specify the --instance=b option to manage this instance


#
# In order to turn off logging of successful connections for the line
# receiver, set this to False
# LOG_LISTENER_CONN_SUCCESS = True

[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
@ -220,9 +328,6 @@ LINE_RECEIVER_PORT = 2013
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2014
|
||||
|
||||
# Set to false to disable logging of successful connections
|
||||
LOG_LISTENER_CONNECTIONS = True
|
||||
|
||||
# Carbon-relay has several options for metric routing controlled by RELAY_METHOD
|
||||
#
|
||||
# Use relay-rules.conf to route metrics to destinations based on pattern rules
|
||||
@ -237,12 +342,24 @@ LOG_LISTENER_CONNECTIONS = True
|
||||
# instance.
|
||||
# Enable this for carbon-relays that send to a group of carbon-aggregators
|
||||
#RELAY_METHOD = aggregated-consistent-hashing
|
||||
#
|
||||
# You can also use fast-hashing and fast-aggregated-hashing which are in O(1)
|
||||
# and will always redirect the metrics to the same destination but do not try
|
||||
# to minimize rebalancing when the list of destinations is changing.
|
||||
RELAY_METHOD = rules
|
||||
|
||||
# If you use consistent-hashing you can add redundancy by replicating every
|
||||
# datapoint to more than one machine.
|
||||
REPLICATION_FACTOR = 1
|
||||
|
||||
# For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas
|
||||
# across distributed hosts. With this setting disabled, it's possible that replicas
|
||||
# may be sent to different caches on the same host. This has been the default
|
||||
# behavior since introduction of 'consistent-hashing' relay method.
|
||||
# Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing
|
||||
# your metrics across the cluster nodes using a tool like Carbonate.
|
||||
#DIVERSE_REPLICAS = True
|
||||
|
||||
# This is a list of carbon daemons we will send any relayed or
|
||||
# generated metrics to. The default provided would send to a single
|
||||
# carbon-cache instance on the default port. However if you
|
||||
@ -261,20 +378,71 @@ REPLICATION_FACTOR = 1
|
||||
# must be defined in this list
|
||||
DESTINATIONS = 127.0.0.1:2004
|
||||
|
||||
# This defines the maximum "message size" between carbon daemons.
|
||||
# You shouldn't need to tune this unless you really know what you're doing.
|
||||
MAX_DATAPOINTS_PER_MESSAGE = 500
|
||||
# This define the protocol to use to contact the destination. It can be
|
||||
# set to one of "line", "pickle", "udp" and "protobuf". This list can be
|
||||
# extended with CarbonClientFactory plugins and defaults to "pickle".
|
||||
# DESTINATION_PROTOCOL = pickle
|
||||
|
||||
# When using consistent hashing it sometime makes sense to make
|
||||
# the ring dynamic when you don't want to loose points when a
|
||||
# single destination is down. Replication is an answer to that
|
||||
# but it can be quite expensive.
|
||||
# DYNAMIC_ROUTER = False
|
||||
|
||||
# Controls the number of connection attempts before marking a
|
||||
# destination as down. We usually do one connection attempt per
|
||||
# second.
|
||||
# DYNAMIC_ROUTER_MAX_RETRIES = 5
|
||||
|
||||
# This is the maximum number of datapoints that can be queued up
|
||||
# for a single destination. Once this limit is hit, we will
|
||||
# stop accepting new data if USE_FLOW_CONTROL is True, otherwise
|
||||
# we will drop any subsequently received datapoints.
|
||||
MAX_QUEUE_SIZE = 10000
|
||||
|
||||
# This defines the maximum "message size" between carbon daemons. If
|
||||
# your queue is large, setting this to a lower number will cause the
|
||||
# relay to forward smaller discrete chunks of stats, which may prevent
|
||||
# overloading on the receiving side after a disconnect.
|
||||
MAX_DATAPOINTS_PER_MESSAGE = 500
|
||||
|
||||
# Limit the number of open connections the receiver can handle as any time.
|
||||
# Default is no limit. Setting up a limit for sites handling high volume
|
||||
# traffic may be recommended to avoid running out of TCP memory or having
|
||||
# thousands of TCP connections reduce the throughput of the service.
|
||||
#MAX_RECEIVER_CONNECTIONS = inf
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon-relay runs as the user that invokes it
|
||||
# USER =
|
||||
|
||||
# This is the percentage that the queue must be empty before it will accept
|
||||
# more messages. For a larger site, if the queue is very large it makes sense
|
||||
# to tune this to allow for incoming stats. So if you have an average
|
||||
# flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense
|
||||
# to allow stats to start flowing when you've cleared the queue to 95% since
|
||||
# you should have space to accommodate the next minute's worth of stats
|
||||
# even before the relay incrementally clears more of the queue
|
||||
QUEUE_LOW_WATERMARK_PCT = 0.8
|
||||
|
||||
# To allow for batch efficiency from the pickle protocol and to benefit from
|
||||
# other batching advantages, all writes are deferred by putting them into a queue,
|
||||
# and then the queue is flushed and sent a small fraction of a second later.
|
||||
TIME_TO_DEFER_SENDING = 0.0001
|
||||
|
||||
# Set this to False to drop datapoints when any send queue (sending datapoints
|
||||
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
|
||||
# default) then sockets over which metrics are received will temporarily stop accepting
|
||||
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
|
||||
# data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE.
|
||||
USE_FLOW_CONTROL = True
|
||||
|
||||
# If enabled this setting is used to timeout metric client connection if no
|
||||
# metrics have been sent in specified time in seconds
|
||||
#METRIC_CLIENT_IDLE_TIMEOUT = None
|
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in
|
||||
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
|
||||
# empty, all metrics will pass through
|
||||
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
|
||||
# missing or empty, all metrics will pass through
|
||||
# USE_WHITELIST = False
|
||||
|
||||
# By default, carbon itself will log statistics (such as a count,
|
||||
@ -282,7 +450,40 @@ USE_FLOW_CONTROL = True
|
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
|
||||
# CARBON_METRIC_PREFIX = carbon
|
||||
# CARBON_METRIC_INTERVAL = 60
|
||||
#
|
||||
# In order to turn off logging of successful connections for the line
|
||||
# receiver, set this to False
|
||||
# LOG_LISTENER_CONN_SUCCESS = True
|
||||
|
||||
# If you're connecting from the relay to a destination that's over the
|
||||
# internet or similarly iffy connection, a backlog can develop because
|
||||
# of internet weather conditions, e.g. acks getting lost or similar issues.
|
||||
# To deal with that, you can enable USE_RATIO_RESET which will let you
|
||||
# re-set the connection to an individual destination. Defaults to being off.
|
||||
USE_RATIO_RESET=False
|
||||
|
||||
# When there is a small number of stats flowing, it's not desirable to
|
||||
# perform any actions based on percentages - it's just too "twitchy".
|
||||
MIN_RESET_STAT_FLOW=1000
|
||||
|
||||
# When the ratio of stats being sent in a reporting interval is far
|
||||
# enough from 1.0, we will disconnect the socket and reconnecto to
|
||||
# clear out queued stats. The default ratio of 0.9 indicates that 10%
|
||||
# of stats aren't being delivered within one CARBON_METRIC_INTERVAL
|
||||
# (default of 60 seconds), which can lead to a queue backup. Under
|
||||
# some circumstances re-setting the connection can fix this, so
|
||||
# set this according to your tolerance, and look in the logs for
|
||||
# "resetConnectionForQualityReasons" to observe whether this is kicking
|
||||
# in when your sent queue is building up.
|
||||
MIN_RESET_RATIO=0.9
|
||||
|
||||
# The minimum time between resets. When a connection is re-set, we
|
||||
# need to wait before another reset is performed.
|
||||
# (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed
|
||||
# before stats for the new connection will be available. Setting this
|
||||
# below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of
|
||||
# reset connections for no good reason.
|
||||
MIN_RESET_INTERVAL=121
|
||||
|
||||
[aggregator]
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
@ -291,14 +492,17 @@ LINE_RECEIVER_PORT = 2023
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2024
|
||||
|
||||
# Set to false to disable logging of successful connections
|
||||
LOG_LISTENER_CONNECTIONS = True
|
||||
|
||||
# If set true, metric received will be forwarded to DESTINATIONS in addition to
|
||||
# the output of the aggregation rules. If set false the carbon-aggregator will
|
||||
# only ever send the output of aggregation.
|
||||
FORWARD_ALL = True
|
||||
|
||||
# Filenames of the configuration files to use for this instance of aggregator.
|
||||
# Filenames are relative to CONF_DIR.
|
||||
#
|
||||
# AGGREGATION_RULES = aggregation-rules.conf
|
||||
# REWRITE_RULES = rewrite-rules.conf
|
||||
|
||||
# This is a list of carbon daemons we will send any relayed or
|
||||
# generated metrics to. The default provided would send to a single
|
||||
# carbon-cache instance on the default port. However if you
|
||||
@ -330,6 +534,10 @@ MAX_QUEUE_SIZE = 10000
|
||||
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
|
||||
USE_FLOW_CONTROL = True
|
||||
|
||||
# If enabled this setting is used to timeout metric client connection if no
|
||||
# metrics have been sent in specified time in seconds
|
||||
#METRIC_CLIENT_IDLE_TIMEOUT = None
|
||||
|
||||
# This defines the maximum "message size" between carbon daemons.
|
||||
# You shouldn't need to tune this unless you really know what you're doing.
|
||||
MAX_DATAPOINTS_PER_MESSAGE = 500
|
||||
@ -339,6 +547,12 @@ MAX_DATAPOINTS_PER_MESSAGE = 500
|
||||
# the past MAX_AGGREGATION_INTERVALS * intervalSize seconds.
|
||||
MAX_AGGREGATION_INTERVALS = 5
|
||||
|
||||
# Limit the number of open connections the receiver can handle as any time.
|
||||
# Default is no limit. Setting up a limit for sites handling high volume
|
||||
# traffic may be recommended to avoid running out of TCP memory or having
|
||||
# thousands of TCP connections reduce the throughput of the service.
|
||||
#MAX_RECEIVER_CONNECTIONS = inf
|
||||
|
||||
# By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back
|
||||
# aggregated data points once every rule.frequency seconds, on a per-rule basis.
|
||||
# Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points
|
||||
@ -348,8 +562,8 @@ MAX_AGGREGATION_INTERVALS = 5
|
||||
# WRITE_BACK_FREQUENCY = 0
|
||||
|
||||
# Set this to True to enable whitelisting and blacklisting of metrics in
|
||||
# CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or
|
||||
# empty, all metrics will pass through
|
||||
# CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is
|
||||
# missing or empty, all metrics will pass through
|
||||
# USE_WHITELIST = False
|
||||
|
||||
# By default, carbon itself will log statistics (such as a count,
|
||||
@ -357,3 +571,24 @@ MAX_AGGREGATION_INTERVALS = 5
|
||||
# seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation
|
||||
# CARBON_METRIC_PREFIX = carbon
|
||||
# CARBON_METRIC_INTERVAL = 60
|
||||
|
||||
# In order to turn off logging of successful connections for the line
|
||||
# receiver, set this to False
|
||||
# LOG_LISTENER_CONN_SUCCESS = True
|
||||
|
||||
# In order to turn off logging of metrics with no corresponding
|
||||
# aggregation rules receiver, set this to False
|
||||
# LOG_AGGREGATOR_MISSES = False
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon-aggregator runs as the user that invokes it
|
||||
# USER =
|
||||
|
||||
# Part of the code, and particularly aggregator rules, need
|
||||
# to cache metric names. To avoid leaking too much memory you
|
||||
# can tweak the size of this cache. The default allow for 1M
|
||||
# different metrics per rule (~200MiB).
|
||||
# CACHE_METRIC_NAMES_MAX=1000000
|
||||
|
||||
# You can optionally set a ttl to this cache.
|
||||
# CACHE_METRIC_NAMES_TTL=600
|
||||
|
@@ -40,4 +40,3 @@ aggregationMethod = sum
pattern = .*
xFilesFactor = 0.3
aggregationMethod = average

@@ -1,4 +1,23 @@
# Schema definitions for Whisper files. Entries are scanned in order,
# and first match wins. This file is scanned for changes every 60 seconds.
#
#  Definition Syntax:
#
#    [name]
#    pattern = regex
#    retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ...
#
#  Remember: To support accurate aggregation from higher to lower resolution
#            archives, the precision of a longer retention archive must be
#            cleanly divisible by precision of next lower retention archive.
#
#            Valid:    60s:7d,300s:30d (300/60 = 5)
#            Invalid:  180s:7d,300s:30d (300/180 = 3.333)
#

# Carbon's internal metrics. This entry should match what is specified in
# CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings

[carbon]
pattern = ^carbon\..*
retentions = 1m:31d,10m:1y,1h:5y
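The divisibility rule spelled out above is easy to check mechanically. The following is a small standalone sketch (not part of this commit; the helper names are made up for illustration) that validates the precision part of a retentions string:

# Check that each coarser archive's precision divides cleanly by the finer one.
UNIT_SECONDS = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800, "y": 31536000}

def precision_seconds(token):
    """'60s' -> 60, '10m' -> 600, '1h' -> 3600 ..."""
    value, unit = token[:-1], token[-1]
    return int(value) * UNIT_SECONDS[unit]

def retentions_are_valid(retentions):
    """True if every successive precision is a clean multiple of the previous one."""
    precisions = [precision_seconds(r.split(":")[0]) for r in retentions.split(",")]
    return all(coarse % fine == 0 for fine, coarse in zip(precisions, precisions[1:]))

print(retentions_are_valid("60s:7d,300s:30d"))      # True  (300/60 = 5)
print(retentions_are_valid("180s:7d,300s:30d"))     # False (300/180 = 3.333)
print(retentions_are_valid("1m:31d,10m:1y,1h:5y"))  # True, the [carbon] entry above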
2
docker/blocks/graphite1/conf/usr/local/bin/manage.sh
Normal file → Executable file
@@ -1,3 +1,3 @@
#!/bin/bash
PYTHONPATH=/opt/graphite/webapp django-admin.py syncdb --settings=graphite.settings
PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
# PYTHONPATH=/opt/graphite/webapp django-admin.py update_users --settings=graphite.settings
21
docker/blocks/graphite1/docker-compose.yaml
Normal file
@@ -0,0 +1,21 @@
graphite:
  build:
    context: blocks/graphite1
    args:
      version: master
  ports:
    - "8080:80"
    - "2003:2003"
    - "8125:8125/udp"
    - "8126:8126"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro

fake-graphite-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003
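With this block running, the mapped plaintext port 2003 can be exercised directly over TCP. A minimal sketch follows (not part of the commit; the metric name, host and port are illustrative assumptions):

import socket
import time

def send_plaintext_metric(name, value, host="localhost", port=2003):
    """Send one 'name value timestamp\n' line to carbon's line receiver."""
    line = "%s %s %d\n" % (name, value, int(time.time()))
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(line.encode("ascii"))

if __name__ == "__main__":
    send_plaintext_metric("test.docker.block.graphite1", 42)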
@@ -1,16 +0,0 @@
graphite:
  build: blocks/graphite1
  ports:
    - "8080:80"
    - "2003:2003"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro

fake-graphite-data:
  image: grafana/fake-data-gen
  net: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2003

@ -1,76 +0,0 @@
|
||||
[cache]
|
||||
LOCAL_DATA_DIR = /opt/graphite/storage/whisper/
|
||||
|
||||
# Specify the user to drop privileges to
|
||||
# If this is blank carbon runs as the user that invokes it
|
||||
# This user must have write access to the local data directory
|
||||
USER =
|
||||
|
||||
# Limit the size of the cache to avoid swapping or becoming CPU bound.
|
||||
# Sorts and serving cache queries gets more expensive as the cache grows.
|
||||
# Use the value "inf" (infinity) for an unlimited cache size.
|
||||
MAX_CACHE_SIZE = inf
|
||||
|
||||
# Limits the number of whisper update_many() calls per second, which effectively
|
||||
# means the number of write requests sent to the disk. This is intended to
|
||||
# prevent over-utilizing the disk and thus starving the rest of the system.
|
||||
# When the rate of required updates exceeds this, then carbon's caching will
|
||||
# take effect and increase the overall throughput accordingly.
|
||||
MAX_UPDATES_PER_SECOND = 1000
|
||||
|
||||
# Softly limits the number of whisper files that get created each minute.
|
||||
# Setting this value low (like at 50) is a good way to ensure your graphite
|
||||
# system will not be adversely impacted when a bunch of new metrics are
|
||||
# sent to it. The trade off is that it will take much longer for those metrics'
|
||||
# database files to all get created and thus longer until the data becomes usable.
|
||||
# Setting this value high (like "inf" for infinity) will cause graphite to create
|
||||
# the files quickly but at the risk of slowing I/O down considerably for a while.
|
||||
MAX_CREATES_PER_MINUTE = inf
|
||||
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
CACHE_QUERY_INTERFACE = 0.0.0.0
|
||||
CACHE_QUERY_PORT = 7002
|
||||
|
||||
LOG_UPDATES = False
|
||||
|
||||
# Enable AMQP if you want to receve metrics using an amqp broker
|
||||
# ENABLE_AMQP = False
|
||||
|
||||
# Verbose means a line will be logged for every metric received
|
||||
# useful for testing
|
||||
# AMQP_VERBOSE = False
|
||||
|
||||
# AMQP_HOST = localhost
|
||||
# AMQP_PORT = 5672
|
||||
# AMQP_VHOST = /
|
||||
# AMQP_USER = guest
|
||||
# AMQP_PASSWORD = guest
|
||||
# AMQP_EXCHANGE = graphite
|
||||
|
||||
# Patterns for all of the metrics this machine will store. Read more at
|
||||
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
|
||||
#
|
||||
# Example: store all sales, linux servers, and utilization metrics
|
||||
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
|
||||
#
|
||||
# Example: store everything
|
||||
# BIND_PATTERNS = #
|
||||
|
||||
# NOTE: you cannot run both a cache and a relay on the same server
|
||||
# with the default configuration, you have to specify a distinict
|
||||
# interfaces and ports for the listeners.
|
||||
|
||||
[relay]
|
||||
LINE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
LINE_RECEIVER_PORT = 2003
|
||||
|
||||
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
|
||||
PICKLE_RECEIVER_PORT = 2004
|
||||
|
||||
CACHE_SERVERS = server1, server2, server3
|
||||
MAX_QUEUE_SIZE = 10000
|
@ -1,102 +0,0 @@
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from django.utils.timezone import get_current_timezone
|
||||
from django.core.urlresolvers import get_script_prefix
|
||||
from django.http import HttpResponse
|
||||
from django.shortcuts import render_to_response, get_object_or_404
|
||||
from pytz import timezone
|
||||
|
||||
from graphite.util import json
|
||||
from graphite.events import models
|
||||
from graphite.render.attime import parseATTime
|
||||
|
||||
|
||||
def to_timestamp(dt):
|
||||
return time.mktime(dt.timetuple())
|
||||
|
||||
|
||||
class EventEncoder(json.JSONEncoder):
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime.datetime):
|
||||
return to_timestamp(obj)
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def view_events(request):
|
||||
if request.method == "GET":
|
||||
context = { 'events' : fetch(request),
|
||||
'slash' : get_script_prefix()
|
||||
}
|
||||
return render_to_response("events.html", context)
|
||||
else:
|
||||
return post_event(request)
|
||||
|
||||
def detail(request, event_id):
|
||||
e = get_object_or_404(models.Event, pk=event_id)
|
||||
context = { 'event' : e,
|
||||
'slash' : get_script_prefix()
|
||||
}
|
||||
return render_to_response("event.html", context)
|
||||
|
||||
|
||||
def post_event(request):
|
||||
if request.method == 'POST':
|
||||
event = json.loads(request.body)
|
||||
assert isinstance(event, dict)
|
||||
|
||||
values = {}
|
||||
values["what"] = event["what"]
|
||||
values["tags"] = event.get("tags", None)
|
||||
values["when"] = datetime.datetime.fromtimestamp(
|
||||
event.get("when", time.time()))
|
||||
if "data" in event:
|
||||
values["data"] = event["data"]
|
||||
|
||||
e = models.Event(**values)
|
||||
e.save()
|
||||
|
||||
return HttpResponse(status=200)
|
||||
else:
|
||||
return HttpResponse(status=405)
|
||||
|
||||
def get_data(request):
|
||||
if 'jsonp' in request.REQUEST:
|
||||
response = HttpResponse(
|
||||
"%s(%s)" % (request.REQUEST.get('jsonp'),
|
||||
json.dumps(fetch(request), cls=EventEncoder)),
|
||||
mimetype='text/javascript')
|
||||
else:
|
||||
response = HttpResponse(
|
||||
json.dumps(fetch(request), cls=EventEncoder),
|
||||
mimetype="application/json")
|
||||
return response
|
||||
|
||||
def fetch(request):
|
||||
#XXX we need to move to USE_TZ=True to get rid of naive-time conversions
|
||||
def make_naive(dt):
|
||||
if 'tz' in request.GET:
|
||||
tz = timezone(request.GET['tz'])
|
||||
else:
|
||||
tz = get_current_timezone()
|
||||
local_dt = dt.astimezone(tz)
|
||||
if hasattr(local_dt, 'normalize'):
|
||||
local_dt = local_dt.normalize()
|
||||
return local_dt.replace(tzinfo=None)
|
||||
|
||||
if request.GET.get("from", None) is not None:
|
||||
time_from = make_naive(parseATTime(request.GET["from"]))
|
||||
else:
|
||||
time_from = datetime.datetime.fromtimestamp(0)
|
||||
|
||||
if request.GET.get("until", None) is not None:
|
||||
time_until = make_naive(parseATTime(request.GET["until"]))
|
||||
else:
|
||||
time_until = datetime.datetime.now()
|
||||
|
||||
tags = request.GET.get("tags", None)
|
||||
if tags is not None:
|
||||
tags = request.GET.get("tags").split(" ")
|
||||
|
||||
return [x.as_dict() for x in
|
||||
models.Event.find_events(time_from, time_until, tags=tags)]
|
@ -1,20 +0,0 @@
|
||||
[
|
||||
{
|
||||
"pk": 1,
|
||||
"model": "auth.user",
|
||||
"fields": {
|
||||
"username": "admin",
|
||||
"first_name": "",
|
||||
"last_name": "",
|
||||
"is_active": true,
|
||||
"is_superuser": true,
|
||||
"is_staff": true,
|
||||
"last_login": "2011-09-20 17:02:14",
|
||||
"groups": [],
|
||||
"user_permissions": [],
|
||||
"password": "sha1$1b11b$edeb0a67a9622f1f2cfeabf9188a711f5ac7d236",
|
||||
"email": "root@example.com",
|
||||
"date_joined": "2011-09-20 17:02:14"
|
||||
}
|
||||
}
|
||||
]
|
@ -1,42 +0,0 @@
|
||||
# Edit this file to override the default graphite settings, do not edit settings.py
|
||||
|
||||
# Turn on debugging and restart apache if you ever see an "Internal Server Error" page
|
||||
#DEBUG = True
|
||||
|
||||
# Set your local timezone (django will try to figure this out automatically)
|
||||
TIME_ZONE = 'UTC'
|
||||
|
||||
# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely
|
||||
#MEMCACHE_HOSTS = ['127.0.0.1:11211']
|
||||
|
||||
# Sometimes you need to do a lot of rendering work but cannot share your storage mount
|
||||
#REMOTE_RENDERING = True
|
||||
#RENDERING_HOSTS = ['fastserver01','fastserver02']
|
||||
#LOG_RENDERING_PERFORMANCE = True
|
||||
#LOG_CACHE_PERFORMANCE = True
|
||||
|
||||
# If you've got more than one backend server they should all be listed here
|
||||
#CLUSTER_SERVERS = []
|
||||
|
||||
# Override this if you need to provide documentation specific to your graphite deployment
|
||||
#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite"
|
||||
|
||||
# Enable email-related features
|
||||
#SMTP_SERVER = "mail.mycompany.com"
|
||||
|
||||
# LDAP / ActiveDirectory authentication setup
|
||||
#USE_LDAP_AUTH = True
|
||||
#LDAP_SERVER = "ldap.mycompany.com"
|
||||
#LDAP_PORT = 389
|
||||
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
|
||||
#LDAP_BASE_PASS = "readonly_account_password"
|
||||
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
|
||||
|
||||
# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!)
|
||||
#DATABASE_ENGINE = 'mysql' # or 'postgres'
|
||||
#DATABASE_NAME = 'graphite'
|
||||
#DATABASE_USER = 'graphite'
|
||||
#DATABASE_PASSWORD = 'graphite-is-awesome'
|
||||
#DATABASE_HOST = 'mysql.mycompany.com'
|
||||
#DATABASE_PORT = '3306'
|
@ -1 +0,0 @@
|
||||
grafana:$apr1$4R/20xhC$8t37jPP5dbcLr48btdkU//
|
@ -1,70 +0,0 @@
|
||||
daemon off;
|
||||
user www-data;
|
||||
worker_processes 1;
|
||||
pid /var/run/nginx.pid;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
server_tokens off;
|
||||
|
||||
server_names_hash_bucket_size 32;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
access_log /var/log/nginx/access.log;
|
||||
error_log /var/log/nginx/error.log;
|
||||
|
||||
gzip on;
|
||||
gzip_disable "msie6";
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
|
||||
open_log_file_cache max=1000 inactive=20s min_uses=2 valid=1m;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8000;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Server $host;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header Host $host;
|
||||
|
||||
client_max_body_size 10m;
|
||||
client_body_buffer_size 128k;
|
||||
|
||||
proxy_connect_timeout 90;
|
||||
proxy_send_timeout 90;
|
||||
proxy_read_timeout 90;
|
||||
|
||||
proxy_buffer_size 4k;
|
||||
proxy_buffers 4 32k;
|
||||
proxy_busy_buffers_size 64k;
|
||||
proxy_temp_file_write_size 64k;
|
||||
}
|
||||
|
||||
add_header Access-Control-Allow-Origin "*";
|
||||
add_header Access-Control-Allow-Methods "GET, OPTIONS";
|
||||
add_header Access-Control-Allow-Headers "origin, authorization, accept";
|
||||
|
||||
location /content {
|
||||
alias /opt/graphite/webapp/content;
|
||||
|
||||
}
|
||||
|
||||
location /media {
|
||||
alias /usr/share/pyshared/django/contrib/admin/media;
|
||||
}
|
||||
}
|
||||
}
|
@ -1,8 +0,0 @@
|
||||
{
|
||||
graphitePort: 2003,
|
||||
graphiteHost: "127.0.0.1",
|
||||
port: 8125,
|
||||
mgmt_port: 8126,
|
||||
backends: ['./backends/graphite'],
|
||||
debug: true
|
||||
}
|
@ -1,19 +0,0 @@
|
||||
[min]
|
||||
pattern = \.min$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = min
|
||||
|
||||
[max]
|
||||
pattern = \.max$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = max
|
||||
|
||||
[sum]
|
||||
pattern = \.count$
|
||||
xFilesFactor = 0
|
||||
aggregationMethod = sum
|
||||
|
||||
[default_average]
|
||||
pattern = .*
|
||||
xFilesFactor = 0.5
|
||||
aggregationMethod = average
|
@ -1,16 +0,0 @@
|
||||
[carbon]
|
||||
pattern = ^carbon\..*
|
||||
retentions = 1m:31d,10m:1y,1h:5y
|
||||
|
||||
[highres]
|
||||
pattern = ^highres.*
|
||||
retentions = 1s:1d,1m:7d
|
||||
|
||||
[statsd]
|
||||
pattern = ^statsd.*
|
||||
retentions = 1m:7d,10m:1y
|
||||
|
||||
[default]
|
||||
pattern = .*
|
||||
retentions = 10s:1d,1m:7d,10m:1y
|
||||
|
@ -1,26 +0,0 @@
|
||||
[supervisord]
|
||||
nodaemon = true
|
||||
environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf'
|
||||
|
||||
[program:nginx]
|
||||
command = /usr/sbin/nginx
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
||||
[program:carbon-cache]
|
||||
;user = www-data
|
||||
command = /opt/graphite/bin/carbon-cache.py --debug start
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
||||
[program:graphite-webapp]
|
||||
;user = www-data
|
||||
directory = /opt/graphite/webapp
|
||||
environment = PYTHONPATH='/opt/graphite/webapp'
|
||||
command = /usr/bin/gunicorn_django -b127.0.0.1:8000 -w2 graphite/settings.py
|
||||
stdout_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
stderr_logfile = /var/log/supervisor/%(program_name)s.log
|
||||
autorestart = true
|
||||
|
1179
docker/blocks/graphite11/big-dashboard.json
Normal file
File diff suppressed because it is too large
18
docker/blocks/graphite11/docker-compose.yaml
Normal file
@@ -0,0 +1,18 @@
graphite11:
  image: graphiteapp/graphite-statsd
  ports:
    - "8180:80"
    - "2103-2104:2003-2004"
    - "2123-2124:2023-2024"
    - "8225:8125/udp"
    - "8226:8126"

fake-graphite11-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: graphite
    FD_PORT: 2103
    FD_GRAPHITE_VERSION: 1.1
  depends_on:
    - graphite11
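This block maps carbon's pickle receiver to host port 2104 (container port 2004). A hedged sketch of a batched send over that protocol follows, assuming carbon's documented framing of a 4-byte length prefix followed by a pickled list of (path, (timestamp, value)) tuples; metric names and host are illustrative, not part of the commit:

import pickle
import socket
import struct
import time

def send_pickle_batch(datapoints, host="localhost", port=2104):
    """datapoints: iterable of (metric_path, (timestamp, value)) tuples."""
    payload = pickle.dumps(list(datapoints), protocol=2)
    header = struct.pack("!L", len(payload))  # 4-byte big-endian length prefix
    with socket.create_connection((host, port), timeout=5) as sock:
        sock.sendall(header + payload)

if __name__ == "__main__":
    now = int(time.time())
    send_pickle_batch([
        ("test.docker.block.graphite11.a", (now, 1.0)),
        ("test.docker.block.graphite11.b", (now, 2.0)),
    ])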
17
docker/blocks/influxdb/docker-compose.yaml
Normal file
@@ -0,0 +1,17 @@
influxdb:
  image: influxdb:latest
  container_name: influxdb
  ports:
    - "2004:2004"
    - "8083:8083"
    - "8086:8086"
  volumes:
    - ./blocks/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf

fake-influxdb-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: influxdb
    FD_PORT: 8086

@@ -1,17 +0,0 @@
influxdb:
  image: influxdb:latest
  container_name: influxdb
  ports:
    - "2004:2004"
    - "8083:8083"
    - "8086:8086"
  volumes:
    - ./blocks/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf

fake-influxdb-data:
  image: grafana/fake-data-gen
  net: bridge
  environment:
    FD_DATASOURCE: influxdb
    FD_PORT: 8086
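For the influxdb block, data can be pushed over the mapped HTTP port 8086. A minimal sketch against the InfluxDB 1.x write API follows; the database name, measurement and tag values are illustrative assumptions, not something this compose file provisions:

import time
import urllib.parse
import urllib.request

BASE = "http://localhost:8086"

def influx_post(path, params, body=b""):
    """POST helper for the InfluxDB 1.x HTTP API."""
    url = "%s%s?%s" % (BASE, path, urllib.parse.urlencode(params))
    req = urllib.request.Request(url, data=body, method="POST")
    return urllib.request.urlopen(req, timeout=5).status

# Create a database (idempotent in InfluxDB 1.x), then write one point
# in line protocol with second precision.
influx_post("/query", {"q": "CREATE DATABASE testdata"})
point = "logins,hostname=server01 value=42 %d" % int(time.time())
influx_post("/write", {"db": "testdata", "precision": "s"}, point.encode("ascii"))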
6
docker/blocks/jaeger/docker-compose.yaml
Normal file
@@ -0,0 +1,6 @@
jaeger:
  image: jaegertracing/all-in-one:latest
  ports:
    - "127.0.0.1:6831:6831/udp"
    - "16686:16686"

@@ -1,6 +0,0 @@
jaeger:
  image: jaegertracing/all-in-one:latest
  ports:
    - "localhost:6831:6831/udp"
    - "16686:16686"
5
docker/blocks/memcached/docker-compose.yaml
Normal file
@@ -0,0 +1,5 @@
memcached:
  image: memcached:latest
  ports:
    - "11211:11211"

@@ -1,5 +0,0 @@
memcached:
  image: memcached:latest
  ports:
    - "11211:11211"
549
docker/blocks/mysql/dashboard.json
Normal file
@@ -0,0 +1,549 @@
|
||||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_MYSQL",
|
||||
"label": "Mysql",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "mysql",
|
||||
"pluginName": "MySQL"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "5.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "graph",
|
||||
"name": "Graph",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "mysql",
|
||||
"name": "MySQL",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "table",
|
||||
"name": "Table",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "A dashboard visualizing data generated from grafana/fake-data-gen",
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"iteration": 1518602729468,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {
|
||||
"total avg": "#6ed0e0"
|
||||
},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_MYSQL}",
|
||||
"fill": 2,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [
|
||||
{
|
||||
"alias": "total avg",
|
||||
"fill": 0,
|
||||
"pointradius": 3,
|
||||
"points": true
|
||||
}
|
||||
],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time_sec,\n avg(value) as value,\n hostname as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'logins.count' AND\n hostname IN($host)\nGROUP BY 1, 3\nORDER BY 1",
|
||||
"refId": "A",
|
||||
"target": ""
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time_sec,\n min(value) as value,\n 'total avg' as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'logins.count'\nGROUP BY 1\nORDER BY 1",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": "1h",
|
||||
"title": "Average logins / $summarize",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_MYSQL}",
|
||||
"fill": 2,
|
||||
"gridPos": {
|
||||
"h": 18,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 4,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time_sec,\n avg(value) as value,\n 'started' as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'payment.started'\nGROUP BY 1, 3\nORDER BY 1",
|
||||
"refId": "A",
|
||||
"target": ""
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time_sec,\n avg(value) as value,\n 'ended' as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'payment.ended'\nGROUP BY 1, 3\nORDER BY 1",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": "1h",
|
||||
"title": "Average payments started/ended / $summarize",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_MYSQL}",
|
||||
"fill": 2,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 9
|
||||
},
|
||||
"id": 3,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(createdAt,'$summarize') as time_sec,\n max(value) as value,\n hostname as metric\nFROM \n grafana_metric\nWHERE\n $__timeFilter(createdAt) AND\n measurement = 'cpu' AND\n hostname IN($host)\nGROUP BY 1, 3\nORDER BY 1",
|
||||
"refId": "A",
|
||||
"target": ""
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": "1h",
|
||||
"title": "Max CPU / $summarize",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "percent",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"columns": [],
|
||||
"datasource": "${DS_MYSQL}",
|
||||
"fontSize": "100%",
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 18
|
||||
},
|
||||
"id": 6,
|
||||
"links": [],
|
||||
"pageSize": null,
|
||||
"scroll": true,
|
||||
"showHeader": true,
|
||||
"sort": {
|
||||
"col": 0,
|
||||
"desc": true
|
||||
},
|
||||
"styles": [
|
||||
{
|
||||
"alias": "Time",
|
||||
"dateFormat": "YYYY-MM-DD HH:mm:ss",
|
||||
"link": false,
|
||||
"pattern": "Time",
|
||||
"type": "date"
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"colorMode": null,
|
||||
"colors": [
|
||||
"rgba(245, 54, 54, 0.9)",
|
||||
"rgba(237, 129, 40, 0.89)",
|
||||
"rgba(50, 172, 45, 0.97)"
|
||||
],
|
||||
"decimals": 2,
|
||||
"pattern": "/.*/",
|
||||
"thresholds": [],
|
||||
"type": "number",
|
||||
"unit": "short"
|
||||
}
|
||||
],
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "table",
|
||||
"rawSql": "SELECT createdAt as Time, source, datacenter, hostname, value FROM grafana_metric WHERE hostname in($host)",
|
||||
"refId": "A",
|
||||
"target": ""
|
||||
}
|
||||
],
|
||||
"timeShift": "1h",
|
||||
"title": "Values",
|
||||
"transform": "table",
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"schemaVersion": 16,
|
||||
"style": "dark",
|
||||
"tags": [
|
||||
"fake-data-gen",
|
||||
"mysql"
|
||||
],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {},
|
||||
"datasource": "${DS_MYSQL}",
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "Datacenter",
|
||||
"multi": false,
|
||||
"name": "datacenter",
|
||||
"options": [],
|
||||
"query": "SELECT DISTINCT datacenter FROM grafana_metric",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tags": [],
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
},
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {},
|
||||
"datasource": "${DS_MYSQL}",
|
||||
"hide": 0,
|
||||
"includeAll": true,
|
||||
"label": "Hostname",
|
||||
"multi": true,
|
||||
"name": "host",
|
||||
"options": [],
|
||||
"query": "SELECT DISTINCT hostname FROM grafana_metric WHERE datacenter='$datacenter'",
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tags": [],
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
},
|
||||
{
|
||||
"auto": false,
|
||||
"auto_count": 5,
|
||||
"auto_min": "10s",
|
||||
"current": {
|
||||
"selected": true,
|
||||
"text": "1m",
|
||||
"value": "1m"
|
||||
},
|
||||
"hide": 0,
|
||||
"label": "Summarize",
|
||||
"name": "summarize",
|
||||
"options": [
|
||||
{
|
||||
"selected": false,
|
||||
"text": "1s",
|
||||
"value": "1s"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "10s",
|
||||
"value": "10s"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "30s",
|
||||
"value": "30s"
|
||||
},
|
||||
{
|
||||
"selected": true,
|
||||
"text": "1m",
|
||||
"value": "1m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "5m",
|
||||
"value": "5m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "10m",
|
||||
"value": "10m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "30m",
|
||||
"value": "30m"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "1h",
|
||||
"value": "1h"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "6h",
|
||||
"value": "6h"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "12h",
|
||||
"value": "12h"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "1d",
|
||||
"value": "1d"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "7d",
|
||||
"value": "7d"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "14d",
|
||||
"value": "14d"
|
||||
},
|
||||
{
|
||||
"selected": false,
|
||||
"text": "30d",
|
||||
"value": "30d"
|
||||
}
|
||||
],
|
||||
"query": "1s,10s,30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
|
||||
"refresh": 2,
|
||||
"type": "interval"
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"time_options": [
|
||||
"5m",
|
||||
"15m",
|
||||
"1h",
|
||||
"6h",
|
||||
"12h",
|
||||
"24h",
|
||||
"2d",
|
||||
"7d",
|
||||
"30d"
|
||||
]
|
||||
},
|
||||
"timezone": "",
|
||||
"title": "Grafana Fake Data Gen - MySQL",
|
||||
"uid": "DGsCac3kz",
|
||||
"version": 6
|
||||
}
|
21
docker/blocks/mysql/docker-compose.yaml
Normal file
@@ -0,0 +1,21 @@
mysql:
  image: mysql:latest
  environment:
    MYSQL_ROOT_PASSWORD: rootpass
    MYSQL_DATABASE: grafana
    MYSQL_USER: grafana
    MYSQL_PASSWORD: password
  ports:
    - "3306:3306"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro
  command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]

fake-mysql-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: mysql
    FD_PORT: 3306

@@ -1,14 +0,0 @@
mysql:
  image: mysql:latest
  environment:
    MYSQL_ROOT_PASSWORD: rootpass
    MYSQL_DATABASE: grafana
    MYSQL_USER: grafana
    MYSQL_PASSWORD: password
  ports:
    - "3306:3306"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro
  command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci, --innodb_monitor_enable=all]
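The MySQL dashboard above queries a grafana_metric table that grafana/fake-data-gen normally populates. For poking at those queries without the generator, here is a hedged sketch; the column set is inferred from the dashboard's rawSql, the column types and example values are assumptions, and pymysql is just one convenient client:

import datetime
import pymysql  # pip install pymysql

conn = pymysql.connect(host="127.0.0.1", port=3306, user="grafana",
                       password="password", database="grafana")
try:
    with conn.cursor() as cur:
        # Column names follow the dashboard's rawSql; the types are guesses.
        cur.execute("""
            CREATE TABLE IF NOT EXISTS grafana_metric (
              createdAt   DATETIME     NOT NULL,
              measurement VARCHAR(64)  NOT NULL,
              hostname    VARCHAR(64)  NOT NULL,
              source      VARCHAR(64),
              datacenter  VARCHAR(64),
              value       DOUBLE       NOT NULL
            )""")
        cur.execute(
            "INSERT INTO grafana_metric "
            "(createdAt, measurement, hostname, source, datacenter, value) "
            "VALUES (%s, %s, %s, %s, %s, %s)",
            (datetime.datetime.utcnow(), "logins.count",
             "server01", "fake-data-gen", "dc-example", 3.0))
    conn.commit()
finally:
    conn.close()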
9
docker/blocks/mysql_opendata/docker-compose.yaml
Normal file
@@ -0,0 +1,9 @@
mysql_opendata:
  build: blocks/mysql_opendata
  environment:
    MYSQL_ROOT_PASSWORD: rootpass
    MYSQL_DATABASE: testdata
    MYSQL_USER: grafana
    MYSQL_PASSWORD: password
  ports:
    - "3307:3306"

@@ -1,9 +0,0 @@
mysql_opendata:
  build: blocks/mysql_opendata
  environment:
    MYSQL_ROOT_PASSWORD: rootpass
    MYSQL_DATABASE: testdata
    MYSQL_USER: grafana
    MYSQL_PASSWORD: password
  ports:
    - "3307:3306"
13
docker/blocks/mysql_tests/docker-compose.yaml
Normal file
@@ -0,0 +1,13 @@
mysqltests:
  image: mysql:latest
  environment:
    MYSQL_ROOT_PASSWORD: rootpass
    MYSQL_DATABASE: grafana_tests
    MYSQL_USER: grafana
    MYSQL_PASSWORD: password
  ports:
    - "3306:3306"
  volumes:
    - /etc/localtime:/etc/localtime:ro
    - /etc/timezone:/etc/timezone:ro
  tmpfs: /var/lib/mysql:rw

@@ -1,9 +0,0 @@
mysqltests:
  image: mysql:latest
  environment:
    MYSQL_ROOT_PASSWORD: rootpass
    MYSQL_DATABASE: grafana_tests
    MYSQL_USER: grafana
    MYSQL_PASSWORD: password
  ports:
    - "3306:3306"
3
docker/blocks/nginx_proxy/Dockerfile
Normal file
@@ -0,0 +1,3 @@
FROM nginx:alpine

COPY nginx.conf /etc/nginx/nginx.conf
9
docker/blocks/nginx_proxy/docker-compose.yaml
Normal file
@@ -0,0 +1,9 @@
# This will proxy all requests for http://localhost:10080/grafana/ to
# http://localhost:3000 (Grafana running locally)
#
# Please note that you'll need to change the root_url in the Grafana configuration:
# root_url = %(protocol)s://%(domain)s:/grafana/

nginxproxy:
  build: blocks/nginx_proxy
  network_mode: host
19
docker/blocks/nginx_proxy/nginx.conf
Normal file
@@ -0,0 +1,19 @@
events { worker_connections 1024; }

http {
  sendfile on;

  proxy_redirect off;
  proxy_set_header Host $host;
  proxy_set_header X-Real-IP $remote_addr;
  proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
  proxy_set_header X-Forwarded-Host $server_name;

  server {
    listen 10080;

    location /grafana/ {
      proxy_pass http://localhost:3000/;
    }
  }
}
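A quick way to confirm the sub-path proxy above is wired up (illustrative, not part of the block): with Grafana listening on :3000 and root_url set as the compose comment describes, the login page should answer through port 10080.

import urllib.request

# Assumes Grafana is running locally on :3000 with
#   root_url = %(protocol)s://%(domain)s:/grafana/
# as noted in the docker-compose comment above.
resp = urllib.request.urlopen("http://localhost:10080/grafana/login", timeout=5)
print(resp.status, resp.geturl())  # expect 200 and a .../grafana/login URL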
@@ -1,6 +1,6 @@
FROM debian:jessie

MAINTAINER Christian Luginbühl <dinke@pimprecords.com>
LABEL maintainer="Christian Luginbühl <dinke@pimprecords.com>"

ENV OPENLDAP_VERSION 2.4.40

10
docker/blocks/openldap/docker-compose.yaml
Normal file
@@ -0,0 +1,10 @@
openldap:
  build: blocks/openldap
  environment:
    SLAPD_PASSWORD: grafana
    SLAPD_DOMAIN: grafana.org
    SLAPD_ADDITIONAL_MODULES: memberof
  ports:
    - "389:389"

@@ -1,10 +0,0 @@
openldap:
  build: blocks/openldap
  environment:
    SLAPD_PASSWORD: grafana
    SLAPD_DOMAIN: grafana.org
    SLAPD_ADDITIONAL_MODULES: memberof
  ports:
    - "389:389"
11
docker/blocks/opentsdb/docker-compose.yaml
Normal file
@@ -0,0 +1,11 @@
opentsdb:
  image: opower/opentsdb:latest
  ports:
    - "4242:4242"

fake-opentsdb-data:
  image: grafana/fake-data-gen
  network_mode: bridge
  environment:
    FD_DATASOURCE: opentsdb

@@ -1,11 +0,0 @@
opentsdb:
  image: opower/opentsdb:latest
  ports:
    - "4242:4242"

fake-opentsdb-data:
  image: grafana/fake-data-gen
  net: bridge
  environment:
    FD_DATASOURCE: opentsdb
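For the opentsdb block, datapoints can be pushed over the mapped port 4242 via OpenTSDB's HTTP /api/put endpoint. A minimal sketch follows; the metric name and tags are illustrative assumptions, not part of the commit:

import json
import time
import urllib.request

def put_datapoint(metric, value, tags, host="localhost", port=4242):
    """POST one datapoint to OpenTSDB's /api/put endpoint."""
    body = json.dumps({
        "metric": metric,
        "timestamp": int(time.time()),
        "value": value,
        "tags": tags,
    }).encode("ascii")
    req = urllib.request.Request(
        "http://%s:%d/api/put" % (host, port),
        data=body,
        headers={"Content-Type": "application/json"},
    )
    # OpenTSDB replies 204 No Content on success.
    return urllib.request.urlopen(req, timeout=5).status

if __name__ == "__main__":
    put_datapoint("test.docker.block.opentsdb", 42, {"host": "server01"})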
547
docker/blocks/postgres/dashboard.json
Normal file
@@ -0,0 +1,547 @@
|
||||
{
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_POSTGRESQL",
|
||||
"label": "PostgreSQL",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "postgres",
|
||||
"pluginName": "PostgreSQL"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "5.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "graph",
|
||||
"name": "Graph",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "postgres",
|
||||
"name": "PostgreSQL",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "table",
|
||||
"name": "Table",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "A dashboard visualizing data generated from grafana/fake-data-gen",
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"iteration": 1518601837383,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"aliasColors": {
|
||||
"total avg": "#6ed0e0"
|
||||
},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "${DS_POSTGRESQL}",
|
||||
"fill": 2,
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 2,
|
||||
"links": [],
|
||||
"nullPointMode": "null",
|
||||
"percentage": false,
|
||||
"pointradius": 5,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [
|
||||
{
|
||||
"alias": "total avg",
|
||||
"fill": 0,
|
||||
"pointradius": 3,
|
||||
"points": true
|
||||
}
|
||||
],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"rawSql": "SELECT\n $__timeGroup(\"createdAt\",'$summarize'),\n avg(value) as \"value\",\n hostname as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(\"createdAt\") AND\n measurement = 'logins.count' AND\n hostname IN($host)\nGROUP BY time, metric\nORDER BY time",
|
||||
"refId": "A",
|
||||
"target": ""
|
||||
},
|
||||
{
|
||||
"alias": "",
|
||||
"format": "time_series",
|
||||
"rawSql": "SELECT\n $__timeGroup(\"createdAt\",'$summarize'),\n min(value) as \"value\",\n 'total avg' as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(\"createdAt\") AND\n measurement = 'logins.count'\nGROUP BY time",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Average logins / $summarize",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
          "show": true
        }
      ]
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": "${DS_POSTGRESQL}",
      "fill": 2,
      "gridPos": {
        "h": 18,
        "w": 12,
        "x": 12,
        "y": 0
      },
      "id": 4,
      "legend": {
        "avg": false,
        "current": false,
        "max": false,
        "min": false,
        "show": true,
        "total": false,
        "values": false
      },
      "lines": true,
      "linewidth": 2,
      "links": [],
      "nullPointMode": "null",
      "percentage": false,
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "alias": "",
          "format": "time_series",
          "rawSql": "SELECT\n $__timeGroup(\"createdAt\",'$summarize'),\n avg(value) as \"value\",\n 'started' as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(\"createdAt\") AND\n measurement = 'payment.started'\nGROUP BY time, metric\nORDER BY time",
          "refId": "A",
          "target": ""
        },
        {
          "alias": "",
          "format": "time_series",
          "rawSql": "SELECT\n $__timeGroup(\"createdAt\",'$summarize'),\n avg(value) as \"value\",\n 'ended' as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(\"createdAt\") AND\n measurement = 'payment.ended'\nGROUP BY time, metric\nORDER BY time",
          "refId": "B"
        }
      ],
      "thresholds": [],
      "timeFrom": null,
      "timeShift": null,
      "title": "Average payments started/ended / $summarize",
      "tooltip": {
        "shared": true,
        "sort": 0,
        "value_type": "individual"
      },
      "type": "graph",
      "xaxis": {
        "buckets": null,
        "mode": "time",
        "name": null,
        "show": true,
        "values": []
      },
      "yaxes": [
        {
          "format": "short",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": null,
          "show": true
        },
        {
          "format": "short",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": null,
          "show": true
        }
      ]
    },
    {
      "aliasColors": {},
      "bars": false,
      "dashLength": 10,
      "dashes": false,
      "datasource": "${DS_POSTGRESQL}",
      "fill": 2,
      "gridPos": {
        "h": 9,
        "w": 12,
        "x": 0,
        "y": 9
      },
      "id": 3,
      "legend": {
        "avg": false,
        "current": false,
        "max": false,
        "min": false,
        "show": true,
        "total": false,
        "values": false
      },
      "lines": true,
      "linewidth": 2,
      "links": [],
      "nullPointMode": "null",
      "percentage": false,
      "pointradius": 5,
      "points": false,
      "renderer": "flot",
      "seriesOverrides": [],
      "spaceLength": 10,
      "stack": false,
      "steppedLine": false,
      "targets": [
        {
          "alias": "",
          "format": "time_series",
          "rawSql": "SELECT\n $__timeGroup(\"createdAt\",'$summarize'),\n max(value) as \"value\",\n hostname as \"metric\"\nFROM \n grafana_metric\nWHERE\n $__timeFilter(\"createdAt\") AND\n measurement = 'cpu' AND\n hostname IN($host)\nGROUP BY time, metric\nORDER BY time",
          "refId": "A",
          "target": ""
        }
      ],
      "thresholds": [],
      "timeFrom": null,
      "timeShift": null,
      "title": "Max CPU / $summarize",
      "tooltip": {
        "shared": true,
        "sort": 0,
        "value_type": "individual"
      },
      "type": "graph",
      "xaxis": {
        "buckets": null,
        "mode": "time",
        "name": null,
        "show": true,
        "values": []
      },
      "yaxes": [
        {
          "format": "percent",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": null,
          "show": true
        },
        {
          "format": "short",
          "label": null,
          "logBase": 1,
          "max": null,
          "min": null,
          "show": true
        }
      ]
    },
    {
      "columns": [],
      "datasource": "${DS_POSTGRESQL}",
      "fontSize": "100%",
      "gridPos": {
        "h": 9,
        "w": 24,
        "x": 0,
        "y": 18
      },
      "id": 6,
      "links": [],
      "pageSize": null,
      "scroll": true,
      "showHeader": true,
      "sort": {
        "col": 0,
        "desc": true
      },
      "styles": [
        {
          "alias": "Time",
          "dateFormat": "YYYY-MM-DD HH:mm:ss",
          "link": false,
          "pattern": "Time",
          "type": "date"
        },
        {
          "alias": "",
          "colorMode": null,
          "colors": [
            "rgba(245, 54, 54, 0.9)",
            "rgba(237, 129, 40, 0.89)",
            "rgba(50, 172, 45, 0.97)"
          ],
          "decimals": 2,
          "pattern": "/.*/",
          "thresholds": [],
          "type": "number",
          "unit": "short"
        }
      ],
      "targets": [
        {
          "alias": "",
          "format": "table",
          "rawSql": "SELECT \"createdAt\" as \"Time\", source, datacenter, hostname, value FROM grafana_metric WHERE hostname in($host)",
          "refId": "A",
          "target": ""
        }
      ],
      "title": "Values",
      "transform": "table",
      "type": "table"
    }
  ],
  "schemaVersion": 16,
  "style": "dark",
  "tags": [
    "fake-data-gen",
    "postgres"
  ],
  "templating": {
    "list": [
      {
        "allValue": null,
        "current": {},
        "datasource": "${DS_POSTGRESQL}",
        "hide": 0,
        "includeAll": false,
        "label": "Datacenter",
        "multi": false,
        "name": "datacenter",
        "options": [],
        "query": "SELECT DISTINCT datacenter FROM grafana_metric",
        "refresh": 1,
        "regex": "",
        "sort": 1,
        "tagValuesQuery": "",
        "tags": [],
        "tagsQuery": "",
        "type": "query",
        "useTags": false
      },
      {
        "allValue": null,
        "current": {},
        "datasource": "${DS_POSTGRESQL}",
        "hide": 0,
        "includeAll": true,
        "label": "Hostname",
        "multi": true,
        "name": "host",
        "options": [],
        "query": "SELECT DISTINCT hostname FROM grafana_metric WHERE datacenter='$datacenter'",
        "refresh": 1,
        "regex": "",
        "sort": 1,
        "tagValuesQuery": "",
        "tags": [],
        "tagsQuery": "",
        "type": "query",
        "useTags": false
      },
      {
        "auto": false,
        "auto_count": 5,
        "auto_min": "10s",
        "current": {
          "text": "1m",
          "value": "1m"
        },
        "hide": 0,
        "label": "Summarize",
        "name": "summarize",
        "options": [
          {
            "selected": false,
            "text": "1s",
            "value": "1s"
          },
          {
            "selected": false,
            "text": "10s",
            "value": "10s"
          },
          {
            "selected": false,
            "text": "30s",
            "value": "30s"
          },
          {
            "selected": true,
            "text": "1m",
            "value": "1m"
          },
          {
            "selected": false,
            "text": "5m",
            "value": "5m"
          },
          {
            "selected": false,
            "text": "10m",
            "value": "10m"
          },
          {
            "selected": false,
            "text": "30m",
            "value": "30m"
          },
          {
            "selected": false,
            "text": "1h",
            "value": "1h"
          },
          {
            "selected": false,
            "text": "6h",
            "value": "6h"
          },
          {
            "selected": false,
            "text": "12h",
            "value": "12h"
          },
          {
            "selected": false,
            "text": "1d",
            "value": "1d"
          },
          {
            "selected": false,
            "text": "7d",
            "value": "7d"
          },
          {
            "selected": false,
            "text": "14d",
            "value": "14d"
          },
          {
            "selected": false,
            "text": "30d",
            "value": "30d"
          }
        ],
        "query": "1s,10s,30s,1m,5m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
        "refresh": 2,
        "type": "interval"
      }
    ]
  },
  "time": {
    "from": "now-1h",
    "to": "now"
  },
  "timepicker": {
    "refresh_intervals": [
      "5s",
      "10s",
      "30s",
      "1m",
      "5m",
      "15m",
      "30m",
      "1h",
      "2h",
      "1d"
    ],
    "time_options": [
      "5m",
      "15m",
      "1h",
      "6h",
      "12h",
      "24h",
      "2d",
      "7d",
      "30d"
    ]
  },
  "timezone": "",
  "title": "Grafana Fake Data Gen - PostgreSQL",
  "uid": "JYola5qzz",
  "version": 1
}
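The panel queries above rely on Grafana's Postgres macros ($__timeGroup, $__timeFilter) and the $summarize/$host template variables. To sanity-check the data outside Grafana, a roughly equivalent hand-written query can be run against the postgres block defined just below; this is a sketch only, the macro expansion shown is an approximation (it varies by Grafana version), $summarize is fixed at 1m, and the connection details are taken from that block:

    PGPASSWORD=password psql -h 127.0.0.1 -U grafana -d grafana <<'SQL'
    -- approximates the "payment.started" target with a 1-minute bucket
    SELECT
      (extract(epoch FROM "createdAt")::bigint / 60) * 60 AS time,
      avg(value)                                          AS value,
      'started'                                           AS metric
    FROM grafana_metric
    WHERE "createdAt" > now() - interval '1 hour'
      AND measurement = 'payment.started'
    GROUP BY 1
    ORDER BY 1;
    SQL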
docker/blocks/postgres/docker-compose.yaml (new file)
@@ -0,0 +1,16 @@
  postgrestest:
    image: postgres:latest
    environment:
      POSTGRES_USER: grafana
      POSTGRES_PASSWORD: password
      POSTGRES_DATABASE: grafana
    ports:
      - "5432:5432"
    command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql

  fake-postgres-data:
    image: grafana/fake-data-gen
    network_mode: bridge
    environment:
      FD_DATASOURCE: postgres
      FD_PORT: 5432
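A quick way to exercise this block, once it has been merged into the generated docker-compose.yaml (see the create_docker_compose.sh changes near the end of this diff), is to start only these two services and watch the generator write rows. The service names come from the fragment above; the working directory is an assumption about the local setup:

    cd docker
    docker-compose up -d postgrestest fake-postgres-data   # start the database and the fake data generator
    docker-compose logs -f fake-postgres-data              # watch rows being inserted into grafana_metric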
(deleted file)
@@ -1,9 +0,0 @@
postgrestest:
  image: postgres:latest
  environment:
    POSTGRES_USER: grafana
    POSTGRES_PASSWORD: password
    POSTGRES_DATABASE: grafana
  ports:
    - "5432:5432"
  command: postgres -c log_connections=on -c logging_collector=on -c log_destination=stderr -c log_directory=/var/log/postgresql
docker/blocks/postgres_tests/docker-compose.yaml (new file)
@@ -0,0 +1,8 @@
  postgrestest:
    image: postgres:latest
    environment:
      POSTGRES_USER: grafanatest
      POSTGRES_PASSWORD: grafanatest
    ports:
      - "5432:5432"
    tmpfs: /var/lib/postgresql/data:rw
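This variant uses throwaway credentials and a tmpfs-backed data directory, so the database is wiped whenever the container stops. A plausible workflow for the backend datasource tests might look like the following; the go test path is an assumption, not something this diff states:

    cd docker
    docker-compose up -d postgrestest      # after generating docker-compose.yaml with the postgres_tests block
    cd ..
    go test ./pkg/tsdb/postgres/...        # assumed location of the Postgres datasource tests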
(deleted file)
@@ -1,7 +0,0 @@
postgrestest:
  image: postgres:latest
  environment:
    POSTGRES_USER: grafanatest
    POSTGRES_PASSWORD: grafanatest
  ports:
    - "5432:5432"
@@ -1,3 +1,3 @@
FROM prom/prometheus
FROM prom/prometheus:v1.8.2
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/
docker/blocks/prometheus/docker-compose.yaml (new file)
@@ -0,0 +1,31 @@
  prometheus:
    build: blocks/prometheus
    network_mode: host
    ports:
      - "9090:9090"

  node_exporter:
    image: prom/node-exporter
    network_mode: host
    ports:
      - "9100:9100"

  fake-prometheus-data:
    image: grafana/fake-data-gen
    network_mode: host
    ports:
      - "9091:9091"
    environment:
      FD_DATASOURCE: prom

  alertmanager:
    image: quay.io/prometheus/alertmanager
    network_mode: host
    ports:
      - "9093:9093"

  prometheus-random-data:
    build: blocks/prometheus_random_data
    network_mode: host
    ports:
      - "8080:8080"
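With the block running, the scrape setup can be sanity-checked against the Prometheus HTTP API; host networking is used, so everything is reachable on localhost. A minimal check, as a sketch:

    curl -s http://localhost:9090/api/v1/targets   # which targets Prometheus is scraping
    curl -s http://localhost:9100/metrics | head   # node_exporter is answering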
(deleted file)
@@ -1,25 +0,0 @@
prometheus:
  build: blocks/prometheus
  net: host
  ports:
    - "9090:9090"

node_exporter:
  image: prom/node-exporter
  net: host
  ports:
    - "9100:9100"

fake-prometheus-data:
  image: grafana/fake-data-gen
  net: host
  ports:
    - "9091:9091"
  environment:
    FD_DATASOURCE: prom

alertmanager:
  image: quay.io/prometheus/alertmanager
  net: host
  ports:
    - "9093:9093"
@@ -25,11 +25,15 @@ scrape_configs:
  - job_name: 'node_exporter'
    static_configs:
      - targets: ['127.0.0.1:9100']

  - job_name: 'fake-data-gen'
    static_configs:
      - targets: ['127.0.0.1:9091']

  - job_name: 'grafana'
    static_configs:
      - targets: ['127.0.0.1:3000']

  - job_name: 'prometheus-random-data'
    static_configs:
      - targets: ['127.0.0.1:8080']
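The two new jobs scrape Grafana's own metrics endpoint and the random-data example image added below. Both can be checked directly before Prometheus is rebuilt, assuming Grafana's internal metrics endpoint is enabled:

    curl -s http://127.0.0.1:3000/metrics | head   # Grafana internal metrics
    curl -s http://127.0.0.1:8080/metrics | head   # prometheus-random-data example exporter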
docker/blocks/prometheus2/Dockerfile (new file)
@@ -0,0 +1,3 @@
FROM prom/prometheus:v2.2.0
ADD prometheus.yml /etc/prometheus/
ADD alert.rules /etc/prometheus/
docker/blocks/prometheus2/alert.rules (new file)
@@ -0,0 +1,10 @@
# Alert Rules

ALERT AppCrash
  IF process_open_fds > 0
  FOR 15s
  LABELS { severity="critical" }
  ANNOTATIONS {
    summary = "Number of open fds > 0",
    description = "Just testing"
  }
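The rule above uses the Prometheus 1.x rule syntax, while the prometheus2 block ships v2.2.0, which only loads rules from YAML rule groups (the rule_files section in its prometheus.yml below is commented out, so nothing loads it as-is). A rough 2.x equivalent, written and validated locally as a sketch, could look like this:

    cat > alert.rules.yml <<'EOF'
    groups:
      - name: app
        rules:
          - alert: AppCrash
            expr: process_open_fds > 0
            for: 15s
            labels:
              severity: critical
            annotations:
              summary: "Number of open fds > 0"
              description: "Just testing"
    EOF
    promtool check rules alert.rules.yml   # requires a 2.x promtool binary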
docker/blocks/prometheus2/docker-compose.yaml (new file)
@@ -0,0 +1,31 @@
  prometheus:
    build: blocks/prometheus2
    network_mode: host
    ports:
      - "9090:9090"

  node_exporter:
    image: prom/node-exporter
    network_mode: host
    ports:
      - "9100:9100"

  fake-prometheus-data:
    image: grafana/fake-data-gen
    network_mode: host
    ports:
      - "9091:9091"
    environment:
      FD_DATASOURCE: prom

  alertmanager:
    image: quay.io/prometheus/alertmanager
    network_mode: host
    ports:
      - "9093:9093"

  prometheus-random-data:
    build: blocks/prometheus_random_data
    network_mode: host
    ports:
      - "8080:8080"
docker/blocks/prometheus2/prometheus.yml (new file)
@@ -0,0 +1,39 @@
# my global config
global:
  scrape_interval: 10s # By default, scrape targets every 15 seconds.
  evaluation_interval: 10s # By default, scrape targets every 15 seconds.
  # scrape_timeout is set to the global default (10s).

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
#rule_files:
#  - "alert.rules"
#  - "first.rules"
#  - "second.rules"

# alerting:
#   alertmanagers:
#   - scheme: http
#     static_configs:
#     - targets:
#       - "127.0.0.1:9093"

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node_exporter'
    static_configs:
      - targets: ['127.0.0.1:9100']

  - job_name: 'fake-data-gen'
    static_configs:
      - targets: ['127.0.0.1:9091']

  - job_name: 'grafana'
    static_configs:
      - targets: ['127.0.0.1:3000']

  - job_name: 'prometheus-random-data'
    static_configs:
      - targets: ['127.0.0.1:8080']
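Before building the block image, this configuration can be validated locally, assuming a 2.x promtool binary is available on the PATH:

    promtool check config docker/blocks/prometheus2/prometheus.yml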
docker/blocks/prometheus_random_data/Dockerfile (new file)
@@ -0,0 +1,18 @@
# This Dockerfile builds an image for a client_golang example.

# Builder image, where we build the example.
FROM golang:1.9.0 AS builder
# Download prometheus/client_golang/examples/random first
RUN go get github.com/prometheus/client_golang/examples/random
WORKDIR /go/src/github.com/prometheus/client_golang
WORKDIR /go/src/github.com/prometheus/client_golang/prometheus
RUN go get -d
WORKDIR /go/src/github.com/prometheus/client_golang/examples/random
RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'

# Final image.
FROM scratch
LABEL maintainer "The Prometheus Authors <prometheus-developers@googlegroups.com>"
COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random .
EXPOSE 8080
ENTRYPOINT ["/random"]
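The multi-stage build above produces a scratch image containing only the example binary. Building and smoke-testing it locally might look like this; the image tag is arbitrary:

    docker build -t prometheus-random-data docker/blocks/prometheus_random_data
    docker run -d -p 8080:8080 prometheus-random-data
    curl -s http://localhost:8080/metrics | head   # the example exposes random metrics here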
@@ -1,5 +1,5 @@
FROM centos:centos7
MAINTAINER Przemyslaw Ozgo <linux@ozgo.info>
LABEL maintainer="Przemyslaw Ozgo <linux@ozgo.info>"

RUN \
  yum update -y && \
docker/blocks/smtp/docker-compose.yaml (new file)
@@ -0,0 +1,4 @@
  snmpd:
    image: namshi/smtp
    ports:
      - "25:25"
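The smtp block just exposes a relay on port 25 for local testing. A minimal smoke test, assuming nc is installed, is to read the SMTP banner:

    printf 'EHLO localhost\r\nQUIT\r\n' | nc 127.0.0.1 25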
(deleted file)
@@ -1,4 +0,0 @@
snmpd:
  image: namshi/smtp
  ports:
    - "25:25"
docker/compose_header.yml (new file)
@@ -0,0 +1,2 @@
version: "2"
services:
@@ -7,8 +7,9 @@ template_dir=templates
grafana_config_file=conf.tmp
grafana_config=config

fig_file=docker-compose.yml
fig_config=fig
compose_header_file=compose_header.yml
fig_file=docker-compose.yaml
fig_config=docker-compose.yaml

if [ "$#" == 0 ]; then
  blocks=`ls $blocks_dir`
@@ -23,13 +24,16 @@ if [ "$#" == 0 ]; then
  exit 0
fi

for file in $gogs_config_file $fig_file; do
for file in $grafana_config_file $fig_file; do
  if [ -e $file ]; then
    echo "Deleting $file"
    rm $file
  fi
done

echo "Adding Compose header to $fig_file"
cat $compose_header_file >> $fig_file

for dir in $@; do
  current_dir=$blocks_dir/$dir
  if [ ! -d "$current_dir" ]; then
@@ -45,7 +49,7 @@ for dir in $@; do

  if [ -e $current_dir/$fig_config ]; then
    echo "Adding $current_dir/$fig_config to $fig_file"
    cat $current_dir/fig >> $fig_file
    cat $current_dir/$fig_config >> $fig_file
    echo "" >> $fig_file
  fi
done
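After these changes the script emits a Compose v2 file: it writes compose_header.yml first and then appends each selected block's docker-compose.yaml fragment. A typical invocation, assuming it is run from the docker/ directory as the relative blocks/ paths suggest, would be:

    cd docker
    ./create_docker_compose.sh postgres prometheus   # generate docker-compose.yaml from the chosen blocks
    docker-compose up -d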
@@ -9,5 +9,6 @@ FROM grafana/docs-base:latest

COPY config.toml /site
COPY awsconfig /site
COPY versions.json /site/static/js

VOLUME ["/site/content"]
@@ -1 +1 @@
v4.3
v5.0
Some files were not shown because too many files have changed in this diff.