From 1d2eb0f903df0c9225f5a5233fa822b1af8e425b Mon Sep 17 00:00:00 2001
From: Ben Oswald
Date: Sun, 5 Jun 2016 16:19:53 +0200
Subject: [PATCH 01/17] restart grafana after upgrade

The current script only tries to start the already running grafana
process after an upgrade. This leads to errors because the js and css
hashes change while grafana keeps serving the old assets. To load the
new sources we need to restart grafana after an upgrade.
---
 packaging/deb/control/postinst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packaging/deb/control/postinst b/packaging/deb/control/postinst
index b93c8433490..37c0eb54138 100755
--- a/packaging/deb/control/postinst
+++ b/packaging/deb/control/postinst
@@ -10,9 +10,9 @@ startGrafana() {
     /bin/systemctl start grafana-server
   elif [ -x "/etc/init.d/grafana-server" ]; then
     if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
-      invoke-rc.d grafana-server start || true
+      invoke-rc.d grafana-server restart || true
     else
-      /etc/init.d/grafana-server start || true
+      /etc/init.d/grafana-server restart || true
     fi
   fi
 }

From 51878aeca880352608a5addd115e8ccbbd49d223 Mon Sep 17 00:00:00 2001
From: Ben Oswald
Date: Sun, 5 Jun 2016 16:27:57 +0200
Subject: [PATCH 02/17] change start to restart also for systemctl

I forgot to update the systemctl start call to restart in my first
commit.
---
 packaging/deb/control/postinst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packaging/deb/control/postinst b/packaging/deb/control/postinst
index 37c0eb54138..425a7319e62 100755
--- a/packaging/deb/control/postinst
+++ b/packaging/deb/control/postinst
@@ -7,7 +7,7 @@ set -e
 startGrafana() {
   if [ -x /bin/systemctl ]; then
     /bin/systemctl daemon-reload
-    /bin/systemctl start grafana-server
+    /bin/systemctl restart grafana-server
   elif [ -x "/etc/init.d/grafana-server" ]; then
     if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
       invoke-rc.d grafana-server restart || true

From 975b53b31845e596ab2f7093f2362901797197c9 Mon Sep 17 00:00:00 2001
From: Mitsuhiro Tanda
Date: Mon, 6 Jun 2016 13:49:49 +0900
Subject: [PATCH 03/17] enable profiling in development mode

---
 public/app/core/components/grafana_app.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/public/app/core/components/grafana_app.ts b/public/app/core/components/grafana_app.ts
index 0a2e49e5d72..bfa0720fd71 100644
--- a/public/app/core/components/grafana_app.ts
+++ b/public/app/core/components/grafana_app.ts
@@ -17,7 +17,7 @@ export class GrafanaCtrl {

     $scope._ = _;

-    $rootScope.profilingEnabled = store.getBool('profilingEnabled');
+    $rootScope.profilingEnabled = store.getBool('profilingEnabled') || config.buildInfo.env === 'development';
     $rootScope.performance = { loadStart: new Date().getTime() };
     $rootScope.appSubUrl = config.appSubUrl;

From 83c76981b2628c039f0e83cdb67f5b2348fbffa7 Mon Sep 17 00:00:00 2001
From: Mitsuhiro Tanda
Date: Mon, 6 Jun 2016 14:24:31 +0900
Subject: [PATCH 04/17] fix profiling code

---
 public/app/features/dashboard/dashboardCtrl.js  |  8 ++++++++
 public/app/features/dashboard/viewStateSrv.js   |  7 +++++++
 public/app/features/panel/metrics_panel_ctrl.ts |  1 -
 public/app/features/panel/panel_ctrl.ts         | 17 +++++++++++++++++
 4 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/public/app/features/dashboard/dashboardCtrl.js b/public/app/features/dashboard/dashboardCtrl.js
index b6702631155..9f3e6da998f 100644
--- a/public/app/features/dashboard/dashboardCtrl.js
+++ b/public/app/features/dashboard/dashboardCtrl.js
@@ -60,6 +60,14 @@ function (angular, $, 
config, moment) { $scope.updateSubmenuVisibility(); $scope.setWindowTitleAndTheme(); + if ($scope.profilingEnabled) { + $scope.performance.panels = []; + $scope.performance.panelCount = 0; + $scope.dashboard.rows.forEach(function(row) { + $scope.performance.panelCount += row.panels.length; + }); + } + $scope.appEvent("dashboard-loaded", $scope.dashboard); }).catch(function(err) { if (err.data && err.data.message) { err.message = err.data.message; } diff --git a/public/app/features/dashboard/viewStateSrv.js b/public/app/features/dashboard/viewStateSrv.js index 2138dd37438..2a833b003c9 100644 --- a/public/app/features/dashboard/viewStateSrv.js +++ b/public/app/features/dashboard/viewStateSrv.js @@ -51,6 +51,13 @@ function (angular, _, $) { $scope.onAppEvent('panel-initialized', function(evt, payload) { self.registerPanel(payload.scope); + + if ($scope.profilingEnabled) { + $scope.performance.panelsInitialized++; + if ($scope.performance.panelsInitialized === $scope.performance.panelCount) { + $scope.performance.allPanelsInitialized = new Date().getTime(); + } + } }); this.update(this.getQueryStringState()); diff --git a/public/app/features/panel/metrics_panel_ctrl.ts b/public/app/features/panel/metrics_panel_ctrl.ts index 0bccee8ff35..edc744a5049 100644 --- a/public/app/features/panel/metrics_panel_ctrl.ts +++ b/public/app/features/panel/metrics_panel_ctrl.ts @@ -95,7 +95,6 @@ class MetricsPanelCtrl extends PanelCtrl { } setTimeQueryStart() { - this.timing = {}; this.timing.queryStart = new Date().getTime(); } diff --git a/public/app/features/panel/panel_ctrl.ts b/public/app/features/panel/panel_ctrl.ts index e47e3444564..df44559595b 100644 --- a/public/app/features/panel/panel_ctrl.ts +++ b/public/app/features/panel/panel_ctrl.ts @@ -31,6 +31,7 @@ export class PanelCtrl { height: any; containerHeight: any; events: Emitter; + timing: any; constructor($scope, $injector) { this.$injector = $injector; @@ -38,6 +39,7 @@ export class PanelCtrl { this.$timeout = $injector.get('$timeout'); this.editorTabIndex = 0; this.events = new Emitter(); + this.timing = {}; var plugin = config.panels[this.panel.type]; if (plugin) { @@ -58,6 +60,20 @@ export class PanelCtrl { renderingCompleted() { this.$scope.$root.performance.panelsRendered++; + this.timing.renderEnd = new Date().getTime(); + if (this.$scope.$root.profilingEnabled) { + this.$scope.$root.performance.panels.push({ + panelId: this.panel.id, + query: this.timing.queryEnd - this.timing.queryStart, + render: this.timing.renderEnd - this.timing.renderStart, + }); + + if (this.$scope.$root.performance.panelsRendered === this.$scope.$root.performance.panelCount) { + this.$scope.$root.performance.allPanelsRendered = new Date().getTime(); + var timeTaken = this.$scope.$root.performance.allPanelsRendered - this.$scope.$root.performance.dashboardLoadStart; + console.log("Dashboard::Performance - All panels rendered in " + timeTaken + " ms"); + } + } } refresh() { @@ -169,6 +185,7 @@ export class PanelCtrl { } this.calculatePanelHeight(); + this.timing.renderStart = new Date().getTime(); this.events.emit('render', payload); } From b28368c859a7b7c6b3155f1e5df8b1b1d52139f6 Mon Sep 17 00:00:00 2001 From: Mitsuhiro Tanda Date: Tue, 31 May 2016 01:41:42 +0900 Subject: [PATCH 05/17] cache panel width --- public/app/plugins/panel/graph/graph.js | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/public/app/plugins/panel/graph/graph.js b/public/app/plugins/panel/graph/graph.js index 86ad3b5f025..9f0c3ff06eb 100755 --- 
a/public/app/plugins/panel/graph/graph.js +++ b/public/app/plugins/panel/graph/graph.js @@ -31,6 +31,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { var sortedSeries; var legendSideLastValue = null; var rootScope = scope.$root; + var panelWidth = 0; rootScope.onAppEvent('setCrosshair', function(event, info) { // do not need to to this if event is from this panel @@ -104,7 +105,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { return true; } - if (elem.width() === 0) { + if (panelWidth === 0) { return true; } } @@ -159,6 +160,16 @@ function (angular, $, moment, _, kbn, GraphTooltip) { // Function for rendering panel function render_panel() { + if (!rootScope.panelWidthCache) { + rootScope.panelWidthCache = {}; + } + if (rootScope.panelWidthCache[panel.span]) { + panelWidth = rootScope.panelWidthCache[panel.span]; + } else { + panelWidth = elem.width(); + rootScope.panelWidthCache[panel.span] = panelWidth; + } + if (shouldAbortRender()) { return; } @@ -276,7 +287,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { } function addTimeAxis(options) { - var ticks = elem.width() / 100; + var ticks = panelWidth / 100; var min = _.isUndefined(ctrl.range.from) ? null : ctrl.range.from.valueOf(); var max = _.isUndefined(ctrl.range.to) ? null : ctrl.range.to.valueOf(); @@ -444,7 +455,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { } function render_panel_as_graphite_png(url) { - url += '&width=' + elem.width(); + url += '&width=' + panelWidth; url += '&height=' + elem.css('height').replace('px', ''); url += '&bgcolor=1f1f1f'; // @grayDarker & @grafanaPanelBackground url += '&fgcolor=BBBFC2'; // @textColor & @grayLighter From 487b2089a9e5fb5cf55c611362febe62a79ea13a Mon Sep 17 00:00:00 2001 From: Mitsuhiro Tanda Date: Tue, 7 Jun 2016 14:28:08 +0900 Subject: [PATCH 06/17] cache label width --- public/app/plugins/panel/graph/graph.js | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/public/app/plugins/panel/graph/graph.js b/public/app/plugins/panel/graph/graph.js index 9f0c3ff06eb..3097bc6eac6 100755 --- a/public/app/plugins/panel/graph/graph.js +++ b/public/app/plugins/panel/graph/graph.js @@ -110,6 +110,23 @@ function (angular, $, moment, _, kbn, GraphTooltip) { } } + function getLabelWidth(type, text, elem) { + var labelWidth = 0; + if (!rootScope.labelWidthCache) { + rootScope.labelWidthCache = {}; + } + if (!rootScope.labelWidthCache[type]) { + rootScope.labelWidthCache[type] = {}; + } + if (rootScope.labelWidthCache[type][text]) { + labelWidth = rootScope.labelWidthCache[type][text]; + } else { + labelWidth = elem.width(); + rootScope.labelWidthCache[type][text] = labelWidth; + } + return labelWidth; + } + function drawHook(plot) { // Update legend values var yaxis = plot.getYAxes(); @@ -138,7 +155,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { .text(panel.yaxes[0].label) .appendTo(elem); - yaxisLabel.css("margin-top", yaxisLabel.width() / 2); + yaxisLabel[0].style.marginTop = (getLabelWidth('left', panel.yaxes[0].label, yaxisLabel) / 2) + 'px'; } // add right axis labels @@ -147,7 +164,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { .text(panel.yaxes[1].label) .appendTo(elem); - rightLabel.css("margin-top", rightLabel.width() / 2); + rightLabel[0].style.marginTop = (getLabelWidth('right', panel.yaxes[1].label, rightLabel) / 2) + 'px'; } } From b76b42f209bb4fc920d77d956982bbb59464bfea Mon Sep 17 00:00:00 2001 From: bergquist Date: Tue, 7 Jun 2016 12:05:39 +0200 Subject: [PATCH 07/17] docs(changelog): 
add note about deb post install script --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b3b6371b324..4afea5a1e61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ * **Theme**: Add default theme to config file [#5011](https://github.com/grafana/grafana/pull/5011) * **Page Footer**: Added page footer with links to docs, shows Grafana version and info if new version is available, closes [#4889](https://github.com/grafana/grafana/pull/4889) * **InfluxDB**: Add spread function, closes [#5211](https://github.com/grafana/grafana/issues/5211) +* **Scripts**: Use restart instead of start for deb package script, closes [#5282](https://github.com/grafana/grafana/pull/5282) # 3.0.4 Patch release (2016-05-25) * **Panel**: Fixed blank dashboard issue when switching to other dashboard while in fullscreen edit mode, fixes [#5163](https://github.com/grafana/grafana/pull/5163) From 93fdc18fd43e69dbd291314c382e35b27de0a5e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 7 Jun 2016 15:59:50 +0200 Subject: [PATCH 08/17] feat(logging): fixed dependencies --- Godeps/Godeps.json | 22 +- .../src/github.com/go-stack/stack/.travis.yml | 16 + .../src/github.com/go-stack/stack/LICENSE.md | 13 + .../src/github.com/go-stack/stack/README.md | 38 + .../src/github.com/go-stack/stack/stack.go | 349 ++++ .../inconshreveable/log15/.travis.yml | 10 + .../inconshreveable/log15/CONTRIBUTORS | 11 + .../github.com/inconshreveable/log15/LICENSE | 13 + .../inconshreveable/log15/README.md | 70 + .../github.com/inconshreveable/log15/doc.go | 333 ++++ .../inconshreveable/log15/format.go | 257 +++ .../inconshreveable/log15/handler.go | 356 ++++ .../inconshreveable/log15/handler_go13.go | 26 + .../inconshreveable/log15/handler_go14.go | 23 + .../inconshreveable/log15/logger.go | 208 +++ .../github.com/inconshreveable/log15/root.go | 67 + .../inconshreveable/log15/syslog.go | 55 + .../inconshreveable/log15/term/LICENSE | 21 + .../log15/term/terminal_appengine.go | 13 + .../log15/term/terminal_darwin.go | 12 + .../log15/term/terminal_freebsd.go | 18 + .../log15/term/terminal_linux.go | 14 + .../log15/term/terminal_notwindows.go | 20 + .../log15/term/terminal_openbsd.go | 7 + .../log15/term/terminal_windows.go | 26 + .../influxdata/influxdb/client/README.md | 267 --- .../influxdata/influxdb/client/influxdb.go | 789 --------- .../influxdata/influxdb/models/consistency.go | 46 - .../influxdata/influxdb/models/points.go | 1576 ----------------- .../influxdata/influxdb/models/rows.go | 60 - .../influxdata/influxdb/models/time.go | 51 - .../influxdata/influxdb/pkg/escape/bytes.go | 53 - .../influxdata/influxdb/pkg/escape/strings.go | 34 - pkg/services/sqlstore/sqlstore.go | 2 +- 34 files changed, 1988 insertions(+), 2888 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md create mode 100644 Godeps/_workspace/src/github.com/go-stack/stack/README.md create mode 100644 Godeps/_workspace/src/github.com/go-stack/stack/stack.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/README.md create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go 
create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/format.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/root.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go create mode 100644 Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go delete mode 100644 Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ee17ad95d04..c9afb49850b 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -205,6 +205,11 @@ "Comment": "v1.2-171-g267b128", "Rev": "267b128680c46286b9ca13475c3cca5de8f79bd7" }, + { + "ImportPath": "github.com/go-stack/stack", + "Comment": "v1.5.2", + "Rev": "100eb0c0a9c5b306ca2fb4f165df21d80ada4b82" + }, { "ImportPath": "github.com/go-xorm/core", "Comment": "v0.4.4-7-g9e608f7", @@ -228,19 +233,14 @@ "Rev": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38" }, { - "ImportPath": "github.com/influxdata/influxdb/client", - "Comment": "v0.13.0-74-g2c9d0fc", - "Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d" + "ImportPath": "github.com/inconshreveable/log15", + "Comment": "v2.3-61-g20bca5a", + "Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1" }, { - "ImportPath": "github.com/influxdata/influxdb/models", - "Comment": "v0.13.0-74-g2c9d0fc", - "Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d" - }, - { - "ImportPath": "github.com/influxdata/influxdb/pkg/escape", - "Comment": "v0.13.0-74-g2c9d0fc", - "Rev": "2c9d0fcc04eba3ffc88f2aafe8466874e384d80d" + "ImportPath": "github.com/inconshreveable/log15/term", + "Comment": "v2.3-61-g20bca5a", + "Rev": "20bca5a7a57282e241fac83ec9ea42538027f1c1" }, { "ImportPath": "github.com/jmespath/go-jmespath", diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml b/Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml 
new file mode 100644 index 00000000000..d5e5dd52da0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-stack/stack/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + +script: + - goveralls -service=travis-ci diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md b/Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md new file mode 100644 index 00000000000..c8ca66c5ede --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-stack/stack/LICENSE.md @@ -0,0 +1,13 @@ +Copyright 2014 Chris Hines + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/README.md b/Godeps/_workspace/src/github.com/go-stack/stack/README.md new file mode 100644 index 00000000000..f11ccccaa43 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-stack/stack/README.md @@ -0,0 +1,38 @@ +[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) +[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) +[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) +[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) + +# stack + +Package stack implements utilities to capture, manipulate, and format call +stacks. It provides a simpler API than package runtime. + +The implementation takes care of the minutia and special cases of interpreting +the program counter (pc) values returned by runtime.Callers. + +## Versioning + +Package stack publishes releases via [semver](http://semver.org/) compatible Git +tags prefixed with a single 'v'. The master branch always contains the latest +release. The develop branch contains unreleased commits. + +## Formatting + +Package stack's types implement fmt.Formatter, which provides a simple and +flexible way to declaratively configure formatting when used with logging or +error tracking packages. + +```go +func DoTheThing() { + c := stack.Caller(0) + log.Print(c) // "source.go:10" + log.Printf("%+v", c) // "pkg/path/source.go:10" + log.Printf("%n", c) // "DoTheThing" + + s := stack.Trace().TrimRuntime() + log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" +} +``` + +See the docs for all of the supported formatting options. diff --git a/Godeps/_workspace/src/github.com/go-stack/stack/stack.go b/Godeps/_workspace/src/github.com/go-stack/stack/stack.go new file mode 100644 index 00000000000..a614eeebf16 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-stack/stack/stack.go @@ -0,0 +1,349 @@ +// Package stack implements utilities to capture, manipulate, and format call +// stacks. It provides a simpler API than package runtime. 
+// +// The implementation takes care of the minutia and special cases of +// interpreting the program counter (pc) values returned by runtime.Callers. +// +// Package stack's types implement fmt.Formatter, which provides a simple and +// flexible way to declaratively configure formatting when used with logging +// or error tracking packages. +package stack + +import ( + "bytes" + "errors" + "fmt" + "io" + "runtime" + "strconv" + "strings" +) + +// Call records a single function invocation from a goroutine stack. +type Call struct { + fn *runtime.Func + pc uintptr +} + +// Caller returns a Call from the stack of the current goroutine. The argument +// skip is the number of stack frames to ascend, with 0 identifying the +// calling function. +func Caller(skip int) Call { + var pcs [2]uintptr + n := runtime.Callers(skip+1, pcs[:]) + + var c Call + + if n < 2 { + return c + } + + c.pc = pcs[1] + if runtime.FuncForPC(pcs[0]) != sigpanic { + c.pc-- + } + c.fn = runtime.FuncForPC(c.pc) + return c +} + +// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). +func (c Call) String() string { + return fmt.Sprint(c) +} + +// MarshalText implements encoding.TextMarshaler. It formats the Call the same +// as fmt.Sprintf("%v", c). +func (c Call) MarshalText() ([]byte, error) { + if c.fn == nil { + return nil, ErrNoFunc + } + buf := bytes.Buffer{} + fmt.Fprint(&buf, c) + return buf.Bytes(), nil +} + +// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely +// cause is a Call with the zero value. +var ErrNoFunc = errors.New("no call stack information") + +// Format implements fmt.Formatter with support for the following verbs. +// +// %s source file +// %d line number +// %n function name +// %v equivalent to %s:%d +// +// It accepts the '+' and '#' flags for most of the verbs as follows. +// +// %+s path of source file relative to the compile time GOPATH +// %#s full path of source file +// %+n import path qualified function name +// %+v equivalent to %+s:%d +// %#v equivalent to %#s:%d +func (c Call) Format(s fmt.State, verb rune) { + if c.fn == nil { + fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) + return + } + + switch verb { + case 's', 'v': + file, line := c.fn.FileLine(c.pc) + switch { + case s.Flag('#'): + // done + case s.Flag('+'): + file = file[pkgIndex(file, c.fn.Name()):] + default: + const sep = "/" + if i := strings.LastIndex(file, sep); i != -1 { + file = file[i+len(sep):] + } + } + io.WriteString(s, file) + if verb == 'v' { + buf := [7]byte{':'} + s.Write(strconv.AppendInt(buf[:1], int64(line), 10)) + } + + case 'd': + _, line := c.fn.FileLine(c.pc) + buf := [6]byte{} + s.Write(strconv.AppendInt(buf[:0], int64(line), 10)) + + case 'n': + name := c.fn.Name() + if !s.Flag('+') { + const pathSep = "/" + if i := strings.LastIndex(name, pathSep); i != -1 { + name = name[i+len(pathSep):] + } + const pkgSep = "." + if i := strings.Index(name, pkgSep); i != -1 { + name = name[i+len(pkgSep):] + } + } + io.WriteString(s, name) + } +} + +// PC returns the program counter for this call frame; multiple frames may +// have the same PC value. +func (c Call) PC() uintptr { + return c.pc +} + +// name returns the import path qualified name of the function containing the +// call. +func (c Call) name() string { + if c.fn == nil { + return "???" + } + return c.fn.Name() +} + +func (c Call) file() string { + if c.fn == nil { + return "???" 
+ } + file, _ := c.fn.FileLine(c.pc) + return file +} + +func (c Call) line() int { + if c.fn == nil { + return 0 + } + _, line := c.fn.FileLine(c.pc) + return line +} + +// CallStack records a sequence of function invocations from a goroutine +// stack. +type CallStack []Call + +// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). +func (cs CallStack) String() string { + return fmt.Sprint(cs) +} + +var ( + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + spaceBytes = []byte(" ") +) + +// MarshalText implements encoding.TextMarshaler. It formats the CallStack the +// same as fmt.Sprintf("%v", cs). +func (cs CallStack) MarshalText() ([]byte, error) { + buf := bytes.Buffer{} + buf.Write(openBracketBytes) + for i, pc := range cs { + if pc.fn == nil { + return nil, ErrNoFunc + } + if i > 0 { + buf.Write(spaceBytes) + } + fmt.Fprint(&buf, pc) + } + buf.Write(closeBracketBytes) + return buf.Bytes(), nil +} + +// Format implements fmt.Formatter by printing the CallStack as square brackets +// ([, ]) surrounding a space separated list of Calls each formatted with the +// supplied verb and options. +func (cs CallStack) Format(s fmt.State, verb rune) { + s.Write(openBracketBytes) + for i, pc := range cs { + if i > 0 { + s.Write(spaceBytes) + } + pc.Format(s, verb) + } + s.Write(closeBracketBytes) +} + +// findSigpanic intentionally executes faulting code to generate a stack trace +// containing an entry for runtime.sigpanic. +func findSigpanic() *runtime.Func { + var fn *runtime.Func + var p *int + func() int { + defer func() { + if p := recover(); p != nil { + var pcs [512]uintptr + n := runtime.Callers(2, pcs[:]) + for _, pc := range pcs[:n] { + f := runtime.FuncForPC(pc) + if f.Name() == "runtime.sigpanic" { + fn = f + break + } + } + } + }() + // intentional nil pointer dereference to trigger sigpanic + return *p + }() + return fn +} + +var sigpanic = findSigpanic() + +// Trace returns a CallStack for the current goroutine with element 0 +// identifying the calling function. +func Trace() CallStack { + var pcs [512]uintptr + n := runtime.Callers(2, pcs[:]) + cs := make([]Call, n) + + for i, pc := range pcs[:n] { + pcFix := pc + if i > 0 && cs[i-1].fn != sigpanic { + pcFix-- + } + cs[i] = Call{ + fn: runtime.FuncForPC(pcFix), + pc: pcFix, + } + } + + return cs +} + +// TrimBelow returns a slice of the CallStack with all entries below c +// removed. +func (cs CallStack) TrimBelow(c Call) CallStack { + for len(cs) > 0 && cs[0].pc != c.pc { + cs = cs[1:] + } + return cs +} + +// TrimAbove returns a slice of the CallStack with all entries above c +// removed. +func (cs CallStack) TrimAbove(c Call) CallStack { + for len(cs) > 0 && cs[len(cs)-1].pc != c.pc { + cs = cs[:len(cs)-1] + } + return cs +} + +// pkgIndex returns the index that results in file[index:] being the path of +// file relative to the compile time GOPATH, and file[:index] being the +// $GOPATH/src/ portion of file. funcName must be the name of a function in +// file as returned by runtime.Func.Name. +func pkgIndex(file, funcName string) int { + // As of Go 1.6.2 there is no direct way to know the compile time GOPATH + // at runtime, but we can infer the number of path segments in the GOPATH. + // We note that runtime.Func.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. 
Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // file[:idx] == /home/user/src/ + // file[idx:] == pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired result for file[idx:]. We count separators from the + // end of the file path until it finds two more than in the function name + // and then move one character forward to preserve the initial path + // segment without a leading separator. + const sep = "/" + i := len(file) + for n := strings.Count(funcName, sep) + 2; n > 0; n-- { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + return i + len(sep) +} + +var runtimePath string + +func init() { + var pcs [1]uintptr + runtime.Callers(0, pcs[:]) + fn := runtime.FuncForPC(pcs[0]) + file, _ := fn.FileLine(pcs[0]) + + idx := pkgIndex(file, fn.Name()) + + runtimePath = file[:idx] + if runtime.GOOS == "windows" { + runtimePath = strings.ToLower(runtimePath) + } +} + +func inGoroot(c Call) bool { + file := c.file() + if len(file) == 0 || file[0] == '?' { + return true + } + if runtime.GOOS == "windows" { + file = strings.ToLower(file) + } + return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") +} + +// TrimRuntime returns a slice of the CallStack with the topmost entries from +// the go runtime removed. It considers any calls originating from unknown +// files, files under GOROOT, or _testmain.go as part of the runtime. +func (cs CallStack) TrimRuntime() CallStack { + for len(cs) > 0 && inGoroot(cs[len(cs)-1]) { + cs = cs[:len(cs)-1] + } + return cs +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml b/Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml new file mode 100644 index 00000000000..ff5d75e72b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS b/Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS new file mode 100644 index 00000000000..a0866713be0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/CONTRIBUTORS @@ -0,0 +1,11 @@ +Contributors to log15: + +- Aaron L +- Alan Shreve +- Chris Hines +- Ciaran Downey +- Dmitry Chestnykh +- Evan Shaw +- Péter Szilágyi +- Trevor Gattis +- Vincent Vanackere diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE b/Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE new file mode 100644 index 00000000000..5f0d1fb6a7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/README.md b/Godeps/_workspace/src/github.com/inconshreveable/log15/README.md new file mode 100644 index 00000000000..8ccd5a38d05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/README.md @@ -0,0 +1,70 @@ +![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png) + +# log15 [![godoc reference](https://godoc.org/github.com/inconshreveable/log15?status.png)](https://godoc.org/github.com/inconshreveable/log15) [![Build Status](https://travis-ci.org/inconshreveable/log15.svg?branch=master)](https://travis-ci.org/inconshreveable/log15) + +Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package. + +## Features +- A simple, easy-to-understand API +- Promotes structured logging by encouraging use of key/value pairs +- Child loggers which inherit and add their own private context +- Lazy evaluation of expensive operations +- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API. +- Color terminal support +- Built-in support for logging to files, streams, syslog, and the network +- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more + +## Versioning +The API of the master branch of log15 should always be considered unstable. If you want to rely on a stable API, +you must vendor the library. + +## Importing + +```go +import log "github.com/inconshreveable/log15" +``` + +## Examples + +```go +// all loggers can have key/value context +srvlog := log.New("module", "app/server") + +// all log messages can have key/value context +srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate) + +// child loggers with inherited context +connlog := srvlog.New("raddr", c.RemoteAddr()) +connlog.Info("connection open") + +// lazy evaluation +connlog.Debug("ping remote", "latency", log.Lazy{pingRemote}) + +// flexible configuration +srvlog.SetHandler(log.MultiHandler( + log.StreamHandler(os.Stderr, log.LogfmtFormat()), + log.LvlFilterHandler( + log.LvlError, + log.Must.FileHandler("errors.json", log.JsonFormat()))) +``` + +## Breaking API Changes +The following commits broke API stability. This reference is intended to help you understand the consequences of updating to a newer version +of log15. + +- 57a084d014d4150152b19e4e531399a7145d1540 - Added a `Get()` method to the `Logger` interface to retrieve the current handler +- 93404652ee366648fa622b64d1e2b67d75a3094a - `Record` field `Call` changed to `stack.Call` with switch to `github.com/go-stack/stack` +- a5e7613673c73281f58e15a87d2cf0cf111e8152 - Restored `syslog.Priority` argument to the `SyslogXxx` handler constructors + +## FAQ + +### The varargs style is brittle and error prone! Can I have type safety please? 
+Yes. Use `log.Ctx`: + +```go +srvlog := log.New(log.Ctx{"module": "app/server"}) +srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate}) +``` + +## License +Apache diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go new file mode 100644 index 00000000000..a5cc87419c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/doc.go @@ -0,0 +1,333 @@ +/* +Package log15 provides an opinionated, simple toolkit for best-practice logging that is +both human and machine readable. It is modeled after the standard library's io and net/http +packages. + +This package enforces you to only log key/value pairs. Keys must be strings. Values may be +any type that you like. The default output format is logfmt, but you may also choose to use +JSON instead if that suits you. Here's how you log: + + log.Info("page accessed", "path", r.URL.Path, "user_id", user.id) + +This will output a line that looks like: + + lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9 + +Getting Started + +To get started, you'll want to import the library: + + import log "github.com/inconshreveable/log15" + + +Now you're ready to start logging: + + func main() { + log.Info("Program starting", "args", os.Args()) + } + + +Convention + +Because recording a human-meaningful message is common and good practice, the first argument to every +logging method is the value to the *implicit* key 'msg'. + +Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so +will the current timestamp with key 't'. + +You may supply any additional context as a set of key/value pairs to the logging function. log15 allows +you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for +logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate +in the variadic argument list: + + log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val) + +If you really do favor your type-safety, you may choose to pass a log.Ctx instead: + + log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val}) + + +Context loggers + +Frequently, you want to add context to a logger so that you can track actions associated with it. An http +request is a good example. You can easily create new loggers that have context that is automatically included +with each log line: + + requestlogger := log.New("path", r.URL.Path) + + // later + requestlogger.Debug("db txn commit", "duration", txnTimer.Finish()) + +This will output a log line that includes the path context that is attached to the logger: + + lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12 + + +Handlers + +The Handler interface defines where log lines are printed to and how they are formated. Handler is a +single interface that is inspired by net/http's handler interface: + + type Handler interface { + Log(r *Record) error + } + + +Handlers can filter records, format them, or dispatch to multiple other Handlers. +This package implements a number of Handlers for common logging patterns that are +easily composed to create flexible, custom logging structures. + +Here's an example handler that prints logfmt output to Stdout: + + handler := log.StreamHandler(os.Stdout, log.LogfmtFormat()) + +Here's an example handler that defers to two other handlers. 
One handler only prints records +from the rpc package in logfmt to standard out. The other prints records at Error level +or above in JSON formatted output to the file /var/log/service.json + + handler := log.MultiHandler( + log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())), + log.MatchFilterHandler("pkg", "app/rpc" log.StdoutHandler()) + ) + +Logging File Names and Line Numbers + +This package implements three Handlers that add debugging information to the +context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's +an example that adds the source file and line number of each logging call to +the context. + + h := log.CallerFileHandler(log.StdoutHandler()) + log.Root().SetHandler(h) + ... + log.Error("open file", "err", err) + +This will output a line that looks like: + + lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42 + +Here's an example that logs the call stack rather than just the call site. + + h := log.CallerStackHandler("%+v", log.StdoutHandler()) + log.Root().SetHandler(h) + ... + log.Error("open file", "err", err) + +This will output a line that looks like: + + lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]" + +The "%+v" format instructs the handler to include the path of the source file +relative to the compile time GOPATH. The github.com/go-stack/stack package +documents the full list of formatting verbs and modifiers available. + +Custom Handlers + +The Handler interface is so simple that it's also trivial to write your own. Let's create an +example handler which tries to write to one handler, but if that fails it falls back to +writing to another handler and includes the error that it encountered when trying to write +to the primary. This might be useful when trying to log over a network socket, but if that +fails you want to log those records to a file on disk. + + type BackupHandler struct { + Primary Handler + Secondary Handler + } + + func (h *BackupHandler) Log (r *Record) error { + err := h.Primary.Log(r) + if err != nil { + r.Ctx = append(ctx, "primary_err", err) + return h.Secondary.Log(r) + } + return nil + } + +This pattern is so useful that a generic version that handles an arbitrary number of Handlers +is included as part of this library called FailoverHandler. + +Logging Expensive Operations + +Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay +the price of computing them if you haven't turned up your logging level to a high level of detail. + +This package provides a simple type to annotate a logging operation that you want to be evaluated +lazily, just when it is about to be logged, so that it would not be evaluated if an upstream Handler +filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example: + + func factorRSAKey() (factors []int) { + // return the factors of a very large number + } + + log.Debug("factors", log.Lazy{factorRSAKey}) + +If this message is not logged for any reason (like logging at the Error level), then +factorRSAKey is never evaluated. + +Dynamic context values + +The same log.Lazy mechanism can be used to attach context to a logger which you want to be +evaluated when the message is logged, but not when the logger is created. 
For example, let's imagine +a game where you have Player objects: + + type Player struct { + name string + alive bool + log.Logger + } + +You always want to log a player's name and whether they're alive or dead, so when you create the player +object, you might do: + + p := &Player{name: name, alive: true} + p.Logger = log.New("name", p.name, "alive", p.alive) + +Only now, even after a player has died, the logger will still report they are alive because the logging +context is evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation +of whether the player is alive or not to each log message, so that the log records will reflect the player's +current state no matter when the log message is written: + + p := &Player{name: name, alive: true} + isAlive := func() bool { return p.alive } + player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive}) + +Terminal Format + +If log15 detects that stdout is a terminal, it will configure the default +handler for it (which is log.StdoutHandler) to use TerminalFormat. This format +logs records nicely for your terminal, including color-coded output based +on log level. + +Error Handling + +Becasuse log15 allows you to step around the type system, there are a few ways you can specify +invalid arguments to the logging functions. You could, for example, wrap something that is not +a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries +are typically the mechanism by which errors are reported, it would be onerous for the logging functions +to return errors. Instead, log15 handles errors by making these guarantees to you: + +- Any log record containing an error will still be printed with the error explained to you as part of the log record. + +- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily +(and if you like, automatically) detect if any of your logging calls are passing bad values. + +Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers +are encouraged to return errors only if they fail to write their log records out to an external source like if the +syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures +like the FailoverHandler. + +Library Use + +log15 is intended to be useful for library authors as a way to provide configurable logging to +users of their library. Best practice for use in a library is to always disable all output for your logger +by default and to provide a public Logger instance that consumers of your library can configure. Like so: + + package yourlib + + import "github.com/inconshreveable/log15" + + var Log = log.New() + + func init() { + Log.SetHandler(log.DiscardHandler()) + } + +Users of your library may then enable it if they like: + + import "github.com/inconshreveable/log15" + import "example.com/yourlib" + + func main() { + handler := // custom handler setup + yourlib.Log.SetHandler(handler) + } + +Best practices attaching logger context + +The ability to attach context to a logger is a powerful one. Where should you do it and why? +I favor embedding a Logger directly into any persistent object in my application and adding +unique, tracing context keys to it. For instance, imagine I am writing a web browser: + + type Tab struct { + url string + render *RenderingContext + // ... + + Logger + } + + func NewTab(url string) *Tab { + return &Tab { + // ... 
+ url: url, + + Logger: log.New("url", url), + } + } + +When a new tab is created, I assign a logger to it with the url of +the tab as context so it can easily be traced through the logs. +Now, whenever we perform any operation with the tab, we'll log with its +embedded logger and it will include the tab title automatically: + + tab.Debug("moved position", "idx", tab.idx) + +There's only one problem. What if the tab url changes? We could +use log.Lazy to make sure the current url is always written, but that +would mean that we couldn't trace a tab's full lifetime through our +logs after the user navigate to a new URL. + +Instead, think about what values to attach to your loggers the +same way you think about what to use as a key in a SQL database schema. +If it's possible to use a natural key that is unique for the lifetime of the +object, do so. But otherwise, log15's ext package has a handy RandId +function to let you generate what you might call "surrogate keys" +They're just random hex identifiers to use for tracing. Back to our +Tab example, we would prefer to set up our Logger like so: + + import logext "github.com/inconshreveable/log15/ext" + + t := &Tab { + // ... + url: url, + } + + t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl}) + return t + +Now we'll have a unique traceable identifier even across loading new urls, but +we'll still be able to see the tab's current url in the log messages. + +Must + +For all Handler functions which can return an error, there is a version of that +function which will return no error but panics on failure. They are all available +on the Must object. For example: + + log.Must.FileHandler("/path", log.JsonFormat) + log.Must.NetHandler("tcp", ":1234", log.JsonFormat) + +Inspiration and Credit + +All of the following excellent projects inspired the design of this library: + +code.google.com/p/log4go + +github.com/op/go-logging + +github.com/technoweenie/grohl + +github.com/Sirupsen/logrus + +github.com/kr/logfmt + +github.com/spacemonkeygo/spacelog + +golang's stdlib, notably io and net/http + +The Name + +https://xkcd.com/927/ + +*/ +package log15 diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/format.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/format.go new file mode 100644 index 00000000000..3468f3048f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/format.go @@ -0,0 +1,257 @@ +package log15 + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +const ( + timeFormat = "2006-01-02T15:04:05-0700" + termTimeFormat = "01-02|15:04:05" + floatFormat = 'f' + termMsgJust = 40 +) + +type Format interface { + Format(r *Record) []byte +} + +// FormatFunc returns a new Format object which uses +// the given function to perform record formatting. +func FormatFunc(f func(*Record) []byte) Format { + return formatFunc(f) +} + +type formatFunc func(*Record) []byte + +func (f formatFunc) Format(r *Record) []byte { + return f(r) +} + +// TerminalFormat formats log records optimized for human readability on +// a terminal with color-coded level output and terser human friendly timestamp. +// This format should only be used for interactive programs or while developing. +// +// [TIME] [LEVEL] MESAGE key=value key=value ... 
+// +// Example: +// +// [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002 +// +func TerminalFormat() Format { + return FormatFunc(func(r *Record) []byte { + var color = 0 + switch r.Lvl { + case LvlCrit: + color = 35 + case LvlError: + color = 31 + case LvlWarn: + color = 33 + case LvlInfo: + color = 32 + case LvlDebug: + color = 36 + } + + b := &bytes.Buffer{} + lvl := strings.ToUpper(r.Lvl.String()) + if color > 0 { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg) + } else { + fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg) + } + + // try to justify the log output for short messages + if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust { + b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg))) + } + + // print the keys logfmt style + logfmt(b, r.Ctx, color) + return b.Bytes() + }) +} + +// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable +// format for key/value pairs. +// +// For more details see: http://godoc.org/github.com/kr/logfmt +// +func LogfmtFormat() Format { + return FormatFunc(func(r *Record) []byte { + common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg} + buf := &bytes.Buffer{} + logfmt(buf, append(common, r.Ctx...), 0) + return buf.Bytes() + }) +} + +func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) { + for i := 0; i < len(ctx); i += 2 { + if i != 0 { + buf.WriteByte(' ') + } + + k, ok := ctx[i].(string) + v := formatLogfmtValue(ctx[i+1]) + if !ok { + k, v = errorKey, formatLogfmtValue(k) + } + + // XXX: we should probably check that all of your key bytes aren't invalid + if color > 0 { + fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v) + } else { + fmt.Fprintf(buf, "%s=%s", k, v) + } + } + + buf.WriteByte('\n') +} + +// JsonFormat formats log records as JSON objects separated by newlines. +// It is the equivalent of JsonFormatEx(false, true). +func JsonFormat() Format { + return JsonFormatEx(false, true) +} + +// JsonFormatEx formats log records as JSON objects. If pretty is true, +// records will be pretty-printed. If lineSeparated is true, records +// will be logged with a new line between each record. 
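+//
+// For example, a sketch of pretty-printed, line-separated output
+// (illustrative; the os.Stdout destination is an assumption, not part
+// of the upstream comment):
+//
+//	h := log.StreamHandler(os.Stdout, log.JsonFormatEx(true, true))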
+func JsonFormatEx(pretty, lineSeparated bool) Format { + jsonMarshal := json.Marshal + if pretty { + jsonMarshal = func(v interface{}) ([]byte, error) { + return json.MarshalIndent(v, "", " ") + } + } + + return FormatFunc(func(r *Record) []byte { + props := make(map[string]interface{}) + + props[r.KeyNames.Time] = r.Time + props[r.KeyNames.Lvl] = r.Lvl.String() + props[r.KeyNames.Msg] = r.Msg + + for i := 0; i < len(r.Ctx); i += 2 { + k, ok := r.Ctx[i].(string) + if !ok { + props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i]) + } + props[k] = formatJsonValue(r.Ctx[i+1]) + } + + b, err := jsonMarshal(props) + if err != nil { + b, _ = jsonMarshal(map[string]string{ + errorKey: err.Error(), + }) + return b + } + + if lineSeparated { + b = append(b, '\n') + } + + return b + }) +} + +func formatShared(value interface{}) (result interface{}) { + defer func() { + if err := recover(); err != nil { + if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() { + result = "nil" + } else { + panic(err) + } + } + }() + + switch v := value.(type) { + case time.Time: + return v.Format(timeFormat) + + case error: + return v.Error() + + case fmt.Stringer: + return v.String() + + default: + return v + } +} + +func formatJsonValue(value interface{}) interface{} { + value = formatShared(value) + switch value.(type) { + case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string: + return value + default: + return fmt.Sprintf("%+v", value) + } +} + +// formatValue formats a value for serialization +func formatLogfmtValue(value interface{}) string { + if value == nil { + return "nil" + } + + value = formatShared(value) + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case float32: + return strconv.FormatFloat(float64(v), floatFormat, 3, 64) + case float64: + return strconv.FormatFloat(v, floatFormat, 3, 64) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return fmt.Sprintf("%d", value) + case string: + return escapeString(v) + default: + return escapeString(fmt.Sprintf("%+v", value)) + } +} + +func escapeString(s string) string { + needQuotes := false + e := bytes.Buffer{} + e.WriteByte('"') + for _, r := range s { + if r <= ' ' || r == '=' || r == '"' { + needQuotes = true + } + + switch r { + case '\\', '"': + e.WriteByte('\\') + e.WriteByte(byte(r)) + case '\n': + e.WriteByte('\\') + e.WriteByte('n') + case '\r': + e.WriteByte('\\') + e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') + default: + e.WriteRune(r) + } + } + e.WriteByte('"') + start, stop := 0, e.Len() + if !needQuotes { + start, stop = 1, stop-1 + } + return string(e.Bytes()[start:stop]) +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go new file mode 100644 index 00000000000..43205608cc1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler.go @@ -0,0 +1,356 @@ +package log15 + +import ( + "fmt" + "io" + "net" + "os" + "reflect" + "sync" + + "github.com/go-stack/stack" +) + +// A Logger prints its log records by writing to a Handler. +// The Handler interface defines where and how log records are written. +// Handlers are composable, providing you great flexibility in combining +// them to achieve the logging structure that suits your applications. 
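+//
+// For example, a sketch that filters by level before writing logfmt to
+// stderr (an illustrative composition of the handlers defined below,
+// not an excerpt from the upstream docs):
+//
+//	h := log.LvlFilterHandler(log.LvlInfo,
+//		log.StreamHandler(os.Stderr, log.LogfmtFormat()))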
+type Handler interface { + Log(r *Record) error +} + +// FuncHandler returns a Handler that logs records with the given +// function. +func FuncHandler(fn func(r *Record) error) Handler { + return funcHandler(fn) +} + +type funcHandler func(r *Record) error + +func (h funcHandler) Log(r *Record) error { + return h(r) +} + +// StreamHandler writes log records to an io.Writer +// with the given format. StreamHandler can be used +// to easily begin writing log records to other +// outputs. +// +// StreamHandler wraps itself with LazyHandler and SyncHandler +// to evaluate Lazy objects and perform safe concurrent writes. +func StreamHandler(wr io.Writer, fmtr Format) Handler { + h := FuncHandler(func(r *Record) error { + _, err := wr.Write(fmtr.Format(r)) + return err + }) + return LazyHandler(SyncHandler(h)) +} + +// SyncHandler can be wrapped around a handler to guarantee that +// only a single Log operation can proceed at a time. It's necessary +// for thread-safe concurrent writes. +func SyncHandler(h Handler) Handler { + var mu sync.Mutex + return FuncHandler(func(r *Record) error { + defer mu.Unlock() + mu.Lock() + return h.Log(r) + }) +} + +// FileHandler returns a handler which writes log records to the give file +// using the given format. If the path +// already exists, FileHandler will append to the given file. If it does not, +// FileHandler will create the file with mode 0644. +func FileHandler(path string, fmtr Format) (Handler, error) { + f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + return closingHandler{f, StreamHandler(f, fmtr)}, nil +} + +// NetHandler opens a socket to the given address and writes records +// over the connection. +func NetHandler(network, addr string, fmtr Format) (Handler, error) { + conn, err := net.Dial(network, addr) + if err != nil { + return nil, err + } + + return closingHandler{conn, StreamHandler(conn, fmtr)}, nil +} + +// XXX: closingHandler is essentially unused at the moment +// it's meant for a future time when the Handler interface supports +// a possible Close() operation +type closingHandler struct { + io.WriteCloser + Handler +} + +func (h *closingHandler) Close() error { + return h.WriteCloser.Close() +} + +// CallerFileHandler returns a Handler that adds the line number and file of +// the calling function to the context with key "caller". +func CallerFileHandler(h Handler) Handler { + return FuncHandler(func(r *Record) error { + r.Ctx = append(r.Ctx, "caller", fmt.Sprint(r.Call)) + return h.Log(r) + }) +} + +// CallerFuncHandler returns a Handler that adds the calling function name to +// the context with key "fn". +func CallerFuncHandler(h Handler) Handler { + return FuncHandler(func(r *Record) error { + r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", r.Call)) + return h.Log(r) + }) +} + +// CallerStackHandler returns a Handler that adds a stack trace to the context +// with key "stack". The stack trace is formated as a space separated list of +// call sites inside matching []'s. The most recent call site is listed first. +// Each call site is formatted according to format. See the documentation of +// package github.com/go-stack/stack for the list of supported formats. 
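+//
+// For example, mirroring the usage shown in the package documentation
+// (illustrative):
+//
+//	h := log.CallerStackHandler("%+v", log.StdoutHandler)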
+func CallerStackHandler(format string, h Handler) Handler { + return FuncHandler(func(r *Record) error { + s := stack.Trace().TrimBelow(r.Call).TrimRuntime() + if len(s) > 0 { + r.Ctx = append(r.Ctx, "stack", fmt.Sprintf(format, s)) + } + return h.Log(r) + }) +} + +// FilterHandler returns a Handler that only writes records to the +// wrapped Handler if the given function evaluates true. For example, +// to only log records where the 'err' key is not nil: +// +// logger.SetHandler(FilterHandler(func(r *Record) bool { +// for i := 0; i < len(r.Ctx); i += 2 { +// if r.Ctx[i] == "err" { +// return r.Ctx[i+1] != nil +// } +// } +// return false +// }, h)) +// +func FilterHandler(fn func(r *Record) bool, h Handler) Handler { + return FuncHandler(func(r *Record) error { + if fn(r) { + return h.Log(r) + } + return nil + }) +} + +// MatchFilterHandler returns a Handler that only writes records +// to the wrapped Handler if the given key in the logged +// context matches the value. For example, to only log records +// from your ui package: +// +// log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler) +// +func MatchFilterHandler(key string, value interface{}, h Handler) Handler { + return FilterHandler(func(r *Record) (pass bool) { + switch key { + case r.KeyNames.Lvl: + return r.Lvl == value + case r.KeyNames.Time: + return r.Time == value + case r.KeyNames.Msg: + return r.Msg == value + } + + for i := 0; i < len(r.Ctx); i += 2 { + if r.Ctx[i] == key { + return r.Ctx[i+1] == value + } + } + return false + }, h) +} + +// LvlFilterHandler returns a Handler that only writes +// records which are less than the given verbosity +// level to the wrapped Handler. For example, to only +// log Error/Crit records: +// +// log.LvlFilterHandler(log.Error, log.StdoutHandler) +// +func LvlFilterHandler(maxLvl Lvl, h Handler) Handler { + return FilterHandler(func(r *Record) (pass bool) { + return r.Lvl <= maxLvl + }, h) +} + +// A MultiHandler dispatches any write to each of its handlers. +// This is useful for writing different types of log information +// to different locations. For example, to log to a file and +// standard error: +// +// log.MultiHandler( +// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), +// log.StderrHandler) +// +func MultiHandler(hs ...Handler) Handler { + return FuncHandler(func(r *Record) error { + for _, h := range hs { + // what to do about failures? + h.Log(r) + } + return nil + }) +} + +// A FailoverHandler writes all log records to the first handler +// specified, but will failover and write to the second handler if +// the first handler has failed, and so on for all handlers specified. +// For example you might want to log to a network socket, but failover +// to writing to a file if the network fails, and then to +// standard out if the file write fails: +// +// log.FailoverHandler( +// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()), +// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), +// log.StdoutHandler) +// +// All writes that do not go to the first handler will add context with keys of +// the form "failover_err_{idx}" which explain the error encountered while +// trying to write to the handlers before them in the list. 
+func FailoverHandler(hs ...Handler) Handler { + return FuncHandler(func(r *Record) error { + var err error + for i, h := range hs { + err = h.Log(r) + if err == nil { + return nil + } else { + r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err) + } + } + + return err + }) +} + +// ChannelHandler writes all records to the given channel. +// It blocks if the channel is full. Useful for async processing +// of log messages, it's used by BufferedHandler. +func ChannelHandler(recs chan<- *Record) Handler { + return FuncHandler(func(r *Record) error { + recs <- r + return nil + }) +} + +// BufferedHandler writes all records to a buffered +// channel of the given size which flushes into the wrapped +// handler whenever it is available for writing. Since these +// writes happen asynchronously, all writes to a BufferedHandler +// never return an error and any errors from the wrapped handler are ignored. +func BufferedHandler(bufSize int, h Handler) Handler { + recs := make(chan *Record, bufSize) + go func() { + for m := range recs { + _ = h.Log(m) + } + }() + return ChannelHandler(recs) +} + +// LazyHandler writes all values to the wrapped handler after evaluating +// any lazy functions in the record's context. It is already wrapped +// around StreamHandler and SyslogHandler in this library, you'll only need +// it if you write your own Handler. +func LazyHandler(h Handler) Handler { + return FuncHandler(func(r *Record) error { + // go through the values (odd indices) and reassign + // the values of any lazy fn to the result of its execution + hadErr := false + for i := 1; i < len(r.Ctx); i += 2 { + lz, ok := r.Ctx[i].(Lazy) + if ok { + v, err := evaluateLazy(lz) + if err != nil { + hadErr = true + r.Ctx[i] = err + } else { + if cs, ok := v.(stack.CallStack); ok { + v = cs.TrimBelow(r.Call).TrimRuntime() + } + r.Ctx[i] = v + } + } + } + + if hadErr { + r.Ctx = append(r.Ctx, errorKey, "bad lazy") + } + + return h.Log(r) + }) +} + +func evaluateLazy(lz Lazy) (interface{}, error) { + t := reflect.TypeOf(lz.Fn) + + if t.Kind() != reflect.Func { + return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn) + } + + if t.NumIn() > 0 { + return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn) + } + + if t.NumOut() == 0 { + return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn) + } + + value := reflect.ValueOf(lz.Fn) + results := value.Call([]reflect.Value{}) + if len(results) == 1 { + return results[0].Interface(), nil + } else { + values := make([]interface{}, len(results)) + for i, v := range results { + values[i] = v.Interface() + } + return values, nil + } +} + +// DiscardHandler reports success for all writes but does nothing. +// It is useful for dynamically disabling logging at runtime via +// a Logger's SetHandler method. 
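+//
+// For example, to silence a logger at runtime:
+//
+//     logger.SetHandler(DiscardHandler())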
+func DiscardHandler() Handler { + return FuncHandler(func(r *Record) error { + return nil + }) +} + +// The Must object provides the following Handler creation functions +// which instead of returning an error parameter only return a Handler +// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler +var Must muster + +func must(h Handler, err error) Handler { + if err != nil { + panic(err) + } + return h +} + +type muster struct{} + +func (m muster) FileHandler(path string, fmtr Format) Handler { + return must(FileHandler(path, fmtr)) +} + +func (m muster) NetHandler(network, addr string, fmtr Format) Handler { + return must(NetHandler(network, addr, fmtr)) +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go new file mode 100644 index 00000000000..f6181746e31 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go13.go @@ -0,0 +1,26 @@ +// +build !go1.4 + +package log15 + +import ( + "sync/atomic" + "unsafe" +) + +// swapHandler wraps another handler that may be swapped out +// dynamically at runtime in a thread-safe fashion. +type swapHandler struct { + handler unsafe.Pointer +} + +func (h *swapHandler) Log(r *Record) error { + return h.Get().Log(r) +} + +func (h *swapHandler) Get() Handler { + return *(*Handler)(atomic.LoadPointer(&h.handler)) +} + +func (h *swapHandler) Swap(newHandler Handler) { + atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler)) +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go new file mode 100644 index 00000000000..6041f2302fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/handler_go14.go @@ -0,0 +1,23 @@ +// +build go1.4 + +package log15 + +import "sync/atomic" + +// swapHandler wraps another handler that may be swapped out +// dynamically at runtime in a thread-safe fashion. +type swapHandler struct { + handler atomic.Value +} + +func (h *swapHandler) Log(r *Record) error { + return (*h.handler.Load().(*Handler)).Log(r) +} + +func (h *swapHandler) Swap(newHandler Handler) { + h.handler.Store(&newHandler) +} + +func (h *swapHandler) Get() Handler { + return *h.handler.Load().(*Handler) +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go new file mode 100644 index 00000000000..3163653159f --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/logger.go @@ -0,0 +1,208 @@ +package log15 + +import ( + "fmt" + "time" + + "github.com/go-stack/stack" +) + +const timeKey = "t" +const lvlKey = "lvl" +const msgKey = "msg" +const errorKey = "LOG15_ERROR" + +type Lvl int + +const ( + LvlCrit Lvl = iota + LvlError + LvlWarn + LvlInfo + LvlDebug +) + +// Returns the name of a Lvl +func (l Lvl) String() string { + switch l { + case LvlDebug: + return "dbug" + case LvlInfo: + return "info" + case LvlWarn: + return "warn" + case LvlError: + return "eror" + case LvlCrit: + return "crit" + default: + panic("bad level") + } +} + +// Returns the appropriate Lvl from a string name. +// Useful for parsing command line args and configuration files. 
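+//
+// A minimal sketch, assuming the level string comes from a flag or a
+// configuration value:
+//
+//     lvl, err := LvlFromString("warn")
+//     if err == nil {
+//         handler = LvlFilterHandler(lvl, handler)
+//     }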
+func LvlFromString(lvlString string) (Lvl, error) { + switch lvlString { + case "debug", "dbug": + return LvlDebug, nil + case "info": + return LvlInfo, nil + case "warn": + return LvlWarn, nil + case "error", "eror": + return LvlError, nil + case "crit": + return LvlCrit, nil + default: + return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString) + } +} + +// A Record is what a Logger asks its handler to write +type Record struct { + Time time.Time + Lvl Lvl + Msg string + Ctx []interface{} + Call stack.Call + KeyNames RecordKeyNames +} + +type RecordKeyNames struct { + Time string + Msg string + Lvl string +} + +// A Logger writes key/value pairs to a Handler +type Logger interface { + // New returns a new Logger that has this logger's context plus the given context + New(ctx ...interface{}) Logger + + // GetHandler gets the handler associated with the logger. + GetHandler() Handler + + // SetHandler updates the logger to write records to the specified handler. + SetHandler(h Handler) + + // Log a message at the given level with context key/value pairs + Debug(msg string, ctx ...interface{}) + Info(msg string, ctx ...interface{}) + Warn(msg string, ctx ...interface{}) + Error(msg string, ctx ...interface{}) + Crit(msg string, ctx ...interface{}) +} + +type logger struct { + ctx []interface{} + h *swapHandler +} + +func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) { + l.h.Log(&Record{ + Time: time.Now(), + Lvl: lvl, + Msg: msg, + Ctx: newContext(l.ctx, ctx), + Call: stack.Caller(2), + KeyNames: RecordKeyNames{ + Time: timeKey, + Msg: msgKey, + Lvl: lvlKey, + }, + }) +} + +func (l *logger) New(ctx ...interface{}) Logger { + child := &logger{newContext(l.ctx, ctx), new(swapHandler)} + child.SetHandler(l.h) + return child +} + +func newContext(prefix []interface{}, suffix []interface{}) []interface{} { + normalizedSuffix := normalize(suffix) + newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix)) + n := copy(newCtx, prefix) + copy(newCtx[n:], normalizedSuffix) + return newCtx +} + +func (l *logger) Debug(msg string, ctx ...interface{}) { + l.write(msg, LvlDebug, ctx) +} + +func (l *logger) Info(msg string, ctx ...interface{}) { + l.write(msg, LvlInfo, ctx) +} + +func (l *logger) Warn(msg string, ctx ...interface{}) { + l.write(msg, LvlWarn, ctx) +} + +func (l *logger) Error(msg string, ctx ...interface{}) { + l.write(msg, LvlError, ctx) +} + +func (l *logger) Crit(msg string, ctx ...interface{}) { + l.write(msg, LvlCrit, ctx) +} + +func (l *logger) GetHandler() Handler { + return l.h.Get() +} + +func (l *logger) SetHandler(h Handler) { + l.h.Swap(h) +} + +func normalize(ctx []interface{}) []interface{} { + // if the caller passed a Ctx object, then expand it + if len(ctx) == 1 { + if ctxMap, ok := ctx[0].(Ctx); ok { + ctx = ctxMap.toArray() + } + } + + // ctx needs to be even because it's a series of key/value pairs + // no one wants to check for errors on logging functions, + // so instead of erroring on bad input, we'll just make sure + // that things are the right length and users can fix bugs + // when they see the output looks wrong + if len(ctx)%2 != 0 { + ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil") + } + + return ctx +} + +// Lazy allows you to defer calculation of a logged value that is expensive +// to compute until it is certain that it must be evaluated with the given filters. 
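+//
+// A minimal sketch, assuming expensiveStats is some zero-argument function
+// that is costly to call:
+//
+//     logger.Debug("db state", "stats", Lazy{Fn: expensiveStats})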
+//
+// Lazy may also be used in conjunction with a Logger's New() function
+// to generate a child logger which always reports the current value of changing
+// state.
+//
+// You may wrap any function which takes no arguments in Lazy. It may return any
+// number of values of any type.
+type Lazy struct {
+	Fn interface{}
+}
+
+// Ctx is a map of key/value pairs to pass as context to a log function.
+// Use this only if you really need greater safety around the arguments you pass
+// to the logging functions.
+type Ctx map[string]interface{}
+
+func (c Ctx) toArray() []interface{} {
+	arr := make([]interface{}, len(c)*2)
+
+	i := 0
+	for k, v := range c {
+		arr[i] = k
+		arr[i+1] = v
+		i += 2
+	}
+
+	return arr
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/root.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/root.go
new file mode 100644
index 00000000000..c5118d4090f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/root.go
@@ -0,0 +1,67 @@
+package log15
+
+import (
+	"os"
+
+	"github.com/inconshreveable/log15/term"
+	"github.com/mattn/go-colorable"
+)
+
+var (
+	root          *logger
+	StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat())
+	StderrHandler = StreamHandler(os.Stderr, LogfmtFormat())
+)
+
+func init() {
+	if term.IsTty(os.Stdout.Fd()) {
+		StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat())
+	}
+
+	if term.IsTty(os.Stderr.Fd()) {
+		StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat())
+	}
+
+	root = &logger{[]interface{}{}, new(swapHandler)}
+	root.SetHandler(StdoutHandler)
+}
+
+// New returns a new logger with the given context.
+// New is a convenient alias for Root().New
+func New(ctx ...interface{}) Logger {
+	return root.New(ctx...)
+}
+
+// Root returns the root logger
+func Root() Logger {
+	return root
+}
+
+// The following functions bypass the exported logger methods (logger.Debug,
+// etc.) to keep the call depth the same for all paths to logger.write so
+// runtime.Caller(2) always refers to the call site in client code.
+
+// Debug is a convenient alias for Root().Debug
+func Debug(msg string, ctx ...interface{}) {
+	root.write(msg, LvlDebug, ctx)
+}
+
+// Info is a convenient alias for Root().Info
+func Info(msg string, ctx ...interface{}) {
+	root.write(msg, LvlInfo, ctx)
+}
+
+// Warn is a convenient alias for Root().Warn
+func Warn(msg string, ctx ...interface{}) {
+	root.write(msg, LvlWarn, ctx)
+}
+
+// Error is a convenient alias for Root().Error
+func Error(msg string, ctx ...interface{}) {
+	root.write(msg, LvlError, ctx)
+}
+
+// Crit is a convenient alias for Root().Crit
+func Crit(msg string, ctx ...interface{}) {
+	root.write(msg, LvlCrit, ctx)
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go
new file mode 100644
index 00000000000..5f95f99f1ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/syslog.go
@@ -0,0 +1,55 @@
+// +build !windows,!plan9
+
+package log15
+
+import (
+	"log/syslog"
+	"strings"
+)
+
+// SyslogHandler opens a connection to the system syslog daemon by calling
+// syslog.New and writes all records to it.
+func SyslogHandler(priority syslog.Priority, tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.New(priority, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+// SyslogNetHandler opens a connection to a log daemon over the network and writes
+// all log records to it.
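+//
+// A minimal sketch, assuming an arbitrary collector address and tag:
+//
+//     h, err := SyslogNetHandler("udp", "logs.example.com:514",
+//         syslog.LOG_INFO, "myapp", LogfmtFormat())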
+func SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) (Handler, error) { + wr, err := syslog.Dial(net, addr, priority, tag) + return sharedSyslog(fmtr, wr, err) +} + +func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) { + if err != nil { + return nil, err + } + h := FuncHandler(func(r *Record) error { + var syslogFn = sysWr.Info + switch r.Lvl { + case LvlCrit: + syslogFn = sysWr.Crit + case LvlError: + syslogFn = sysWr.Err + case LvlWarn: + syslogFn = sysWr.Warning + case LvlInfo: + syslogFn = sysWr.Info + case LvlDebug: + syslogFn = sysWr.Debug + } + + s := strings.TrimSpace(string(fmtr.Format(r))) + return syslogFn(s) + }) + return LazyHandler(&closingHandler{sysWr, h}), nil +} + +func (m muster) SyslogHandler(priority syslog.Priority, tag string, fmtr Format) Handler { + return must(SyslogHandler(priority, tag, fmtr)) +} + +func (m muster) SyslogNetHandler(net, addr string, priority syslog.Priority, tag string, fmtr Format) Handler { + return must(SyslogNetHandler(net, addr, priority, tag, fmtr)) +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE new file mode 100644 index 00000000000..f090cb42f37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go new file mode 100644 index 00000000000..c1b5d2a3b1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_appengine.go @@ -0,0 +1,13 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package term + +// IsTty always returns false on AppEngine. 
+func IsTty(fd uintptr) bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go new file mode 100644 index 00000000000..b05de4cb8c8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_darwin.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package term + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go new file mode 100644 index 00000000000..cfaceab337a --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_freebsd.go @@ -0,0 +1,18 @@ +package term + +import ( + "syscall" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go new file mode 100644 index 00000000000..5290468d698 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package term + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go new file mode 100644 index 00000000000..87df7d5b029 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_notwindows.go @@ -0,0 +1,20 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!appengine darwin freebsd openbsd + +package term + +import ( + "syscall" + "unsafe" +) + +// IsTty returns true if the given file descriptor is a terminal. 
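+//
+// For example, log15's root handler setup calls:
+//
+//     term.IsTty(os.Stdout.Fd())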
+func IsTty(fd uintptr) bool { + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go new file mode 100644 index 00000000000..f9bb9e1c23b --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_openbsd.go @@ -0,0 +1,7 @@ +package term + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go new file mode 100644 index 00000000000..df3c30c1589 --- /dev/null +++ b/Godeps/_workspace/src/github.com/inconshreveable/log15/term/terminal_windows.go @@ -0,0 +1,26 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package term + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTty returns true if the given file descriptor is a terminal. +func IsTty(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md b/Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md deleted file mode 100644 index e11eaee93f5..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/client/README.md +++ /dev/null @@ -1,267 +0,0 @@ -# InfluxDB Client - -[![GoDoc](https://godoc.org/github.com/influxdata/influxdb?status.svg)](http://godoc.org/github.com/influxdata/influxdb/client/v2) - -## Description - -**NOTE:** The Go client library now has a "v2" version, with the old version -being deprecated. The new version can be imported at -`import "github.com/influxdata/influxdb/client/v2"`. It is not backwards-compatible. - -A Go client library written and maintained by the **InfluxDB** team. -This package provides convenience functions to read and write time series data. -It uses the HTTP protocol to communicate with your **InfluxDB** cluster. - - -## Getting Started - -### Connecting To Your Database - -Connecting to an **InfluxDB** database is straightforward. You will need a host -name, a port and the cluster user credentials if applicable. The default port is -8086. You can customize these settings to your specific installation via the -**InfluxDB** configuration file. - -Though not necessary for experimentation, you may want to create a new user -and authenticate the connection to your database. - -For more information please check out the -[Admin Docs](https://docs.influxdata.com/influxdb/latest/administration/). - -For the impatient, you can create a new admin user _bubba_ by firing off the -[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). - -```shell -influx -> create user bubba with password 'bumblebeetuna' -> grant all privileges to bubba -``` - -And now for good measure set the credentials in you shell environment. 
-In the example below we will use $INFLUX_USER and $INFLUX_PWD - -Now with the administrivia out of the way, let's connect to our database. - -NOTE: If you've opted out of creating a user, you can omit Username and Password in -the configuration below. - -```go -package main - -import ( - "log" - "time" - - "github.com/influxdata/influxdb/client/v2" -) - -const ( - MyDB = "square_holes" - username = "bubba" - password = "bumblebeetuna" -) - -func main() { - // Make client - c, err := client.NewHTTPClient(client.HTTPConfig{ - Addr: "http://localhost:8086", - Username: username, - Password: password, - }) - - if err != nil { - log.Fatalln("Error: ", err) - } - - // Create a new point batch - bp, err := client.NewBatchPoints(client.BatchPointsConfig{ - Database: MyDB, - Precision: "s", - }) - - if err != nil { - log.Fatalln("Error: ", err) - } - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - - if err != nil { - log.Fatalln("Error: ", err) - } - - bp.AddPoint(pt) - - // Write the batch - c.Write(bp) -} - -``` - -### Inserting Data - -Time series data aka *points* are written to the database using batch inserts. -The mechanism is to create one or more points and then create a batch aka -*batch points* and write these to a given database and series. A series is a -combination of a measurement (time/values) and a set of tags. - -In this sample we will create a batch of a 1,000 points. Each point has a time and -a single value as well as 2 tags indicating a shape and color. We write these points -to a database called _square_holes_ using a measurement named _shapes_. - -NOTE: You can specify a RetentionPolicy as part of the batch points. If not -provided InfluxDB will use the database _default_ retention policy. - -```go -func writePoints(clnt client.Client) { - sampleSize := 1000 - rand.Seed(42) - - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Database: "systemstats", - Precision: "us", - }) - - for i := 0; i < sampleSize; i++ { - regions := []string{"us-west1", "us-west2", "us-west3", "us-east1"} - tags := map[string]string{ - "cpu": "cpu-total", - "host": fmt.Sprintf("host%d", rand.Intn(1000)), - "region": regions[rand.Intn(len(regions))], - } - - idle := rand.Float64() * 100.0 - fields := map[string]interface{}{ - "idle": idle, - "busy": 100.0 - idle, - } - - bp.AddPoint(client.NewPoint( - "cpu_usage", - tags, - fields, - time.Now(), - )) - } - - err := clnt.Write(bp) - if err != nil { - log.Fatal(err) - } -} -``` - - -### Querying Data - -One nice advantage of using **InfluxDB** the ability to query your data using familiar -SQL constructs. 
In this example we can create a convenience function to query the database -as follows: - -```go -// queryDB convenience function to query the database -func queryDB(clnt client.Client, cmd string) (res []client.Result, err error) { - q := client.Query{ - Command: cmd, - Database: MyDB, - } - if response, err := clnt.Query(q); err == nil { - if response.Error() != nil { - return res, response.Error() - } - res = response.Results - } else { - return res, err - } - return res, nil -} -``` - -#### Creating a Database - -```go -_, err := queryDB(clnt, fmt.Sprintf("CREATE DATABASE %s", MyDB)) -if err != nil { - log.Fatal(err) -} -``` - -#### Count Records - -```go -q := fmt.Sprintf("SELECT count(%s) FROM %s", "value", MyMeasurement) -res, err := queryDB(clnt, q) -if err != nil { - log.Fatal(err) -} -count := res[0].Series[0].Values[0][1] -log.Printf("Found a total of %v records\n", count) -``` - -#### Find the last 10 _shapes_ records - -```go -q := fmt.Sprintf("SELECT * FROM %s LIMIT %d", MyMeasurement, 20) -res, err = queryDB(clnt, q) -if err != nil { - log.Fatal(err) -} - -for i, row := range res[0].Series[0].Values { - t, err := time.Parse(time.RFC3339, row[0].(string)) - if err != nil { - log.Fatal(err) - } - val := row[1].(string) - log.Printf("[%2d] %s: %s\n", i, t.Format(time.Stamp), val) -} -``` - -### Using the UDP Client - -The **InfluxDB** client also supports writing over UDP. - -```go -func WriteUDP() { - // Make client - c := client.NewUDPClient("localhost:8089") - - // Create a new point batch - bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ - Precision: "s", - }) - - // Create a point and add to batch - tags := map[string]string{"cpu": "cpu-total"} - fields := map[string]interface{}{ - "idle": 10.1, - "system": 53.3, - "user": 46.6, - } - pt, err := client.NewPoint("cpu_usage", tags, fields, time.Now()) - if err != nil { - panic(err.Error()) - } - bp.AddPoint(pt) - - // Write the batch - c.Write(bp) -} -``` - -## Go Docs - -Please refer to -[http://godoc.org/github.com/influxdata/influxdb/client/v2](http://godoc.org/github.com/influxdata/influxdb/client/v2) -for documentation. - -## See Also - -You can also examine how the client library is used by the -[InfluxDB CLI](https://github.com/influxdata/influxdb/blob/master/cmd/influx/main.go). diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go b/Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go deleted file mode 100644 index 23e09eec424..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/client/influxdb.go +++ /dev/null @@ -1,789 +0,0 @@ -package client - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/influxdata/influxdb/models" -) - -const ( - // DefaultHost is the default host used to connect to an InfluxDB instance - DefaultHost = "localhost" - - // DefaultPort is the default port used to connect to an InfluxDB instance - DefaultPort = 8086 - - // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance - DefaultTimeout = 0 -) - -// Query is used to send a command to the server. Both Command and Database are required. -type Query struct { - Command string - Database string - - // Chunked tells the server to send back chunked responses. This places - // less load on the server by sending back chunks of the response rather - // than waiting for the entire response all at once. 
- Chunked bool - - // ChunkSize sets the maximum number of rows that will be returned per - // chunk. Chunks are either divided based on their series or if they hit - // the chunk size limit. - // - // Chunked must be set to true for this option to be used. - ChunkSize int -} - -// ParseConnectionString will parse a string to create a valid connection URL -func ParseConnectionString(path string, ssl bool) (url.URL, error) { - var host string - var port int - - h, p, err := net.SplitHostPort(path) - if err != nil { - if path == "" { - host = DefaultHost - } else { - host = path - } - // If they didn't specify a port, always use the default port - port = DefaultPort - } else { - host = h - port, err = strconv.Atoi(p) - if err != nil { - return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) - } - } - - u := url.URL{ - Scheme: "http", - } - if ssl { - u.Scheme = "https" - } - - u.Host = net.JoinHostPort(host, strconv.Itoa(port)) - - return u, nil -} - -// Config is used to specify what server to connect to. -// URL: The URL of the server connecting to. -// Username/Password are optional. They will be passed via basic auth if provided. -// UserAgent: If not provided, will default "InfluxDBClient", -// Timeout: If not provided, will default to 0 (no timeout) -type Config struct { - URL url.URL - Username string - Password string - UserAgent string - Timeout time.Duration - Precision string - UnsafeSsl bool -} - -// NewConfig will create a config to be used in connecting to the client -func NewConfig() Config { - return Config{ - Timeout: DefaultTimeout, - } -} - -// Client is used to make calls to the server. -type Client struct { - url url.URL - username string - password string - httpClient *http.Client - userAgent string - precision string -} - -const ( - // ConsistencyOne requires at least one data node acknowledged a write. - ConsistencyOne = "one" - - // ConsistencyAll requires all data nodes to acknowledge a write. - ConsistencyAll = "all" - - // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. - ConsistencyQuorum = "quorum" - - // ConsistencyAny allows for hinted hand off, potentially no write happened yet. - ConsistencyAny = "any" -) - -// NewClient will instantiate and return a connected client to issue commands to the server. 
-func NewClient(c Config) (*Client, error) { - tlsConfig := &tls.Config{ - InsecureSkipVerify: c.UnsafeSsl, - } - - tr := &http.Transport{ - TLSClientConfig: tlsConfig, - } - - client := Client{ - url: c.URL, - username: c.Username, - password: c.Password, - httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, - userAgent: c.UserAgent, - precision: c.Precision, - } - if client.userAgent == "" { - client.userAgent = "InfluxDBClient" - } - return &client, nil -} - -// SetAuth will update the username and passwords -func (c *Client) SetAuth(u, p string) { - c.username = u - c.password = p -} - -// SetPrecision will update the precision -func (c *Client) SetPrecision(precision string) { - c.precision = precision -} - -// Query sends a command to the server and returns the Response -func (c *Client) Query(q Query) (*Response, error) { - u := c.url - - u.Path = "query" - values := u.Query() - values.Set("q", q.Command) - values.Set("db", q.Database) - if q.Chunked { - values.Set("chunked", "true") - if q.ChunkSize > 0 { - values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) - } - } - if c.precision != "" { - values.Set("epoch", c.precision) - } - u.RawQuery = values.Encode() - - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return nil, err - } - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - if q.Chunked { - cr := NewChunkedResponse(resp.Body) - for { - r, err := cr.NextResponse() - if err != nil { - // If we got an error while decoding the response, send that back. - return nil, err - } - - if r == nil { - break - } - - response.Results = append(response.Results, r.Results...) - if r.Err != nil { - response.Err = r.Err - break - } - } - } else { - dec := json.NewDecoder(resp.Body) - dec.UseNumber() - if err := dec.Decode(&response); err != nil { - // Ignore EOF errors if we got an invalid status code. - if !(err == io.EOF && resp.StatusCode != http.StatusOK) { - return nil, err - } - } - } - - // If we don't have an error in our json response, and didn't get StatusOK, - // then send back an error. - if resp.StatusCode != http.StatusOK && response.Error() == nil { - return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) - } - return &response, nil -} - -// Write takes BatchPoints and allows for writing of multiple points with defaults -// If successful, error is nil and Response is nil -// If an error occurs, Response may contain additional information if populated. 
-func (c *Client) Write(bp BatchPoints) (*Response, error) { - u := c.url - u.Path = "write" - - var b bytes.Buffer - for _, p := range bp.Points { - err := checkPointTypes(p) - if err != nil { - return nil, err - } - if p.Raw != "" { - if _, err := b.WriteString(p.Raw); err != nil { - return nil, err - } - } else { - for k, v := range bp.Tags { - if p.Tags == nil { - p.Tags = make(map[string]string, len(bp.Tags)) - } - p.Tags[k] = v - } - - if _, err := b.WriteString(p.MarshalString()); err != nil { - return nil, err - } - } - - if err := b.WriteByte('\n'); err != nil { - return nil, err - } - } - - req, err := http.NewRequest("POST", u.String(), &b) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - precision := bp.Precision - if precision == "" { - precision = c.precision - } - - params := req.URL.Query() - params.Set("db", bp.Database) - params.Set("rp", bp.RetentionPolicy) - params.Set("precision", precision) - params.Set("consistency", bp.WriteConsistency) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - var err = fmt.Errorf(string(body)) - response.Err = err - return &response, err - } - - return nil, nil -} - -// WriteLineProtocol takes a string with line returns to delimit each write -// If successful, error is nil and Response is nil -// If an error occurs, Response may contain additional information if populated. -func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { - u := c.url - u.Path = "write" - - r := strings.NewReader(data) - - req, err := http.NewRequest("POST", u.String(), r) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "") - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - params := req.URL.Query() - params.Set("db", database) - params.Set("rp", retentionPolicy) - params.Set("precision", precision) - params.Set("consistency", writeConsistency) - req.URL.RawQuery = params.Encode() - - resp, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var response Response - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - err := fmt.Errorf(string(body)) - response.Err = err - return &response, err - } - - return nil, nil -} - -// Ping will check to see if the server is up -// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
-func (c *Client) Ping() (time.Duration, string, error) { - now := time.Now() - u := c.url - u.Path = "ping" - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return 0, "", err - } - req.Header.Set("User-Agent", c.userAgent) - if c.username != "" { - req.SetBasicAuth(c.username, c.password) - } - - resp, err := c.httpClient.Do(req) - if err != nil { - return 0, "", err - } - defer resp.Body.Close() - - version := resp.Header.Get("X-Influxdb-Version") - return time.Since(now), version, nil -} - -// Structs - -// Message represents a user message. -type Message struct { - Level string `json:"level,omitempty"` - Text string `json:"text,omitempty"` -} - -// Result represents a resultset returned from a single statement. -type Result struct { - Series []models.Row - Messages []*Message - Err error -} - -// MarshalJSON encodes the result into JSON. -func (r *Result) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. - var o struct { - Series []models.Row `json:"series,omitempty"` - Messages []*Message `json:"messages,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. - o.Series = r.Series - o.Messages = r.Messages - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Result struct -func (r *Result) UnmarshalJSON(b []byte) error { - var o struct { - Series []models.Row `json:"series,omitempty"` - Messages []*Message `json:"messages,omitempty"` - Err string `json:"error,omitempty"` - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - err := dec.Decode(&o) - if err != nil { - return err - } - r.Series = o.Series - r.Messages = o.Messages - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} - -// Response represents a list of statement results. -type Response struct { - Results []Result - Err error -} - -// MarshalJSON encodes the response into JSON. -func (r *Response) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. - var o struct { - Results []Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. - o.Results = r.Results - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Response struct -func (r *Response) UnmarshalJSON(b []byte) error { - var o struct { - Results []Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - err := dec.Decode(&o) - if err != nil { - return err - } - r.Results = o.Results - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} - -// Error returns the first error from any statement. -// Returns nil if no errors occurred on any statements. -func (r *Response) Error() error { - if r.Err != nil { - return r.Err - } - for _, result := range r.Results { - if result.Err != nil { - return result.Err - } - } - return nil -} - -// ChunkedResponse represents a response from the server that -// uses chunking to stream the output. -type ChunkedResponse struct { - dec *json.Decoder -} - -// NewChunkedResponse reads a stream and produces responses from the stream. -func NewChunkedResponse(r io.Reader) *ChunkedResponse { - dec := json.NewDecoder(r) - dec.UseNumber() - return &ChunkedResponse{dec: dec} -} - -// NextResponse reads the next line of the stream and returns a response. 
-func (r *ChunkedResponse) NextResponse() (*Response, error) { - var response Response - if err := r.dec.Decode(&response); err != nil { - if err == io.EOF { - return nil, nil - } - return nil, err - } - return &response, nil -} - -// Point defines the fields that will be written to the database -// Measurement, Time, and Fields are required -// Precision can be specified if the time is in epoch format (integer). -// Valid values for Precision are n, u, ms, s, m, and h -type Point struct { - Measurement string - Tags map[string]string - Time time.Time - Fields map[string]interface{} - Precision string - Raw string -} - -// MarshalJSON will format the time in RFC3339Nano -// Precision is also ignored as it is only used for writing, not reading -// Or another way to say it is we always send back in nanosecond precision -func (p *Point) MarshalJSON() ([]byte, error) { - point := struct { - Measurement string `json:"measurement,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Time string `json:"time,omitempty"` - Fields map[string]interface{} `json:"fields,omitempty"` - Precision string `json:"precision,omitempty"` - }{ - Measurement: p.Measurement, - Tags: p.Tags, - Fields: p.Fields, - Precision: p.Precision, - } - // Let it omit empty if it's really zero - if !p.Time.IsZero() { - point.Time = p.Time.UTC().Format(time.RFC3339Nano) - } - return json.Marshal(&point) -} - -// MarshalString renders string representation of a Point with specified -// precision. The default precision is nanoseconds. -func (p *Point) MarshalString() string { - pt, err := models.NewPoint(p.Measurement, p.Tags, p.Fields, p.Time) - if err != nil { - return "# ERROR: " + err.Error() + " " + p.Measurement - } - if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { - return pt.String() - } - return pt.PrecisionString(p.Precision) -} - -// UnmarshalJSON decodes the data into the Point struct -func (p *Point) UnmarshalJSON(b []byte) error { - var normal struct { - Measurement string `json:"measurement"` - Tags map[string]string `json:"tags"` - Time time.Time `json:"time"` - Precision string `json:"precision"` - Fields map[string]interface{} `json:"fields"` - } - var epoch struct { - Measurement string `json:"measurement"` - Tags map[string]string `json:"tags"` - Time *int64 `json:"time"` - Precision string `json:"precision"` - Fields map[string]interface{} `json:"fields"` - } - - if err := func() error { - var err error - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - if err = dec.Decode(&epoch); err != nil { - return err - } - // Convert from epoch to time.Time, but only if Time - // was actually set. 
- var ts time.Time - if epoch.Time != nil { - ts, err = EpochToTime(*epoch.Time, epoch.Precision) - if err != nil { - return err - } - } - p.Measurement = epoch.Measurement - p.Tags = epoch.Tags - p.Time = ts - p.Precision = epoch.Precision - p.Fields = normalizeFields(epoch.Fields) - return nil - }(); err == nil { - return nil - } - - dec := json.NewDecoder(bytes.NewBuffer(b)) - dec.UseNumber() - if err := dec.Decode(&normal); err != nil { - return err - } - normal.Time = SetPrecision(normal.Time, normal.Precision) - p.Measurement = normal.Measurement - p.Tags = normal.Tags - p.Time = normal.Time - p.Precision = normal.Precision - p.Fields = normalizeFields(normal.Fields) - - return nil -} - -// Remove any notion of json.Number -func normalizeFields(fields map[string]interface{}) map[string]interface{} { - newFields := map[string]interface{}{} - - for k, v := range fields { - switch v := v.(type) { - case json.Number: - jv, e := v.Float64() - if e != nil { - panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) - } - newFields[k] = jv - default: - newFields[k] = v - } - } - return newFields -} - -// BatchPoints is used to send batched data in a single write. -// Database and Points are required -// If no retention policy is specified, it will use the databases default retention policy. -// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. -// If time is specified, it will be applied to any point with an empty time. -// Precision can be specified if the time is in epoch format (integer). -// Valid values for Precision are n, u, ms, s, m, and h -type BatchPoints struct { - Points []Point `json:"points,omitempty"` - Database string `json:"database,omitempty"` - RetentionPolicy string `json:"retentionPolicy,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Time time.Time `json:"time,omitempty"` - Precision string `json:"precision,omitempty"` - WriteConsistency string `json:"-"` -} - -// UnmarshalJSON decodes the data into the BatchPoints struct -func (bp *BatchPoints) UnmarshalJSON(b []byte) error { - var normal struct { - Points []Point `json:"points"` - Database string `json:"database"` - RetentionPolicy string `json:"retentionPolicy"` - Tags map[string]string `json:"tags"` - Time time.Time `json:"time"` - Precision string `json:"precision"` - } - var epoch struct { - Points []Point `json:"points"` - Database string `json:"database"` - RetentionPolicy string `json:"retentionPolicy"` - Tags map[string]string `json:"tags"` - Time *int64 `json:"time"` - Precision string `json:"precision"` - } - - if err := func() error { - var err error - if err = json.Unmarshal(b, &epoch); err != nil { - return err - } - // Convert from epoch to time.Time - var ts time.Time - if epoch.Time != nil { - ts, err = EpochToTime(*epoch.Time, epoch.Precision) - if err != nil { - return err - } - } - bp.Points = epoch.Points - bp.Database = epoch.Database - bp.RetentionPolicy = epoch.RetentionPolicy - bp.Tags = epoch.Tags - bp.Time = ts - bp.Precision = epoch.Precision - return nil - }(); err == nil { - return nil - } - - if err := json.Unmarshal(b, &normal); err != nil { - return err - } - normal.Time = SetPrecision(normal.Time, normal.Precision) - bp.Points = normal.Points - bp.Database = normal.Database - bp.RetentionPolicy = normal.RetentionPolicy - bp.Tags = normal.Tags - bp.Time = normal.Time - bp.Precision = normal.Precision - - return nil -} - -// utility functions - -// Addr provides the current url as a string of 
the server the client is connected to. -func (c *Client) Addr() string { - return c.url.String() -} - -// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. -func checkPointTypes(p Point) error { - for _, v := range p.Fields { - switch v.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, string, nil: - return nil - default: - return fmt.Errorf("unsupported point type: %T", v) - } - } - return nil -} - -// helper functions - -// EpochToTime takes a unix epoch time and uses precision to return back a time.Time -func EpochToTime(epoch int64, precision string) (time.Time, error) { - if precision == "" { - precision = "s" - } - var t time.Time - switch precision { - case "h": - t = time.Unix(0, epoch*int64(time.Hour)) - case "m": - t = time.Unix(0, epoch*int64(time.Minute)) - case "s": - t = time.Unix(0, epoch*int64(time.Second)) - case "ms": - t = time.Unix(0, epoch*int64(time.Millisecond)) - case "u": - t = time.Unix(0, epoch*int64(time.Microsecond)) - case "n": - t = time.Unix(0, epoch) - default: - return time.Time{}, fmt.Errorf("Unknown precision %q", precision) - } - return t, nil -} - -// SetPrecision will round a time to the specified precision -func SetPrecision(t time.Time, precision string) time.Time { - switch precision { - case "n": - case "u": - return t.Round(time.Microsecond) - case "ms": - return t.Round(time.Millisecond) - case "s": - return t.Round(time.Second) - case "m": - return t.Round(time.Minute) - case "h": - return t.Round(time.Hour) - } - return t -} diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go b/Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go deleted file mode 100644 index 97cdc51aa08..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/consistency.go +++ /dev/null @@ -1,46 +0,0 @@ -package models - -import ( - "errors" - "strings" -) - -// ConsistencyLevel represent a required replication criteria before a write can -// be returned as successful -type ConsistencyLevel int - -const ( - // ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet - ConsistencyLevelAny ConsistencyLevel = iota - - // ConsistencyLevelOne requires at least one data node acknowledged a write - ConsistencyLevelOne - - // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write - ConsistencyLevelQuorum - - // ConsistencyLevelAll requires all data nodes to acknowledge a write - ConsistencyLevelAll -) - -var ( - // ErrInvalidConsistencyLevel is returned when parsing the string version - // of a consistency level. 
- ErrInvalidConsistencyLevel = errors.New("invalid consistency level") -) - -// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const -func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { - switch strings.ToLower(level) { - case "any": - return ConsistencyLevelAny, nil - case "one": - return ConsistencyLevelOne, nil - case "quorum": - return ConsistencyLevelQuorum, nil - case "all": - return ConsistencyLevelAll, nil - default: - return 0, ErrInvalidConsistencyLevel - } -} diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go b/Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go deleted file mode 100644 index d83fe24d9ef..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/points.go +++ /dev/null @@ -1,1576 +0,0 @@ -package models - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/fnv" - "math" - "sort" - "strconv" - "strings" - "time" - - "github.com/influxdata/influxdb/pkg/escape" -) - -var ( - measurementEscapeCodes = map[byte][]byte{ - ',': []byte(`\,`), - ' ': []byte(`\ `), - } - - tagEscapeCodes = map[byte][]byte{ - ',': []byte(`\,`), - ' ': []byte(`\ `), - '=': []byte(`\=`), - } - - ErrPointMustHaveAField = errors.New("point without fields is unsupported") - ErrInvalidNumber = errors.New("invalid number") - ErrMaxKeyLengthExceeded = errors.New("max key length exceeded") -) - -const ( - MaxKeyLength = 65535 -) - -// Point defines the values that will be written to the database -type Point interface { - Name() string - SetName(string) - - Tags() Tags - AddTag(key, value string) - SetTags(tags Tags) - - Fields() Fields - - Time() time.Time - SetTime(t time.Time) - UnixNano() int64 - - HashID() uint64 - Key() []byte - - Data() []byte - SetData(buf []byte) - - // String returns a string representation of the point, if there is a - // timestamp associated with the point then it will be specified with the default - // precision of nanoseconds - String() string - - // Bytes returns a []byte representation of the point similar to string. - MarshalBinary() ([]byte, error) - - // PrecisionString returns a string representation of the point, if there - // is a timestamp associated with the point then it will be specified in the - // given unit - PrecisionString(precision string) string - - // RoundedString returns a string representation of the point, if there - // is a timestamp associated with the point, then it will be rounded to the - // given duration - RoundedString(d time.Duration) string -} - -// Points represents a sortable list of points by timestamp. -type Points []Point - -func (a Points) Len() int { return len(a) } -func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } -func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// point is the default implementation of Point. 
-type point struct { - time time.Time - - // text encoding of measurement and tags - // key must always be stored sorted by tags, if the original line was not sorted, - // we need to resort it - key []byte - - // text encoding of field data - fields []byte - - // text encoding of timestamp - ts []byte - - // binary encoded field data - data []byte - - // cached version of parsed fields from data - cachedFields map[string]interface{} - - // cached version of parsed name from key - cachedName string -} - -const ( - // the number of characters for the largest possible int64 (9223372036854775807) - maxInt64Digits = 19 - - // the number of characters for the smallest possible int64 (-9223372036854775808) - minInt64Digits = 20 - - // the number of characters required for the largest float64 before a range check - // would occur during parsing - maxFloat64Digits = 25 - - // the number of characters required for smallest float64 before a range check occur - // would occur during parsing - minFloat64Digits = 27 -) - -// ParsePoints returns a slice of Points from a text representation of a point -// with each point separated by newlines. If any points fail to parse, a non-nil error -// will be returned in addition to the points that parsed successfully. -func ParsePoints(buf []byte) ([]Point, error) { - return ParsePointsWithPrecision(buf, time.Now().UTC(), "n") -} - -// ParsePointsString is identical to ParsePoints but accepts a string -// buffer. -func ParsePointsString(buf string) ([]Point, error) { - return ParsePoints([]byte(buf)) -} - -// ParseKey returns the measurement name and tags from a point. -func ParseKey(buf string) (string, Tags, error) { - // Ignore the error because scanMeasurement returns "missing fields" which we ignore - // when just parsing a key - state, i, _ := scanMeasurement([]byte(buf), 0) - - var tags Tags - if state == tagKeyState { - tags = parseTags([]byte(buf)) - // scanMeasurement returns the location of the comma if there are tags, strip that off - return string(buf[:i-1]), tags, nil - } - return string(buf[:i]), tags, nil -} - -// ParsePointsWithPrecision is similar to ParsePoints, but allows the -// caller to provide a precision for time. -func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { - points := []Point{} - var ( - pos int - block []byte - failed []string - ) - for { - pos, block = scanLine(buf, pos) - pos++ - - if len(block) == 0 { - break - } - - // lines which start with '#' are comments - start := skipWhitespace(block, 0) - - // If line is all whitespace, just skip it - if start >= len(block) { - continue - } - - if block[start] == '#' { - continue - } - - // strip the newline if one is present - if block[len(block)-1] == '\n' { - block = block[:len(block)-1] - } - - pt, err := parsePoint(block[start:len(block)], defaultTime, precision) - if err != nil { - failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:len(block)]), err)) - } else { - points = append(points, pt) - } - - if pos >= len(buf) { - break - } - - } - if len(failed) > 0 { - return points, fmt.Errorf("%s", strings.Join(failed, "\n")) - } - return points, nil - -} - -func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) { - // scan the first block which is measurement[,tag1=value1,tag2=value=2...] 
- pos, key, err := scanKey(buf, 0) - if err != nil { - return nil, err - } - - // measurement name is required - if len(key) == 0 { - return nil, fmt.Errorf("missing measurement") - } - - if len(key) > MaxKeyLength { - return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) - } - - // scan the second block is which is field1=value1[,field2=value2,...] - pos, fields, err := scanFields(buf, pos) - if err != nil { - return nil, err - } - - // at least one field is required - if len(fields) == 0 { - return nil, fmt.Errorf("missing fields") - } - - // scan the last block which is an optional integer timestamp - pos, ts, err := scanTime(buf, pos) - - if err != nil { - return nil, err - } - - pt := &point{ - key: key, - fields: fields, - ts: ts, - } - - if len(ts) == 0 { - pt.time = defaultTime - pt.SetPrecision(precision) - } else { - ts, err := strconv.ParseInt(string(ts), 10, 64) - if err != nil { - return nil, err - } - pt.time, err = SafeCalcTime(ts, precision) - if err != nil { - return nil, err - } - } - return pt, nil -} - -// GetPrecisionMultiplier will return a multiplier for the precision specified -func GetPrecisionMultiplier(precision string) int64 { - d := time.Nanosecond - switch precision { - case "u": - d = time.Microsecond - case "ms": - d = time.Millisecond - case "s": - d = time.Second - case "m": - d = time.Minute - case "h": - d = time.Hour - } - return int64(d) -} - -// scanKey scans buf starting at i for the measurement and tag portion of the point. -// It returns the ending position and the byte slice of key within buf. If there -// are tags, they will be sorted if they are not already. -func scanKey(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - - i = start - - // Determines whether the tags are sort, assume they are - sorted := true - - // indices holds the indexes within buf of the start of each tag. For example, - // a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20] - // which indicates that the first tag starts at buf[4], seconds at buf[11], and - // last at buf[20] - indices := make([]int, 100) - - // tracks how many commas we've seen so we know how many values are indices. - // Since indices is an arbitrarily large slice, - // we need to know how many values in the buffer are in use. - commas := 0 - - // First scan the Point's measurement. - state, i, err := scanMeasurement(buf, i) - if err != nil { - return i, buf[start:i], err - } - - // Optionally scan tags if needed. - if state == tagKeyState { - i, commas, indices, err = scanTags(buf, i, indices) - if err != nil { - return i, buf[start:i], err - } - } - - // Now we know where the key region is within buf, and the locations of tags, we - // need to determine if duplicate tags exist and if the tags are sorted. This iterates - // 1/2 of the list comparing each end with each other, walking towards the center from - // both sides. - for j := 0; j < commas/2; j++ { - // get the left and right tags - _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') - _, right := scanTo(buf[indices[commas-j-1]:indices[commas-j]-1], 0, '=') - - // If the tags are equal, then there are duplicate tags, and we should abort - if bytes.Equal(left, right) { - return i, buf[start:i], fmt.Errorf("duplicate tags") - } - - // If left is greater than right, the tags are not sorted. We must continue - // since their could be duplicate tags still. 
- if bytes.Compare(left, right) > 0 { - sorted = false - } - } - - // If the tags are not sorted, then sort them. This sort is inline and - // uses the tag indices we created earlier. The actual buffer is not sorted, the - // indices are using the buffer for value comparison. After the indices are sorted, - // the buffer is reconstructed from the sorted indices. - if !sorted && commas > 0 { - // Get the measurement name for later - measurement := buf[start : indices[0]-1] - - // Sort the indices - indices := indices[:commas] - insertionSort(0, commas, buf, indices) - - // Create a new key using the measurement and sorted indices - b := make([]byte, len(buf[start:i])) - pos := copy(b, measurement) - for _, i := range indices { - b[pos] = ',' - pos++ - _, v := scanToSpaceOr(buf, i, ',') - pos += copy(b[pos:], v) - } - - return i, b, nil - } - - return i, buf[start:i], nil -} - -// The following constants allow us to specify which state to move to -// next, when scanning sections of a Point. -const ( - tagKeyState = iota - tagValueState - fieldsState -) - -// scanMeasurement examines the measurement part of a Point, returning -// the next state to move to, and the current location in the buffer. -func scanMeasurement(buf []byte, i int) (int, int, error) { - // Check first byte of measurement, anything except a comma is fine. - // It can't be a space, since whitespace is stripped prior to this - // function call. - if buf[i] == ',' { - return -1, i, fmt.Errorf("missing measurement") - } - - for { - i++ - if i >= len(buf) { - // cpu - return -1, i, fmt.Errorf("missing fields") - } - - if buf[i-1] == '\\' { - // Skip character (it's escaped). - continue - } - - // Unescaped comma; move onto scanning the tags. - if buf[i] == ',' { - return tagKeyState, i + 1, nil - } - - // Unescaped space; move onto scanning the fields. - if buf[i] == ' ' { - // cpu value=1.0 - return fieldsState, i, nil - } - } -} - -// scanTags examines all the tags in a Point, keeping track of and -// returning the updated indices slice, number of commas and location -// in buf where to start examining the Point fields. -func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) { - var ( - err error - commas int - state = tagKeyState - ) - - for { - switch state { - case tagKeyState: - // Grow our indices slice if we have too many tags. - if commas >= len(indices) { - newIndics := make([]int, cap(indices)*2) - copy(newIndics, indices) - indices = newIndics - } - indices[commas] = i - commas++ - - i, err = scanTagsKey(buf, i) - state = tagValueState // tag value always follows a tag key - case tagValueState: - state, i, err = scanTagsValue(buf, i) - case fieldsState: - indices[commas] = i + 1 - return i, commas, indices, nil - } - - if err != nil { - return i, commas, indices, err - } - } -} - -// scanTagsKey scans each character in a tag key. -func scanTagsKey(buf []byte, i int) (int, error) { - // First character of the key. - if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' { - // cpu,{'', ' ', ',', '='} - return i, fmt.Errorf("missing tag key") - } - - // Examine each character in the tag key until we hit an unescaped - // equals (the tag value), or we hit an error (i.e., unescaped - // space or comma). - for { - i++ - - // Either we reached the end of the buffer or we hit an - // unescaped comma or space. 
- if i >= len(buf) || - ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') { - // cpu,tag{'', ' ', ','} - return i, fmt.Errorf("missing tag value") - } - - if buf[i] == '=' && buf[i-1] != '\\' { - // cpu,tag= - return i + 1, nil - } - } -} - -// scanTagsValue scans each character in a tag value. -func scanTagsValue(buf []byte, i int) (int, int, error) { - // Tag value cannot be empty. - if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' { - // cpu,tag={',', ' '} - return -1, i, fmt.Errorf("missing tag value") - } - - // Examine each character in the tag value until we hit an unescaped - // comma (move onto next tag key), an unescaped space (move onto - // fields), or we error out. - for { - i++ - if i >= len(buf) { - // cpu,tag=value - return -1, i, fmt.Errorf("missing fields") - } - - // An unescaped equals sign is an invalid tag value. - if buf[i] == '=' && buf[i-1] != '\\' { - // cpu,tag={'=', 'fo=o'} - return -1, i, fmt.Errorf("invalid tag format") - } - - if buf[i] == ',' && buf[i-1] != '\\' { - // cpu,tag=foo, - return tagKeyState, i + 1, nil - } - - // cpu,tag=foo value=1.0 - // cpu, tag=foo\= value=1.0 - if buf[i] == ' ' && buf[i-1] != '\\' { - return fieldsState, i, nil - } - } -} - -func insertionSort(l, r int, buf []byte, indices []int) { - for i := l + 1; i < r; i++ { - for j := i; j > l && less(buf, indices, j, j-1); j-- { - indices[j], indices[j-1] = indices[j-1], indices[j] - } - } -} - -func less(buf []byte, indices []int, i, j int) bool { - // This grabs the tag names for i & j, it ignores the values - _, a := scanTo(buf, indices[i], '=') - _, b := scanTo(buf, indices[j], '=') - return bytes.Compare(a, b) < 0 -} - -func isFieldEscapeChar(b byte) bool { - for c := range escape.Codes { - if c == b { - return true - } - } - return false -} - -// scanFields scans buf, starting at i for the fields section of a point. It returns -// the ending position and the byte slice of the fields within buf -func scanFields(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - i = start - quoted := false - - // tracks how many '=' we've seen - equals := 0 - - // tracks how many commas we've seen - commas := 0 - - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // escaped characters? - if buf[i] == '\\' && i+1 < len(buf) { - i += 2 - continue - } - - // If the value is quoted, scan until we get to the end quote - // Only quote values in the field value since quotes are not significant - // in the field key - if buf[i] == '"' && equals > commas { - quoted = !quoted - i++ - continue - } - - // If we see an =, ensure that there is at least on char before and after it - if buf[i] == '=' && !quoted { - equals++ - - // check for "... =123" but allow "a\ =123" - if buf[i-1] == ' ' && buf[i-2] != '\\' { - return i, buf[start:i], fmt.Errorf("missing field key") - } - - // check for "...a=123,=456" but allow "a=123,a\,=456" - if buf[i-1] == ',' && buf[i-2] != '\\' { - return i, buf[start:i], fmt.Errorf("missing field key") - } - - // check for "... value=" - if i+1 >= len(buf) { - return i, buf[start:i], fmt.Errorf("missing field value") - } - - // check for "... value=,value2=..." 
- if buf[i+1] == ',' || buf[i+1] == ' ' { - return i, buf[start:i], fmt.Errorf("missing field value") - } - - if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' { - var err error - i, err = scanNumber(buf, i+1) - if err != nil { - return i, buf[start:i], err - } - continue - } - // If next byte is not a double-quote, the value must be a boolean - if buf[i+1] != '"' { - var err error - i, _, err = scanBoolean(buf, i+1) - if err != nil { - return i, buf[start:i], err - } - continue - } - } - - if buf[i] == ',' && !quoted { - commas++ - } - - // reached end of block? - if buf[i] == ' ' && !quoted { - break - } - i++ - } - - if quoted { - return i, buf[start:i], fmt.Errorf("unbalanced quotes") - } - - // check that all field sections had key and values (e.g. prevent "a=1,b" - if equals == 0 || commas != equals-1 { - return i, buf[start:i], fmt.Errorf("invalid field format") - } - - return i, buf[start:i], nil -} - -// scanTime scans buf, starting at i for the time section of a point. It returns -// the ending position and the byte slice of the fields within buf and error if the -// timestamp is not in the correct numeric format -func scanTime(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - i = start - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // Timestamps should be integers, make sure they are so we don't need to actually - // parse the timestamp until needed - if buf[i] < '0' || buf[i] > '9' { - // Handle negative timestamps - if i == start && buf[i] == '-' { - i++ - continue - } - return i, buf[start:i], fmt.Errorf("bad timestamp") - } - - // reached end of block? - if buf[i] == '\n' { - break - } - i++ - } - return i, buf[start:i], nil -} - -func isNumeric(b byte) bool { - return (b >= '0' && b <= '9') || b == '.' -} - -// scanNumber returns the end position within buf, start at i after -// scanning over buf for an integer, or float. It returns an -// error if a invalid number is scanned. -func scanNumber(buf []byte, i int) (int, error) { - start := i - var isInt bool - - // Is negative number? - if i < len(buf) && buf[i] == '-' { - i++ - // There must be more characters now, as just '-' is illegal. - if i == len(buf) { - return i, ErrInvalidNumber - } - } - - // how many decimal points we've see - decimal := false - - // indicates the number is float in scientific notation - scientific := false - - for { - if i >= len(buf) { - break - } - - if buf[i] == ',' || buf[i] == ' ' { - break - } - - if buf[i] == 'i' && i > start && !isInt { - isInt = true - i++ - continue - } - - if buf[i] == '.' { - // Can't have more than 1 decimal (e.g. 
1.1.1 should fail) - if decimal { - return i, ErrInvalidNumber - } - decimal = true - } - - // `e` is valid for floats but not as the first char - if i > start && (buf[i] == 'e' || buf[i] == 'E') { - scientific = true - i++ - continue - } - - // + and - are only valid at this point if they follow an e (scientific notation) - if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') { - i++ - continue - } - - // NaN is an unsupported value - if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') { - return i, ErrInvalidNumber - } - - if !isNumeric(buf[i]) { - return i, ErrInvalidNumber - } - i++ - } - - if isInt && (decimal || scientific) { - return i, ErrInvalidNumber - } - - numericDigits := i - start - if isInt { - numericDigits-- - } - if decimal { - numericDigits-- - } - if buf[start] == '-' { - numericDigits-- - } - - if numericDigits == 0 { - return i, ErrInvalidNumber - } - - // It's more common that numbers will be within min/max range for their type but we need to prevent - // out or range numbers from being parsed successfully. This uses some simple heuristics to decide - // if we should parse the number to the actual type. It does not do it all the time because it incurs - // extra allocations and we end up converting the type again when writing points to disk. - if isInt { - // Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid) - if buf[i-1] != 'i' { - return i, ErrInvalidNumber - } - // Parse the int to check bounds the number of digits could be larger than the max range - // We subtract 1 from the index to remove the `i` from our tests - if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits { - if _, err := strconv.ParseInt(string(buf[start:i-1]), 10, 64); err != nil { - return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err) - } - } - } else { - // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range - if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits { - if _, err := strconv.ParseFloat(string(buf[start:i]), 10); err != nil { - return i, fmt.Errorf("invalid float") - } - } - } - - return i, nil -} - -// scanBoolean returns the end position within buf, start at i after -// scanning over buf for boolean. Valid values for a boolean are -// t, T, true, TRUE, f, F, false, FALSE. It returns an error if a invalid boolean -// is scanned. 
-func scanBoolean(buf []byte, i int) (int, []byte, error) { - start := i - - if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - i++ - for { - if i >= len(buf) { - break - } - - if buf[i] == ',' || buf[i] == ' ' { - break - } - i++ - } - - // Single char bool (t, T, f, F) is ok - if i-start == 1 { - return i, buf[start:i], nil - } - - // length must be 4 for true or TRUE - if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - // length must be 5 for false or FALSE - if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - // Otherwise - valid := false - switch buf[start] { - case 't': - valid = bytes.Equal(buf[start:i], []byte("true")) - case 'f': - valid = bytes.Equal(buf[start:i], []byte("false")) - case 'T': - valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True")) - case 'F': - valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False")) - } - - if !valid { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - return i, buf[start:i], nil - -} - -// skipWhitespace returns the end position within buf, starting at i after -// scanning over spaces in tags -func skipWhitespace(buf []byte, i int) int { - for i < len(buf) { - if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 { - break - } - i++ - } - return i -} - -// scanLine returns the end position in buf and the next line found within -// buf. -func scanLine(buf []byte, i int) (int, []byte) { - start := i - quoted := false - fields := false - - // tracks how many '=' and commas we've seen - // this duplicates some of the functionality in scanFields - equals := 0 - commas := 0 - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // skip past escaped characters - if buf[i] == '\\' { - i += 2 - continue - } - - if buf[i] == ' ' { - fields = true - } - - // If we see a double quote, makes sure it is not escaped - if fields { - if !quoted && buf[i] == '=' { - i++ - equals++ - continue - } else if !quoted && buf[i] == ',' { - i++ - commas++ - continue - } else if buf[i] == '"' && equals > commas { - i++ - quoted = !quoted - continue - } - } - - if buf[i] == '\n' && !quoted { - break - } - - i++ - } - - return i, buf[start:i] -} - -// scanTo returns the end position in buf and the next consecutive block -// of bytes, starting from i and ending with stop byte, where stop byte -// has not been escaped. -// -// If there are leading spaces, they are skipped. -func scanTo(buf []byte, i int, stop byte) (int, []byte) { - start := i - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // Reached unescaped stop value? - if buf[i] == stop && (i == 0 || buf[i-1] != '\\') { - break - } - i++ - } - - return i, buf[start:i] -} - -// scanTo returns the end position in buf and the next consecutive block -// of bytes, starting from i and ending with stop byte. If there are leading -// spaces, they are skipped. -func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) { - start := i - if buf[i] == stop || buf[i] == ' ' { - return i, buf[start:i] - } - - for { - i++ - if buf[i-1] == '\\' { - continue - } - - // reached the end of buf? - if i >= len(buf) { - return i, buf[start:i] - } - - // reached end of block? 
- if buf[i] == stop || buf[i] == ' ' { - return i, buf[start:i] - } - } -} - -func scanTagValue(buf []byte, i int) (int, []byte) { - start := i - for { - if i >= len(buf) { - break - } - - if buf[i] == ',' && buf[i-1] != '\\' { - break - } - i++ - } - return i, buf[start:i] -} - -func scanFieldValue(buf []byte, i int) (int, []byte) { - start := i - quoted := false - for { - if i >= len(buf) { - break - } - - // Only escape char for a field value is a double-quote - if buf[i] == '\\' && i+1 < len(buf) && buf[i+1] == '"' { - i += 2 - continue - } - - // Quoted value? (e.g. string) - if buf[i] == '"' { - i++ - quoted = !quoted - continue - } - - if buf[i] == ',' && !quoted { - break - } - i++ - } - return i, buf[start:i] -} - -func escapeMeasurement(in []byte) []byte { - for b, esc := range measurementEscapeCodes { - in = bytes.Replace(in, []byte{b}, esc, -1) - } - return in -} - -func unescapeMeasurement(in []byte) []byte { - for b, esc := range measurementEscapeCodes { - in = bytes.Replace(in, esc, []byte{b}, -1) - } - return in -} - -func escapeTag(in []byte) []byte { - for b, esc := range tagEscapeCodes { - if bytes.IndexByte(in, b) != -1 { - in = bytes.Replace(in, []byte{b}, esc, -1) - } - } - return in -} - -func unescapeTag(in []byte) []byte { - for b, esc := range tagEscapeCodes { - if bytes.IndexByte(in, b) != -1 { - in = bytes.Replace(in, esc, []byte{b}, -1) - } - } - return in -} - -// escapeStringField returns a copy of in with any double quotes or -// backslashes with escaped values -func escapeStringField(in string) string { - var out []byte - i := 0 - for { - if i >= len(in) { - break - } - // escape double-quotes - if in[i] == '\\' { - out = append(out, '\\') - out = append(out, '\\') - i++ - continue - } - // escape double-quotes - if in[i] == '"' { - out = append(out, '\\') - out = append(out, '"') - i++ - continue - } - out = append(out, in[i]) - i++ - - } - return string(out) -} - -// unescapeStringField returns a copy of in with any escaped double-quotes -// or backslashes unescaped -func unescapeStringField(in string) string { - if strings.IndexByte(in, '\\') == -1 { - return in - } - - var out []byte - i := 0 - for { - if i >= len(in) { - break - } - // unescape backslashes - if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' { - out = append(out, '\\') - i += 2 - continue - } - // unescape double-quotes - if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' { - out = append(out, '"') - i += 2 - continue - } - out = append(out, in[i]) - i++ - - } - return string(out) -} - -// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If -// an unsupported field value (NaN) or out of range time is passed, this function returns an error. 
-func NewPoint(name string, tags Tags, fields Fields, time time.Time) (Point, error) { - if len(fields) == 0 { - return nil, ErrPointMustHaveAField - } - if !time.IsZero() { - if err := CheckTime(time); err != nil { - return nil, err - } - } - - for key, value := range fields { - if fv, ok := value.(float64); ok { - // Ensure the caller validates and handles invalid field values - if math.IsNaN(fv) { - return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) - } - } - if len(key) == 0 { - return nil, fmt.Errorf("all fields must have non-empty names") - } - } - - key := MakeKey([]byte(name), tags) - if len(key) > MaxKeyLength { - return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) - } - - return &point{ - key: key, - time: time, - fields: fields.MarshalBinary(), - }, nil -} - -// NewPointFromBytes returns a new Point from a marshalled Point. -func NewPointFromBytes(b []byte) (Point, error) { - p := &point{} - if err := p.UnmarshalBinary(b); err != nil { - return nil, err - } - if len(p.Fields()) == 0 { - return nil, ErrPointMustHaveAField - } - return p, nil -} - -// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If -// an unsupported field value (NaN) is passed, this function panics. -func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { - pt, err := NewPoint(name, tags, fields, time) - if err != nil { - panic(err.Error()) - } - return pt -} - -func (p *point) Data() []byte { - return p.data -} - -func (p *point) SetData(b []byte) { - p.data = b -} - -func (p *point) Key() []byte { - return p.key -} - -func (p *point) name() []byte { - _, name := scanTo(p.key, 0, ',') - return name -} - -// Name return the measurement name for the point -func (p *point) Name() string { - if p.cachedName != "" { - return p.cachedName - } - p.cachedName = string(escape.Unescape(p.name())) - return p.cachedName -} - -// SetName updates the measurement name for the point -func (p *point) SetName(name string) { - p.cachedName = "" - p.key = MakeKey([]byte(name), p.Tags()) -} - -// Time return the timestamp for the point -func (p *point) Time() time.Time { - return p.time -} - -// SetTime updates the timestamp for the point -func (p *point) SetTime(t time.Time) { - p.time = t -} - -// Tags returns the tag set for the point -func (p *point) Tags() Tags { - return parseTags(p.key) -} - -func parseTags(buf []byte) Tags { - tags := map[string]string{} - - if len(buf) != 0 { - pos, name := scanTo(buf, 0, ',') - - // it's an empyt key, so there are no tags - if len(name) == 0 { - return tags - } - - i := pos + 1 - var key, value []byte - for { - if i >= len(buf) { - break - } - i, key = scanTo(buf, i, '=') - i, value = scanTagValue(buf, i+1) - - if len(value) == 0 { - continue - } - - tags[string(unescapeTag(key))] = string(unescapeTag(value)) - - i++ - } - } - return tags -} - -// MakeKey creates a key for a set of tags. -func MakeKey(name []byte, tags Tags) []byte { - // unescape the name and then re-escape it to avoid double escaping. - // The key should always be stored in escaped form. - return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...) 
-} - -// SetTags replaces the tags for the point -func (p *point) SetTags(tags Tags) { - p.key = MakeKey([]byte(p.Name()), tags) -} - -// AddTag adds or replaces a tag value for a point -func (p *point) AddTag(key, value string) { - tags := p.Tags() - tags[key] = value - p.key = MakeKey([]byte(p.Name()), tags) -} - -// Fields returns the fields for the point -func (p *point) Fields() Fields { - if p.cachedFields != nil { - return p.cachedFields - } - p.cachedFields = p.unmarshalBinary() - return p.cachedFields -} - -// SetPrecision will round a time to the specified precision -func (p *point) SetPrecision(precision string) { - switch precision { - case "n": - case "u": - p.SetTime(p.Time().Truncate(time.Microsecond)) - case "ms": - p.SetTime(p.Time().Truncate(time.Millisecond)) - case "s": - p.SetTime(p.Time().Truncate(time.Second)) - case "m": - p.SetTime(p.Time().Truncate(time.Minute)) - case "h": - p.SetTime(p.Time().Truncate(time.Hour)) - } -} - -func (p *point) String() string { - if p.Time().IsZero() { - return string(p.Key()) + " " + string(p.fields) - } - return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) -} - -func (p *point) MarshalBinary() ([]byte, error) { - tb, err := p.time.MarshalBinary() - if err != nil { - return nil, err - } - - b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) - i := 0 - - binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) - i += 4 - - i += copy(b[i:], p.key) - - binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) - i += 4 - - i += copy(b[i:], p.fields) - - copy(b[i:], tb) - return b, nil -} - -func (p *point) UnmarshalBinary(b []byte) error { - var i int - keyLen := int(binary.BigEndian.Uint32(b[:4])) - i += int(4) - - p.key = b[i : i+keyLen] - i += keyLen - - fieldLen := int(binary.BigEndian.Uint32(b[i : i+4])) - i += int(4) - - p.fields = b[i : i+fieldLen] - i += fieldLen - - p.time = time.Now() - p.time.UnmarshalBinary(b[i:]) - return nil -} - -func (p *point) PrecisionString(precision string) string { - if p.Time().IsZero() { - return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) - } - return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), - p.UnixNano()/GetPrecisionMultiplier(precision)) -} - -func (p *point) RoundedString(d time.Duration) string { - if p.Time().IsZero() { - return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) - } - return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), - p.time.Round(d).UnixNano()) -} - -func (p *point) unmarshalBinary() Fields { - return newFieldsFromBinary(p.fields) -} - -func (p *point) HashID() uint64 { - h := fnv.New64a() - h.Write(p.key) - sum := h.Sum64() - return sum -} - -func (p *point) UnixNano() int64 { - return p.Time().UnixNano() -} - -// Tags represents a mapping between a Point's tag names and their -// values. -type Tags map[string]string - -// HashKey hashes all of a tag's keys. -func (t Tags) HashKey() []byte { - // Empty maps marshal to empty bytes. - if len(t) == 0 { - return nil - } - - escaped := Tags{} - for k, v := range t { - ek := escapeTag([]byte(k)) - ev := escapeTag([]byte(v)) - - if len(ev) > 0 { - escaped[string(ek)] = string(ev) - } - } - - // Extract keys and determine final size. - sz := len(escaped) + (len(escaped) * 2) // separators - keys := make([]string, len(escaped)+1) - i := 0 - for k, v := range escaped { - keys[i] = k - i++ - sz += len(k) + len(v) - } - keys = keys[:i] - sort.Strings(keys) - // Generate marshaled bytes. 
- b := make([]byte, sz) - buf := b - idx := 0 - for _, k := range keys { - buf[idx] = ',' - idx++ - copy(buf[idx:idx+len(k)], k) - idx += len(k) - buf[idx] = '=' - idx++ - v := escaped[k] - copy(buf[idx:idx+len(v)], v) - idx += len(v) - } - return b[:idx] -} - -// Fields represents a mapping between a Point's field names and their -// values. -type Fields map[string]interface{} - -func parseNumber(val []byte) (interface{}, error) { - if val[len(val)-1] == 'i' { - val = val[:len(val)-1] - return strconv.ParseInt(string(val), 10, 64) - } - for i := 0; i < len(val); i++ { - // If there is a decimal or an N (NaN), I (Inf), parse as float - if val[i] == '.' || val[i] == 'N' || val[i] == 'n' || val[i] == 'I' || val[i] == 'i' || val[i] == 'e' { - return strconv.ParseFloat(string(val), 64) - } - if val[i] < '0' && val[i] > '9' { - return string(val), nil - } - } - return strconv.ParseFloat(string(val), 64) -} - -func newFieldsFromBinary(buf []byte) Fields { - fields := make(Fields, 8) - var ( - i int - name, valueBuf []byte - value interface{} - err error - ) - for i < len(buf) { - - i, name = scanTo(buf, i, '=') - name = escape.Unescape(name) - - i, valueBuf = scanFieldValue(buf, i+1) - if len(name) > 0 { - if len(valueBuf) == 0 { - fields[string(name)] = nil - continue - } - - // If the first char is a double-quote, then unmarshal as string - if valueBuf[0] == '"' { - value = unescapeStringField(string(valueBuf[1 : len(valueBuf)-1])) - // Check for numeric characters and special NaN or Inf - } else if (valueBuf[0] >= '0' && valueBuf[0] <= '9') || valueBuf[0] == '-' || valueBuf[0] == '.' || - valueBuf[0] == 'N' || valueBuf[0] == 'n' || // NaN - valueBuf[0] == 'I' || valueBuf[0] == 'i' { // Inf - - value, err = parseNumber(valueBuf) - if err != nil { - panic(fmt.Sprintf("unable to parse number value '%v': %v", string(valueBuf), err)) - } - - // Otherwise parse it as bool - } else { - value, err = strconv.ParseBool(string(valueBuf)) - if err != nil { - panic(fmt.Sprintf("unable to parse bool value '%v': %v\n", string(valueBuf), err)) - } - } - fields[string(name)] = value - } - i++ - } - return fields -} - -// MarshalBinary encodes all the fields to their proper type and returns the binary -// represenation -// NOTE: uint64 is specifically not supported due to potential overflow when we decode -// again later to an int64 -func (p Fields) MarshalBinary() []byte { - b := []byte{} - keys := make([]string, len(p)) - i := 0 - for k := range p { - keys[i] = k - i++ - } - sort.Strings(keys) - - for _, k := range keys { - v := p[k] - b = append(b, []byte(escape.String(k))...) - b = append(b, '=') - switch t := v.(type) { - case int: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case int64: - b = append(b, []byte(strconv.FormatInt(t, 10))...) - b = append(b, 'i') - case uint: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint8: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint16: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) - b = append(b, 'i') - case uint32: - b = append(b, []byte(strconv.FormatInt(int64(t), 10))...) 
- b = append(b, 'i') - case float32: - val := []byte(strconv.FormatFloat(float64(t), 'f', -1, 32)) - b = append(b, val...) - case float64: - val := []byte(strconv.FormatFloat(t, 'f', -1, 64)) - b = append(b, val...) - case bool: - b = append(b, []byte(strconv.FormatBool(t))...) - case []byte: - b = append(b, t...) - case string: - b = append(b, '"') - b = append(b, []byte(escapeStringField(t))...) - b = append(b, '"') - case nil: - // skip - default: - // Can't determine the type, so convert to string - b = append(b, '"') - b = append(b, []byte(escapeStringField(fmt.Sprintf("%v", v)))...) - b = append(b, '"') - - } - b = append(b, ',') - } - if len(b) > 0 { - return b[0 : len(b)-1] - } - return b -} - -type indexedSlice struct { - indices []int - b []byte -} - -func (s *indexedSlice) Less(i, j int) bool { - _, a := scanTo(s.b, s.indices[i], '=') - _, b := scanTo(s.b, s.indices[j], '=') - return bytes.Compare(a, b) < 0 -} - -func (s *indexedSlice) Swap(i, j int) { - s.indices[i], s.indices[j] = s.indices[j], s.indices[i] -} - -func (s *indexedSlice) Len() int { - return len(s.indices) -} diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go b/Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go deleted file mode 100644 index 72435f5c708..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/rows.go +++ /dev/null @@ -1,60 +0,0 @@ -package models - -import ( - "hash/fnv" - "sort" -) - -// Row represents a single row returned from the execution of a statement. -type Row struct { - Name string `json:"name,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Columns []string `json:"columns,omitempty"` - Values [][]interface{} `json:"values,omitempty"` - Err error `json:"err,omitempty"` -} - -// SameSeries returns true if r contains values for the same series as o. -func (r *Row) SameSeries(o *Row) bool { - return r.tagsHash() == o.tagsHash() && r.Name == o.Name -} - -// tagsHash returns a hash of tag key/value pairs. -func (r *Row) tagsHash() uint64 { - h := fnv.New64a() - keys := r.tagsKeys() - for _, k := range keys { - h.Write([]byte(k)) - h.Write([]byte(r.Tags[k])) - } - return h.Sum64() -} - -// tagKeys returns a sorted list of tag keys. -func (r *Row) tagsKeys() []string { - a := make([]string, 0, len(r.Tags)) - for k := range r.Tags { - a = append(a, k) - } - sort.Strings(a) - return a -} - -// Rows represents a collection of rows. Rows implements sort.Interface. -type Rows []*Row - -func (p Rows) Len() int { return len(p) } - -func (p Rows) Less(i, j int) bool { - // Sort by name first. - if p[i].Name != p[j].Name { - return p[i].Name < p[j].Name - } - - // Sort by tag set hash. Tags don't have a meaningful sort order so we - // just compute a hash and sort by that instead. This allows the tests - // to receive rows in a predictable order every time. - return p[i].tagsHash() < p[j].tagsHash() -} - -func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go b/Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go deleted file mode 100644 index 9e41577742f..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/models/time.go +++ /dev/null @@ -1,51 +0,0 @@ -package models - -// Helper time methods since parsing time can easily overflow and we only support a -// specific time range. 
- -import ( - "fmt" - "math" - "time" -) - -var ( - // MaxNanoTime is the maximum time that can be represented via int64 nanoseconds since the epoch. - MaxNanoTime = time.Unix(0, math.MaxInt64).UTC() - // MinNanoTime is the minumum time that can be represented via int64 nanoseconds since the epoch. - MinNanoTime = time.Unix(0, math.MinInt64).UTC() - - // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. - ErrTimeOutOfRange = fmt.Errorf("time outside range %s - %s", MinNanoTime, MaxNanoTime) -) - -// SafeCalcTime safely calculates the time given. Will return error if the time is outside the -// supported range. -func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { - mult := GetPrecisionMultiplier(precision) - if t, ok := safeSignedMult(timestamp, mult); ok { - return time.Unix(0, t).UTC(), nil - } - - return time.Time{}, ErrTimeOutOfRange -} - -// CheckTime checks that a time is within the safe range. -func CheckTime(t time.Time) error { - if t.Before(MinNanoTime) || t.After(MaxNanoTime) { - return ErrTimeOutOfRange - } - return nil -} - -// Perform the multiplication and check to make sure it didn't overflow. -func safeSignedMult(a, b int64) (int64, bool) { - if a == 0 || b == 0 || a == 1 || b == 1 { - return a * b, true - } - if a == math.MinInt64 || b == math.MaxInt64 { - return 0, false - } - c := a * b - return c, c/b == a -} diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go b/Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go deleted file mode 100644 index a2191ffd4da..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/bytes.go +++ /dev/null @@ -1,53 +0,0 @@ -package escape - -import "bytes" - -func Bytes(in []byte) []byte { - for b, esc := range Codes { - in = bytes.Replace(in, []byte{b}, esc, -1) - } - return in -} - -func Unescape(in []byte) []byte { - if len(in) == 0 { - return nil - } - - if bytes.IndexByte(in, '\\') == -1 { - return in - } - - i := 0 - inLen := len(in) - var out []byte - - for { - if i >= inLen { - break - } - if in[i] == '\\' && i+1 < inLen { - switch in[i+1] { - case ',': - out = append(out, ',') - i += 2 - continue - case '"': - out = append(out, '"') - i += 2 - continue - case ' ': - out = append(out, ' ') - i += 2 - continue - case '=': - out = append(out, '=') - i += 2 - continue - } - } - out = append(out, in[i]) - i += 1 - } - return out -} diff --git a/Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go b/Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go deleted file mode 100644 index 330fbf4226a..00000000000 --- a/Godeps/_workspace/src/github.com/influxdata/influxdb/pkg/escape/strings.go +++ /dev/null @@ -1,34 +0,0 @@ -package escape - -import "strings" - -var ( - Codes = map[byte][]byte{ - ',': []byte(`\,`), - '"': []byte(`\"`), - ' ': []byte(`\ `), - '=': []byte(`\=`), - } - - codesStr = map[string]string{} -) - -func init() { - for k, v := range Codes { - codesStr[string(k)] = string(v) - } -} - -func UnescapeString(in string) string { - for b, esc := range codesStr { - in = strings.Replace(in, esc, b, -1) - } - return in -} - -func String(in string) string { - for b, esc := range codesStr { - in = strings.Replace(in, b, esc, -1) - } - return in -} diff --git a/pkg/services/sqlstore/sqlstore.go b/pkg/services/sqlstore/sqlstore.go index 30f194d6938..81b19717ddf 100644 --- a/pkg/services/sqlstore/sqlstore.go +++ 
b/pkg/services/sqlstore/sqlstore.go @@ -81,7 +81,7 @@ func NewEngine() { err = SetEngine(x, setting.Env == setting.DEV) if err != nil { - sqlog.Error("Fail to initialize orm engine: %v", err) + sqlog.Error("Fail to initialize orm engine", "error", err) os.Exit(1) } } From 36b0802789fbaf86a13988a39d9f7eb6b1856ca8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 8 Jun 2016 05:29:38 +0200 Subject: [PATCH 09/17] fix(png): fixed issue with png rendering, fixes #5274 --- public/app/features/dashboard/viewStateSrv.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/public/app/features/dashboard/viewStateSrv.js b/public/app/features/dashboard/viewStateSrv.js index 2138dd37438..035bfb6ae6e 100644 --- a/public/app/features/dashboard/viewStateSrv.js +++ b/public/app/features/dashboard/viewStateSrv.js @@ -92,7 +92,6 @@ function (angular, _, $) { state.fullscreen = state.fullscreen ? true : null; state.edit = (state.edit === "true" || state.edit === true) || null; state.editview = state.editview || null; - state.org = contextSrv.user.orgId; return state; }; @@ -100,7 +99,6 @@ function (angular, _, $) { var urlState = _.clone(this.state); urlState.fullscreen = this.state.fullscreen ? true : null; urlState.edit = this.state.edit ? true : null; - urlState.org = contextSrv.user.orgId; return urlState; }; From 67ad903556c2ec9bd661b2bcd1556b8e3f8261c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 8 Jun 2016 05:45:13 +0200 Subject: [PATCH 10/17] feat(test metrics): fixed issue with built in Grafana test data source, fixes #5299 --- pkg/api/metrics.go | 4 ++-- public/app/features/panel/metrics_panel_ctrl.ts | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/api/metrics.go b/pkg/api/metrics.go index 35359d57cdd..dc674e770ae 100644 --- a/pkg/api/metrics.go +++ b/pkg/api/metrics.go @@ -12,7 +12,7 @@ import ( "github.com/grafana/grafana/pkg/util" ) -func GetTestMetrics(c *middleware.Context) { +func GetTestMetrics(c *middleware.Context) Response { from := c.QueryInt64("from") to := c.QueryInt64("to") maxDataPoints := c.QueryInt64("maxDataPoints") @@ -37,7 +37,7 @@ func GetTestMetrics(c *middleware.Context) { result.Data[seriesIndex].DataPoints = points } - c.JSON(200, &result) + return Json(200, &result) } func GetInternalMetrics(c *middleware.Context) Response { diff --git a/public/app/features/panel/metrics_panel_ctrl.ts b/public/app/features/panel/metrics_panel_ctrl.ts index 0bccee8ff35..0a283cd983f 100644 --- a/public/app/features/panel/metrics_panel_ctrl.ts +++ b/public/app/features/panel/metrics_panel_ctrl.ts @@ -200,6 +200,11 @@ class MetricsPanelCtrl extends PanelCtrl { this.panel.snapshotData = result.data; } + if (!result || !result.data) { + console.log('Data source query result invalid, missing data field:', result); + result = {data: []}; + } + return this.events.emit('data-received', result.data); } From 5eceabf8100ca6bf1ae9edb41f0551745f0a48f3 Mon Sep 17 00:00:00 2001 From: Zdenek Styblik Date: Wed, 8 Jun 2016 05:46:21 +0200 Subject: [PATCH 11/17] fix(): Check Organization exists before User is added (#5302) Commit adds a check whether Organization exists before User is added to the organization. Fixes #3151. 
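The guard boils down to: probe for the org row first, and map an empty
result to ErrOrgNotFound so callers can decide whether a missing org is
fatal (the LDAP sync in the diff below treats it as non-fatal). A rough,
self-contained sketch of the same pattern with database/sql; the table
names, function name, and error variable here are illustrative, not the
xorm session API the actual change uses:

    package orgcheck

    import (
        "database/sql"
        "errors"
    )

    // ErrOrgNotFound stands in for the m.ErrOrgNotFound model error.
    var ErrOrgNotFound = errors.New("organization not found")

    // addOrgUser inserts a membership row only when the org exists, so a
    // bad org id surfaces as ErrOrgNotFound instead of an orphan row.
    func addOrgUser(db *sql.DB, orgID, userID int64, role string) error {
        var one int
        err := db.QueryRow("SELECT 1 FROM org WHERE id = ?", orgID).Scan(&one)
        if err == sql.ErrNoRows {
            return ErrOrgNotFound
        }
        if err != nil {
            return err
        }

        _, err = db.Exec(
            "INSERT INTO org_user (org_id, user_id, role) VALUES (?, ?, ?)",
            orgID, userID, role,
        )
        return err
    }

Probing with SELECT 1 keeps the check cheap and avoids inserting an
org_user row that references a missing org.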
---
 pkg/login/ldap.go                  | 3 ++-
 pkg/services/sqlstore/org_users.go | 6 ++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/pkg/login/ldap.go b/pkg/login/ldap.go
index 48f226ccfa5..0c817c9df0b 100644
--- a/pkg/login/ldap.go
+++ b/pkg/login/ldap.go
@@ -219,7 +219,8 @@ func (a *ldapAuther) syncOrgRoles(user *m.User, ldapUser *ldapUserInfo) error {
 
 		// add role
 		cmd := m.AddOrgUserCommand{UserId: user.Id, Role: group.OrgRole, OrgId: group.OrgId}
-		if err := bus.Dispatch(&cmd); err != nil {
+		err := bus.Dispatch(&cmd)
+		if err != nil && err != m.ErrOrgNotFound {
 			return err
 		}
 
diff --git a/pkg/services/sqlstore/org_users.go b/pkg/services/sqlstore/org_users.go
index fdd671d0bfe..11ea558b0ce 100644
--- a/pkg/services/sqlstore/org_users.go
+++ b/pkg/services/sqlstore/org_users.go
@@ -26,6 +26,12 @@ func AddOrgUser(cmd *m.AddOrgUserCommand) error {
 			return m.ErrOrgUserAlreadyAdded
 		}
 
+		if res, err := sess.Query("SELECT 1 from org WHERE id=?", cmd.OrgId); err != nil {
+			return err
+		} else if len(res) != 1 {
+			return m.ErrOrgNotFound
+		}
+
 		entity := m.OrgUser{
 			OrgId:   cmd.OrgId,
 			UserId:  cmd.UserId,

From c739428c30745ec808813521bd88b1575ed4b85a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Torkel=20=C3=96degaard?=
Date: Wed, 8 Jun 2016 06:51:01 +0200
Subject: [PATCH 12/17] feat(profiling): refactoring profiling code, #5286

---
 public/app/core/components/grafana_app.ts     |  85 +----------
 public/app/core/profiler.ts                   | 132 ++++++++++++++++++
 .../features/annotations/annotations_srv.js   |   2 +-
 .../app/features/dashboard/dashboardCtrl.js   |   7 +-
 .../features/dashboard/dashboardLoaderSrv.js  |   2 +
 public/app/features/dashboard/viewStateSrv.js |   7 -
 public/app/features/panel/panel_ctrl.ts       |  17 +--
 public/app/features/panel/solo_panel_ctrl.js  |   2 +-
 .../test/specs/dashboardViewStateSrv-specs.js |   3 +-
 9 files changed, 143 insertions(+), 114 deletions(-)
 create mode 100644 public/app/core/profiler.ts

diff --git a/public/app/core/components/grafana_app.ts b/public/app/core/components/grafana_app.ts
index bfa0720fd71..a871e06ad30 100644
--- a/public/app/core/components/grafana_app.ts
+++ b/public/app/core/components/grafana_app.ts
@@ -6,6 +6,7 @@ import _ from 'lodash';
 import angular from 'angular';
 import $ from 'jquery';
 import coreModule from 'app/core/core_module';
+import {profiler} from 'app/core/profiler';
 
 export class GrafanaCtrl {
 
@@ -15,14 +16,10 @@ export class GrafanaCtrl {
 
     $scope.init = function() {
       $scope.contextSrv = contextSrv;
+      $rootScope.appSubUrl = config.appSubUrl;
       $scope._ = _;
 
-      $rootScope.profilingEnabled = store.getBool('profilingEnabled') || config.buildInfo.env === 'development';
-      $rootScope.performance = { loadStart: new Date().getTime() };
-      $rootScope.appSubUrl = config.appSubUrl;
-
-      if ($rootScope.profilingEnabled) { $scope.initProfiling(); }
-
+      profiler.init(config, $rootScope);
       alertSrv.init();
       utilSrv.init();
 
@@ -59,82 +56,6 @@ export class GrafanaCtrl {
       "#E0F9D7","#FCEACA","#CFFAFF","#F9E2D2","#FCE2DE","#BADFF4","#F9D9F9","#DEDAF7"
     ];
 
-    $scope.getTotalWatcherCount = function() {
-      var count = 0;
-      var scopes = 0;
-      var root = $(document.getElementsByTagName('body'));
-
-      var f = function (element) {
-        if (element.data().hasOwnProperty('$scope')) {
-          scopes++;
-          angular.forEach(element.data().$scope.$$watchers, function () {
-            count++;
-          });
-        }
-
-        angular.forEach(element.children(), function (childElement) {
-          f($(childElement));
-        });
-      };
-
-      f(root);
-      $rootScope.performance.scopeCount = scopes;
-      return count;
-    };
-
-    $scope.initProfiling = function() {
-      var count =
0; - - $scope.$watch(function digestCounter() { - count++; - }, function() { - // something - }); - - $rootScope.performance.panels = []; - - $scope.$on('refresh', function() { - if ($rootScope.performance.panels.length > 0) { - var totalRender = 0; - var totalQuery = 0; - - _.each($rootScope.performance.panels, function(panelTiming: any) { - totalRender += panelTiming.render; - totalQuery += panelTiming.query; - }); - - console.log('total query: ' + totalQuery); - console.log('total render: ' + totalRender); - console.log('avg render: ' + totalRender / $rootScope.performance.panels.length); - } - - $rootScope.performance.panels = []; - }); - - $scope.onAppEvent('dashboard-loaded', function() { - count = 0; - - setTimeout(function() { - console.log("Dashboard::Performance Total Digests: " + count); - console.log("Dashboard::Performance Total Watchers: " + $scope.getTotalWatcherCount()); - console.log("Dashboard::Performance Total ScopeCount: " + $rootScope.performance.scopeCount); - - var timeTaken = $rootScope.performance.allPanelsInitialized - $rootScope.performance.dashboardLoadStart; - console.log("Dashboard::Performance - All panels initialized in " + timeTaken + " ms"); - - // measure digest performance - var rootDigestStart = window.performance.now(); - for (var i = 0; i < 30; i++) { - $rootScope.$apply(); - } - console.log("Dashboard::Performance Root Digest " + ((window.performance.now() - rootDigestStart) / 30)); - - }, 3000); - - }); - - }; - $scope.init(); } } diff --git a/public/app/core/profiler.ts b/public/app/core/profiler.ts new file mode 100644 index 00000000000..0a0d611ac55 --- /dev/null +++ b/public/app/core/profiler.ts @@ -0,0 +1,132 @@ +/// +// +import $ from 'jquery'; +import _ from 'lodash'; +import angular from 'angular'; + +export class Profiler { + panelsRendered: number; + enabled: boolean; + panels: any[]; + panelsInitCount: any; + timings: any; + digestCounter: any; + $rootScope: any; + scopeCount: any; + + init(config, $rootScope) { + this.enabled = config.buildInfo.env === 'development'; + this.timings = {}; + this.timings.appStart = { loadStart: new Date().getTime() }; + this.$rootScope = $rootScope; + + if (!this.enabled) { + return; + } + + $rootScope.$watch(() => { + this.digestCounter++; + return false; + }, () => {}); + + $rootScope.$on('refresh', this.refresh.bind(this)); + $rootScope.onAppEvent('dashboard-fetched', this.dashboardFetched.bind(this)); + $rootScope.onAppEvent('dashboard-initialized', this.dashboardInitialized.bind(this)); + $rootScope.onAppEvent('panel-initialized', this.panelInitialized.bind(this)); + } + + refresh() { + if (this.panels.length > 0) { + var totalRender = 0; + var totalQuery = 0; + + for (let panelTiming of this.panels) { + totalRender += panelTiming.render; + totalQuery += panelTiming.query; + } + + console.log('panel count: ' + this.panels.length); + console.log('total query: ' + totalQuery); + console.log('total render: ' + totalRender); + console.log('avg render: ' + totalRender / this.panels.length); + } + this.$rootScope.panels = []; + } + + dashboardFetched() { + this.timings.dashboardLoadStart = new Date().getTime(); + this.panelsInitCount = 0; + this.digestCounter = 0; + this.panelsInitCount = 0; + this.panelsRendered = 0; + this.panels = []; + } + + dashboardInitialized() { + setTimeout(() => { + console.log("Dashboard::Performance Total Digests: " + this.digestCounter); + console.log("Dashboard::Performance Total Watchers: " + this.getTotalWatcherCount()); + console.log("Dashboard::Performance Total 
ScopeCount: " + this.scopeCount); + + var timeTaken = this.timings.lastPanelInitializedAt - this.timings.dashboardLoadStart; + console.log("Dashboard::Performance All panels initialized in " + timeTaken + " ms"); + + // measure digest performance + var rootDigestStart = window.performance.now(); + for (var i = 0; i < 30; i++) { + this.$rootScope.$apply(); + } + + console.log("Dashboard::Performance Root Digest " + ((window.performance.now() - rootDigestStart) / 30)); + }, 3000); + } + + getTotalWatcherCount() { + var count = 0; + var scopes = 0; + var root = $(document.getElementsByTagName('body')); + + var f = function (element) { + if (element.data().hasOwnProperty('$scope')) { + scopes++; + angular.forEach(element.data().$scope.$$watchers, function () { + count++; + }); + } + + angular.forEach(element.children(), function (childElement) { + f($(childElement)); + }); + }; + + f(root); + this.scopeCount = scopes; + return count; + } + + renderingCompleted(panelId, panelTimings) { + this.panelsRendered++; + + if (this.enabled) { + panelTimings.renderEnd = new Date().getTime(); + this.panels.push({ + panelId: panelId, + query: panelTimings.queryEnd - panelTimings.queryStart, + render: panelTimings.renderEnd - panelTimings.renderStart, + }); + } + } + + panelInitialized() { + if (!this.enabled) { + return; + } + + this.panelsInitCount++; + this.timings.lastPanelInitializedAt = new Date().getTime(); + } + +} + +var profiler = new Profiler(); +export {profiler}; diff --git a/public/app/features/annotations/annotations_srv.js b/public/app/features/annotations/annotations_srv.js index a693dd602c8..8f84a6ba905 100644 --- a/public/app/features/annotations/annotations_srv.js +++ b/public/app/features/annotations/annotations_srv.js @@ -14,7 +14,7 @@ define([ this.init = function() { $rootScope.onAppEvent('refresh', this.clearCache, $rootScope); - $rootScope.onAppEvent('dashboard-loaded', this.clearCache, $rootScope); + $rootScope.onAppEvent('dashboard-initialized', this.clearCache, $rootScope); }; this.clearCache = function() { diff --git a/public/app/features/dashboard/dashboardCtrl.js b/public/app/features/dashboard/dashboardCtrl.js index 9f3e6da998f..0a9c0fd7e92 100644 --- a/public/app/features/dashboard/dashboardCtrl.js +++ b/public/app/features/dashboard/dashboardCtrl.js @@ -35,10 +35,6 @@ function (angular, $, config, moment) { }; $scope.setupDashboard = function(data) { - $rootScope.performance.dashboardLoadStart = new Date().getTime(); - $rootScope.performance.panelsInitialized = 0; - $rootScope.performance.panelsRendered = 0; - var dashboard = dashboardSrv.create(data.dashboard, data.meta); dashboardSrv.setCurrent(dashboard); @@ -68,7 +64,7 @@ function (angular, $, config, moment) { }); } - $scope.appEvent("dashboard-loaded", $scope.dashboard); + $scope.appEvent("dashboard-initialized", $scope.dashboard); }).catch(function(err) { if (err.data && err.data.message) { err.message = err.data.message; } $scope.appEvent("alert-error", ['Dashboard init failed', 'Template variables could not be initialized: ' + err.message]); @@ -84,7 +80,6 @@ function (angular, $, config, moment) { }; $scope.broadcastRefresh = function() { - $rootScope.performance.panelsRendered = 0; $rootScope.$broadcast('refresh'); }; diff --git a/public/app/features/dashboard/dashboardLoaderSrv.js b/public/app/features/dashboard/dashboardLoaderSrv.js index 1af0894b462..70c49967ea5 100644 --- a/public/app/features/dashboard/dashboardLoaderSrv.js +++ b/public/app/features/dashboard/dashboardLoaderSrv.js @@ -47,6 +47,8 @@ function 
(angular, moment, _, $, kbn, dateMath, impressionStore) { } promise.then(function(result) { + $rootScope.appEvent("dashboard-fetched", result.dashboard); + if (result.meta.dashboardNotFound !== true) { impressionStore.impressions.addDashboardImpression(result.dashboard.id); } diff --git a/public/app/features/dashboard/viewStateSrv.js b/public/app/features/dashboard/viewStateSrv.js index ba820e86a92..035bfb6ae6e 100644 --- a/public/app/features/dashboard/viewStateSrv.js +++ b/public/app/features/dashboard/viewStateSrv.js @@ -51,13 +51,6 @@ function (angular, _, $) { $scope.onAppEvent('panel-initialized', function(evt, payload) { self.registerPanel(payload.scope); - - if ($scope.profilingEnabled) { - $scope.performance.panelsInitialized++; - if ($scope.performance.panelsInitialized === $scope.performance.panelCount) { - $scope.performance.allPanelsInitialized = new Date().getTime(); - } - } }); this.update(this.getQueryStringState()); diff --git a/public/app/features/panel/panel_ctrl.ts b/public/app/features/panel/panel_ctrl.ts index df44559595b..0f253b5048a 100644 --- a/public/app/features/panel/panel_ctrl.ts +++ b/public/app/features/panel/panel_ctrl.ts @@ -4,6 +4,7 @@ import config from 'app/core/config'; import _ from 'lodash'; import angular from 'angular'; import $ from 'jquery'; +import {profiler} from 'app/core/profiler'; const TITLE_HEIGHT = 25; const EMPTY_TITLE_HEIGHT = 9; @@ -59,21 +60,7 @@ export class PanelCtrl { } renderingCompleted() { - this.$scope.$root.performance.panelsRendered++; - this.timing.renderEnd = new Date().getTime(); - if (this.$scope.$root.profilingEnabled) { - this.$scope.$root.performance.panels.push({ - panelId: this.panel.id, - query: this.timing.queryEnd - this.timing.queryStart, - render: this.timing.renderEnd - this.timing.renderStart, - }); - - if (this.$scope.$root.performance.panelsRendered === this.$scope.$root.performance.panelCount) { - this.$scope.$root.performance.allPanelsRendered = new Date().getTime(); - var timeTaken = this.$scope.$root.performance.allPanelsRendered - this.$scope.$root.performance.dashboardLoadStart; - console.log("Dashboard::Performance - All panels rendered in " + timeTaken + " ms"); - } - } + profiler.renderingCompleted(this.panel.id, this.timing); } refresh() { diff --git a/public/app/features/panel/solo_panel_ctrl.js b/public/app/features/panel/solo_panel_ctrl.js index 355d5e8b265..0eb271675ee 100644 --- a/public/app/features/panel/solo_panel_ctrl.js +++ b/public/app/features/panel/solo_panel_ctrl.js @@ -25,7 +25,7 @@ function (angular, $) { $scope.initDashboard(result, $scope); }); - $scope.onAppEvent("dashboard-loaded", $scope.initPanelScope); + $scope.onAppEvent("dashboard-initialized", $scope.initPanelScope); }; $scope.initPanelScope = function() { diff --git a/public/test/specs/dashboardViewStateSrv-specs.js b/public/test/specs/dashboardViewStateSrv-specs.js index 202a43670b1..90e35810ac0 100644 --- a/public/test/specs/dashboardViewStateSrv-specs.js +++ b/public/test/specs/dashboardViewStateSrv-specs.js @@ -31,7 +31,7 @@ define([ it('should update querystring and view state', function() { var updateState = {fullscreen: true, edit: true, panelId: 1}; viewState.update(updateState); - expect(location.search()).to.eql({fullscreen: true, edit: true, panelId: 1, org: 19}); + expect(location.search()).to.eql({fullscreen: true, edit: true, panelId: 1}); expect(viewState.dashboard.meta.fullscreen).to.be(true); expect(viewState.state.fullscreen).to.be(true); }); @@ -41,7 +41,6 @@ define([ it('should remove params from query 
string', function() { viewState.update({fullscreen: true, panelId: 1, edit: true}); viewState.update({fullscreen: false}); - expect(location.search()).to.eql({org: 19}); expect(viewState.dashboard.meta.fullscreen).to.be(false); expect(viewState.state.fullscreen).to.be(null); }); From eed0d9c8d2b9b7d70cb4ca12e4352e6af13f548a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 8 Jun 2016 07:23:44 +0200 Subject: [PATCH 13/17] feat(graph performance): graph performance tweaks, refactoring #5297 --- public/app/core/profiler.ts | 7 ++--- public/app/plugins/panel/graph/graph.js | 36 +++++++++---------------- 2 files changed, 17 insertions(+), 26 deletions(-) diff --git a/public/app/core/profiler.ts b/public/app/core/profiler.ts index 0a0d611ac55..8684a5d3531 100644 --- a/public/app/core/profiler.ts +++ b/public/app/core/profiler.ts @@ -36,7 +36,9 @@ export class Profiler { } refresh() { - if (this.panels.length > 0) { + this.panels = []; + + setTimeout(() => { var totalRender = 0; var totalQuery = 0; @@ -49,8 +51,7 @@ export class Profiler { console.log('total query: ' + totalQuery); console.log('total render: ' + totalRender); console.log('avg render: ' + totalRender / this.panels.length); - } - this.$rootScope.panels = []; + }, 5000); } dashboardFetched() { diff --git a/public/app/plugins/panel/graph/graph.js b/public/app/plugins/panel/graph/graph.js index 3097bc6eac6..3a42df657a5 100755 --- a/public/app/plugins/panel/graph/graph.js +++ b/public/app/plugins/panel/graph/graph.js @@ -18,6 +18,8 @@ function (angular, $, moment, _, kbn, GraphTooltip) { 'use strict'; var module = angular.module('grafana.directives'); + var labelWidthCache = {}; + var panelWidthCache = {}; module.directive('grafanaGraph', function($rootScope, timeSrv) { return { @@ -110,20 +112,13 @@ function (angular, $, moment, _, kbn, GraphTooltip) { } } - function getLabelWidth(type, text, elem) { - var labelWidth = 0; - if (!rootScope.labelWidthCache) { - rootScope.labelWidthCache = {}; - } - if (!rootScope.labelWidthCache[type]) { - rootScope.labelWidthCache[type] = {}; - } - if (rootScope.labelWidthCache[type][text]) { - labelWidth = rootScope.labelWidthCache[type][text]; - } else { - labelWidth = elem.width(); - rootScope.labelWidthCache[type][text] = labelWidth; + function getLabelWidth(text, elem) { + var labelWidth = labelWidthCache[text]; + + if (!labelWidth) { + labelWidth = labelWidthCache[text] = elem.width(); } + return labelWidth; } @@ -155,7 +150,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { .text(panel.yaxes[0].label) .appendTo(elem); - yaxisLabel[0].style.marginTop = (getLabelWidth('left', panel.yaxes[0].label, yaxisLabel) / 2) + 'px'; + yaxisLabel[0].style.marginTop = (getLabelWidth(panel.yaxes[0].label, yaxisLabel) / 2) + 'px'; } // add right axis labels @@ -164,7 +159,7 @@ function (angular, $, moment, _, kbn, GraphTooltip) { .text(panel.yaxes[1].label) .appendTo(elem); - rightLabel[0].style.marginTop = (getLabelWidth('right', panel.yaxes[1].label, rightLabel) / 2) + 'px'; + rightLabel[0].style.marginTop = (getLabelWidth(panel.yaxes[1].label, rightLabel) / 2) + 'px'; } } @@ -177,14 +172,9 @@ function (angular, $, moment, _, kbn, GraphTooltip) { // Function for rendering panel function render_panel() { - if (!rootScope.panelWidthCache) { - rootScope.panelWidthCache = {}; - } - if (rootScope.panelWidthCache[panel.span]) { - panelWidth = rootScope.panelWidthCache[panel.span]; - } else { - panelWidth = elem.width(); - rootScope.panelWidthCache[panel.span] = panelWidth; + 
panelWidth = panelWidthCache[panel.span]; + if (!panelWidth) { + panelWidth = panelWidthCache[panel.span] = elem.width(); } if (shouldAbortRender()) { From d1c06a93df661d6afffcd7cd9cdc7433f3c52c12 Mon Sep 17 00:00:00 2001 From: Tom Hukins Date: Wed, 8 Jun 2016 06:27:52 +0100 Subject: [PATCH 14/17] Fix a typo (#5306) --- conf/defaults.ini | 2 +- conf/sample.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conf/defaults.ini b/conf/defaults.ini index e7c3554340f..99a105fba36 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -293,7 +293,7 @@ facility = # Syslog tag. By default, the process' argv[0] is used. tag = -#################################### AMPQ Event Publisher ########################## +#################################### AMQP Event Publisher ########################## [event_publisher] enabled = false rabbitmq_url = amqp://localhost/ diff --git a/conf/sample.ini b/conf/sample.ini index 7a1099d35af..6d6c6e0e9fd 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -261,7 +261,7 @@ check_for_updates = true # Expired days of log file(delete after max days), default is 7 ;max_days = 7 -#################################### AMPQ Event Publisher ########################## +#################################### AMQP Event Publisher ########################## [event_publisher] ;enabled = false ;rabbitmq_url = amqp://localhost/ From 22cda198aedfacaa93e2666e54db963ab93825a8 Mon Sep 17 00:00:00 2001 From: Karl Date: Wed, 8 Jun 2016 06:28:16 +0100 Subject: [PATCH 15/17] Apply EscapeFilter to username to address grafana/grafana#5121 (#5279) --- pkg/login/ldap.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/login/ldap.go b/pkg/login/ldap.go index 0c817c9df0b..e02c59e1823 100644 --- a/pkg/login/ldap.go +++ b/pkg/login/ldap.go @@ -291,7 +291,7 @@ func (a *ldapAuther) searchForUser(username string) (*ldapUserInfo, error) { a.server.Attr.Name, a.server.Attr.MemberOf, }, - Filter: strings.Replace(a.server.SearchFilter, "%s", username, -1), + Filter: strings.Replace(a.server.SearchFilter, "%s", ldap.EscapeFilter(username), -1), } searchResult, err = a.conn.Search(&searchReq) @@ -324,7 +324,7 @@ func (a *ldapAuther) searchForUser(username string) (*ldapUserInfo, error) { if a.server.GroupSearchFilterUserAttribute == "" { filter_replace = getLdapAttr(a.server.Attr.Username, searchResult) } - filter := strings.Replace(a.server.GroupSearchFilter, "%s", filter_replace, -1) + filter := strings.Replace(a.server.GroupSearchFilter, "%s", ldap.EscapeFilter(filter_replace), -1) if ldapCfg.VerboseLogging { log.Info("LDAP: Searching for user's groups: %s", filter) From 87e98f01cd3876a785f1888b4899a15eac46fdbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 8 Jun 2016 08:09:29 +0200 Subject: [PATCH 16/17] feat(logging): updated syslog writer to work with log15 log interface, closes #4590 --- CHANGELOG.md | 4 ++ pkg/log/log.go | 25 +++---- pkg/log/syslog.go | 177 ++++++++++++++++++++++------------------------ 3 files changed, 102 insertions(+), 104 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b3b6371b324..6c94d5032d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,10 @@ * **Theme**: Add default theme to config file [#5011](https://github.com/grafana/grafana/pull/5011) * **Page Footer**: Added page footer with links to docs, shows Grafana version and info if new version is available, closes [#4889](https://github.com/grafana/grafana/pull/4889) * **InfluxDB**: Add spread function, closes 
[#5211](https://github.com/grafana/grafana/issues/5211)
+* **Logging**: Moved to a structured logging lib, and to component-specific level filters via the config file, closes [#4590](https://github.com/grafana/grafana/issues/4590)
+
+## Breaking changes
+* **Logging**: Changed the default logging output format (now structured into a message and key/value pairs, with the logger key acting as the component). You can also now switch to JSON log output via the config file.
 
 # 3.0.4 Patch release (2016-05-25)
 * **Panel**: Fixed blank dashboard issue when switching to other dashboard while in fullscreen edit mode, fixes [#5163](https://github.com/grafana/grafana/pull/5163)
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 6e75a2b9a8c..f4865169e95 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -157,15 +157,20 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) {
 
 			loggersToClose = append(loggersToClose, fileHandler)
 			handler = fileHandler
+		case "syslog":
+			sysLogHandler := NewSyslog()
+			sysLogHandler.Network = sec.Key("network").MustString("")
+			sysLogHandler.Address = sec.Key("address").MustString("")
+			sysLogHandler.Facility = sec.Key("facility").MustString("local7")
+			sysLogHandler.Tag = sec.Key("tag").MustString("")
-
-		// case "syslog":
-		// 	LogConfigs[i] = util.DynMap{
-		// 		"level":    level,
-		// 		"network":  sec.Key("network").MustString(""),
-		// 		"address":  sec.Key("address").MustString(""),
-		// 		"facility": sec.Key("facility").MustString("local7"),
-		// 		"tag":      sec.Key("tag").MustString(""),
-		// 	}
+			if err := sysLogHandler.Init(); err != nil {
+				Root.Error("Failed to init syslog log handler", "error", err)
+				os.Exit(1)
+			}
+
+			loggersToClose = append(loggersToClose, sysLogHandler)
+			handler = sysLogHandler
 		}
 
 		for key, value := range defaultFilters {
@@ -174,10 +179,6 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) {
 			}
 		}
 
-		for key, value := range modeFilters {
-			fmt.Printf("key: %v, value: %v \n", key, value)
-		}
-
 		handler = LogFilterHandler(level, modeFilters, handler)
 		handlers = append(handlers, handler)
 	}
diff --git a/pkg/log/syslog.go b/pkg/log/syslog.go
index cdf03555238..29a22e9fe1e 100644
--- a/pkg/log/syslog.go
+++ b/pkg/log/syslog.go
@@ -2,95 +2,88 @@
 package log
 
-//
-// import (
-// 	"encoding/json"
-// 	"errors"
-// 	"log/syslog"
-// )
-//
-// type SyslogWriter struct {
-// 	syslog   *syslog.Writer
-// 	Network  string `json:"network"`
-// 	Address  string `json:"address"`
-// 	Facility string `json:"facility"`
-// 	Tag      string `json:"tag"`
-// }
-//
-// func NewSyslog() LoggerInterface {
-// 	return new(SyslogWriter)
-// }
-//
-// func (sw *SyslogWriter) Init(config string) error {
-// 	if err := json.Unmarshal([]byte(config), sw); err != nil {
-// 		return err
-// 	}
-//
-// 	prio, err := parseFacility(sw.Facility)
-// 	if err != nil {
-// 		return err
-// 	}
-//
-// 	w, err := syslog.Dial(sw.Network, sw.Address, prio, sw.Tag)
-// 	if err != nil {
-// 		return err
-// 	}
-//
-// 	sw.syslog = w
-// 	return nil
-// }
-//
-// func (sw *SyslogWriter) WriteMsg(msg string, skip int, level LogLevel) error {
-// 	var err error
-//
-// 	switch level {
-// 	case TRACE, DEBUG:
-// 		err = sw.syslog.Debug(msg)
-// 	case INFO:
-// 		err = sw.syslog.Info(msg)
-// 	case WARN:
-// 		err = sw.syslog.Warning(msg)
-// 	case ERROR:
-// 		err = sw.syslog.Err(msg)
-// 	case CRITICAL:
-// 		err = sw.syslog.Crit(msg)
-// 	case FATAL:
-// 		err = sw.syslog.Alert(msg)
-// 	default:
-// 		err = errors.New("invalid syslog level")
-// 	}
-//
-// 	return err
-// }
-//
-// func (sw *SyslogWriter) Destroy() {
-// 	sw.syslog.Close()
-// }
-//
-// func (sw *SyslogWriter) Flush() {} -// -// var facilities = map[string]syslog.Priority{ -// "user": syslog.LOG_USER, -// "daemon": syslog.LOG_DAEMON, -// "local0": syslog.LOG_LOCAL0, -// "local1": syslog.LOG_LOCAL1, -// "local2": syslog.LOG_LOCAL2, -// "local3": syslog.LOG_LOCAL3, -// "local4": syslog.LOG_LOCAL4, -// "local5": syslog.LOG_LOCAL5, -// "local6": syslog.LOG_LOCAL6, -// "local7": syslog.LOG_LOCAL7, -// } -// -// func parseFacility(facility string) (syslog.Priority, error) { -// prio, ok := facilities[facility] -// if !ok { -// return syslog.LOG_LOCAL0, errors.New("invalid syslog facility") -// } -// -// return prio, nil -// } -// -// func init() { -// Register("syslog", NewSyslog) -// } +import ( + "errors" + "log/syslog" + + "github.com/inconshreveable/log15" +) + +type SysLogHandler struct { + syslog *syslog.Writer + Network string + Address string + Facility string + Tag string + Format log15.Format +} + +func NewSyslog() *SysLogHandler { + return &SysLogHandler{ + Format: log15.LogfmtFormat(), + } +} + +func (sw *SysLogHandler) Init() error { + prio, err := parseFacility(sw.Facility) + if err != nil { + return err + } + + w, err := syslog.Dial(sw.Network, sw.Address, prio, sw.Tag) + if err != nil { + return err + } + + sw.syslog = w + return nil +} + +func (sw *SysLogHandler) Log(r *log15.Record) error { + var err error + + msg := string(sw.Format.Format(r)) + + switch r.Lvl { + case log15.LvlDebug: + err = sw.syslog.Debug(msg) + case log15.LvlInfo: + err = sw.syslog.Info(msg) + case log15.LvlWarn: + err = sw.syslog.Warning(msg) + case log15.LvlError: + err = sw.syslog.Err(msg) + case log15.LvlCrit: + err = sw.syslog.Crit(msg) + default: + err = errors.New("invalid syslog level") + } + + return err +} + +func (sw *SysLogHandler) Close() { + sw.syslog.Close() +} + +var facilities = map[string]syslog.Priority{ + "user": syslog.LOG_USER, + "daemon": syslog.LOG_DAEMON, + "local0": syslog.LOG_LOCAL0, + "local1": syslog.LOG_LOCAL1, + "local2": syslog.LOG_LOCAL2, + "local3": syslog.LOG_LOCAL3, + "local4": syslog.LOG_LOCAL4, + "local5": syslog.LOG_LOCAL5, + "local6": syslog.LOG_LOCAL6, + "local7": syslog.LOG_LOCAL7, +} + +func parseFacility(facility string) (syslog.Priority, error) { + prio, ok := facilities[facility] + if !ok { + return syslog.LOG_LOCAL0, errors.New("invalid syslog facility") + } + + return prio, nil +} From 1d8fdc09e734aa653fd0669e650e1c13c2d72b73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 8 Jun 2016 08:48:46 +0200 Subject: [PATCH 17/17] feat(logging): added log format option, #4590 --- conf/defaults.ini | 18 ++++++++++++++---- conf/sample.ini | 30 +++++++++++++++++++++++++++--- pkg/log/log.go | 39 ++++++++++++++++++++++++++++++--------- 3 files changed, 71 insertions(+), 16 deletions(-) diff --git a/conf/defaults.ini b/conf/defaults.ini index d94ffd31fdc..068c038373c 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -251,18 +251,23 @@ templates_pattern = emails/*.html # Use space to separate multiple modes, e.g. 
"console file" mode = console, file -# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info" -level = Info +# Either "trace", "debug", "info", "warn", "error", "critical", default is "info" +level = info # For "console" mode only [log.console] level = -# Set formatting to "false" to disable color formatting of console logs -formatting = false + +# log line format, valid options are text, console and json +format = console # For "file" mode only [log.file] level = + +# log line format, valid options are text, console and json +format = text + # This enables automated log rotate(switch of following options), default is true log_rotate = true @@ -280,6 +285,10 @@ max_days = 7 [log.syslog] level = + +# log line format, valid options are text, console and json +format = text + # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. network = address = @@ -290,6 +299,7 @@ facility = # Syslog tag. By default, the process' argv[0] is used. tag = + #################################### AMQP Event Publisher ########################## [event_publisher] enabled = false diff --git a/conf/sample.ini b/conf/sample.ini index 3ee33fce6ff..dbb761c4613 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -230,19 +230,26 @@ check_for_updates = true #################################### Logging ########################## [log] # Either "console", "file", "syslog". Default is console and file -# Use comma to separate multiple modes, e.g. "console, file" +# Use space to separate multiple modes, e.g. "console file" ;mode = console, file -# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info" -;level = Info +# Either "trace", "debug", "info", "warn", "error", "critical", default is "info" +;level = info # For "console" mode only [log.console] ;level = +# log line format, valid options are text, console and json +;format = console + # For "file" mode only [log.file] ;level = + +# log line format, valid options are text, console and json +;format = text + # This enables automated log rotate(switch of following options), default is true ;log_rotate = true @@ -258,6 +265,23 @@ check_for_updates = true # Expired days of log file(delete after max days), default is 7 ;max_days = 7 +[log.syslog] +;level = + +# log line format, valid options are text, console and json +;format = text + +# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. +;network = +;address = + +# Syslog facility. user, daemon and local0 through local7 are valid. +;facility = + +# Syslog tag. By default, the process' argv[0] is used. 
+;tag = + + #################################### AMQP Event Publisher ########################## [event_publisher] ;enabled = false diff --git a/pkg/log/log.go b/pkg/log/log.go index f4865169e95..f74511e4f45 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -13,6 +13,7 @@ import ( "gopkg.in/ini.v1" "github.com/inconshreveable/log15" + "github.com/inconshreveable/log15/term" ) var Root log15.Logger @@ -82,16 +83,17 @@ func Close() { } var logLevels = map[string]log15.Lvl{ - "Trace": log15.LvlDebug, - "Debug": log15.LvlDebug, - "Info": log15.LvlInfo, - "Warn": log15.LvlWarn, - "Error": log15.LvlError, - "Critical": log15.LvlCrit, + "trace": log15.LvlDebug, + "debug": log15.LvlDebug, + "info": log15.LvlInfo, + "warn": log15.LvlWarn, + "error": log15.LvlError, + "critical": log15.LvlCrit, } func getLogLevelFromConfig(key string, defaultName string, cfg *ini.File) (string, log15.Lvl) { - levelName := cfg.Section(key).Key("level").In(defaultName, []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"}) + levelName := cfg.Section(key).Key("level").MustString("info") + levelName = strings.ToLower(levelName) level := getLogLevelFromString(levelName) return levelName, level } @@ -118,10 +120,26 @@ func getFilters(filterStrArray []string) map[string]log15.Lvl { return filterMap } +func getLogFormat(format string) log15.Format { + switch format { + case "console": + if term.IsTty(os.Stdout.Fd()) { + return log15.TerminalFormat() + } + return log15.LogfmtFormat() + case "text": + return log15.LogfmtFormat() + case "json": + return log15.JsonFormat() + default: + return log15.LogfmtFormat() + } +} + func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) { Close() - defaultLevelName, _ := getLogLevelFromConfig("log", "Info", cfg) + defaultLevelName, _ := getLogLevelFromConfig("log", "info", cfg) defaultFilters := getFilters(cfg.Section("log").Key("filters").Strings(" ")) handlers := make([]log15.Handler, 0) @@ -136,18 +154,20 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) { // Log level. _, level := getLogLevelFromConfig("log."+mode, defaultLevelName, cfg) modeFilters := getFilters(sec.Key("filters").Strings(" ")) + format := getLogFormat(sec.Key("format").MustString("")) var handler log15.Handler // Generate log configuration. switch mode { case "console": - handler = log15.StdoutHandler + handler = log15.StreamHandler(os.Stdout, format) case "file": fileName := sec.Key("file_name").MustString(filepath.Join(logsPath, "grafana.log")) os.MkdirAll(filepath.Dir(fileName), os.ModePerm) fileHandler := NewFileWriter() fileHandler.Filename = fileName + fileHandler.Format = format fileHandler.Rotate = sec.Key("log_rotate").MustBool(true) fileHandler.Maxlines = sec.Key("max_lines").MustInt(1000000) fileHandler.Maxsize = 1 << uint(sec.Key("max_size_shift").MustInt(28)) @@ -159,6 +179,7 @@ func ReadLoggingConfig(modes []string, logsPath string, cfg *ini.File) { handler = fileHandler case "syslog": sysLogHandler := NewSyslog() + sysLogHandler.Format = format sysLogHandler.Network = sec.Key("network").MustString("") sysLogHandler.Address = sec.Key("address").MustString("") sysLogHandler.Facility = sec.Key("facility").MustString("local7")
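
A note for readers unfamiliar with log15, which the last two patches build on: the new `format` option selects a log15 `Format`, and all three modes (console, file, syslog) push their records through that same formatter, either via `log15.StreamHandler` or via the `Format` field on the custom file and syslog writers. Below is a minimal standalone sketch of what the `text` and `json` options produce; it uses only the public log15 API shown in the patches, and the logger name, message, and address are illustrative rather than taken from Grafana:

```go
package main

import (
	"os"

	"github.com/inconshreveable/log15"
)

func main() {
	// "format = text": logfmt key=value pairs. The "logger" context key
	// acts as the component name, as described in the CHANGELOG entry.
	logger := log15.New("logger", "http.server")
	logger.SetHandler(log15.StreamHandler(os.Stdout, log15.LogfmtFormat()))
	logger.Info("Server listening", "address", "0.0.0.0:3000")
	// t=... lvl=info msg="Server listening" logger=http.server address=0.0.0.0:3000

	// "format = json": the same record rendered as one JSON object per line.
	logger.SetHandler(log15.StreamHandler(os.Stdout, log15.JsonFormat()))
	logger.Info("Server listening", "address", "0.0.0.0:3000")
	// {"address":"0.0.0.0:3000","lvl":"info","logger":"http.server","msg":"Server listening","t":"..."}
}
```

Funneling every mode through a shared `Format` is also what keeps the syslog rewrite above small: `SysLogHandler.Log` only maps log15 levels onto syslog priorities, while the actual line rendering is delegated to `sw.Format.Format(r)`.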