From 2bd2605ae96d6ec2f60b2d21305fd03ecc52ff02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Fri, 20 Mar 2015 19:16:59 -0400 Subject: [PATCH 001/274] Added poc of dashboard snapshot, sharable dashboard with data embedded --- src/app/features/dashboard/dashboardNavCtrl.js | 14 +++++++++++++- src/app/panels/graph/module.js | 12 +++++++++++- src/app/partials/dashboard_topnav.html | 1 + 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/app/features/dashboard/dashboardNavCtrl.js b/src/app/features/dashboard/dashboardNavCtrl.js index a47e19b561f..9a8042cd680 100644 --- a/src/app/features/dashboard/dashboardNavCtrl.js +++ b/src/app/features/dashboard/dashboardNavCtrl.js @@ -11,7 +11,7 @@ function (angular, _, moment) { var module = angular.module('grafana.controllers'); - module.controller('DashboardNavCtrl', function($scope, $rootScope, alertSrv, $location, playlistSrv, backendSrv, timeSrv) { + module.controller('DashboardNavCtrl', function($scope, $rootScope, alertSrv, $location, playlistSrv, backendSrv, timeSrv, $timeout) { $scope.init = function() { $scope.onAppEvent('save-dashboard', $scope.saveDashboard); @@ -157,6 +157,18 @@ function (angular, _, moment) { }); }; + $scope.snapshot = function() { + $scope.dashboard.snapshot = true; + $rootScope.$broadcast('refresh'); + + $timeout(function() { + $scope.exportDashboard(); + $scope.dashboard.snapshot = false; + $scope.appEvent('dashboard-snapshot-cleanup'); + }, 1000); + + }; + $scope.editJson = function() { $scope.appEvent('show-json-editor', { object: $scope.dashboard }); }; diff --git a/src/app/panels/graph/module.js b/src/app/panels/graph/module.js index 3af9f59eb9b..966e1f9aa23 100644 --- a/src/app/panels/graph/module.js +++ b/src/app/panels/graph/module.js @@ -23,7 +23,7 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { }; }); - module.controller('GraphCtrl', function($scope, $rootScope, panelSrv, annotationsSrv, panelHelper) { + 
module.controller('GraphCtrl', function($scope, $rootScope, panelSrv, annotationsSrv, panelHelper, $q) { $scope.panelMeta = new PanelMeta({ panelName: 'Graph', @@ -130,6 +130,12 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { $scope.refreshData = function(datasource) { panelHelper.updateTimeRange($scope); + if ($scope.panel.snapshotData) { + $scope.annotationsPromise = $q.when([]); + $scope.dataHandler($scope.panel.snapshotData); + return; + } + $scope.annotationsPromise = annotationsSrv.getAnnotations($scope.rangeUnparsed, $scope.dashboard); return panelHelper.issueMetricQuery($scope, datasource) @@ -141,6 +147,9 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { }; $scope.dataHandler = function(results) { + if ($scope.dashboard.snapshot) { + $scope.panel.snapshotData = results; + } // png renderer returns just a url if (_.isString(results)) { $scope.render(results); @@ -285,6 +294,7 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { }; panelSrv.init($scope); + }); }); diff --git a/src/app/partials/dashboard_topnav.html b/src/app/partials/dashboard_topnav.html index bf3ba635a97..77cccecb10b 100644 --- a/src/app/partials/dashboard_topnav.html +++ b/src/app/partials/dashboard_topnav.html @@ -40,6 +40,7 @@
  • View JSON
  • Save As...
  • Delete dashboard
  • +
  • Snapshot dashboard
  • From 7db37032759427c5a9b36654933982b7582a3caf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Fri, 20 Mar 2015 22:01:39 -0400 Subject: [PATCH 002/274] Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embedd a single graph on another web site, #1622 --- CHANGELOG.md | 1 + .../features/dashboard/dashboardNavCtrl.js | 2 +- .../dashboard/partials/shareDashboard.html | 48 ++++++++++++++ .../dashboard/partials/shareModal.html | 53 --------------- .../dashboard/partials/sharePanel.html | 65 +++++++++++++++++++ src/app/features/dashboard/sharePanelCtrl.js | 5 +- src/app/features/panel/panelSrv.js | 2 +- src/app/features/panel/soloPanelCtrl.js | 16 ++++- src/css/less/forms.less | 6 ++ src/test/specs/soloPanelCtrl-specs.js | 6 ++ 10 files changed, 146 insertions(+), 58 deletions(-) create mode 100644 src/app/features/dashboard/partials/shareDashboard.html delete mode 100644 src/app/features/dashboard/partials/shareModal.html create mode 100644 src/app/features/dashboard/partials/sharePanel.html diff --git a/CHANGELOG.md b/CHANGELOG.md index eb1ffcdecb9..15e1527ad8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # 2.0.0 (unreleased) **New features** +- [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embedd a single graph on another web site - [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes inbetween the user is promted with a warning if he really wants to overwrite the other's changes - [Issue #1331](https://github.com/grafana/grafana/issues/1331). Graph & Singlestat: New axis/unit format selector and more units (kbytes, Joule, Watt, eV), and new design for graph axis & grid tab and single stat options tab views - [Issue #1241](https://github.com/grafana/grafana/issues/1242). 
Timepicker: New option in timepicker (under dashboard settings), to change ``now`` to be for example ``now-1m``, usefull when you want to ignore last minute because it contains incomplete data diff --git a/src/app/features/dashboard/dashboardNavCtrl.js b/src/app/features/dashboard/dashboardNavCtrl.js index a47e19b561f..5950c504a3a 100644 --- a/src/app/features/dashboard/dashboardNavCtrl.js +++ b/src/app/features/dashboard/dashboardNavCtrl.js @@ -42,7 +42,7 @@ function (angular, _, moment) { $scope.shareDashboard = function() { $scope.appEvent('show-modal', { - src: './app/features/dashboard/partials/shareModal.html', + src: './app/features/dashboard/partials/shareDashboard.html', scope: $scope.$new(), }); }; diff --git a/src/app/features/dashboard/partials/shareDashboard.html b/src/app/features/dashboard/partials/shareDashboard.html new file mode 100644 index 00000000000..e052c5b298f --- /dev/null +++ b/src/app/features/dashboard/partials/shareDashboard.html @@ -0,0 +1,48 @@ + diff --git a/src/app/features/dashboard/partials/shareModal.html b/src/app/features/dashboard/partials/shareModal.html deleted file mode 100644 index a9707be94b1..00000000000 --- a/src/app/features/dashboard/partials/shareModal.html +++ /dev/null @@ -1,53 +0,0 @@ - diff --git a/src/app/features/dashboard/sharePanelCtrl.js b/src/app/features/dashboard/sharePanelCtrl.js index 40c7ba45a90..88710660e3c 100644 --- a/src/app/features/dashboard/sharePanelCtrl.js +++ b/src/app/features/dashboard/sharePanelCtrl.js @@ -9,7 +9,7 @@ function (angular, _, require, config) { var module = angular.module('grafana.controllers'); - module.controller('SharePanelCtrl', function($scope, $location, $timeout, timeSrv, $element, templateSrv) { + module.controller('SharePanelCtrl', function($scope, $rootScope, $location, $timeout, timeSrv, $element, templateSrv) { $scope.init = function() { $scope.editor = { index: 0 }; @@ -81,6 +81,17 @@ function (angular, _, require, config) { $scope.imageUrl += '&height=500'; }; + 
$scope.snapshot = function() { + $scope.dashboard.snapshot = true; + $rootScope.$broadcast('refresh'); + + $timeout(function() { + $scope.exportDashboard(); + $scope.dashboard.snapshot = false; + $scope.appEvent('dashboard-snapshot-cleanup'); + }, 1000); + }; + $scope.init(); }); diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js new file mode 100644 index 00000000000..fcc4ce2ba1a --- /dev/null +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -0,0 +1,30 @@ +define([ + 'angular', +], +function (angular) { + 'use strict'; + + var module = angular.module('grafana.controllers'); + + module.controller('ShareSnapshotCtrl', function($scope, $rootScope, backendSrv, $timeout) { + + $scope.snapshot = function() { + $scope.dashboard.snapshot = true; + $rootScope.$broadcast('refresh'); + + $timeout(function() { + var dash = angular.copy($scope.dashboard); + backendSrv.post('/api/snapshots/', { + dashboard: dash + }).then(function(results) { + console.log(results); + }); + + $scope.dashboard.snapshot = false; + $scope.appEvent('dashboard-snapshot-cleanup'); + }, 2000); + }; + + }); + +}); diff --git a/src/app/partials/shareDashboard.html b/src/app/partials/shareDashboard.html deleted file mode 100644 index 79ff15c548e..00000000000 --- a/src/app/partials/shareDashboard.html +++ /dev/null @@ -1,18 +0,0 @@ - From f48f5428e57cb6c082e5ed875a41469164ef10ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sat, 21 Mar 2015 10:56:26 -0400 Subject: [PATCH 004/274] Adding snapshot storage and route, #1623 --- pkg/api/dashboard_snapshot.go | 10 +++++-- pkg/api/dtos/models.go | 7 +++-- .../migrations/dashboard_snapshot_mig.go | 6 ++-- .../dashboard/partials/shareDashboard.html | 28 ++++++++++++++++++- .../features/dashboard/shareSnapshotCtrl.js | 21 ++++++++++---- src/app/routes/all.js | 4 +++ src/app/routes/dashLoadControllers.js | 23 ++++++++++----- 7 files changed, 77 insertions(+), 22 deletions(-) 
diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index 3ac574d4991..979a3aa86f2 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -1,13 +1,14 @@ package api import ( + "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/util" ) -func CreateDashboardSnapshotCommand(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { +func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { cmd.Key = util.GetRandomString(20) if err := bus.Dispatch(&cmd); err != nil { @@ -29,5 +30,10 @@ func GetDashboardSnapshot(c *middleware.Context) { return } - c.JSON(200, query.Result) + dto := dtos.Dashboard{ + Model: query.Result.Dashboard, + Meta: dtos.DashboardMeta{IsSnapshot: true}, + } + + c.JSON(200, dto) } diff --git a/pkg/api/dtos/models.go b/pkg/api/dtos/models.go index c225c6a5bbb..2cb9da0189f 100644 --- a/pkg/api/dtos/models.go +++ b/pkg/api/dtos/models.go @@ -27,9 +27,10 @@ type CurrentUser struct { } type DashboardMeta struct { - IsStarred bool `json:"isStarred"` - IsHome bool `json:"isHome"` - Slug string `json:"slug"` + IsStarred bool `json:"isStarred"` + IsHome bool `json:"isHome"` + IsSnapshot bool `json:"isSnapshot"` + Slug string `json:"slug"` } type Dashboard struct { diff --git a/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go b/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go index 8ac7bee8be4..a30b5eccbe4 100644 --- a/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go +++ b/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go @@ -3,7 +3,7 @@ package migrations import . 
"github.com/grafana/grafana/pkg/services/sqlstore/migrator" func addDashboardSnapshotMigrations(mg *Migrator) { - snapshotV3 := Table{ + snapshotV4 := Table{ Name: "dashboard_snapshot", Columns: []*Column{ {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, @@ -19,6 +19,6 @@ func addDashboardSnapshotMigrations(mg *Migrator) { }, } - mg.AddMigration("create dashboard_snapshot table v3", NewAddTableMigration(snapshotV3)) - addTableIndicesMigrations(mg, "v3", snapshotV3) + mg.AddMigration("create dashboard_snapshot table v4", NewAddTableMigration(snapshotV4)) + addTableIndicesMigrations(mg, "v4", snapshotV4) } diff --git a/src/app/features/dashboard/partials/shareDashboard.html b/src/app/features/dashboard/partials/shareDashboard.html index c622254292a..0463b342650 100644 --- a/src/app/features/dashboard/partials/shareDashboard.html +++ b/src/app/features/dashboard/partials/shareDashboard.html @@ -54,7 +54,33 @@

    - +
    +
    +
      +
    • + Snapshot name +
    • +
    • + +
    • +
    +
    +
    +
    + +
    +
    + + + + +
    +
    + + diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index fcc4ce2ba1a..2f316c71ada 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -6,18 +6,27 @@ function (angular) { var module = angular.module('grafana.controllers'); - module.controller('ShareSnapshotCtrl', function($scope, $rootScope, backendSrv, $timeout) { + module.controller('ShareSnapshotCtrl', function($scope, $rootScope, $location, backendSrv, $timeout) { - $scope.snapshot = function() { + $scope.snapshot = { + name: $scope.dashboard.title + }; + + $scope.createSnapshot = function() { $scope.dashboard.snapshot = true; + $scope.loading = true; $rootScope.$broadcast('refresh'); $timeout(function() { var dash = angular.copy($scope.dashboard); - backendSrv.post('/api/snapshots/', { - dashboard: dash - }).then(function(results) { - console.log(results); + backendSrv.post('/api/snapshots/', {dashboard: dash}).then(function(results) { + $scope.loading = false; + + var baseUrl = $location.absUrl().replace($location.url(), ""); + $scope.snapshotUrl = baseUrl + '/dashboard/snapshots/' + results.key; + + }, function() { + $scope.loading = false; }); $scope.dashboard.snapshot = false; diff --git a/src/app/routes/all.js b/src/app/routes/all.js index de6477ea959..d68fbbdd525 100644 --- a/src/app/routes/all.js +++ b/src/app/routes/all.js @@ -35,6 +35,10 @@ define([ controller : 'DashFromImportCtrl', reloadOnSearch: false, }) + .when('/dashboard/snapshots/:key', { + templateUrl: 'app/partials/dashboard.html', + controller : 'DashFromSnapshotCtrl', + }) .when('/dashboard/new', { templateUrl: 'app/partials/dashboard.html', controller : 'NewDashboardCtrl', diff --git a/src/app/routes/dashLoadControllers.js b/src/app/routes/dashLoadControllers.js index 710c30719cc..1a1d61142b2 100644 --- a/src/app/routes/dashLoadControllers.js +++ b/src/app/routes/dashLoadControllers.js @@ -33,6 +33,15 @@ 
function (angular, _, kbn, moment, $) { }); }); + module.controller('DashFromSnapshotCtrl', function($scope, $routeParams, backendSrv) { + backendSrv.get('/api/snapshots/' + $routeParams.key).then(function(result) { + $scope.initDashboard(result, $scope); + },function() { + $scope.initDashboard({}, $scope); + $scope.appEvent('alert-error', ['Dashboard Snapshot', '']); + }); + }); + module.controller('DashFromImportCtrl', function($scope, $location, alertSrv) { if (!window.grafanaImportDashboard) { alertSrv.set('Not found', 'Cannot reload page with unsaved imported dashboard', 'warning', 7000); @@ -47,7 +56,7 @@ function (angular, _, kbn, moment, $) { meta: {}, model: { title: "New dashboard", - rows: [{ height: '250px', panels:[] }] + rows: [{ height: '250px', panels:[] }] }, }, $scope); }); @@ -57,10 +66,10 @@ function (angular, _, kbn, moment, $) { var file_load = function(file) { return $http({ url: "public/dashboards/"+file.replace(/\.(?!json)/,"/")+'?' + new Date().getTime(), - method: "GET", - transformResponse: function(response) { - return angular.fromJson(response); - } + method: "GET", + transformResponse: function(response) { + return angular.fromJson(response); + } }).then(function(result) { if(!result) { return false; @@ -83,8 +92,8 @@ function (angular, _, kbn, moment, $) { var execute_script = function(result) { var services = { dashboardSrv: dashboardSrv, - datasourceSrv: datasourceSrv, - $q: $q, + datasourceSrv: datasourceSrv, + $q: $q, }; /*jshint -W054 */ From 7d4293f849d6c8d32e4ee05df809823e2ca3a722 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 06:48:53 -0400 Subject: [PATCH 005/274] removed cli commands, need to be mobed to a seperate binary using http api, #1570 --- main.go | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/main.go b/main.go index f23a544e1e9..3b25b73039d 100644 --- a/main.go +++ b/main.go @@ -39,17 +39,7 @@ func main() { app.Name = "Grafana Backend" 
app.Usage = "grafana web" app.Version = version - app.Commands = []cli.Command{ - cmd.ListOrgs, - cmd.CreateOrg, - cmd.DeleteOrg, - cmd.ExportDashboard, - cmd.ImportDashboard, - cmd.ListDataSources, - cmd.CreateDataSource, - cmd.DescribeDataSource, - cmd.DeleteDataSource, - cmd.Web} + app.Commands = []cli.Command{cmd.ImportDashboard, cmd.Web} app.Flags = append(app.Flags, []cli.Flag{ cli.StringFlag{ Name: "config", From d987532262435937215e72664101257058d2057e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 15:14:00 -0400 Subject: [PATCH 006/274] Added server metrics --- conf/defaults.ini | 7 ++ pkg/api/admin_users.go | 3 + pkg/api/dashboard.go | 5 ++ pkg/api/index.go | 2 +- pkg/api/login.go | 4 +- pkg/api/login_oauth.go | 3 + pkg/api/org.go | 3 + pkg/api/signup.go | 3 + pkg/cmd/web.go | 5 ++ pkg/metrics/counter.go | 72 ++++++++++++++++++++ pkg/metrics/metric_ref.go | 39 +++++++++++ pkg/metrics/metrics.go | 25 +++++++ pkg/metrics/registry.go | 102 +++++++++++++++++++++++++++++ pkg/metrics/report_usage.go | 60 +++++++++++++++++ pkg/middleware/middleware.go | 12 ++++ pkg/models/stats.go | 11 ++++ pkg/services/sqlstore/dashboard.go | 2 + pkg/services/sqlstore/stats.go | 36 ++++++++++ pkg/setting/setting.go | 4 ++ 19 files changed, 396 insertions(+), 2 deletions(-) create mode 100644 pkg/metrics/counter.go create mode 100644 pkg/metrics/metric_ref.go create mode 100644 pkg/metrics/metrics.go create mode 100644 pkg/metrics/registry.go create mode 100644 pkg/metrics/report_usage.go create mode 100644 pkg/models/stats.go create mode 100644 pkg/services/sqlstore/stats.go diff --git a/conf/defaults.ini b/conf/defaults.ini index 5318e89b2ad..095457dd2a2 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -1,6 +1,13 @@ app_name = Grafana app_mode = production +# Once every 24 hours Grafana will report anonymous data to +# stats.grafana.org (https). No ip addresses are being tracked. 
+# only simple counters to track running instances, dashboard +# count and errors. It is very helpful to us. +# Change this option to false to disable reporting. +reporting-enabled = true + [server] ; protocol (http or https) protocol = http diff --git a/pkg/api/admin_users.go b/pkg/api/admin_users.go index d3fa111d333..f7e8fca2b5e 100644 --- a/pkg/api/admin_users.go +++ b/pkg/api/admin_users.go @@ -3,6 +3,7 @@ package api import ( "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/util" @@ -64,6 +65,8 @@ func AdminCreateUser(c *middleware.Context, form dtos.AdminCreateUserForm) { return } + metrics.M_Api_Admin_User_Create.Inc(1) + c.JsonOK("User created") } diff --git a/pkg/api/dashboard.go b/pkg/api/dashboard.go index 8cde5a8bc8a..278264f22d7 100644 --- a/pkg/api/dashboard.go +++ b/pkg/api/dashboard.go @@ -7,6 +7,7 @@ import ( "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" @@ -27,6 +28,8 @@ func isDasboardStarredByUser(c *middleware.Context, dashId int64) (bool, error) } func GetDashboard(c *middleware.Context) { + metrics.M_Api_Dashboard_Get.Inc(1) + slug := c.Params(":slug") query := m.GetDashboardQuery{Slug: slug, OrgId: c.OrgId} @@ -88,6 +91,8 @@ func PostDashboard(c *middleware.Context, cmd m.SaveDashboardCommand) { return } + metrics.M_Api_Dashboard_Post.Inc(1) + c.JSON(200, util.DynMap{"status": "success", "slug": cmd.Result.Slug, "version": cmd.Result.Version}) } diff --git a/pkg/api/index.go b/pkg/api/index.go index 3006c54e8ab..4af66f18133 100644 --- a/pkg/api/index.go +++ b/pkg/api/index.go @@ -47,7 +47,7 @@ func Index(c *middleware.Context) { func NotFound(c 
*middleware.Context) { if c.IsApiRequest() { - c.JsonApiErr(200, "Not found", nil) + c.JsonApiErr(404, "Not found", nil) return } diff --git a/pkg/api/login.go b/pkg/api/login.go index e7707c53138..56a61697cb9 100644 --- a/pkg/api/login.go +++ b/pkg/api/login.go @@ -6,6 +6,7 @@ import ( "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" @@ -75,7 +76,6 @@ func LoginView(c *middleware.Context) { } func LoginPost(c *middleware.Context, cmd dtos.LoginCommand) { - userQuery := m.GetUserByLoginQuery{LoginOrEmail: cmd.User} err := bus.Dispatch(&userQuery) @@ -112,6 +112,8 @@ func LoginPost(c *middleware.Context, cmd dtos.LoginCommand) { c.SetCookie("redirect_to", "", -1, setting.AppSubUrl+"/") } + metrics.M_Api_Login_Post.Inc(1) + c.JSON(200, result) } diff --git a/pkg/api/login_oauth.go b/pkg/api/login_oauth.go index 9ccb8f0b60d..a234ef02bf3 100644 --- a/pkg/api/login_oauth.go +++ b/pkg/api/login_oauth.go @@ -8,6 +8,7 @@ import ( "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" @@ -81,5 +82,7 @@ func OAuthLogin(ctx *middleware.Context) { // login loginUserWithUser(userQuery.Result, ctx) + metrics.M_Api_Login_OAuth.Inc(1) + ctx.Redirect(setting.AppSubUrl + "/") } diff --git a/pkg/api/org.go b/pkg/api/org.go index 8b41b0e3f5f..ed180b1af77 100644 --- a/pkg/api/org.go +++ b/pkg/api/org.go @@ -2,6 +2,7 @@ package api import ( "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" ) @@ -35,6 +36,8 @@ func CreateOrg(c *middleware.Context, cmd 
m.CreateOrgCommand) { return } + metrics.M_Api_Org_Create.Inc(1) + c.JsonOK("Organization created") } diff --git a/pkg/api/signup.go b/pkg/api/signup.go index 74f00509b98..63bb34c72ac 100644 --- a/pkg/api/signup.go +++ b/pkg/api/signup.go @@ -2,6 +2,7 @@ package api import ( "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" @@ -26,4 +27,6 @@ func SignUp(c *middleware.Context, cmd m.CreateUserCommand) { loginUserWithUser(&user, c) c.JsonOK("User created and logged in") + + metrics.M_Api_User_SignUp.Inc(1) } diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index 6619e5b1e0e..e5516fb52d9 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -21,6 +21,7 @@ import ( "github.com/grafana/grafana/pkg/api" "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/services/eventpublisher" @@ -88,6 +89,10 @@ func runWeb(c *cli.Context) { m := newMacaron() api.Register(m) + if setting.ReportingEnabled { + go metrics.StartUsageReportLoop() + } + listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort) log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubUrl) switch setting.Protocol { diff --git a/pkg/metrics/counter.go b/pkg/metrics/counter.go new file mode 100644 index 00000000000..1a4a88be37b --- /dev/null +++ b/pkg/metrics/counter.go @@ -0,0 +1,72 @@ +package metrics + +import "sync/atomic" + +// Counters hold an int64 value that can be incremented and decremented. +type Counter interface { + Clear() + Count() int64 + Dec(int64) + Inc(int64) + Snapshot() Counter +} + +// NewCounter constructs a new StandardCounter. +func NewCounter() Counter { + return &StandardCounter{0} +} + +// CounterSnapshot is a read-only copy of another Counter. 
+type CounterSnapshot int64 + +// Clear panics. +func (CounterSnapshot) Clear() { + panic("Clear called on a CounterSnapshot") +} + +// Count returns the count at the time the snapshot was taken. +func (c CounterSnapshot) Count() int64 { return int64(c) } + +// Dec panics. +func (CounterSnapshot) Dec(int64) { + panic("Dec called on a CounterSnapshot") +} + +// Inc panics. +func (CounterSnapshot) Inc(int64) { + panic("Inc called on a CounterSnapshot") +} + +// Snapshot returns the snapshot. +func (c CounterSnapshot) Snapshot() Counter { return c } + +// StandardCounter is the standard implementation of a Counter and uses the +// sync/atomic package to manage a single int64 value. +type StandardCounter struct { + count int64 +} + +// Clear sets the counter to zero. +func (c *StandardCounter) Clear() { + atomic.StoreInt64(&c.count, 0) +} + +// Count returns the current count. +func (c *StandardCounter) Count() int64 { + return atomic.LoadInt64(&c.count) +} + +// Dec decrements the counter by the given amount. +func (c *StandardCounter) Dec(i int64) { + atomic.AddInt64(&c.count, -i) +} + +// Inc increments the counter by the given amount. +func (c *StandardCounter) Inc(i int64) { + atomic.AddInt64(&c.count, i) +} + +// Snapshot returns a read-only copy of the counter. 
+func (c *StandardCounter) Snapshot() Counter { + return CounterSnapshot(c.Count()) +} diff --git a/pkg/metrics/metric_ref.go b/pkg/metrics/metric_ref.go new file mode 100644 index 00000000000..f9e5d693d4c --- /dev/null +++ b/pkg/metrics/metric_ref.go @@ -0,0 +1,39 @@ +package metrics + +type comboCounterRef struct { + usageCounter Counter + metricCounter Counter +} + +func NewComboCounterRef(name string) Counter { + cr := &comboCounterRef{} + cr.usageCounter = UsageStats.GetOrRegister(name, NewCounter).(Counter) + cr.metricCounter = MetricStats.GetOrRegister(name, NewCounter).(Counter) + return cr +} + +func (c comboCounterRef) Clear() { + c.usageCounter.Clear() + c.metricCounter.Clear() +} + +func (c comboCounterRef) Count() int64 { + panic("Count called on a combocounter ref") +} + +// Dec panics. +func (c comboCounterRef) Dec(i int64) { + c.usageCounter.Dec(i) + c.metricCounter.Dec(i) +} + +// Inc panics. +func (c comboCounterRef) Inc(i int64) { + c.usageCounter.Inc(i) + c.metricCounter.Inc(i) +} + +// Snapshot returns the snapshot. 
+func (c comboCounterRef) Snapshot() Counter { + panic("snapshot called on a combocounter ref") +} diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go new file mode 100644 index 00000000000..45b964fb56e --- /dev/null +++ b/pkg/metrics/metrics.go @@ -0,0 +1,25 @@ +package metrics + +var UsageStats = NewRegistry() +var MetricStats = NewRegistry() + +var ( + M_Instance_Start = NewComboCounterRef("instance.start") + + M_Page_Status_200 = NewComboCounterRef("page.status.200") + M_Page_Status_500 = NewComboCounterRef("page.status.500") + M_Page_Status_404 = NewComboCounterRef("page.status.404") + + M_Api_Status_500 = NewComboCounterRef("api.status.500") + M_Api_Status_404 = NewComboCounterRef("api.status.404") + + M_Api_User_SignUp = NewComboCounterRef("api.user.signup") + M_Api_Dashboard_Get = NewComboCounterRef("api.dashboard.get") + M_Api_Dashboard_Post = NewComboCounterRef("api.dashboard.post") + M_Api_Admin_User_Create = NewComboCounterRef("api.admin.user_create") + M_Api_Login_Post = NewComboCounterRef("api.login.post") + M_Api_Login_OAuth = NewComboCounterRef("api.login.oauth") + M_Api_Org_Create = NewComboCounterRef("api.org.create") + + M_Models_Dashboard_Insert = NewComboCounterRef("models.dashboard.insert") +) diff --git a/pkg/metrics/registry.go b/pkg/metrics/registry.go new file mode 100644 index 00000000000..9e1618f3691 --- /dev/null +++ b/pkg/metrics/registry.go @@ -0,0 +1,102 @@ +package metrics + +import ( + "fmt" + "reflect" + "sync" +) + +// DuplicateMetric is the error returned by Registry.Register when a metric +// already exists. If you mean to Register that metric you must first +// Unregister the existing metric. +type DuplicateMetric string + +func (err DuplicateMetric) Error() string { + return fmt.Sprintf("duplicate metric: %s", string(err)) +} + +type Registry interface { + // Call the given function for each registered metric. + Each(func(string, interface{})) + + // Get the metric by the given name or nil if none is registered. 
+ Get(string) interface{} + + // Gets an existing metric or registers the given one. + // The interface can be the metric to register if not found in registry, + // or a function returning the metric for lazy instantiation. + GetOrRegister(string, interface{}) interface{} + + // Register the given metric under the given name. + Register(string, interface{}) error +} + +// The standard implementation of a Registry is a mutex-protected map +// of names to metrics. +type StandardRegistry struct { + metrics map[string]interface{} + mutex sync.Mutex +} + +// Create a new registry. +func NewRegistry() Registry { + return &StandardRegistry{metrics: make(map[string]interface{})} +} + +// Call the given function for each registered metric. +func (r *StandardRegistry) Each(f func(string, interface{})) { + for name, i := range r.registered() { + f(name, i) + } +} + +// Get the metric by the given name or nil if none is registered. +func (r *StandardRegistry) Get(name string) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.metrics[name] +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + if metric, ok := r.metrics[name]; ok { + return metric + } + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + r.register(name, i) + return i +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. 
+func (r *StandardRegistry) Register(name string, i interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.register(name, i) +} + +func (r *StandardRegistry) register(name string, i interface{}) error { + if _, ok := r.metrics[name]; ok { + return DuplicateMetric(name) + } + + r.metrics[name] = i + return nil +} + +func (r *StandardRegistry) registered() map[string]interface{} { + metrics := make(map[string]interface{}, len(r.metrics)) + r.mutex.Lock() + defer r.mutex.Unlock() + for name, i := range r.metrics { + metrics[name] = i + } + return metrics +} diff --git a/pkg/metrics/report_usage.go b/pkg/metrics/report_usage.go new file mode 100644 index 00000000000..af6552b5c1e --- /dev/null +++ b/pkg/metrics/report_usage.go @@ -0,0 +1,60 @@ +package metrics + +import ( + "bytes" + "encoding/json" + "net/http" + "time" + + "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/setting" +) + +func StartUsageReportLoop() chan struct{} { + M_Instance_Start.Inc(1) + + ticker := time.NewTicker(10 * time.Minute) + for { + select { + case <-ticker.C: + sendUsageStats() + } + } +} + +func sendUsageStats() { + log.Trace("Sending anonymous usage stats to stats.grafana.org") + + metrics := map[string]interface{}{} + report := map[string]interface{}{ + "version": setting.BuildVersion, + "metrics": metrics, + } + + // statsQuery := m.GetSystemStatsQuery{} + // if err := bus.Dispatch(&statsQuery); err != nil { + // log.Error(3, "Failed to get system stats", err) + // return + // } + + UsageStats.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + if metric.Count() > 0 { + metrics[name+".count"] = metric.Count() + metric.Clear() + } + } + }) + + // metrics["stats.dashboards.count"] = statsQuery.Result.DashboardCount + // metrics["stats.users.count"] = statsQuery.Result.UserCount + // metrics["stats.orgs.count"] = statsQuery.Result.OrgCount + + out, _ := json.Marshal(report) + data := bytes.NewBuffer(out) + + 
client := http.Client{Timeout: time.Duration(5 * time.Second)} + + go client.Post("http://stats.grafana.org/grafana-usage-report", "application/json", data) +} diff --git a/pkg/middleware/middleware.go b/pkg/middleware/middleware.go index a15fd075fca..20e1eb196e5 100644 --- a/pkg/middleware/middleware.go +++ b/pkg/middleware/middleware.go @@ -10,6 +10,7 @@ import ( "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/components/apikeygen" "github.com/grafana/grafana/pkg/log" + "github.com/grafana/grafana/pkg/metrics" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" ) @@ -99,6 +100,15 @@ func (ctx *Context) Handle(status int, title string, err error) { } } + switch status { + case 200: + metrics.M_Page_Status_200.Inc(1) + case 404: + metrics.M_Page_Status_404.Inc(1) + case 500: + metrics.M_Page_Status_500.Inc(1) + } + ctx.Data["Title"] = title ctx.HTML(status, strconv.Itoa(status)) } @@ -128,7 +138,9 @@ func (ctx *Context) JsonApiErr(status int, message string, err error) { switch status { case 404: resp["message"] = "Not Found" + metrics.M_Api_Status_500.Inc(1) case 500: + metrics.M_Api_Status_404.Inc(1) resp["message"] = "Internal Server Error" } diff --git a/pkg/models/stats.go b/pkg/models/stats.go new file mode 100644 index 00000000000..0d83882e666 --- /dev/null +++ b/pkg/models/stats.go @@ -0,0 +1,11 @@ +package models + +type SystemStats struct { + DashboardCount int + UserCount int + OrgCount int +} + +type GetSystemStatsQuery struct { + Result *SystemStats +} diff --git a/pkg/services/sqlstore/dashboard.go b/pkg/services/sqlstore/dashboard.go index bf748b600f4..0384a5bb6e6 100644 --- a/pkg/services/sqlstore/dashboard.go +++ b/pkg/services/sqlstore/dashboard.go @@ -6,6 +6,7 @@ import ( "github.com/go-xorm/xorm" "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/metrics" m "github.com/grafana/grafana/pkg/models" ) @@ -48,6 +49,7 @@ func SaveDashboard(cmd *m.SaveDashboardCommand) error { } 
if dash.Id == 0 { + metrics.M_Models_Dashboard_Insert.Inc(1) _, err = sess.Insert(dash) } else { dash.Version += 1 diff --git a/pkg/services/sqlstore/stats.go b/pkg/services/sqlstore/stats.go new file mode 100644 index 00000000000..7995dd43f38 --- /dev/null +++ b/pkg/services/sqlstore/stats.go @@ -0,0 +1,36 @@ +package sqlstore + +import ( + "github.com/grafana/grafana/pkg/bus" + m "github.com/grafana/grafana/pkg/models" +) + +func init() { + bus.AddHandler("sql", GetSystemStats) +} + +func GetSystemStats(query *m.GetSystemStatsQuery) error { + var rawSql = `SELECT + ( + SELECT COUNT(*) + FROM ` + dialect.Quote("user") + ` + ) AS user_count, + ( + SELECT COUNT(*) + FROM ` + dialect.Quote("org") + ` + ) AS org_count, + ( + SELECT COUNT(*) + FROM ` + dialect.Quote("dashboard") + ` + ) AS dashboard_count + ` + + var stats m.SystemStats + _, err := x.Sql(rawSql).Get(&stats) + if err != nil { + return err + } + + query.Result = &stats + return err +} diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index b8d038dbc29..defa8311e8c 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -96,6 +96,8 @@ var ( PhantomDir string configFiles []string + + ReportingEnabled bool ) func init() { @@ -233,6 +235,8 @@ func NewConfigContext(config string) { ImagesDir = "data/png" PhantomDir = "vendor/phantomjs" + ReportingEnabled = Cfg.Section("").Key("reporting-enabled").MustBool(true) + readSessionConfig() } From 9c9ebb49875941805e9e2e71284dcb30afdd13a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 15:24:35 -0400 Subject: [PATCH 007/274] Updated server stats --- pkg/metrics/report_usage.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/metrics/report_usage.go b/pkg/metrics/report_usage.go index af6552b5c1e..1887f7f60a9 100644 --- a/pkg/metrics/report_usage.go +++ b/pkg/metrics/report_usage.go @@ -6,7 +6,6 @@ import ( "net/http" "time" - "github.com/grafana/grafana/pkg/log" 
"github.com/grafana/grafana/pkg/setting" ) @@ -23,8 +22,6 @@ func StartUsageReportLoop() chan struct{} { } func sendUsageStats() { - log.Trace("Sending anonymous usage stats to stats.grafana.org") - metrics := map[string]interface{}{} report := map[string]interface{}{ "version": setting.BuildVersion, @@ -56,5 +53,5 @@ func sendUsageStats() { client := http.Client{Timeout: time.Duration(5 * time.Second)} - go client.Post("http://stats.grafana.org/grafana-usage-report", "application/json", data) + go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data) } From c67291da33a1f3a9ecbd54c3f54e2e3b24e864ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 15:25:21 -0400 Subject: [PATCH 008/274] Updated --- conf/sample.ini | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/conf/sample.ini b/conf/sample.ini index 4e4c335ae18..a51600a606d 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -5,6 +5,13 @@ app_mode = production +# Once every 24 hours Grafana will report anonymous data to +# stats.grafana.org (https). No ip addresses are being tracked. +# only simple counters to track running instances, dashboard +# count and errors. It is very helpful to us. +# Change this option to false to disable reporting. 
+reporting-enabled = true + [server] ; protocol (http or https) protocol = http From 526f3e1a314248430fa7e18ae94fc71dd6822a62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 15:27:05 -0400 Subject: [PATCH 009/274] Fixed failing unit test --- src/test/specs/graph-specs.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/test/specs/graph-specs.js b/src/test/specs/graph-specs.js index 7e870bfd7d0..a234323ee01 100644 --- a/src/test/specs/graph-specs.js +++ b/src/test/specs/graph-specs.js @@ -153,9 +153,9 @@ define([ it('should apply axis transform and ticks', function() { var axis = ctx.plotOptions.yaxes[0]; - expect(axis.transform(100)).to.be(Math.log(100+0.0001)); - expect(axis.ticks[0]).to.be(1); - expect(axis.ticks[1]).to.be(10); + expect(axis.transform(100)).to.be(Math.log(100+0.1)); + expect(axis.ticks[0]).to.be(0); + expect(axis.ticks[1]).to.be(1); }); }); From 1e4c62a70d12f3cc9027691cee9dc66b76d5b540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 15:45:13 -0400 Subject: [PATCH 010/274] updated server reporting --- pkg/metrics/report_usage.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/metrics/report_usage.go b/pkg/metrics/report_usage.go index 1887f7f60a9..e5cbf7a45f1 100644 --- a/pkg/metrics/report_usage.go +++ b/pkg/metrics/report_usage.go @@ -6,13 +6,14 @@ import ( "net/http" "time" + "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/setting" ) func StartUsageReportLoop() chan struct{} { M_Instance_Start.Inc(1) - ticker := time.NewTicker(10 * time.Minute) + ticker := time.NewTicker(24 * time.Hour) for { select { case <-ticker.C: @@ -22,6 +23,8 @@ func StartUsageReportLoop() chan struct{} { } func sendUsageStats() { + log.Trace("Sending anonymous usage stats to stats.grafana.org") + metrics := map[string]interface{}{} report := map[string]interface{}{ "version": setting.BuildVersion, From 
a26436f59bae959087fba7c506e4312cb47aec1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 16:13:16 -0400 Subject: [PATCH 011/274] Server metrics fix --- pkg/metrics/report_usage.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/metrics/report_usage.go b/pkg/metrics/report_usage.go index e5cbf7a45f1..b31b55333cc 100644 --- a/pkg/metrics/report_usage.go +++ b/pkg/metrics/report_usage.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "net/http" + "strings" "time" "github.com/grafana/grafana/pkg/log" @@ -13,7 +14,7 @@ import ( func StartUsageReportLoop() chan struct{} { M_Instance_Start.Inc(1) - ticker := time.NewTicker(24 * time.Hour) + ticker := time.NewTicker(10 * time.Minute) for { select { case <-ticker.C: @@ -25,9 +26,11 @@ func StartUsageReportLoop() chan struct{} { func sendUsageStats() { log.Trace("Sending anonymous usage stats to stats.grafana.org") + version := strings.Replace(setting.BuildVersion, ".", "_", -1) + metrics := map[string]interface{}{} report := map[string]interface{}{ - "version": setting.BuildVersion, + "version": version, "metrics": metrics, } From 44bc2b2d56423c3b25283bb3d0c4721d91441a70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Sun, 22 Mar 2015 16:30:28 -0400 Subject: [PATCH 012/274] Updated conf description, metrics interval --- conf/defaults.ini | 2 +- conf/sample.ini | 4 ++-- pkg/metrics/report_usage.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/conf/defaults.ini b/conf/defaults.ini index 095457dd2a2..d35f71fa4ce 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -1,7 +1,7 @@ app_name = Grafana app_mode = production -# Once every 24 hours Grafana will report anonymous data to +# Once every 1 hour Grafana will report anonymous data to # stats.grafana.org (https). No ip addresses are being tracked. # only simple counters to track running instances, dashboard # count and errors. It is very helpful to us. 
diff --git a/conf/sample.ini b/conf/sample.ini index a51600a606d..f7207668c8f 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -5,10 +5,10 @@ app_mode = production -# Once every 24 hours Grafana will report anonymous data to +# Once every 1 hour Grafana will report anonymous data to # stats.grafana.org (https). No ip addresses are being tracked. # only simple counters to track running instances, dashboard -# count and errors. It is very helpful to us. +# counts and errors. It is very helpful to us. # Change this option to false to disable reporting. reporting-enabled = true diff --git a/pkg/metrics/report_usage.go b/pkg/metrics/report_usage.go index b31b55333cc..f952e56fab6 100644 --- a/pkg/metrics/report_usage.go +++ b/pkg/metrics/report_usage.go @@ -14,7 +14,7 @@ import ( func StartUsageReportLoop() chan struct{} { M_Instance_Start.Inc(1) - ticker := time.NewTicker(10 * time.Minute) + ticker := time.NewTicker(time.Hour) for { select { case <-ticker.C: From a5fac17f2b7abbd8338922efbc891be21b30a251 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 23 Mar 2015 07:32:03 -0400 Subject: [PATCH 013/274] Added public snapshot test, hosted on snapshots.raintank.io --- .../dashboard/partials/shareDashboard.html | 5 +++++ src/app/features/dashboard/shareSnapshotCtrl.js | 16 ++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/app/features/dashboard/partials/shareDashboard.html b/src/app/features/dashboard/partials/shareDashboard.html index f96fe844a89..1afd4a20a84 100644 --- a/src/app/features/dashboard/partials/shareDashboard.html +++ b/src/app/features/dashboard/partials/shareDashboard.html @@ -82,6 +82,11 @@ + + diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index 2f316c71ada..acc0f38d946 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -12,17 +12,29 @@ function (angular) { 
name: $scope.dashboard.title }; - $scope.createSnapshot = function() { + $scope.createSnapshot = function(makePublic) { $scope.dashboard.snapshot = true; $scope.loading = true; $rootScope.$broadcast('refresh'); $timeout(function() { var dash = angular.copy($scope.dashboard); - backendSrv.post('/api/snapshots/', {dashboard: dash}).then(function(results) { + dash.title = $scope.snapshot.name; + + var apiUrl = '/api/snapshots'; + + if (makePublic) { + apiUrl = 'http://snapshots.raintank.io/api/snapshots'; + } + + backendSrv.post(apiUrl, {dashboard: dash}).then(function(results) { $scope.loading = false; var baseUrl = $location.absUrl().replace($location.url(), ""); + if (makePublic) { + baseUrl = 'http://snapshots.raintank.io'; + } + $scope.snapshotUrl = baseUrl + '/dashboard/snapshots/' + results.key; }, function() { From 7614ddb318b65133c3ddf5d77219a5601932dc8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 23 Mar 2015 13:58:30 -0400 Subject: [PATCH 014/274] Updated design for snapshot sharing dialog, #1596 --- pkg/api/api.go | 1 + src/app/features/dashboard/dashboardSrv.js | 10 ++ .../dashboard/partials/shareDashboard.html | 93 +++++++++++-------- .../features/dashboard/shareSnapshotCtrl.js | 5 + src/css/less/gfbox.less | 18 ++++ 5 files changed, 89 insertions(+), 38 deletions(-) diff --git a/pkg/api/api.go b/pkg/api/api.go index 4fc76a6e8ba..4683e95fa40 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -43,6 +43,7 @@ func Register(r *macaron.Macaron) { // dashboard snapshots r.Post("/api/snapshots/", bind(m.CreateDashboardSnapshotCommand{}), CreateDashboardSnapshot) + r.Get("/dashboard/snapshots/*", Index) r.Get("/api/snapshots/:key", GetDashboardSnapshot) // authed api diff --git a/src/app/features/dashboard/dashboardSrv.js b/src/app/features/dashboard/dashboardSrv.js index 90ec885ed41..77b19b3edd2 100644 --- a/src/app/features/dashboard/dashboardSrv.js +++ b/src/app/features/dashboard/dashboardSrv.js @@ -67,6 +67,16 @@ function 
(angular, $, kbn, _, moment) { return max + 1; }; + p.forEachPanel = function(callback) { + var i, j, row; + for (i = 0; i < this.rows.length; i++) { + row = this.rows[i]; + for (j = 0; j < row.panels.length; j++) { + callback(row.panels[j], row); + } + } + }; + p.rowSpan = function(row) { return _.reduce(row.panels, function(p,v) { return p + v.span; diff --git a/src/app/features/dashboard/partials/shareDashboard.html b/src/app/features/dashboard/partials/shareDashboard.html index 1afd4a20a84..bfe126b390f 100644 --- a/src/app/features/dashboard/partials/shareDashboard.html +++ b/src/app/features/dashboard/partials/shareDashboard.html @@ -1,7 +1,7 @@ -
    -
    Share dashboard and data with anyone
    -

    - +

    diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index acc0f38d946..6f4e21cd70d 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -21,6 +21,11 @@ function (angular) { var dash = angular.copy($scope.dashboard); dash.title = $scope.snapshot.name; + dash.forEachPanel(function(panel){ + panel.targets = []; + panel.links = []; + }); + var apiUrl = '/api/snapshots'; if (makePublic) { diff --git a/src/css/less/gfbox.less b/src/css/less/gfbox.less index 12b16974162..995ccb5b435 100644 --- a/src/css/less/gfbox.less +++ b/src/css/less/gfbox.less @@ -96,3 +96,21 @@ } } } + +.share-snapshot { + text-align: center; + + .share-snapshot-header { + .fa { + position: absolute; + font-size: 600%; + left: 41%; + color: @grafanaTargetFuncBackground; + z-index: -1; + } + + position: relative; + z-index: 1000; + line-height: 106px; + } +} From 4d13a5bffb6f9b2fadb26d187b7e65aa11421293 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 23 Mar 2015 14:00:03 -0400 Subject: [PATCH 015/274] Fixed failing style check --- src/app/features/dashboard/sharePanelCtrl.js | 11 ----------- src/app/features/dashboard/shareSnapshotCtrl.js | 2 +- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/src/app/features/dashboard/sharePanelCtrl.js b/src/app/features/dashboard/sharePanelCtrl.js index 88710660e3c..c7303ab1a68 100644 --- a/src/app/features/dashboard/sharePanelCtrl.js +++ b/src/app/features/dashboard/sharePanelCtrl.js @@ -81,17 +81,6 @@ function (angular, _, require, config) { $scope.imageUrl += '&height=500'; }; - $scope.snapshot = function() { - $scope.dashboard.snapshot = true; - $rootScope.$broadcast('refresh'); - - $timeout(function() { - $scope.exportDashboard(); - $scope.dashboard.snapshot = false; - $scope.appEvent('dashboard-snapshot-cleanup'); - }, 1000); - }; - $scope.init(); }); diff --git 
a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index 6f4e21cd70d..88247e3138b 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -21,7 +21,7 @@ function (angular) { var dash = angular.copy($scope.dashboard); dash.title = $scope.snapshot.name; - dash.forEachPanel(function(panel){ + dash.forEachPanel(function(panel) { panel.targets = []; panel.links = []; }); From 41820ccb0507cab11f333a265218ea9b626d1943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 23 Mar 2015 15:32:38 -0400 Subject: [PATCH 016/274] Dashboard Snapshot sharing: singlestat panel now works, #1623 --- src/app/features/panel/panelHelper.js | 8 +++++++- src/app/features/panel/panelSrv.js | 7 +++++++ src/app/panels/graph/module.js | 15 ++++++--------- src/app/panels/singlestat/module.js | 5 +++++ 4 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/app/features/panel/panelHelper.js b/src/app/features/panel/panelHelper.js index c2842fb225e..62982f69438 100644 --- a/src/app/features/panel/panelHelper.js +++ b/src/app/features/panel/panelHelper.js @@ -70,7 +70,13 @@ function (angular, _, kbn, $) { cacheTimeout: scope.panel.cacheTimeout }; - return datasource.query(metricsQuery); + return datasource.query(metricsQuery).then(function(results) { + if (scope.dashboard.snapshot) { + scope.panel.snapshotData = results; + } + + return results; + }); }; }); diff --git a/src/app/features/panel/panelSrv.js b/src/app/features/panel/panelSrv.js index 7ce3d69b376..8113194dfaf 100644 --- a/src/app/features/panel/panelSrv.js +++ b/src/app/features/panel/panelSrv.js @@ -93,6 +93,13 @@ function (angular, _, config) { $scope.get_data = function() { if ($scope.otherPanelInFullscreenMode()) { return; } + if ($scope.panel.snapshotData) { + if ($scope.loadSnapshot) { + $scope.loadSnapshot($scope.panel.snapshotData); + } + return; + } + delete $scope.panelMeta.error; 
$scope.panelMeta.loading = true; diff --git a/src/app/panels/graph/module.js b/src/app/panels/graph/module.js index 966e1f9aa23..c7612c7cc9f 100644 --- a/src/app/panels/graph/module.js +++ b/src/app/panels/graph/module.js @@ -130,12 +130,6 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { $scope.refreshData = function(datasource) { panelHelper.updateTimeRange($scope); - if ($scope.panel.snapshotData) { - $scope.annotationsPromise = $q.when([]); - $scope.dataHandler($scope.panel.snapshotData); - return; - } - $scope.annotationsPromise = annotationsSrv.getAnnotations($scope.rangeUnparsed, $scope.dashboard); return panelHelper.issueMetricQuery($scope, datasource) @@ -146,10 +140,13 @@ function (angular, app, $, _, kbn, moment, TimeSeries, PanelMeta) { }); }; + $scope.loadSnapshot = function(snapshotData) { + panelHelper.updateTimeRange($scope); + $scope.annotationsPromise = $q.when([]); + $scope.dataHandler(snapshotData); + }; + $scope.dataHandler = function(results) { - if ($scope.dashboard.snapshot) { - $scope.panel.snapshotData = results; - } // png renderer returns just a url if (_.isString(results)) { $scope.render(results); diff --git a/src/app/panels/singlestat/module.js b/src/app/panels/singlestat/module.js index 81167a83a7d..8f33ce57cae 100644 --- a/src/app/panels/singlestat/module.js +++ b/src/app/panels/singlestat/module.js @@ -87,6 +87,11 @@ function (angular, app, _, TimeSeries, kbn, PanelMeta) { }); }; + $scope.loadSnapshot = function(snapshotData) { + panelHelper.updateTimeRange($scope); + $scope.dataHandler(snapshotData); + }; + $scope.dataHandler = function(results) { $scope.series = _.map(results.data, $scope.seriesHandler); $scope.render(); From 6f2a8e27b8a521439630fb13b703510a30856f0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 23 Mar 2015 15:36:18 -0400 Subject: [PATCH 017/274] Dashboard Snapshot: added dashboard snapshot to changelog, #1623 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 15e1527ad8c..690c94a0fbf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # 2.0.0 (unreleased) **New features** +- [Issue #1623](https://github.com/grafana/grafana/issues/1623). Share Dashboard: Dashboard snapshot sharing (dash and data snapshot), save to local or save to public snapshot dashboard snapshots.raintank.io site - [Issue #1622](https://github.com/grafana/grafana/issues/1622). Share Panel: The share modal now has an embed option, gives you an iframe that you can use to embedd a single graph on another web site - [Issue #718](https://github.com/grafana/grafana/issues/718). Dashboard: When saving a dashboard and another user has made changes inbetween the user is promted with a warning if he really wants to overwrite the other's changes - [Issue #1331](https://github.com/grafana/grafana/issues/1331). Graph & Singlestat: New axis/unit format selector and more units (kbytes, Joule, Watt, eV), and new design for graph axis & grid tab and single stat options tab views From 98c0209976a16bf7e604837718e285ddd62e21c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 23 Mar 2015 17:34:41 -0400 Subject: [PATCH 018/274] Dashboard snapshot: cleanup snapshot data after snapshot, #1623 --- src/app/features/dashboard/shareSnapshotCtrl.js | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index 88247e3138b..6a787cee752 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -26,6 +26,12 @@ function (angular) { panel.links = []; }); + // cleanup snapshotData + $scope.dashboard.snapshot = false; + $scope.dashboard.forEachPanel(function(panel) { + delete panel.snapshotData; + }); + var apiUrl = '/api/snapshots'; if (makePublic) { @@ -46,8 +52,7 @@ function (angular) { $scope.loading = false; }); - 
$scope.dashboard.snapshot = false; - $scope.appEvent('dashboard-snapshot-cleanup'); + }, 2000); }; From 5f0e7cd52a3e78f02190cc769f91e52f27748445 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 23 Mar 2015 18:28:59 -0400 Subject: [PATCH 019/274] Added custom cache control headers for static content --- conf/defaults.ini | 7 +- pkg/api/dashboard_snapshot.go | 1 + pkg/api/static/static.go | 218 ++++++++++++++++++ pkg/cmd/web.go | 20 +- .../features/dashboard/shareSnapshotCtrl.js | 58 ++--- tasks/options/requirejs.js | 1 + 6 files changed, 267 insertions(+), 38 deletions(-) create mode 100644 pkg/api/static/static.go diff --git a/conf/defaults.ini b/conf/defaults.ini index d35f71fa4ce..2e4ea89b3f2 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -1,10 +1,9 @@ app_name = Grafana app_mode = production -# Once every 1 hour Grafana will report anonymous data to -# stats.grafana.org (https). No ip addresses are being tracked. -# only simple counters to track running instances, dashboard -# count and errors. It is very helpful to us. +# Report anonymous usage counters to stats.grafana.org (https). +# No ip addresses are being tracked, only simple counters to track +# running instances, dashboard count and errors. It is very helpful to us. # Change this option to false to disable reporting. 
reporting-enabled = true diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index 979a3aa86f2..e4841074901 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -35,5 +35,6 @@ func GetDashboardSnapshot(c *middleware.Context) { Meta: dtos.DashboardMeta{IsSnapshot: true}, } + c.Resp.Header().Set("Cache-Control", "public max-age: 31536000") c.JSON(200, dto) } diff --git a/pkg/api/static/static.go b/pkg/api/static/static.go new file mode 100644 index 00000000000..43ba6a32b20 --- /dev/null +++ b/pkg/api/static/static.go @@ -0,0 +1,218 @@ +// Copyright 2013 Martini Authors +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package httpstatic + +import ( + "log" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "sync" + + "github.com/Unknwon/macaron" +) + +var Root string + +func init() { + var err error + Root, err = os.Getwd() + if err != nil { + panic("error getting work directory: " + err.Error()) + } +} + +// StaticOptions is a struct for specifying configuration options for the macaron.Static middleware. +type StaticOptions struct { + // Prefix is the optional prefix used to serve the static directory content + Prefix string + // SkipLogging will disable [Static] log messages when a static file is served. + SkipLogging bool + // IndexFile defines which file to serve as index if it exists. 
+ IndexFile string + // Expires defines which user-defined function to use for producing a HTTP Expires Header + // https://developers.google.com/speed/docs/insights/LeverageBrowserCaching + AddHeaders func(ctx *macaron.Context) + // FileSystem is the interface for supporting any implmentation of file system. + FileSystem http.FileSystem +} + +// FIXME: to be deleted. +type staticMap struct { + lock sync.RWMutex + data map[string]*http.Dir +} + +func (sm *staticMap) Set(dir *http.Dir) { + sm.lock.Lock() + defer sm.lock.Unlock() + + sm.data[string(*dir)] = dir +} + +func (sm *staticMap) Get(name string) *http.Dir { + sm.lock.RLock() + defer sm.lock.RUnlock() + + return sm.data[name] +} + +func (sm *staticMap) Delete(name string) { + sm.lock.Lock() + defer sm.lock.Unlock() + + delete(sm.data, name) +} + +var statics = staticMap{sync.RWMutex{}, map[string]*http.Dir{}} + +// staticFileSystem implements http.FileSystem interface. +type staticFileSystem struct { + dir *http.Dir +} + +func newStaticFileSystem(directory string) staticFileSystem { + if !filepath.IsAbs(directory) { + directory = filepath.Join(Root, directory) + } + dir := http.Dir(directory) + statics.Set(&dir) + return staticFileSystem{&dir} +} + +func (fs staticFileSystem) Open(name string) (http.File, error) { + return fs.dir.Open(name) +} + +func prepareStaticOption(dir string, opt StaticOptions) StaticOptions { + // Defaults + if len(opt.IndexFile) == 0 { + opt.IndexFile = "index.html" + } + // Normalize the prefix if provided + if opt.Prefix != "" { + // Ensure we have a leading '/' + if opt.Prefix[0] != '/' { + opt.Prefix = "/" + opt.Prefix + } + // Remove any trailing '/' + opt.Prefix = strings.TrimRight(opt.Prefix, "/") + } + if opt.FileSystem == nil { + opt.FileSystem = newStaticFileSystem(dir) + } + return opt +} + +func prepareStaticOptions(dir string, options []StaticOptions) StaticOptions { + var opt StaticOptions + if len(options) > 0 { + opt = options[0] + } + return prepareStaticOption(dir, 
opt) +} + +func staticHandler(ctx *macaron.Context, log *log.Logger, opt StaticOptions) bool { + if ctx.Req.Method != "GET" && ctx.Req.Method != "HEAD" { + return false + } + + file := ctx.Req.URL.Path + // if we have a prefix, filter requests by stripping the prefix + if opt.Prefix != "" { + if !strings.HasPrefix(file, opt.Prefix) { + return false + } + file = file[len(opt.Prefix):] + if file != "" && file[0] != '/' { + return false + } + } + + f, err := opt.FileSystem.Open(file) + if err != nil { + return false + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return true // File exists but fail to open. + } + + // Try to serve index file + if fi.IsDir() { + // Redirect if missing trailing slash. + if !strings.HasSuffix(ctx.Req.URL.Path, "/") { + http.Redirect(ctx.Resp, ctx.Req.Request, ctx.Req.URL.Path+"/", http.StatusFound) + return true + } + + file = path.Join(file, opt.IndexFile) + f, err = opt.FileSystem.Open(file) + if err != nil { + return false // Discard error. + } + defer f.Close() + + fi, err = f.Stat() + if err != nil || fi.IsDir() { + return true + } + } + + if !opt.SkipLogging { + log.Println("[Static] Serving " + file) + } + + // Add an Expires header to the static content + if opt.AddHeaders != nil { + opt.AddHeaders(ctx) + } + + http.ServeContent(ctx.Resp, ctx.Req.Request, file, fi.ModTime(), f) + return true +} + +// Static returns a middleware handler that serves static files in the given directory. +func Static(directory string, staticOpt ...StaticOptions) macaron.Handler { + opt := prepareStaticOptions(directory, staticOpt) + + return func(ctx *macaron.Context, log *log.Logger) { + staticHandler(ctx, log, opt) + } +} + +// Statics registers multiple static middleware handlers all at once. 
+func Statics(opt StaticOptions, dirs ...string) macaron.Handler { + if len(dirs) == 0 { + panic("no static directory is given") + } + opts := make([]StaticOptions, len(dirs)) + for i := range dirs { + opts[i] = prepareStaticOption(dirs[i], opt) + } + + return func(ctx *macaron.Context, log *log.Logger) { + for i := range opts { + if staticHandler(ctx, log, opts[i]) { + return + } + } + } +} diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index e5516fb52d9..a8482185e78 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -11,7 +11,6 @@ import ( "path" "path/filepath" "strconv" - "time" "github.com/Unknwon/macaron" "github.com/codegangsta/cli" @@ -20,6 +19,7 @@ import ( _ "github.com/macaron-contrib/session/postgres" "github.com/grafana/grafana/pkg/api" + "github.com/grafana/grafana/pkg/api/static" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" @@ -65,14 +65,22 @@ func newMacaron() *macaron.Macaron { } func mapStatic(m *macaron.Macaron, dir string, prefix string) { - m.Use(macaron.Static( + headers := func(c *macaron.Context) { + c.Resp.Header().Set("Cache-Control", "public max-age: 3600") + } + + if setting.Env == setting.DEV { + headers = func(c *macaron.Context) { + c.Resp.Header().Set("Cache-Control", "max-age: 0") + } + } + + m.Use(httpstatic.Static( path.Join(setting.StaticRootPath, dir), - macaron.StaticOptions{ + httpstatic.StaticOptions{ SkipLogging: true, Prefix: prefix, - Expires: func() string { - return time.Now().UTC().Format(http.TimeFormat) - }, + AddHeaders: headers, }, )) } diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index 6a787cee752..fc7973a676c 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -18,42 +18,44 @@ function (angular) { $rootScope.$broadcast('refresh'); $timeout(function() { - var dash = angular.copy($scope.dashboard); - dash.title 
= $scope.snapshot.name; + $scope.saveSnapshot(makePublic); + }, 2000); + }; - dash.forEachPanel(function(panel) { - panel.targets = []; - panel.links = []; - }); + $scope.saveSnapshot = function(makePublic) { + var dash = angular.copy($scope.dashboard); + dash.title = $scope.snapshot.name; - // cleanup snapshotData - $scope.dashboard.snapshot = false; - $scope.dashboard.forEachPanel(function(panel) { - delete panel.snapshotData; - }); + dash.forEachPanel(function(panel) { + panel.targets = []; + panel.links = []; + }); - var apiUrl = '/api/snapshots'; + // cleanup snapshotData + $scope.dashboard.snapshot = false; + $scope.dashboard.forEachPanel(function(panel) { + delete panel.snapshotData; + }); + var apiUrl = '/api/snapshots'; + + if (makePublic) { + apiUrl = 'http://snapshots.raintank.io/api/snapshots'; + } + + backendSrv.post(apiUrl, {dashboard: dash}).then(function(results) { + $scope.loading = false; + + var baseUrl = $location.absUrl().replace($location.url(), ""); if (makePublic) { - apiUrl = 'http://snapshots.raintank.io/api/snapshots'; + baseUrl = 'http://snapshots.raintank.io'; } - backendSrv.post(apiUrl, {dashboard: dash}).then(function(results) { - $scope.loading = false; + $scope.snapshotUrl = baseUrl + '/dashboard/snapshots/' + results.key; - var baseUrl = $location.absUrl().replace($location.url(), ""); - if (makePublic) { - baseUrl = 'http://snapshots.raintank.io'; - } - - $scope.snapshotUrl = baseUrl + '/dashboard/snapshots/' + results.key; - - }, function() { - $scope.loading = false; - }); - - - }, 2000); + }, function() { + $scope.loading = false; + }); }; }); diff --git a/tasks/options/requirejs.js b/tasks/options/requirejs.js index 947553f1266..d9edf76ada4 100644 --- a/tasks/options/requirejs.js +++ b/tasks/options/requirejs.js @@ -61,6 +61,7 @@ module.exports = function(config,grunt) { 'controllers/all', 'routes/all', 'components/partials', + 'plugins/datasource/grafana/datasource', ] } ]; From 3e9adeefbcf81ba1633801582fe8e553dfeeb73d Mon 
Sep 17 00:00:00 2001 From: Matt Robenolt Date: Mon, 23 Mar 2015 21:58:29 -0700 Subject: [PATCH 020/274] Fix format of Cache-Control header --- pkg/api/dashboard_snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index e4841074901..aa20c4b8a7f 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -35,6 +35,6 @@ func GetDashboardSnapshot(c *middleware.Context) { Meta: dtos.DashboardMeta{IsSnapshot: true}, } - c.Resp.Header().Set("Cache-Control", "public max-age: 31536000") + c.Resp.Header().Set("Cache-Control", "public, max-age=31536000") c.JSON(200, dto) } From 527e802b05d808531381ce3020e34f19c8a3aeb1 Mon Sep 17 00:00:00 2001 From: Stefan Wehner Date: Tue, 24 Mar 2015 11:37:26 +0100 Subject: [PATCH 021/274] Limit ElasticSearch return to title and tags --- src/app/features/elasticsearch/datasource.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/app/features/elasticsearch/datasource.js b/src/app/features/elasticsearch/datasource.js index 3c82d98f8d2..8fb9d8c12d5 100644 --- a/src/app/features/elasticsearch/datasource.js +++ b/src/app/features/elasticsearch/datasource.js @@ -270,7 +270,8 @@ function (angular, _, config, kbn, moment) { query: { query_string: { query: queryString } }, facets: { tags: { terms: { field: "tags", order: "term", size: 50 } } }, size: this.searchMaxResults, - sort: ["_uid"] + sort: ["_uid"], + fields: ["title", "tags"] }; return this._post('/dashboard/_search', query) @@ -286,8 +287,8 @@ function (angular, _, config, kbn, moment) { var hit = resultsHits[i]; displayHits.dashboards.push({ id: hit._id, - title: hit._source.title, - tags: hit._source.tags + title: hit.fields.title, + tags: hit.fields.tags }); } From c27db7a3471c0bb61abb5e6cf56025ce1339a4ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 24 Mar 2015 15:45:31 +0100 Subject: [PATCH 022/274] Small updates to 
share dashboard snapshot feature --- pkg/api/dashboard_snapshot.go | 2 +- .../dashboard/partials/shareDashboard.html | 47 +++++++++---------- .../features/dashboard/shareSnapshotCtrl.js | 7 ++- src/css/less/gfbox.less | 16 ------- src/css/less/grafana.less | 27 +++++++++++ 5 files changed, 55 insertions(+), 44 deletions(-) diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index aa20c4b8a7f..8bb6c6e6df3 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -9,7 +9,7 @@ import ( ) func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { - cmd.Key = util.GetRandomString(20) + cmd.Key = util.GetRandomString(32) if err := bus.Dispatch(&cmd); err != nil { c.JsonApiErr(500, "Failed to create snaphost", err) diff --git a/src/app/features/dashboard/partials/shareDashboard.html b/src/app/features/dashboard/partials/shareDashboard.html index bfe126b390f..320042b64bf 100644 --- a/src/app/features/dashboard/partials/shareDashboard.html +++ b/src/app/features/dashboard/partials/shareDashboard.html @@ -50,36 +50,33 @@
    -
    - -
    -
    -
      -
    • - Snapshot name -
    • -
    • - -
    • -
    -
    -
    +
    +
    +
      +
    • + Snapshot name +
    • +
    • + +
    • +
    +
    +
    -
    - +
    +
    + +
    +
    +
    diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index fc7973a676c..c9a71f4230b 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -6,7 +6,7 @@ function (angular) { var module = angular.module('grafana.controllers'); - module.controller('ShareSnapshotCtrl', function($scope, $rootScope, $location, backendSrv, $timeout) { + module.controller('ShareSnapshotCtrl', function($scope, $rootScope, $location, backendSrv, $timeout, timeSrv) { $scope.snapshot = { name: $scope.dashboard.title @@ -24,8 +24,11 @@ function (angular) { $scope.saveSnapshot = function(makePublic) { var dash = angular.copy($scope.dashboard); + // change title dash.title = $scope.snapshot.name; - + // make relative times absolute + dash.time = timeSrv.timeRange(); + // remove panel queries & links dash.forEachPanel(function(panel) { panel.targets = []; panel.links = []; diff --git a/src/css/less/gfbox.less b/src/css/less/gfbox.less index 995ccb5b435..50401e21c50 100644 --- a/src/css/less/gfbox.less +++ b/src/css/less/gfbox.less @@ -97,20 +97,4 @@ } } -.share-snapshot { - text-align: center; - .share-snapshot-header { - .fa { - position: absolute; - font-size: 600%; - left: 41%; - color: @grafanaTargetFuncBackground; - z-index: -1; - } - - position: relative; - z-index: 1000; - line-height: 106px; - } -} diff --git a/src/css/less/grafana.less b/src/css/less/grafana.less index 579415f17f8..6b6e17ce834 100644 --- a/src/css/less/grafana.less +++ b/src/css/less/grafana.less @@ -294,3 +294,30 @@ } } } + +.share-snapshot { + text-align: center; + + .share-snapshot-header { + .fa { + position: absolute; + font-size: 600%; + left: 42%; + color: @grafanaTargetFuncBackground; + z-index: -1; + } + + position: relative; + z-index: 1000; + line-height: 106px; + margin: 45px 0 22px 0; + } + + .share-snapshot-link { + max-width: 716px; + white-space: nowrap; + overflow: hidden; + 
display: block; + text-overflow: ellipsis; + } +} From ddd3df26b103beb5e3b24fc845e3b19b5ef01bb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 24 Mar 2015 15:52:22 +0100 Subject: [PATCH 023/274] Fixed docs spelling issue, #1634 --- docs/sources/installation/performance.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/installation/performance.md b/docs/sources/installation/performance.md index 535cf72a228..ce41e7a0548 100644 --- a/docs/sources/installation/performance.md +++ b/docs/sources/installation/performance.md @@ -11,6 +11,6 @@ page_keywords: grafana, performance, documentation Graphite 0.9.13 adds a much needed feature to the json rendering API that is very important for Grafana. If you are experiance slow load & rendering times for large time ranges then it is most likely caused by running Graphite 0.9.12 or lower. The latest version of Graphite adds a maxDataPoints parameter to the json render API, without this feature Graphite can return hundreds of thousands of data points per graph, which -can hang your browser. Be sue to upgrade to [0.9.13](http://graphite.readthedocs.org/en/latest/releases/0_9_13.html). +can hang your browser. Be sure to upgrade to [0.9.13](http://graphite.readthedocs.org/en/latest/releases/0_9_13.html). 
From a5c3855233b20dcbff641541a2a78de4a54e6237 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 24 Mar 2015 16:49:12 +0100 Subject: [PATCH 024/274] Added dashboard snapshot metrics --- pkg/api/dashboard_snapshot.go | 6 ++++++ pkg/metrics/metrics.go | 3 +++ 2 files changed, 9 insertions(+) diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index 8bb6c6e6df3..635e1d14711 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -3,6 +3,7 @@ package api import ( "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" + "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/util" @@ -16,6 +17,8 @@ func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapsho return } + metrics.M_Api_Dashboard_Snapshot_Create.Inc(1) + c.JSON(200, util.DynMap{"key": cmd.Key}) } @@ -35,6 +38,9 @@ func GetDashboardSnapshot(c *middleware.Context) { Meta: dtos.DashboardMeta{IsSnapshot: true}, } + metrics.M_Api_Dashboard_Snapshot_Get.Inc(1) + c.Resp.Header().Set("Cache-Control", "public, max-age=31536000") + c.JSON(200, dto) } diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 45b964fb56e..71a5aeaacf5 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -21,5 +21,8 @@ var ( M_Api_Login_OAuth = NewComboCounterRef("api.login.oauth") M_Api_Org_Create = NewComboCounterRef("api.org.create") + M_Api_Dashboard_Snapshot_Create = NewComboCounterRef("api.dashboard_snapshot.create") + M_Api_Dashboard_Snapshot_Get = NewComboCounterRef("api.dashboard_snapshot.get") + M_Models_Dashboard_Insert = NewComboCounterRef("models.dashboard.insert") ) From 7919d79347792b7d93de448844a0ce3fbb289de0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 24 Mar 2015 17:16:13 +0100 Subject: [PATCH 025/274] Another cache header fix --- pkg/cmd/web.go | 
4 ++-- src/app/features/dashboard/rowCtrl.js | 8 ++++++++ src/app/partials/dashboard.html | 8 ++++++++ src/css/less/panel.less | 12 ++++++++++++ 4 files changed, 30 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index a8482185e78..72f19f6675f 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -66,12 +66,12 @@ func newMacaron() *macaron.Macaron { func mapStatic(m *macaron.Macaron, dir string, prefix string) { headers := func(c *macaron.Context) { - c.Resp.Header().Set("Cache-Control", "public max-age: 3600") + c.Resp.Header().Set("Cache-Control", "public, max-age: 3600") } if setting.Env == setting.DEV { headers = func(c *macaron.Context) { - c.Resp.Header().Set("Cache-Control", "max-age: 0") + c.Resp.Header().Set("Cache-Control", "max-age: 0, must-revalidate") } } diff --git a/src/app/features/dashboard/rowCtrl.js b/src/app/features/dashboard/rowCtrl.js index 409fee2fd37..527a29a2ad5 100644 --- a/src/app/features/dashboard/rowCtrl.js +++ b/src/app/features/dashboard/rowCtrl.js @@ -168,4 +168,12 @@ function (angular, app, _, config) { }; }); + module.directive('panelGhostPanel', function() { + return function(scope, element) { + var dropZoneSpan = 12 - scope.dashboard.rowSpan(scope.row); + element.find('.panel-container').css('height', scope.row.height); + element[0].style.width = ((dropZoneSpan / 1.2) * 10) + '%'; + }; + }); + }); diff --git a/src/app/partials/dashboard.html b/src/app/partials/dashboard.html index 6d7d0634fcd..29c4d581df3 100644 --- a/src/app/partials/dashboard.html +++ b/src/app/partials/dashboard.html @@ -86,6 +86,14 @@
    +
    +
    +
    +

    Add panel

    +
    +
    +
    +
    diff --git a/src/css/less/panel.less b/src/css/less/panel.less index 6a8c2355b5b..e3f1fd2b598 100644 --- a/src/css/less/panel.less +++ b/src/css/less/panel.less @@ -169,6 +169,18 @@ } } +.ghost-panel { + &:hover { + .panel-container { + visibility: visible; + } + } + .panel-container { + visibility: hidden; + border: 1px solid @grayDark; + } +} + .panel-time-info { font-weight: bold; float: right; From cc71b1f07d6ad3a4e069f0f4fa22a4bfbb7187af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 24 Mar 2015 19:42:39 +0100 Subject: [PATCH 026/274] Ghost panel test --- src/app/features/dashboard/rowCtrl.js | 4 ++-- src/app/features/panel/panelMenu.js | 6 +++--- src/app/features/panel/panelSrv.js | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/app/features/dashboard/rowCtrl.js b/src/app/features/dashboard/rowCtrl.js index 527a29a2ad5..2e18291719d 100644 --- a/src/app/features/dashboard/rowCtrl.js +++ b/src/app/features/dashboard/rowCtrl.js @@ -81,13 +81,13 @@ function (angular, app, _, config) { $scope.$broadcast('render'); }; - $scope.remove_panel_from_row = function(row, panel) { + $scope.removePanel = function(panel) { $scope.appEvent('confirm-modal', { title: 'Are you sure you want to remove this panel?', icon: 'fa-trash', yesText: 'Delete', onConfirm: function() { - row.panels = _.without(row.panels, panel); + $scope.row.panels = _.without($scope.row.panels, panel); } }); }; diff --git a/src/app/features/panel/panelMenu.js b/src/app/features/panel/panelMenu.js index a529dd87b5c..96fcc521f49 100644 --- a/src/app/features/panel/panelMenu.js +++ b/src/app/features/panel/panelMenu.js @@ -20,9 +20,9 @@ function (angular, $, _) { var template = '
    '; template += '
    '; template += '
    '; - template += ''; - template += ''; - template += ''; + template += ''; + template += ''; + template += ''; template += '
    '; template += '
    '; diff --git a/src/app/features/panel/panelSrv.js b/src/app/features/panel/panelSrv.js index 8113194dfaf..4c5e34cdbfa 100644 --- a/src/app/features/panel/panelSrv.js +++ b/src/app/features/panel/panelSrv.js @@ -41,6 +41,7 @@ function (angular, _, config) { $scope.updateColumnSpan = function(span) { $scope.panel.span = Math.min(Math.max($scope.panel.span + span, 1), 12); + $scope.row.updatePanelSpan() $timeout(function() { $scope.$broadcast('render'); From f9cf673f81f8d5c0520137ebd62e2ad3b526fd6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 24 Mar 2015 19:49:51 +0100 Subject: [PATCH 027/274] removed accidental code, should have been part of ghost-panel branch commit --- src/app/partials/dashboard.html | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/app/partials/dashboard.html b/src/app/partials/dashboard.html index 29c4d581df3..6d7d0634fcd 100644 --- a/src/app/partials/dashboard.html +++ b/src/app/partials/dashboard.html @@ -86,14 +86,6 @@
    -
    -
    -
    -

    Add panel

    -
    -
    -
    -
    From 789363b0ad7e8f3d9d7b0ee80d711b4952e2b161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 24 Mar 2015 21:10:44 +0100 Subject: [PATCH 028/274] Added ghost panel that shows up empty rows, this panel will show add panel buttons to more quickly/easier get to add a panel, #1635 --- src/app/features/dashboard/rowCtrl.js | 33 ++++++++++++++++++--------- src/app/features/panel/panelMenu.js | 4 ++-- src/app/features/panel/panelSrv.js | 3 +-- src/app/partials/dashboard.html | 22 +++++++++++------- src/app/partials/roweditor.html | 28 ++--------------------- src/css/less/panel.less | 24 ++++++++++++------- 6 files changed, 57 insertions(+), 57 deletions(-) diff --git a/src/app/features/dashboard/rowCtrl.js b/src/app/features/dashboard/rowCtrl.js index 2e18291719d..e2ab9a0c156 100644 --- a/src/app/features/dashboard/rowCtrl.js +++ b/src/app/features/dashboard/rowCtrl.js @@ -38,11 +38,6 @@ function (angular, app, _, config) { } }; - // This can be overridden by individual panels - $scope.close_edit = function() { - $scope.$broadcast('render'); - }; - $scope.add_panel = function(panel) { $scope.dashboard.add_panel(panel, $scope.row); }; @@ -92,6 +87,10 @@ function (angular, app, _, config) { }); }; + $scope.updatePanelSpan = function(panel, span) { + panel.span = Math.min(Math.max(panel.span + span, 1), 12); + }; + $scope.replacePanel = function(newPanel, oldPanel) { var row = $scope.row; var index = _.indexOf(row.panels, oldPanel); @@ -144,9 +143,11 @@ function (angular, app, _, config) { module.directive('panelWidth', function() { return function(scope, element) { - scope.$watch('panel.span', function() { + function updateWidth() { element[0].style.width = ((scope.panel.span / 1.2) * 10) + '%'; - }); + } + + scope.$watch('panel.span', updateWidth); }; }); @@ -168,11 +169,21 @@ function (angular, app, _, config) { }; }); - module.directive('panelGhostPanel', function() { + module.directive('panelGhost', function() { return function(scope, 
element) { - var dropZoneSpan = 12 - scope.dashboard.rowSpan(scope.row); - element.find('.panel-container').css('height', scope.row.height); - element[0].style.width = ((dropZoneSpan / 1.2) * 10) + '%'; + function updateWidth() { + var spanLeft = 12 - scope.dashboard.rowSpan(scope.row); + if (spanLeft > 1) { + element.show(); + element.find('.panel-container').css('height', scope.row.height); + element[0].style.width = ((spanLeft / 1.2) * 10) + '%'; + } else { + element.hide(); + } + } + + updateWidth(); + scope.$on('dashboard-panel-span-updated', updateWidth); }; }); diff --git a/src/app/features/panel/panelMenu.js b/src/app/features/panel/panelMenu.js index 96fcc521f49..1f7d6aba71c 100644 --- a/src/app/features/panel/panelMenu.js +++ b/src/app/features/panel/panelMenu.js @@ -20,8 +20,8 @@ function (angular, $, _) { var template = '
    '; template += '
    '; template += '
    '; - template += ''; - template += ''; + template += ''; + template += ''; template += ''; template += '
    '; template += '
    '; diff --git a/src/app/features/panel/panelSrv.js b/src/app/features/panel/panelSrv.js index 4c5e34cdbfa..0e983fc3d07 100644 --- a/src/app/features/panel/panelSrv.js +++ b/src/app/features/panel/panelSrv.js @@ -40,8 +40,7 @@ function (angular, _, config) { }; $scope.updateColumnSpan = function(span) { - $scope.panel.span = Math.min(Math.max($scope.panel.span + span, 1), 12); - $scope.row.updatePanelSpan() + $scope.updatePanelSpan($scope.panel, span); $timeout(function() { $scope.$broadcast('render'); diff --git a/src/app/partials/dashboard.html b/src/app/partials/dashboard.html index 6d7d0634fcd..edfe1bfcafb 100644 --- a/src/app/partials/dashboard.html +++ b/src/app/partials/dashboard.html @@ -86,14 +86,20 @@
    -
    -
    -
    - Drop here -
    -
    +
    +
    +
    + +
    +
    +
    + +
    +
    +
    + Drop here +
    +
    diff --git a/src/app/partials/roweditor.html b/src/app/partials/roweditor.html index 4c77a0bf814..177a8fd4ce5 100644 --- a/src/app/partials/roweditor.html +++ b/src/app/partials/roweditor.html @@ -5,7 +5,7 @@
    -
    +
    @@ -26,29 +26,5 @@
    -
    -
    - - - - - - - - - - - - - - - - - -
    TitleTypeSpan
    {{panel.title}}{{panel.type}} - - - -
    -
    +
    diff --git a/src/css/less/panel.less b/src/css/less/panel.less index e3f1fd2b598..20164acfb9e 100644 --- a/src/css/less/panel.less +++ b/src/css/less/panel.less @@ -169,15 +169,23 @@ } } -.ghost-panel { - &:hover { - .panel-container { - visibility: visible; - } - } +.panel-ghost{ + width: 100%; .panel-container { - visibility: hidden; - border: 1px solid @grayDark; + border: none; + background: transparent; + } + .panel-ghost-list { + margin: 10px 0 10px 20px; + } + + button { + text-align: left; + min-width: 135px; + .fa { + position: relative; + left: -5px; + } } } From 5286f0856d0f7c7765c709d0559d9ef168fbe8b1 Mon Sep 17 00:00:00 2001 From: Matt Robenolt Date: Tue, 24 Mar 2015 17:30:26 -0700 Subject: [PATCH 029/274] Fix more Cache-Control headers `max-age` is always with an `=`, not a `:`. --- pkg/cmd/web.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index 72f19f6675f..1fc6e8a999c 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -66,12 +66,12 @@ func newMacaron() *macaron.Macaron { func mapStatic(m *macaron.Macaron, dir string, prefix string) { headers := func(c *macaron.Context) { - c.Resp.Header().Set("Cache-Control", "public, max-age: 3600") + c.Resp.Header().Set("Cache-Control", "public, max-age=3600") } if setting.Env == setting.DEV { headers = func(c *macaron.Context) { - c.Resp.Header().Set("Cache-Control", "max-age: 0, must-revalidate") + c.Resp.Header().Set("Cache-Control", "max-age=0, must-revalidate, no-cache") } } From 9268ecf3e9ad0a94ee8e6d0dc6564dfc793d88ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 25 Mar 2015 09:04:38 +0100 Subject: [PATCH 030/274] Some refinements to dashboard snapshots --- pkg/api/dashboard_snapshot.go | 37 ++++++++++++++++++- pkg/metrics/metrics.go | 5 ++- pkg/metrics/report_usage.go | 19 +++++----- pkg/models/dashboard_snapshot.go | 1 + .../features/dashboard/shareSnapshotCtrl.js | 21 ++++------- 5 files changed, 56 
insertions(+), 27 deletions(-) diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index 635e1d14711..c4035e921d5 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -1,17 +1,27 @@ package api import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "time" + "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" + "github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/util" ) func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { - cmd.Key = util.GetRandomString(32) + if cmd.External { + createExternalSnapshot(c, cmd) + } + cmd.Key = util.GetRandomString(32) if err := bus.Dispatch(&cmd); err != nil { c.JsonApiErr(500, "Failed to create snaphost", err) return @@ -19,7 +29,30 @@ func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapsho metrics.M_Api_Dashboard_Snapshot_Create.Inc(1) - c.JSON(200, util.DynMap{"key": cmd.Key}) + c.JSON(200, util.DynMap{"key": cmd.Key, "url": setting.ToAbsUrl("/dashboard/snapshots")}) +} + +func createExternalSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { + metrics.M_Api_Dashboard_Snapshot_External.Inc(1) + + json, _ := json.Marshal(cmd) + jsonData := bytes.NewBuffer(json) + + client := http.Client{Timeout: time.Duration(5 * time.Second)} + resp, err := client.Post("http://snapshots-origin.raintank.io/api/snapshots", "application/json", jsonData) + + if err != nil { + c.JsonApiErr(500, "Failed to publish external snapshot", err) + return + } + + c.Header().Set("Content-Type", resp.Header.Get("Content-Type")) + c.WriteHeader(resp.StatusCode) + + if resp.ContentLength > 0 { + bytes, _ := ioutil.ReadAll(resp.Body) + c.Write(bytes) + } } func GetDashboardSnapshot(c *middleware.Context) { diff --git a/pkg/metrics/metrics.go 
b/pkg/metrics/metrics.go index 71a5aeaacf5..f6dab8c8043 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -21,8 +21,9 @@ var ( M_Api_Login_OAuth = NewComboCounterRef("api.login.oauth") M_Api_Org_Create = NewComboCounterRef("api.org.create") - M_Api_Dashboard_Snapshot_Create = NewComboCounterRef("api.dashboard_snapshot.create") - M_Api_Dashboard_Snapshot_Get = NewComboCounterRef("api.dashboard_snapshot.get") + M_Api_Dashboard_Snapshot_Create = NewComboCounterRef("api.dashboard_snapshot.create") + M_Api_Dashboard_Snapshot_External = NewComboCounterRef("api.dashboard_snapshot.external") + M_Api_Dashboard_Snapshot_Get = NewComboCounterRef("api.dashboard_snapshot.get") M_Models_Dashboard_Insert = NewComboCounterRef("models.dashboard.insert") ) diff --git a/pkg/metrics/report_usage.go b/pkg/metrics/report_usage.go index f952e56fab6..4a4355c5deb 100644 --- a/pkg/metrics/report_usage.go +++ b/pkg/metrics/report_usage.go @@ -7,7 +7,9 @@ import ( "strings" "time" + "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/log" + m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" ) @@ -34,11 +36,11 @@ func sendUsageStats() { "metrics": metrics, } - // statsQuery := m.GetSystemStatsQuery{} - // if err := bus.Dispatch(&statsQuery); err != nil { - // log.Error(3, "Failed to get system stats", err) - // return - // } + statsQuery := m.GetSystemStatsQuery{} + if err := bus.Dispatch(&statsQuery); err != nil { + log.Error(3, "Failed to get system stats", err) + return + } UsageStats.Each(func(name string, i interface{}) { switch metric := i.(type) { @@ -50,14 +52,13 @@ func sendUsageStats() { } }) - // metrics["stats.dashboards.count"] = statsQuery.Result.DashboardCount - // metrics["stats.users.count"] = statsQuery.Result.UserCount - // metrics["stats.orgs.count"] = statsQuery.Result.OrgCount + metrics["stats.dashboards.count"] = statsQuery.Result.DashboardCount + metrics["stats.users.count"] = statsQuery.Result.UserCount 
+ metrics["stats.orgs.count"] = statsQuery.Result.OrgCount out, _ := json.Marshal(report) data := bytes.NewBuffer(out) client := http.Client{Timeout: time.Duration(5 * time.Second)} - go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data) } diff --git a/pkg/models/dashboard_snapshot.go b/pkg/models/dashboard_snapshot.go index 61abca12673..8f96b27f6ae 100644 --- a/pkg/models/dashboard_snapshot.go +++ b/pkg/models/dashboard_snapshot.go @@ -20,6 +20,7 @@ type DashboardSnapshot struct { type CreateDashboardSnapshotCommand struct { Dashboard map[string]interface{} `json:"dashboard" binding:"Required"` + External bool Key string `json:"-"` diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index c9a71f4230b..a3f279b9c64 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -22,7 +22,7 @@ function (angular) { }, 2000); }; - $scope.saveSnapshot = function(makePublic) { + $scope.saveSnapshot = function(external) { var dash = angular.copy($scope.dashboard); // change title dash.title = $scope.snapshot.name; @@ -40,22 +40,15 @@ function (angular) { delete panel.snapshotData; }); - var apiUrl = '/api/snapshots'; - - if (makePublic) { - apiUrl = 'http://snapshots.raintank.io/api/snapshots'; - } - - backendSrv.post(apiUrl, {dashboard: dash}).then(function(results) { + backendSrv.post('/api/snapshots', {dashboard: dash, external: external}).then(function(results) { $scope.loading = false; - var baseUrl = $location.absUrl().replace($location.url(), ""); - if (makePublic) { - baseUrl = 'http://snapshots.raintank.io'; + if (external) { + $scope.snapshotUrl = results.url; + } else { + var baseUrl = $location.absUrl().replace($location.url(), ""); + $scope.snapshotUrl = baseUrl + '/dashboard/snapshots/' + results.key; } - - $scope.snapshotUrl = baseUrl + '/dashboard/snapshots/' + results.key; - }, function() { $scope.loading = 
false; }); From da833cbc5853068c26a6a4a1475cf00576aafaf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 25 Mar 2015 11:07:12 +0100 Subject: [PATCH 031/274] Small progress on influxdb 0.9 query editor, #1525 --- src/app/directives/graphiteSegment.js | 4 +- .../plugins/datasource/graphite/queryCtrl.js | 19 +- .../plugins/datasource/influxdb/datasource.js | 74 ++--- .../influxdb/partials/query.editor.html | 284 ++++-------------- .../plugins/datasource/influxdb/queryCtrl.js | 203 ++++++++----- 5 files changed, 226 insertions(+), 358 deletions(-) diff --git a/src/app/directives/graphiteSegment.js b/src/app/directives/graphiteSegment.js index 577bbf5d6b7..c8ad131e6c7 100644 --- a/src/app/directives/graphiteSegment.js +++ b/src/app/directives/graphiteSegment.js @@ -37,12 +37,14 @@ function (angular, app, _, $) { if (selected) { segment.value = selected.value; segment.html = selected.html; + segment.fake = false; segment.expandable = selected.expandable; } else { segment.value = value; segment.html = $sce.trustAsHtml(value); segment.expandable = true; + segment.fake = false; } $scope.segmentValueChanged(segment, $scope.$index); }); @@ -71,7 +73,7 @@ function (angular, app, _, $) { options = _.map($scope.altSegments, function(alt) { return alt.value; }); // add custom values - if (segment.value !== 'select metric' && _.indexOf(options, segment.value) === -1) { + if (!segment.fake && _.indexOf(options, segment.value) === -1) { options.unshift(segment.value); } diff --git a/src/app/plugins/datasource/graphite/queryCtrl.js b/src/app/plugins/datasource/graphite/queryCtrl.js index d878386d461..069fa815797 100644 --- a/src/app/plugins/datasource/graphite/queryCtrl.js +++ b/src/app/plugins/datasource/graphite/queryCtrl.js @@ -113,7 +113,7 @@ function (angular, _, config, gfunc, Parser) { function checkOtherSegments(fromIndex) { if (fromIndex === 0) { - $scope.segments.push(new MetricSegment('select metric')); + 
$scope.segments.push(MetricSegment.newSelectMetric()); return; } @@ -123,13 +123,13 @@ function (angular, _, config, gfunc, Parser) { if (segments.length === 0) { if (path !== '') { $scope.segments = $scope.segments.splice(0, fromIndex); - $scope.segments.push(new MetricSegment('select metric')); + $scope.segments.push(MetricSegment.newSelectMetric()); } return; } if (segments[0].expandable) { if ($scope.segments.length === fromIndex) { - $scope.segments.push(new MetricSegment('select metric')); + $scope.segments.push(MetricSegment.newSelectMetric()); } else { return checkOtherSegments(fromIndex + 1); @@ -238,7 +238,7 @@ function (angular, _, config, gfunc, Parser) { $scope.moveAliasFuncLast(); $scope.smartlyHandleNewAliasByNode(newFunc); - if ($scope.segments.length === 1 && $scope.segments[0].value === 'select metric') { + if ($scope.segments.length === 1 && $scope.segments[0].fake) { $scope.segments = []; } @@ -298,18 +298,17 @@ function (angular, _, config, gfunc, Parser) { return; } - if (_.isString(options)) { - this.value = options; - this.html = $sce.trustAsHtml(this.value); - return; - } - + this.fake = options.fake; this.value = options.value; this.type = options.type; this.expandable = options.expandable; this.html = $sce.trustAsHtml(templateSrv.highlightVariablesAsHtml(this.value)); } + MetricSegment.newSelectMetric = function() { + return new MetricSegment({value: 'select metric', fake: true}); + }; + }); module.directive('focusMe', function($timeout, $parse) { diff --git a/src/app/plugins/datasource/influxdb/datasource.js b/src/app/plugins/datasource/influxdb/datasource.js index 26bacfe2c59..dbaf443affd 100644 --- a/src/app/plugins/datasource/influxdb/datasource.js +++ b/src/app/plugins/datasource/influxdb/datasource.js @@ -36,7 +36,7 @@ function (angular, _, kbn, InfluxSeries, InfluxQueryBuilder) { var timeFilter = getTimeFilter(options); var promises = _.map(options.targets, function(target) { - if (target.hide || !((target.series && target.column) 
|| target.query)) { + if (target.hide || !target.query) { return []; } @@ -73,40 +73,7 @@ function (angular, _, kbn, InfluxSeries, InfluxQueryBuilder) { }); }; - InfluxDatasource.prototype.listColumns = function(seriesName) { - seriesName = templateSrv.replace(seriesName); - - if(!seriesName.match('^/.*/') && !seriesName.match(/^merge\(.*\)/)) { - seriesName = '"' + seriesName+ '"'; - } - - return this._seriesQuery('select * from ' + seriesName + ' limit 1').then(function(data) { - if (!data) { - return []; - } - return data[0].columns.map(function(item) { - return /^\w+$/.test(item) ? item : ('"' + item + '"'); - }); - }); - }; - - InfluxDatasource.prototype.listSeries = function(query) { - // wrap in regex - if (query && query.length > 0 && query[0] !== '/') { - query = '/' + query + '/'; - } - - return this._seriesQuery('SHOW MEASUREMENTS').then(function(data) { - if (!data || data.length === 0) { - return []; - } - return _.map(data[0].points, function(point) { - return point[1]; - }); - }); - }; - - InfluxDatasource.prototype.metricFindQuery = function (query) { + InfluxDatasource.prototype.metricFindQuery = function (query, queryType) { var interpolated; try { interpolated = templateSrv.replace(query); @@ -115,17 +82,30 @@ function (angular, _, kbn, InfluxSeries, InfluxQueryBuilder) { return $q.reject(err); } - return this._seriesQuery(interpolated) - .then(function (results) { - if (!results || results.length === 0) { return []; } + console.log('metricFindQuery called with: ' + [query, queryType].join(', ')); - return _.map(results[0].points, function (metric) { - return { - text: metric[1], - expandable: false - }; - }); - }); + return this._seriesQuery(interpolated, queryType).then(function (results) { + if (!results || results.results.length === 0) { return []; } + + var influxResults = results.results[0]; + if (!influxResults.series) { + return []; + } + + console.log('metric find query response', results); + var series = influxResults.series[0]; + + 
switch (queryType) { + case 'MEASUREMENTS': + return _.map(series.values, function(value) { return { text: value[0], expandable: true }; }); + case 'TAG_KEYS': + var tagKeys = _.flatten(series.values); + return _.map(tagKeys, function(tagKey) { return { text: tagKey, expandable: true }; }); + case 'TAG_VALUES': + var tagValues = _.flatten(series.values); + return _.map(tagValues, function(tagValue) { return { text: tagValue, expandable: true }; }); + } + }); }; function retry(deferred, callback, delay) { @@ -143,9 +123,7 @@ function (angular, _, kbn, InfluxSeries, InfluxQueryBuilder) { } InfluxDatasource.prototype._seriesQuery = function(query) { - return this._influxRequest('GET', '/query', { - q: query, - }); + return this._influxRequest('GET', '/query', {q: query}); }; InfluxDatasource.prototype._influxRequest = function(method, url, data) { diff --git a/src/app/plugins/datasource/influxdb/partials/query.editor.html b/src/app/plugins/datasource/influxdb/partials/query.editor.html index d3d7d0ff95d..c91f13c57c6 100644 --- a/src/app/plugins/datasource/influxdb/partials/query.editor.html +++ b/src/app/plugins/datasource/influxdb/partials/query.editor.html @@ -1,18 +1,47 @@
    -
    -
    -
    + +
    +
    -
    -
    -
      -
    • - -
    • -
    • - group by time -
    • -
    • - -
    • -
    • - -
    • -
    -
    -
    - - -
    -
    - -
    -
    Alias patterns
    -
      -
    • $s = series name
    • -
    • $g = group by
    • -
    • $[0-9] part of series name for series names seperated by dots.
    • -
    -
    - -
    -
    Stacking and fill
    -
      -
    • When stacking is enabled it important that points align
    • -
    • If there are missing points for one series it can cause gaps or missing bars
    • -
    • You must use fill(0), and select a group by time low limit
    • -
    • Use the group by time option below your queries and specify for example >10s if your metrics are written every 10 seconds
    • -
    • This will insert zeros for series that are missing measurements and will make stacking work properly
    • -
    -
    - -
    -
    Group by time
    -
      -
    • Group by time is important, otherwise the query could return many thousands of datapoints that will slow down Grafana
    • -
    • Leave the group by time field empty for each query and it will be calculated based on time range and pixel width of the graph
    • -
    • If you use fill(0) or fill(null) set a low limit for the auto group by time interval
    • -
    • The low limit can only be set in the group by time option below your queries
    • -
    • You set a low limit by adding a greater sign before the interval
    • -
    • Example: >60s if you write metrics to InfluxDB every 60 seconds
    • -
    -
    - -
    - - diff --git a/src/app/plugins/datasource/influxdb/queryCtrl.js b/src/app/plugins/datasource/influxdb/queryCtrl.js index 608b5845d88..517ed79e1a1 100644 --- a/src/app/plugins/datasource/influxdb/queryCtrl.js +++ b/src/app/plugins/datasource/influxdb/queryCtrl.js @@ -7,93 +7,23 @@ function (angular, _) { var module = angular.module('grafana.controllers'); - var seriesList = null; - - module.controller('InfluxQueryCtrl', function($scope, $timeout) { + module.controller('InfluxQueryCtrl', function($scope, $timeout, $sce, templateSrv, $q) { $scope.init = function() { - var target = $scope.target; + $scope.segments = $scope.target.segments || []; - target.function = target.function || 'mean'; - target.column = target.column || 'value'; - - // backward compatible correction of schema - if (target.condition_value) { - target.condition = target.condition_key + ' ' + target.condition_op + ' ' + target.condition_value; - delete target.condition_key; - delete target.condition_op; - delete target.condition_value; - } - - if (target.groupby_field_add === false) { - target.groupby_field = ''; - delete target.groupby_field_add; - } - - $scope.rawQuery = true; - - $scope.functions = [ + $scope.functionsSelect = [ 'count', 'mean', 'sum', 'min', 'max', 'mode', 'distinct', 'median', 'derivative', 'stddev', 'first', 'last', 'difference' ]; - $scope.operators = ['=', '=~', '>', '<', '!~', '<>']; - $scope.oldSeries = target.series; - $scope.$on('typeahead-updated', function() { - $timeout($scope.get_data); - }); + checkOtherSegments(0); }; - $scope.showQuery = function () { - $scope.target.rawQuery = true; - }; - - $scope.hideQuery = function () { - $scope.target.rawQuery = false; - }; - - // Cannot use typeahead and ng-change on blur at the same time - $scope.seriesBlur = function() { - if ($scope.oldSeries !== $scope.target.series) { - $scope.oldSeries = $scope.target.series; - $scope.columnList = null; - $scope.get_data(); - } - }; - - $scope.changeFunction = function(func) { - 
$scope.target.function = func; - $scope.get_data(); - }; - - // called outside of digest - $scope.listColumns = function(query, callback) { - if (!$scope.columnList) { - $scope.$apply(function() { - $scope.datasource.listColumns($scope.target.series).then(function(columns) { - $scope.columnList = columns; - callback(columns); - }); - }); - } - else { - return $scope.columnList; - } - }; - - $scope.listSeries = function(query, callback) { - if (query !== '') { - seriesList = []; - $scope.datasource.listSeries(query).then(function(series) { - seriesList = series; - callback(seriesList); - }); - } - else { - return seriesList; - } + $scope.toggleQueryMode = function () { + $scope.target.rawQuery = !$scope.target.rawQuery; }; $scope.moveMetricQuery = function(fromIndex, toIndex) { @@ -105,6 +35,127 @@ function (angular, _) { $scope.panel.targets.push(clone); }; + $scope.getAltSegments = function (index) { + $scope.altSegments = []; + + var measurement = $scope.segments[0].value; + var queryType, query; + if (index === 0) { + queryType = 'MEASUREMENTS'; + query = 'SHOW MEASUREMENTS'; + } else if (index % 2 === 1) { + queryType = 'TAG_KEYS'; + query = 'SHOW TAG KEYS FROM ' + measurement; + } else { + queryType = 'TAG_VALUES'; + query = "SHOW TAG VALUES FROM " + measurement + " WITH KEY = " + $scope.segments[$scope.segments.length - 2].value; + } + + console.log('getAltSegments: query' , query); + + return $scope.datasource.metricFindQuery(query, queryType).then(function(results) { + console.log('get alt segments: response', results); + $scope.altSegments = _.map(results, function(segment) { + return new MetricSegment({ value: segment.text, expandable: segment.expandable }); + }); + + _.each(templateSrv.variables, function(variable) { + $scope.altSegments.unshift(new MetricSegment({ + type: 'template', + value: '$' + variable.name, + expandable: true, + })); + }); + }, function(err) { + $scope.parserError = err.message || 'Failed to issue metric query'; + }); + }; + + 
$scope.segmentValueChanged = function (segment, segmentIndex) { + delete $scope.parserError; + + if (segment.expandable) { + return checkOtherSegments(segmentIndex + 1).then(function () { + setSegmentFocus(segmentIndex + 1); + $scope.targetChanged(); + }); + } + else { + $scope.segments = $scope.segments.splice(0, segmentIndex + 1); + } + + setSegmentFocus(segmentIndex + 1); + $scope.targetChanged(); + }; + + $scope.targetChanged = function() { + if ($scope.parserError) { + return; + } + + $scope.$parent.get_data(); + }; + + function checkOtherSegments(fromIndex) { + if (fromIndex === 0) { + $scope.segments.push(MetricSegment.newSelectMetric()); + return; + } + + if ($scope.segments.length === 0) { + throw('should always have a scope segment?'); + } + + if (_.last($scope.segments).fake) { + return $q.when([]); + } else if ($scope.segments.length % 2 === 1) { + $scope.segments.push(MetricSegment.newSelectTag()); + return $q.when([]); + } else { + $scope.segments.push(MetricSegment.newSelectTagValue()); + return $q.when([]); + } + } + + function setSegmentFocus(segmentIndex) { + _.each($scope.segments, function(segment, index) { + segment.focus = segmentIndex === index; + }); + } + + function MetricSegment(options) { + if (options === '*' || options.value === '*') { + this.value = '*'; + this.html = $sce.trustAsHtml(''); + this.expandable = true; + return; + } + + if (_.isString(options)) { + this.value = options; + this.html = $sce.trustAsHtml(this.value); + return; + } + + this.fake = options.fake; + this.value = options.value; + this.type = options.type; + this.expandable = options.expandable; + this.html = $sce.trustAsHtml(templateSrv.highlightVariablesAsHtml(this.value)); + } + + MetricSegment.newSelectMetric = function() { + return new MetricSegment({value: 'select metric', fake: true}); + }; + + MetricSegment.newSelectTag = function() { + return new MetricSegment({value: 'select tag', fake: true}); + }; + + MetricSegment.newSelectTagValue = function() { + 
return new MetricSegment({value: 'select tag value', fake: true}); + }; + }); }); From 1f6d5bfd530ae4d367cd30e30d46b2f86ab8254c Mon Sep 17 00:00:00 2001 From: tuexss Date: Wed, 25 Mar 2015 12:03:20 +0100 Subject: [PATCH 032/274] readme cleanup --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index dc9ed582fda..517ec0d4ac8 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [IRC](http://webchat.freenode.net/?channels=grafana) | [Email](mailto:contact@grafana.org) -Grafana is An open source, feature rich metrics dashboard and graph editor for +Grafana is an open source, feature rich metrics dashboard and graph editor for Graphite, InfluxDB & OpenTSDB. ![](http://grafana.org/assets/img/start_page_bg.png) @@ -25,13 +25,13 @@ The code is available in the [develop](https://github.com/grafana/grafana/tree/d - [See it in action](http://grafana.org/docs/features/graphite) ### Graphing -- Fast rendering, even over large timespans. -- Click and drag to zoom. -- Multiple Y-axis. -- Bars, Lines, Points. 
+- Fast rendering, even over large timespans +- Click and drag to zoom +- Multiple Y-axis +- Bars, Lines, Points - Smart Y-axis formating - Series toggles & color selector -- Legend values, and formating options +- Legend values, and formatting options - Grid thresholds, axis labels - [Annotations](http://grafana.org/docs/features/annotations) @@ -48,7 +48,7 @@ The code is available in the [develop](https://github.com/grafana/grafana/tree/d - [Time range controls](http://grafana.org/docs/features/time_range) ### InfluxDB -- Use InfluxDB as a metric data source, annotation source and for dashboard storage +- Use InfluxDB as a metric data source, annotation source, and for dashboard storage - Query editor with series and column typeahead, easy group by and function selection ### OpenTSDB @@ -62,7 +62,7 @@ There are no dependencies, Grafana is a client side application that runs in you Head to [grafana.org](http://grafana.org) and [download](http://grafana.org/download/) the latest release. -Then follow the quick [setup & config guide](http://grafana.org/docs/). If you have any problems please +Then follow the [quick setup & config guide](http://grafana.org/docs/). If you have any problems please read the [troubleshooting guide](http://grafana.org/docs/troubleshooting). ## Documentation & Support @@ -70,12 +70,12 @@ Be sure to read the [getting started guide](http://grafana.org/docs/features/int feature guides. ## Run from master -Grafana uses nodejs and grunt for asset management (css & javascript), unit test runner and javascript syntax verification. +Grafana uses Node.js and Grunt for asset management (css & javascript), unit test runner and javascript syntax verification. 
- clone repository - install nodejs - npm install (in project root) - npm install -g grunt-cli -- grunt (runt default task that will generate css files) +- grunt (grunt default task that will generate css files) - grunt build (creates optimized & minified release) - grunt release (same as grunt build but will also create tar & zip package) - grunt test (executes jshint and unit tests) From f235b516dca76516eb43aca3e7b71d0444ccce84 Mon Sep 17 00:00:00 2001 From: tuexss Date: Wed, 25 Mar 2015 12:14:26 +0100 Subject: [PATCH 033/274] http->https for latest version --- latest.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/latest.json b/latest.json index 90189fabeca..1ca0904fd94 100644 --- a/latest.json +++ b/latest.json @@ -1,4 +1,4 @@ { "version": "1.9.1", - "url": "http://grafanarel.s3.amazonaws.com/grafana-1.9.1.tar.gz" + "url": "https://grafanarel.s3.amazonaws.com/grafana-1.9.1.tar.gz" } From 152b01064a7a6ad32bb799c2abde7a6a4667a6eb Mon Sep 17 00:00:00 2001 From: tuexss Date: Wed, 25 Mar 2015 12:17:22 +0100 Subject: [PATCH 034/274] http -> https for external links --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index dc9ed582fda..04c4d8c6dd4 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ [Grafana](http://grafana.org) [![Build Status](https://api.travis-ci.org/grafana/grafana.svg)](https://travis-ci.org/grafana/grafana) [![Coverage Status](https://coveralls.io/repos/grafana/grafana/badge.png)](https://coveralls.io/r/grafana/grafana) [![Gitter](https://badges.gitter.im/Join Chat.svg)](https://gitter.im/grafana/grafana?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) ================ [Website](http://grafana.org) | -[Twitter](http://twitter.com/grafana) | -[IRC](http://webchat.freenode.net/?channels=grafana) | +[Twitter](https://twitter.com/grafana) | +[IRC](https://webchat.freenode.net/?channels=grafana) | [Email](mailto:contact@grafana.org) Grafana is An 
open source, feature rich metrics dashboard and graph editor for From e31a3a64e19cb8896207c1633c75ef5b22d44872 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 25 Mar 2015 12:27:33 +0100 Subject: [PATCH 035/274] OpenTSDB: Alias patterns (reference tag values), syntax is: or [[tag_tagname]], Closes #1344, match opentsdb response to query, Fixes #1601 --- CHANGELOG.md | 1 + src/app/features/templating/templateSrv.js | 14 ++++- .../plugins/datasource/opentsdb/datasource.js | 62 ++++++++++++------- .../opentsdb/partials/query.editor.html | 3 +- src/test/specs/templateSrv-specs.js | 16 +++++ 5 files changed, 69 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 690c94a0fbf..15f4fe5fa22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ - [Issue #599](https://github.com/grafana/grafana/issues/599). Graph: Added right y axis label setting and graph support - [Issue #1253](https://github.com/grafana/grafana/issues/1253). Graph & Singlestat: Users can now set decimal precision for legend and tooltips (override auto precision) - [Issue #1255](https://github.com/grafana/grafana/issues/1255). Templating: Dashboard will now wait to load until all template variables that have refresh on load set or are initialized via url to be fully loaded and so all variables are in valid state before panels start issuing metric requests. +- [Issue #1344](https://github.com/grafana/grafana/issues/1344). OpenTSDB: Alias patterns (reference tag values), syntax is: $tag_tagname or [[tag_tagname]] **Fixes** - [Issue #1298](https://github.com/grafana/grafana/issues/1298). 
InfluxDB: Fix handling of empty array in templating variable query diff --git a/src/app/features/templating/templateSrv.js b/src/app/features/templating/templateSrv.js index 8fc45097944..09aed015386 100644 --- a/src/app/features/templating/templateSrv.js +++ b/src/app/features/templating/templateSrv.js @@ -63,13 +63,18 @@ function (angular, _) { }); }; - this.replace = function(target) { + this.replace = function(target, scopedVars) { if (!target) { return; } var value; this._regex.lastIndex = 0; return target.replace(this._regex, function(match, g1, g2) { + if (scopedVars) { + value = scopedVars[g1 || g2]; + if (value) { return value.value; } + } + value = self._values[g1 || g2]; if (!value) { return match; } @@ -77,7 +82,7 @@ function (angular, _) { }); }; - this.replaceWithText = function(target) { + this.replaceWithText = function(target, scopedVars) { if (!target) { return; } var value; @@ -85,6 +90,11 @@ function (angular, _) { this._regex.lastIndex = 0; return target.replace(this._regex, function(match, g1, g2) { + if (scopedVars) { + var option = scopedVars[g1 || g2]; + if (option) { return option.text; } + } + value = self._values[g1 || g2]; text = self._texts[g1 || g2]; if (!value) { return match; } diff --git a/src/app/plugins/datasource/opentsdb/datasource.js b/src/app/plugins/datasource/opentsdb/datasource.js index 3ade07a7aac..cd0c83b7c1d 100644 --- a/src/app/plugins/datasource/opentsdb/datasource.js +++ b/src/app/plugins/datasource/opentsdb/datasource.js @@ -46,13 +46,14 @@ function (angular, _, kbn) { }); }); - return this.performTimeSeriesQuery(queries, start, end) - .then(_.bind(function(response) { - var result = _.map(response.data, _.bind(function(metricData, index) { - return transformMetricData(metricData, groupByTags, this.targets[index]); - }, this)); - return { data: result }; - }, options)); + return this.performTimeSeriesQuery(queries, start, end).then(function(response) { + var metricToTargetMapping = mapMetricsToTargets(response.data, 
options.targets); + var result = _.map(response.data, function(metricData, index) { + index = metricToTargetMapping[index]; + return transformMetricData(metricData, groupByTags, options.targets[index]); + }); + return { data: result }; + }); }; OpenTSDBDatasource.prototype.performTimeSeriesQuery = function(queries, start, end) { @@ -90,19 +91,8 @@ function (angular, _, kbn) { }; function transformMetricData(md, groupByTags, options) { - var dps = [], - tagData = [], - metricLabel = null; - - if (!_.isEmpty(md.tags)) { - _.each(_.pairs(md.tags), function(tag) { - if (_.has(groupByTags, tag[0])) { - tagData.push(tag[0] + "=" + tag[1]); - } - }); - } - - metricLabel = createMetricLabel(md.metric, tagData, options); + var metricLabel = createMetricLabel(md, options, groupByTags); + var dps = []; // TSDB returns datapoints has a hash of ts => value. // Can't use _.pairs(invert()) because it stringifies keys/values @@ -113,16 +103,31 @@ function (angular, _, kbn) { return { target: metricLabel, datapoints: dps }; } - function createMetricLabel(metric, tagData, options) { + function createMetricLabel(md, options, groupByTags) { if (!_.isUndefined(options) && options.alias) { - return options.alias; + var scopedVars = {}; + _.each(md.tags, function(value, key) { + scopedVars['tag_' + key] = {value: value}; + }); + return templateSrv.replace(options.alias, scopedVars); + } + + var label = md.metric; + var tagData = []; + + if (!_.isEmpty(md.tags)) { + _.each(_.pairs(md.tags), function(tag) { + if (_.has(groupByTags, tag[0])) { + tagData.push(tag[0] + "=" + tag[1]); + } + }); } if (!_.isEmpty(tagData)) { - metric += "{" + tagData.join(", ") + "}"; + label += "{" + tagData.join(", ") + "}"; } - return metric; + return label; } function convertTargetToQuery(target, interval) { @@ -174,6 +179,15 @@ function (angular, _, kbn) { return query; } + function mapMetricsToTargets(metrics, targets) { + return _.map(metrics, function(metricData) { + return _.findIndex(targets, 
function(target) { + return target.metric === metricData.metric && + _.all(target.tags, function(tagV, tagK) { return metricData.tags[tagK] !== void 0; }); + }); + }); + } + function convertToTSDBTime(date) { if (date === 'now') { return null; diff --git a/src/app/plugins/datasource/opentsdb/partials/query.editor.html b/src/app/plugins/datasource/opentsdb/partials/query.editor.html index 79dd6cd5ffd..a5478ff0cc3 100644 --- a/src/app/plugins/datasource/opentsdb/partials/query.editor.html +++ b/src/app/plugins/datasource/opentsdb/partials/query.editor.html @@ -81,10 +81,11 @@
  • Alias: + Use patterns like $tag_tagname to replace part of the alias for a tag value
  • Date: Wed, 25 Mar 2015 13:43:52 +0100 Subject: [PATCH 036/274] Removed ghost panel --- src/app/features/dashboard/rowCtrl.js | 18 ------------------ src/app/partials/dashboard.html | 8 -------- 2 files changed, 26 deletions(-) diff --git a/src/app/features/dashboard/rowCtrl.js b/src/app/features/dashboard/rowCtrl.js index e2ab9a0c156..494f10657fc 100644 --- a/src/app/features/dashboard/rowCtrl.js +++ b/src/app/features/dashboard/rowCtrl.js @@ -169,22 +169,4 @@ function (angular, app, _, config) { }; }); - module.directive('panelGhost', function() { - return function(scope, element) { - function updateWidth() { - var spanLeft = 12 - scope.dashboard.rowSpan(scope.row); - if (spanLeft > 1) { - element.show(); - element.find('.panel-container').css('height', scope.row.height); - element[0].style.width = ((spanLeft / 1.2) * 10) + '%'; - } else { - element.hide(); - } - } - - updateWidth(); - scope.$on('dashboard-panel-span-updated', updateWidth); - }; - }); - }); diff --git a/src/app/partials/dashboard.html b/src/app/partials/dashboard.html index edfe1bfcafb..39e05ebecb6 100644 --- a/src/app/partials/dashboard.html +++ b/src/app/partials/dashboard.html @@ -86,14 +86,6 @@
  • -
    -
    -
    - -
    -
    -
    -
    From 10618637e2620603635239c37dd8569ad2782010 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 25 Mar 2015 13:53:58 +0100 Subject: [PATCH 037/274] Fixed bug in sql migration, closes #1643 --- pkg/services/sqlstore/migrations/datasource_mig.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/sqlstore/migrations/datasource_mig.go b/pkg/services/sqlstore/migrations/datasource_mig.go index 924e1a16189..4f046b1f8e9 100644 --- a/pkg/services/sqlstore/migrations/datasource_mig.go +++ b/pkg/services/sqlstore/migrations/datasource_mig.go @@ -95,5 +95,5 @@ func addDataSourceMigration(mg *Migrator) { "updated": "updated", })) - mg.AddMigration("Drop old table data_source_v1", NewDropTableMigration("data_source_old")) + mg.AddMigration("Drop old table data_source_v1 #2", NewDropTableMigration("data_source_v1")) } From 9c5e116d09abea0e1d27c8b39521db223c1bc861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 25 Mar 2015 14:14:45 +0100 Subject: [PATCH 038/274] Fixed small file nameing issue in build script --- build.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.go b/build.go index f7d77129d92..4ee59928d67 100644 --- a/build.go +++ b/build.go @@ -90,8 +90,8 @@ func main() { func makeLatestDistCopies() { runError("cp", "dist/grafana_"+version+"_amd64.deb", "dist/grafana_latest_amd64.deb") - runError("cp", "dist/grafana-"+strings.Replace(version, "-", "_", 5)+"-1.x86_64.rpm", "dist/grafana-latest-1.x84_64.rpm") - runError("cp", "dist/grafana-"+version+".x86_64.tar.gz", "dist/grafana-latest.x84_64.tar.gz") + runError("cp", "dist/grafana-"+strings.Replace(version, "-", "_", 5)+"-1.x86_64.rpm", "dist/grafana-latest-1.x86_64.rpm") + runError("cp", "dist/grafana-"+version+".x86_64.tar.gz", "dist/grafana-latest.x86_64.tar.gz") } func readVersionFromPackageJson() { From 2e6d28027ad5f6edcbce942fb9db6d6416718e28 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 25 Mar 2015 14:19:14 +0100 Subject: [PATCH 039/274] Removed snapshot from dashboard settings dropdown, its only reached through the share menu --- src/app/partials/dashboard_topnav.html | 1 - 1 file changed, 1 deletion(-) diff --git a/src/app/partials/dashboard_topnav.html b/src/app/partials/dashboard_topnav.html index 77cccecb10b..bf3ba635a97 100644 --- a/src/app/partials/dashboard_topnav.html +++ b/src/app/partials/dashboard_topnav.html @@ -40,7 +40,6 @@
  • View JSON
  • Save As...
  • Delete dashboard
  • -
  • Snapshot dashboard
  • From cb3593e4725bc2efcbc6c25eddcf7731ca163fab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 25 Mar 2015 15:48:51 +0100 Subject: [PATCH 040/274] Lots of small fixes, role viewer hides save icon and some actions in config dropdown. Snapshot dashboard hides save, star, config menu icons. Can now embedd panel from snapshotted dashboard. --- pkg/api/dashboard_snapshot.go | 2 +- src/app/features/dashboard/dashboardCtrl.js | 30 ++++++++++++++++++- src/app/features/dashboard/dashboardSrv.js | 1 + src/app/features/dashboard/sharePanelCtrl.js | 12 +++++--- .../features/dashboard/shareSnapshotCtrl.js | 15 ++++++---- src/app/features/panel/panelHelper.js | 1 + src/app/features/panel/soloPanelCtrl.js | 19 ++++++++---- src/app/partials/dashboard_topnav.html | 16 +++++----- src/app/routes/all.js | 14 +++++---- src/test/specs/soloPanelCtrl-specs.js | 2 +- 10 files changed, 80 insertions(+), 32 deletions(-) diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index c4035e921d5..7af9d8b1f66 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -29,7 +29,7 @@ func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapsho metrics.M_Api_Dashboard_Snapshot_Create.Inc(1) - c.JSON(200, util.DynMap{"key": cmd.Key, "url": setting.ToAbsUrl("/dashboard/snapshots")}) + c.JSON(200, util.DynMap{"key": cmd.Key, "url": setting.ToAbsUrl("/dashboard/snapshot")}) } func createExternalSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { diff --git a/src/app/features/dashboard/dashboardCtrl.js b/src/app/features/dashboard/dashboardCtrl.js index 8430ac18631..430363574c8 100644 --- a/src/app/features/dashboard/dashboardCtrl.js +++ b/src/app/features/dashboard/dashboardCtrl.js @@ -17,6 +17,7 @@ function (angular, $, config) { templateValuesSrv, dashboardSrv, dashboardViewStateSrv, + contextSrv, $timeout) { $scope.editor = { index: 0 }; @@ -46,7 +47,7 @@ function (angular, $, config) { 
templateValuesSrv.init(dashboard).then(function() { $scope.dashboard = dashboard; $scope.dashboardViewState = dashboardViewStateSrv.create($scope); - $scope.dashboardMeta = data.meta; + $scope.initDashboardMeta(data.meta, $scope.dashboard); dashboardKeybindings.shortcuts($scope); @@ -57,6 +58,32 @@ function (angular, $, config) { }); }; + $scope.initDashboardMeta = function(meta, dashboard) { + meta.canShare = true; + meta.canSave = true; + meta.canEdit = true; + meta.canStar = true; + + if (contextSrv.hasRole('Viewer')) { + meta.canSave = false; + } + + if (meta.isHome) { + meta.canShare = false; + meta.canStar = false; + meta.canSave = false; + meta.canEdit = false; + } + + if (dashboard.snapshot) { + meta.canEdit = false; + meta.canSave = false; + meta.canStar = false; + } + + $scope.dashboardMeta = meta; + }; + $scope.updateSubmenuVisibility = function() { $scope.submenuEnabled = $scope.dashboard.hasTemplateVarsOrAnnotations(); }; @@ -132,4 +159,5 @@ function (angular, $, config) { }; }); + }); diff --git a/src/app/features/dashboard/dashboardSrv.js b/src/app/features/dashboard/dashboardSrv.js index 77b19b3edd2..20f04b421d7 100644 --- a/src/app/features/dashboard/dashboardSrv.js +++ b/src/app/features/dashboard/dashboardSrv.js @@ -37,6 +37,7 @@ function (angular, $, kbn, _, moment) { this.templating = this._ensureListExist(data.templating); this.annotations = this._ensureListExist(data.annotations); this.refresh = data.refresh; + this.snapshot = data.snapshot; this.schemaVersion = data.schemaVersion || 0; this.version = data.version || 0; diff --git a/src/app/features/dashboard/sharePanelCtrl.js b/src/app/features/dashboard/sharePanelCtrl.js index c7303ab1a68..eb0a7a7a957 100644 --- a/src/app/features/dashboard/sharePanelCtrl.js +++ b/src/app/features/dashboard/sharePanelCtrl.js @@ -71,12 +71,16 @@ function (angular, _, require, config) { } }); - $scope.shareUrl = baseUrl + "?" + paramsArray.join('&'); + var queryParams = "?" 
+ paramsArray.join('&'); + $scope.shareUrl = baseUrl + queryParams; - $scope.soloUrl = $scope.shareUrl.replace('/dashboard/db/', '/dashboard/solo/'); - $scope.iframeHtml = ''; + var soloUrl = $scope.shareUrl; + soloUrl = soloUrl.replace('/dashboard/db/', '/dashboard/solo/db/'); + soloUrl = soloUrl.replace('/dashboard/snapshot/', '/dashboard/solo/snapshot/'); - $scope.imageUrl = $scope.shareUrl.replace('/dashboard/db/', '/render/dashboard/solo/'); + $scope.iframeHtml = ''; + + $scope.imageUrl = soloUrl.replace('/dashboard/', '/render/dashboard/'); $scope.imageUrl += '&width=1000'; $scope.imageUrl += '&height=500'; }; diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index a3f279b9c64..5c41b0ee649 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -12,14 +12,17 @@ function (angular) { name: $scope.dashboard.title }; - $scope.createSnapshot = function(makePublic) { - $scope.dashboard.snapshot = true; + $scope.createSnapshot = function(external) { + $scope.dashboard.snapshot = { + timestamp: new Date() + }; + $scope.loading = true; $rootScope.$broadcast('refresh'); $timeout(function() { - $scope.saveSnapshot(makePublic); - }, 2000); + $scope.saveSnapshot(external); + }, 3000); }; $scope.saveSnapshot = function(external) { @@ -35,7 +38,7 @@ function (angular) { }); // cleanup snapshotData - $scope.dashboard.snapshot = false; + delete $scope.dashboard.snapshot; $scope.dashboard.forEachPanel(function(panel) { delete panel.snapshotData; }); @@ -47,7 +50,7 @@ function (angular) { $scope.snapshotUrl = results.url; } else { var baseUrl = $location.absUrl().replace($location.url(), ""); - $scope.snapshotUrl = baseUrl + '/dashboard/snapshots/' + results.key; + $scope.snapshotUrl = baseUrl + '/dashboard/snapshot/' + results.key; } }, function() { $scope.loading = false; diff --git a/src/app/features/panel/panelHelper.js 
b/src/app/features/panel/panelHelper.js index 62982f69438..442bcbac8c5 100644 --- a/src/app/features/panel/panelHelper.js +++ b/src/app/features/panel/panelHelper.js @@ -8,6 +8,7 @@ function (angular, _, kbn, $) { 'use strict'; var module = angular.module('grafana.services'); + module.service('panelHelper', function(timeSrv) { this.updateTimeRange = function(scope) { diff --git a/src/app/features/panel/soloPanelCtrl.js b/src/app/features/panel/soloPanelCtrl.js index c6a01d9ccfe..3c9a3dc34f0 100644 --- a/src/app/features/panel/soloPanelCtrl.js +++ b/src/app/features/panel/soloPanelCtrl.js @@ -26,12 +26,19 @@ function (angular, $) { var params = $location.search(); panelId = parseInt(params.panelId); - backendSrv.getDashboard($routeParams.slug) - .then(function(dashboard) { - $scope.initPanelScope(dashboard); - }).then(null, function(err) { - $scope.appEvent('alert-error', ['Load panel error', err.message]); - }); + var request; + + if ($routeParams.slug) { + request = backendSrv.getDashboard($routeParams.slug); + } else { + request = backendSrv.get('/api/snapshots/' + $routeParams.key); + } + + request.then(function(dashboard) { + $scope.initPanelScope(dashboard); + }).then(null, function(err) { + $scope.appEvent('alert-error', ['Load panel error', err.message]); + }); }; $scope.initPanelScope = function(dashboard) { diff --git a/src/app/partials/dashboard_topnav.html b/src/app/partials/dashboard_topnav.html index bf3ba635a97..aff1f0bbba7 100644 --- a/src/app/partials/dashboard_topnav.html +++ b/src/app/partials/dashboard_topnav.html @@ -18,19 +18,19 @@
    -
    - - - - - - - - - - -
    From 0122a9ab18febb0606233bfef085fc141acfd39d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 26 Mar 2015 18:03:37 +0100 Subject: [PATCH 057/274] Updated whats new doc --- docs/mkdocs.yml | 2 +- .../{changes_in_v2.md => whats-new-in-v2.md} | 14 +++----------- 2 files changed, 4 insertions(+), 12 deletions(-) rename docs/sources/guides/{changes_in_v2.md => whats-new-in-v2.md} (94%) diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index bec14f8a9dd..5d4ddbf8de6 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -34,7 +34,7 @@ pages: - ['installation/migrating_to2.md', 'Installation', 'Migrating from v1.x to v2.x'] - ['guides/gettingstarted.md', 'User Guides', 'Getting started'] -- ['guides/changes_in_v2.md', 'User Guides', 'Changes and New Features in v2.0'] +- ['guides/whats-new-in-v2.md', 'User Guides', "What's New in Grafana v2.0"] - ['guides/screencasts.md', 'User Guides', 'Screencasts'] - ['reference/graph.md', 'Reference', 'Graph Panel'] diff --git a/docs/sources/guides/changes_in_v2.md b/docs/sources/guides/whats-new-in-v2.md similarity index 94% rename from docs/sources/guides/changes_in_v2.md rename to docs/sources/guides/whats-new-in-v2.md index 4bab6314991..76701de0a34 100644 --- a/docs/sources/guides/changes_in_v2.md +++ b/docs/sources/guides/whats-new-in-v2.md @@ -1,14 +1,13 @@ --- -page_title: Changes and new features in Grafana v2.0 +page_title: Whats New in Grafana v2.0 page_description: Changes and new features in Grafana v2.0 page_keywords: grafana, changes, features, documentation --- -# Changes and new features in v2.0 +# What's New in Grafana v2.0 This is a guide that descriptes some of changes and new features that can be found in Grafana v2.0. - ## New dashboard top header @@ -19,14 +18,7 @@ This is a guide that descriptes some of changes and new features that can be fou 4. Star/unstar current dashboard 5. Share current dashboard (Make sure the dashboard is saved before) 6. Save current dashboard -7. 
Settings dropdown - - Dashboard settings - - Annotations - - Templating - - Export (exports current dashboard to json file) - - View JSON (view current dashboard json model) - - Save As... (Copy & Save current dashboard under a new name) - - Delete dashboard +7. Settings dropdown (dashboard settings, annotations, templating, etc) > **Note** In Grafana v2.0 when you change the title of a dashboard and then save it it will no > longer create a new dashboard. It will just change the name for the current dashboard. From 7d0ae23c0e045955e36e706891b79774fb4c2685 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 26 Mar 2015 19:31:43 +0100 Subject: [PATCH 058/274] small docs update --- docs/sources/guides/gettingstarted.md | 13 ++++++++++++- docs/sources/guides/whats-new-in-v2.md | 11 +++++------ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/docs/sources/guides/gettingstarted.md b/docs/sources/guides/gettingstarted.md index df936138483..9e0048263f8 100644 --- a/docs/sources/guides/gettingstarted.md +++ b/docs/sources/guides/gettingstarted.md @@ -8,7 +8,18 @@ page_keywords: grafana, guide, documentation This guide will help you get started and acquainted with the Grafana user interface. ## Interface overview - + +### Dashboard header + + +1. Side menu toggle +2. Dashboard title & Search dropdown (also includes access to New dashboard, Import & Playlist) +3. Star/unstar current dashboard +4. Share current dashboard (Make sure the dashboard is saved before) +5. Save current dashboard +6. 
Settings dropdown (dashboard settings, annotations, templating, etc) + + ## New dashboard ![](/img/animated_gifs/new_dashboard.gif) diff --git a/docs/sources/guides/whats-new-in-v2.md b/docs/sources/guides/whats-new-in-v2.md index 76701de0a34..e78a40d3bcf 100644 --- a/docs/sources/guides/whats-new-in-v2.md +++ b/docs/sources/guides/whats-new-in-v2.md @@ -13,12 +13,11 @@ This is a guide that descriptes some of changes and new features that can be fou 1. Side menu toggle -2. Dashboard search (also includes access to New dashboard, Import & Playlist) -3. Dashboard title -4. Star/unstar current dashboard -5. Share current dashboard (Make sure the dashboard is saved before) -6. Save current dashboard -7. Settings dropdown (dashboard settings, annotations, templating, etc) +2. Dashboard title & Search dropdown (also includes access to New dashboard, Import & Playlist) +3. Star/unstar current dashboard +4. Share current dashboard (Make sure the dashboard is saved before) +5. Save current dashboard +6. Settings dropdown (dashboard settings, annotations, templating, etc) > **Note** In Grafana v2.0 when you change the title of a dashboard and then save it it will no > longer create a new dashboard. It will just change the name for the current dashboard. 
From 4322f29f34b6bbb7e341012fa068a7b27bc30284 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 26 Mar 2015 20:34:58 +0100 Subject: [PATCH 059/274] Dashboard snapshot: added delete key which can be used to delete snapshots, #1623 --- pkg/api/api.go | 2 ++ pkg/api/dashboard_snapshot.go | 28 +++++++++++---- pkg/models/dashboard_snapshot.go | 29 +++++++++++----- pkg/services/sqlstore/dashboard_snapshot.go | 25 ++++++++++---- .../migrations/dashboard_snapshot_mig.go | 34 ++++++++++++++++--- .../dashboard/partials/shareDashboard.html | 3 ++ .../features/dashboard/shareSnapshotCtrl.js | 2 ++ 7 files changed, 97 insertions(+), 26 deletions(-) diff --git a/pkg/api/api.go b/pkg/api/api.go index 4683e95fa40..88f1a7a37ee 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -44,7 +44,9 @@ func Register(r *macaron.Macaron) { // dashboard snapshots r.Post("/api/snapshots/", bind(m.CreateDashboardSnapshotCommand{}), CreateDashboardSnapshot) r.Get("/dashboard/snapshots/*", Index) + r.Get("/api/snapshots/:key", GetDashboardSnapshot) + r.Get("/api/snapshots-delete/:key", DeleteDashboardSnapshot) // authed api r.Group("/api", func() { diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index 0f9c918e011..dd9e8f81e5b 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -1,7 +1,6 @@ package api import ( - "strconv" "time" "github.com/grafana/grafana/pkg/api/dtos" @@ -15,12 +14,15 @@ import ( func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { cmd.Key = util.GetRandomString(32) + cmd.DeleteKey = util.GetRandomString(32) if cmd.External { cmd.OrgId = -1 + cmd.UserId = -1 metrics.M_Api_Dashboard_Snapshot_External.Inc(1) } else { cmd.OrgId = c.OrgId + cmd.UserId = c.UserId metrics.M_Api_Dashboard_Snapshot_Create.Inc(1) } @@ -29,7 +31,12 @@ func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapsho return } - c.JSON(200, util.DynMap{"key": cmd.Key, 
"url": setting.ToAbsUrl("dashboard/snapshot/" + cmd.Key)}) + c.JSON(200, util.DynMap{ + "key": cmd.Key, + "deleteKey": cmd.DeleteKey, + "url": setting.ToAbsUrl("dashboard/snapshot/" + cmd.Key), + "deleteUrl": setting.ToAbsUrl("api/snapshots-delete/" + cmd.DeleteKey), + }) } func GetDashboardSnapshot(c *middleware.Context) { @@ -58,9 +65,18 @@ func GetDashboardSnapshot(c *middleware.Context) { metrics.M_Api_Dashboard_Snapshot_Get.Inc(1) - maxAge := int64(snapshot.Expires.Sub(time.Now()).Seconds()) - - c.Resp.Header().Set("Cache-Control", "public, max-age="+strconv.FormatInt(maxAge, 10)) - + c.Resp.Header().Set("Cache-Control", "public, max-age=3600") c.JSON(200, dto) } + +func DeleteDashboardSnapshot(c *middleware.Context) { + key := c.Params(":key") + cmd := &m.DeleteDashboardSnapshotCommand{DeleteKey: key} + + if err := bus.Dispatch(cmd); err != nil { + c.JsonApiErr(500, "Failed to delete dashboard snapshot", err) + return + } + + c.JSON(200, util.DynMap{"message": "Snapshot deleted. It might take an hour before it is cleared from a CDN cache."}) +} diff --git a/pkg/models/dashboard_snapshot.go b/pkg/models/dashboard_snapshot.go index cd082666ea9..12638f4150f 100644 --- a/pkg/models/dashboard_snapshot.go +++ b/pkg/models/dashboard_snapshot.go @@ -4,10 +4,14 @@ import "time" // DashboardSnapshot model type DashboardSnapshot struct { - Id int64 - Name string - Key string - OrgId int64 + Id int64 + Name string + Key string + DeleteKey string + OrgId int64 + UserId int64 + External bool + ExternalUrl string Expires time.Time Created time.Time @@ -20,16 +24,23 @@ type DashboardSnapshot struct { // COMMANDS type CreateDashboardSnapshotCommand struct { - Dashboard map[string]interface{} `json:"dashboard" binding:"Required"` - External bool `json:"external"` - Expires int64 `json:"expires"` + Dashboard map[string]interface{} `json:"dashboard" binding:"Required"` + External bool `json:"external"` + ExternalUrl string `json:"externalUrl"` + Expires int64 `json:"expires"` - 
OrgId int64 `json:"-"` - Key string `json:"-"` + OrgId int64 `json:"-"` + UserId int64 `json:"-"` + Key string `json:"-"` + DeleteKey string `json:"-"` Result *DashboardSnapshot } +type DeleteDashboardSnapshotCommand struct { + DeleteKey string `json:"-"` +} + type GetDashboardSnapshotQuery struct { Key string diff --git a/pkg/services/sqlstore/dashboard_snapshot.go b/pkg/services/sqlstore/dashboard_snapshot.go index 679f679322c..3f66f49f6b6 100644 --- a/pkg/services/sqlstore/dashboard_snapshot.go +++ b/pkg/services/sqlstore/dashboard_snapshot.go @@ -11,6 +11,7 @@ import ( func init() { bus.AddHandler("sql", CreateDashboardSnapshot) bus.AddHandler("sql", GetDashboardSnapshot) + bus.AddHandler("sql", DeleteDashboardSnapshot) } func CreateDashboardSnapshot(cmd *m.CreateDashboardSnapshotCommand) error { @@ -23,12 +24,16 @@ func CreateDashboardSnapshot(cmd *m.CreateDashboardSnapshotCommand) error { } snapshot := &m.DashboardSnapshot{ - Key: cmd.Key, - OrgId: cmd.OrgId, - Dashboard: cmd.Dashboard, - Expires: expires, - Created: time.Now(), - Updated: time.Now(), + Key: cmd.Key, + DeleteKey: cmd.DeleteKey, + OrgId: cmd.OrgId, + UserId: cmd.UserId, + External: cmd.External, + ExternalUrl: cmd.ExternalUrl, + Dashboard: cmd.Dashboard, + Expires: expires, + Created: time.Now(), + Updated: time.Now(), } _, err := sess.Insert(snapshot) @@ -38,6 +43,14 @@ func CreateDashboardSnapshot(cmd *m.CreateDashboardSnapshotCommand) error { }) } +func DeleteDashboardSnapshot(cmd *m.DeleteDashboardSnapshotCommand) error { + return inTransaction(func(sess *xorm.Session) error { + var rawSql = "DELETE FROM dashboard_snapshot WHERE delete_key=?" 
+ _, err := sess.Exec(rawSql, cmd.DeleteKey) + return err + }) +} + func GetDashboardSnapshot(query *m.GetDashboardSnapshotQuery) error { snapshot := m.DashboardSnapshot{Key: query.Key} has, err := x.Get(&snapshot) diff --git a/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go b/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go index f08d7a11e7e..4386f07ffd1 100644 --- a/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go +++ b/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go @@ -19,12 +19,36 @@ func addDashboardSnapshotMigrations(mg *Migrator) { }, } + // add v4 mg.AddMigration("create dashboard_snapshot table v4", NewAddTableMigration(snapshotV4)) - addTableIndicesMigrations(mg, "v4", snapshotV4) - mg.AddMigration("add org_id to dashboard_snapshot", new(AddColumnMigration). - Table("dashboard_snapshot").Column(&Column{Name: "org_id", Type: DB_BigInt, Nullable: true})) + // drop v4 + addDropAllIndicesMigrations(mg, "v4", snapshotV4) + mg.AddMigration("drop table dashboard_snapshot_v4 #1", NewDropTableMigration("dashboard_snapshot")) - mg.AddMigration("add index org_id to dashboard_snapshot", - NewAddIndexMigration(snapshotV4, &Index{Cols: []string{"org_id"}})) + snapshotV5 := Table{ + Name: "dashboard_snapshot", + Columns: []*Column{ + {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, + {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "key", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "delete_key", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "org_id", Type: DB_BigInt, Nullable: false}, + {Name: "user_id", Type: DB_BigInt, Nullable: false}, + {Name: "external", Type: DB_Bool, Nullable: false}, + {Name: "external_url", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "dashboard", Type: DB_Text, Nullable: false}, + {Name: "expires", Type: DB_DateTime, Nullable: false}, + {Name: "created", Type: DB_DateTime, Nullable: false}, + {Name: "updated", 
Type: DB_DateTime, Nullable: false}, + }, + Indices: []*Index{ + {Cols: []string{"key"}, Type: UniqueIndex}, + {Cols: []string{"delete_key"}, Type: UniqueIndex}, + {Cols: []string{"user_id"}}, + }, + } + + mg.AddMigration("create dashboard_snapshot table v5 #2", NewAddTableMigration(snapshotV5)) + addTableIndicesMigrations(mg, "v5", snapshotV5) } diff --git a/src/app/features/dashboard/partials/shareDashboard.html b/src/app/features/dashboard/partials/shareDashboard.html index f5f466e9917..fefca5a9100 100644 --- a/src/app/features/dashboard/partials/shareDashboard.html +++ b/src/app/features/dashboard/partials/shareDashboard.html @@ -110,6 +110,9 @@
    +
    diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index 36cd1089f3a..59a0b2ca10a 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -84,10 +84,12 @@ function (angular, _) { $scope.loading = false; if (external) { + $scope.deleteUrl = results.deleteUrl; $scope.snapshotUrl = results.url; } else { var baseUrl = $location.absUrl().replace($location.url(), ""); $scope.snapshotUrl = baseUrl + '/dashboard/snapshot/' + results.key; + $scope.deleteUrl = baseUrl + '/api/snapshots-delete/' + results.deleteKey; } $scope.step = 2; From 541cd2e43091be1af739055f478b8f05921c855a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 26 Mar 2015 20:59:41 +0100 Subject: [PATCH 060/274] Dashboard snapshot: more work on snapshot deletion, and saving external reference, #1623 --- pkg/api/dashboard_snapshot.go | 13 ++++++++---- pkg/models/dashboard_snapshot.go | 17 ++++++++-------- pkg/services/sqlstore/dashboard_snapshot.go | 19 +++++++++--------- .../features/dashboard/shareSnapshotCtrl.js | 20 +++++++++++++------ 4 files changed, 41 insertions(+), 28 deletions(-) diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index dd9e8f81e5b..1c641d10c1c 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -13,14 +13,19 @@ import ( ) func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { - cmd.Key = util.GetRandomString(32) - cmd.DeleteKey = util.GetRandomString(32) - if cmd.External { + // external snapshot ref requires key and delete key + if cmd.Key != "" && cmd.DeleteKey != "" { + c.JsonApiErr(400, "Missing key and delete key for external snapshot", nil) + return + } + cmd.OrgId = -1 cmd.UserId = -1 metrics.M_Api_Dashboard_Snapshot_External.Inc(1) } else { + cmd.Key = util.GetRandomString(32) + cmd.DeleteKey = util.GetRandomString(32) cmd.OrgId = 
c.OrgId cmd.UserId = c.UserId metrics.M_Api_Dashboard_Snapshot_Create.Inc(1) @@ -78,5 +83,5 @@ func DeleteDashboardSnapshot(c *middleware.Context) { return } - c.JSON(200, util.DynMap{"message": "Snapshot deleted. It might take an hour before it is cleared from a CDN cache."}) + c.JSON(200, util.DynMap{"message": "Snapshot deleted. It might take an hour before it's cleared from a CDN cache."}) } diff --git a/pkg/models/dashboard_snapshot.go b/pkg/models/dashboard_snapshot.go index 12638f4150f..e8f37e2a236 100644 --- a/pkg/models/dashboard_snapshot.go +++ b/pkg/models/dashboard_snapshot.go @@ -24,15 +24,16 @@ type DashboardSnapshot struct { // COMMANDS type CreateDashboardSnapshotCommand struct { - Dashboard map[string]interface{} `json:"dashboard" binding:"Required"` - External bool `json:"external"` - ExternalUrl string `json:"externalUrl"` - Expires int64 `json:"expires"` + Dashboard map[string]interface{} `json:"dashboard" binding:"Required"` + Expires int64 `json:"expires"` - OrgId int64 `json:"-"` - UserId int64 `json:"-"` - Key string `json:"-"` - DeleteKey string `json:"-"` + // these are passed when storing an external snapshot ref + External bool `json:"external"` + Key string `json:"key"` + DeleteKey string `json:"deleteKey"` + + OrgId int64 `json:"-"` + UserId int64 `json:"-"` Result *DashboardSnapshot } diff --git a/pkg/services/sqlstore/dashboard_snapshot.go b/pkg/services/sqlstore/dashboard_snapshot.go index 3f66f49f6b6..0bbb01ed6bd 100644 --- a/pkg/services/sqlstore/dashboard_snapshot.go +++ b/pkg/services/sqlstore/dashboard_snapshot.go @@ -24,16 +24,15 @@ func CreateDashboardSnapshot(cmd *m.CreateDashboardSnapshotCommand) error { } snapshot := &m.DashboardSnapshot{ - Key: cmd.Key, - DeleteKey: cmd.DeleteKey, - OrgId: cmd.OrgId, - UserId: cmd.UserId, - External: cmd.External, - ExternalUrl: cmd.ExternalUrl, - Dashboard: cmd.Dashboard, - Expires: expires, - Created: time.Now(), - Updated: time.Now(), + Key: cmd.Key, + DeleteKey: cmd.DeleteKey, + 
OrgId: cmd.OrgId, + UserId: cmd.UserId, + External: cmd.External, + Dashboard: cmd.Dashboard, + Expires: expires, + Created: time.Now(), + Updated: time.Now(), } _, err := sess.Insert(snapshot) diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index 59a0b2ca10a..240bd33488b 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -29,6 +29,9 @@ function (angular, _) { {text: 'Public on the web', value: 3}, ]; + $scope.externalUrl = 'http://snapshots-origin.raintank.io'; + $scope.apiUrl = '/api/snapshots'; + $scope.createSnapshot = function(external) { $scope.dashboard.snapshot = { timestamp: new Date() @@ -71,21 +74,18 @@ function (angular, _) { var cmdData = { dashboard: dash, - external: external === true, expires: $scope.snapshot.expires, }; - var apiUrl = '/api/snapshots/'; - if (external) { - apiUrl = "http://snapshots-origin.raintank.io/api/snapshots"; - } + var postUrl = external ? 
$scope.externalUrl + $scope.apiUrl : $scope.apiUrl; - backendSrv.post(apiUrl, cmdData).then(function(results) { + backendSrv.post(postUrl, cmdData).then(function(results) { $scope.loading = false; if (external) { $scope.deleteUrl = results.deleteUrl; $scope.snapshotUrl = results.url; + $scope.saveExternalSnapshotRef(cmdData, results); } else { var baseUrl = $location.absUrl().replace($location.url(), ""); $scope.snapshotUrl = baseUrl + '/dashboard/snapshot/' + results.key; @@ -98,6 +98,14 @@ function (angular, _) { }); }; + $scope.saveExternalSnapshotRef = function(cmdData, results) { + // save external in local instance as well + cmdData.external = true; + cmdData.key = results.key; + cmdData.delete_key = results.delete_key; + backendSrv.post('/api/snapshots/', cmdData); + }; + }); }); From 7be7aeb70ad571ca7740c177a24a1a223df82bde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 26 Mar 2015 21:20:44 +0100 Subject: [PATCH 061/274] Fixed sql migration issue with dashboard snapshots --- pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go b/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go index 4386f07ffd1..4d83dfd5bc6 100644 --- a/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go +++ b/pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go @@ -21,9 +21,6 @@ func addDashboardSnapshotMigrations(mg *Migrator) { // add v4 mg.AddMigration("create dashboard_snapshot table v4", NewAddTableMigration(snapshotV4)) - - // drop v4 - addDropAllIndicesMigrations(mg, "v4", snapshotV4) mg.AddMigration("drop table dashboard_snapshot_v4 #1", NewDropTableMigration("dashboard_snapshot")) snapshotV5 := Table{ From d3db49ae3ead84c550d8fd0b23d666e1d5cef67d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Fri, 27 Mar 2015 06:47:58 +0100 Subject: [PATCH 062/274] Fixed snapshot sharing issue --- 
pkg/api/dashboard_snapshot.go | 2 +- src/app/features/dashboard/shareSnapshotCtrl.js | 2 +- src/app/services/backendSrv.js | 6 +++++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/api/dashboard_snapshot.go b/pkg/api/dashboard_snapshot.go index 1c641d10c1c..8de96ec9f21 100644 --- a/pkg/api/dashboard_snapshot.go +++ b/pkg/api/dashboard_snapshot.go @@ -15,7 +15,7 @@ import ( func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapshotCommand) { if cmd.External { // external snapshot ref requires key and delete key - if cmd.Key != "" && cmd.DeleteKey != "" { + if cmd.Key == "" || cmd.DeleteKey == "" { c.JsonApiErr(400, "Missing key and delete key for external snapshot", nil) return } diff --git a/src/app/features/dashboard/shareSnapshotCtrl.js b/src/app/features/dashboard/shareSnapshotCtrl.js index 240bd33488b..4006abfe4b5 100644 --- a/src/app/features/dashboard/shareSnapshotCtrl.js +++ b/src/app/features/dashboard/shareSnapshotCtrl.js @@ -102,7 +102,7 @@ function (angular, _) { // save external in local instance as well cmdData.external = true; cmdData.key = results.key; - cmdData.delete_key = results.delete_key; + cmdData.deleteKey = results.deleteKey; backendSrv.post('/api/snapshots/', cmdData); }; diff --git a/src/app/services/backendSrv.js b/src/app/services/backendSrv.js index 13565342333..004bf663e86 100644 --- a/src/app/services/backendSrv.js +++ b/src/app/services/backendSrv.js @@ -54,12 +54,16 @@ function (angular, _, config) { this.request = function(options) { var httpOptions = { - url: config.appSubUrl + options.url, + url: options.url, method: options.method, data: options.data, params: options.params, }; + if (httpOptions.url.indexOf('/') === 0) { + httpOptions.url = config.appSubUrl + httpOptions.url; + } + return $http(httpOptions).then(function(results) { if (options.method !== 'GET') { if (results && results.data.message) { From e646ae8be490915b5fcfd8d0824ccb19a61d5979 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Torkel=20=C3=96degaard?= Date: Fri, 27 Mar 2015 07:39:06 +0100 Subject: [PATCH 063/274] updated whats new doc --- docs/sources/guides/whats-new-in-v2.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/guides/whats-new-in-v2.md b/docs/sources/guides/whats-new-in-v2.md index e78a40d3bcf..bb5c4c15c7d 100644 --- a/docs/sources/guides/whats-new-in-v2.md +++ b/docs/sources/guides/whats-new-in-v2.md @@ -1,12 +1,12 @@ --- -page_title: Whats New in Grafana v2.0 -page_description: Changes and new features in Grafana v2.0 -page_keywords: grafana, changes, features, documentation +page_title: What's New in Grafana v2.0 +page_description: What's new in Grafana v2.0 +page_keywords: grafana, new, changes, features, documentation --- # What's New in Grafana v2.0 -This is a guide that descriptes some of changes and new features that can be found in Grafana v2.0. +This is a guide that describes some of changes and new features that can be found in Grafana v2.0. ## New dashboard top header From 1d64ba3b5d361064db9edd59e867a931ffb4df39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Fri, 27 Mar 2015 08:39:08 +0100 Subject: [PATCH 064/274] Small style update to submenu (template variables, annotation menu) --- src/app/partials/submenu.html | 7 +------ src/css/less/submenu.less | 4 ++-- src/css/less/tightform.less | 3 ++- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/src/app/partials/submenu.html b/src/app/partials/submenu.html index 416fb47558b..fcfb71f5d4f 100644 --- a/src/app/partials/submenu.html +++ b/src/app/partials/submenu.html @@ -3,9 +3,6 @@
      -
    • - VARIABLES: -
    • ${{variable.name}}: @@ -20,11 +17,9 @@
    From 4af1dcd54f26a564de74990f777c5e73f02477a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 31 Mar 2015 17:42:51 +0200 Subject: [PATCH 108/274] Updated defaults.ini, reverted change in previous commit --- conf/defaults.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/defaults.ini b/conf/defaults.ini index 26fa1b6f126..d328733abc9 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -16,7 +16,7 @@ router_logging = false ; the path relative to the binary where the static (html/js/css) files are placed static_root_path = public ; enable gzip -enable_gzip = true +enable_gzip = false ; https certs & key file cert_file = cert_key = From 7eb45e17992a42e0ec4855386562529e73b98056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 31 Mar 2015 19:18:41 +0200 Subject: [PATCH 109/274] MySQL session: fixed problem using mysql as session store, Fixes #1681 --- CHANGELOG.md | 2 + Godeps/Godeps.json | 2 +- .../macaron-contrib/session/.gitignore | 2 + .../macaron-contrib/session/README.md | 6 +- .../macaron-contrib/session/file.go | 40 +--- .../macaron-contrib/session/ledis/ledis.go | 153 ++++++++----- .../session/ledis/ledis.goconvey | 1 + .../session/ledis/ledis_test.go | 105 +++++++++ .../session/memcache/memcache.go | 210 ++++++++--------- .../session/memcache/memcache.goconvey | 1 + .../session/memcache/memcache_test.go | 107 +++++++++ .../macaron-contrib/session/memory.go | 36 +-- .../macaron-contrib/session/mysql/mysql.go | 148 ++++++------ .../session/mysql/mysql.goconvey | 1 + .../session/mysql/mysql_test.go | 138 ++++++++++++ .../macaron-contrib/session/nodb/nodb.go | 203 +++++++++++++++++ .../session/nodb/nodb.goconvey | 1 + .../macaron-contrib/session/nodb/nodb_test.go | 105 +++++++++ .../session/postgres/postgres.go | 196 ++++++++++++++++ .../session/postgres/postgres.goconvey | 1 + .../session/postgres/postgres_test.go | 138 ++++++++++++ .../session/postgres/postgresql.go | 211 
------------------ .../macaron-contrib/session/redis/redis.go | 199 +++++++++-------- .../session/redis/redis.goconvey | 1 + .../session/redis/redis_test.go | 107 +++++++++ .../macaron-contrib/session/session.go | 44 ++-- .../macaron-contrib/session/session_test.go | 2 +- .../macaron-contrib/session/utils.go | 30 +-- 28 files changed, 1529 insertions(+), 661 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/.gitignore create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.goconvey create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.goconvey create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.goconvey create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.goconvey create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.goconvey create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgresql.go create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.goconvey create mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c0ab7cde47..e21e94685d4 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ # 2.0.0-RC1 (unreleased) +**FIxes** +- [Issue #1681](https://github.com/grafana/grafana/issues/1681). MySQL session: fixed problem using mysql as session store - [Issue #1671](https://github.com/grafana/grafana/issues/1671). Data sources: Fixed issue with changing default data source (should not require full page load to take effect, now fixed) # 2.0.0-Beta1 (2015-03-30) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 857b045ddd8..ce0a9ea142e 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -47,7 +47,7 @@ }, { "ImportPath": "github.com/macaron-contrib/session", - "Rev": "65b8817c40cb5bdce08673a15fd2a648c2ba0e16" + "Rev": "31e841d95c7302b9ac456c830ea2d6dfcef4f84a" }, { "ImportPath": "github.com/mattn/go-sqlite3", diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/.gitignore b/Godeps/_workspace/src/github.com/macaron-contrib/session/.gitignore new file mode 100644 index 00000000000..9297dbcd7c4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/.gitignore @@ -0,0 +1,2 @@ +ledis/tmp.db +nodb/tmp.db \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/README.md b/Godeps/_workspace/src/github.com/macaron-contrib/session/README.md index 496ce64ce0d..01de811eacc 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/README.md +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/README.md @@ -1,7 +1,7 @@ session [![Build Status](https://drone.io/github.com/macaron-contrib/session/status.png)](https://drone.io/github.com/macaron-contrib/session/latest) [![](http://gocover.io/_badge/github.com/macaron-contrib/session)](http://gocover.io/github.com/macaron-contrib/session) ======= -Middleware session provides session management for [Macaron](https://github.com/Unknwon/macaron). 
It can use many session providers, including memory, file, Redis, Memcache, PostgreSQL, MySQL, Couchbase and Ledis. +Middleware session provides session management for [Macaron](https://github.com/Unknwon/macaron). It can use many session providers, including memory, file, Redis, Memcache, PostgreSQL, MySQL, Couchbase, Ledis and Nodb. ### Installation @@ -12,6 +12,10 @@ Middleware session provides session management for [Macaron](https://github.com/ - [API Reference](https://gowalker.org/github.com/macaron-contrib/session) - [Documentation](http://macaron.gogs.io/docs/middlewares/session) +## Credits + +This package is forked from [beego/session](https://github.com/astaxie/beego/tree/master/session) with reconstruction(over 80%). + ## License This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/file.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/file.go index 4dfb906ec7e..cab807d00bd 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/file.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/file.go @@ -28,17 +28,17 @@ import ( "github.com/Unknwon/com" ) -// FileSessionStore represents a file session store implementation. -type FileSessionStore struct { +// FileStore represents a file session store implementation. +type FileStore struct { p *FileProvider sid string lock sync.RWMutex data map[interface{}]interface{} } -// NewFileSessionStore creates and returns a file session store. -func NewFileSessionStore(p *FileProvider, sid string, kv map[interface{}]interface{}) *FileSessionStore { - return &FileSessionStore{ +// NewFileStore creates and returns a file session store. 
+func NewFileStore(p *FileProvider, sid string, kv map[interface{}]interface{}) *FileStore { + return &FileStore{ p: p, sid: sid, data: kv, @@ -46,7 +46,7 @@ func NewFileSessionStore(p *FileProvider, sid string, kv map[interface{}]interfa } // Set sets value to given key in session. -func (s *FileSessionStore) Set(key, val interface{}) error { +func (s *FileStore) Set(key, val interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -55,7 +55,7 @@ func (s *FileSessionStore) Set(key, val interface{}) error { } // Get gets value by given key in session. -func (s *FileSessionStore) Get(key interface{}) interface{} { +func (s *FileStore) Get(key interface{}) interface{} { s.lock.RLock() defer s.lock.RUnlock() @@ -63,7 +63,7 @@ func (s *FileSessionStore) Get(key interface{}) interface{} { } // Delete delete a key from session. -func (s *FileSessionStore) Delete(key interface{}) error { +func (s *FileStore) Delete(key interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -72,12 +72,12 @@ func (s *FileSessionStore) Delete(key interface{}) error { } // ID returns current session ID. -func (s *FileSessionStore) ID() string { +func (s *FileStore) ID() string { return s.sid } // Release releases resource and save data to provider. -func (s *FileSessionStore) Release() error { +func (s *FileStore) Release() error { data, err := EncodeGob(s.data) if err != nil { return err @@ -87,7 +87,7 @@ func (s *FileSessionStore) Release() error { } // Flush deletes all session data. -func (s *FileSessionStore) Flush() error { +func (s *FileStore) Flush() error { s.lock.Lock() defer s.lock.Unlock() @@ -97,7 +97,6 @@ func (s *FileSessionStore) Flush() error { // FileProvider represents a file session provider implementation. type FileProvider struct { - lock sync.RWMutex maxlifetime int64 rootPath string } @@ -115,9 +114,6 @@ func (p *FileProvider) filepath(sid string) string { // Read returns raw session store by session ID. 
func (p *FileProvider) Read(sid string) (_ RawStore, err error) { - p.lock.Lock() - defer p.lock.Unlock() - filename := p.filepath(sid) if err = os.MkdirAll(path.Dir(filename), os.ModePerm); err != nil { return nil, err @@ -151,22 +147,16 @@ func (p *FileProvider) Read(sid string) (_ RawStore, err error) { return nil, err } } - return NewFileSessionStore(p, sid, kv), nil + return NewFileStore(p, sid, kv), nil } // Exist returns true if session with given ID exists. func (p *FileProvider) Exist(sid string) bool { - p.lock.Lock() - defer p.lock.Unlock() - return com.IsFile(p.filepath(sid)) } // Destory deletes a session by session ID. func (p *FileProvider) Destory(sid string) error { - p.lock.Lock() - defer p.lock.Unlock() - return os.Remove(p.filepath(sid)) } @@ -201,12 +191,9 @@ func (p *FileProvider) regenerate(oldsid, sid string) (err error) { // Regenerate regenerates a session store from old session ID to new one. func (p *FileProvider) Regenerate(oldsid, sid string) (_ RawStore, err error) { - p.lock.Lock() if err := p.regenerate(oldsid, sid); err != nil { - p.lock.Unlock() return nil, err } - p.lock.Unlock() return p.Read(sid) } @@ -236,9 +223,6 @@ func (p *FileProvider) GC() { return } - p.lock.Lock() - defer p.lock.Unlock() - if err := filepath.Walk(p.rootPath, func(path string, fi os.FileInfo, err error) error { if err != nil { return err diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go index 7893769b525..afde7134cbd 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go @@ -16,26 +16,39 @@ package session import ( + "fmt" + "strings" "sync" + "github.com/Unknwon/com" "github.com/siddontang/ledisdb/config" "github.com/siddontang/ledisdb/ledis" + "gopkg.in/ini.v1" "github.com/macaron-contrib/session" ) -var c *ledis.DB +// LedisStore 
represents a ledis session store implementation. +type LedisStore struct { + c *ledis.DB + sid string + expire int64 + lock sync.RWMutex + data map[interface{}]interface{} +} -// LedisSessionStore represents a ledis session store implementation. -type LedisSessionStore struct { - sid string - lock sync.RWMutex - data map[interface{}]interface{} - maxlifetime int64 +// NewLedisStore creates and returns a ledis session store. +func NewLedisStore(c *ledis.DB, sid string, expire int64, kv map[interface{}]interface{}) *LedisStore { + return &LedisStore{ + c: c, + expire: expire, + sid: sid, + data: kv, + } } // Set sets value to given key in session. -func (s *LedisSessionStore) Set(key, val interface{}) error { +func (s *LedisStore) Set(key, val interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -44,7 +57,7 @@ func (s *LedisSessionStore) Set(key, val interface{}) error { } // Get gets value by given key in session. -func (s *LedisSessionStore) Get(key interface{}) interface{} { +func (s *LedisStore) Get(key interface{}) interface{} { s.lock.RLock() defer s.lock.RUnlock() @@ -52,7 +65,7 @@ func (s *LedisSessionStore) Get(key interface{}) interface{} { } // Delete delete a key from session. -func (s *LedisSessionStore) Delete(key interface{}) error { +func (s *LedisStore) Delete(key interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -61,25 +74,26 @@ func (s *LedisSessionStore) Delete(key interface{}) error { } // ID returns current session ID. -func (s *LedisSessionStore) ID() string { +func (s *LedisStore) ID() string { return s.sid } // Release releases resource and save data to provider. 
-func (s *LedisSessionStore) Release() error { +func (s *LedisStore) Release() error { data, err := session.EncodeGob(s.data) if err != nil { return err } - if err = c.Set([]byte(s.sid), data); err != nil { + + if err = s.c.Set([]byte(s.sid), data); err != nil { return err } - _, err = c.Expire([]byte(s.sid), s.maxlifetime) + _, err = s.c.Expire([]byte(s.sid), s.expire) return err } // Flush deletes all session data. -func (s *LedisSessionStore) Flush() error { +func (s *LedisStore) Flush() error { s.lock.Lock() defer s.lock.Unlock() @@ -89,30 +103,54 @@ func (s *LedisSessionStore) Flush() error { // LedisProvider represents a ledis session provider implementation. type LedisProvider struct { - maxlifetime int64 - savePath string + c *ledis.DB + expire int64 } -// Init initializes memory session provider. -func (p *LedisProvider) Init(maxlifetime int64, savePath string) error { - p.maxlifetime = maxlifetime - p.savePath = savePath - cfg := new(config.Config) - cfg.DataDir = p.savePath - var err error - nowLedis, err := ledis.Open(cfg) - c, err = nowLedis.Select(0) +// Init initializes ledis session provider. +// configs: data_dir=./app.db,db=0 +func (p *LedisProvider) Init(expire int64, configs string) error { + p.expire = expire + + cfg, err := ini.Load([]byte(strings.Replace(configs, ",", "\n", -1))) if err != nil { - println(err) - return nil + return err } - return nil + + db := 0 + opt := new(config.Config) + for k, v := range cfg.Section("").KeysHash() { + switch k { + case "data_dir": + opt.DataDir = v + case "db": + db = com.StrTo(v).MustInt() + default: + return fmt.Errorf("session/ledis: unsupported option '%s'", k) + } + } + + l, err := ledis.Open(opt) + if err != nil { + return fmt.Errorf("session/ledis: error opening db: %v", err) + } + p.c, err = l.Select(db) + return err } // Read returns raw session store by session ID. 
func (p *LedisProvider) Read(sid string) (session.RawStore, error) { - kvs, err := c.Get([]byte(sid)) + if !p.Exist(sid) { + if err := p.c.Set([]byte(sid), []byte("")); err != nil { + return nil, err + } + } + var kv map[interface{}]interface{} + kvs, err := p.c.Get([]byte(sid)) + if err != nil { + return nil, err + } if len(kvs) == 0 { kv = make(map[interface{}]interface{}) } else { @@ -121,41 +159,40 @@ func (p *LedisProvider) Read(sid string) (session.RawStore, error) { return nil, err } } - ls := &LedisSessionStore{sid: sid, data: kv, maxlifetime: p.maxlifetime} - return ls, nil + + return NewLedisStore(p.c, sid, p.expire, kv), nil } // Exist returns true if session with given ID exists. func (p *LedisProvider) Exist(sid string) bool { - count, _ := c.Exists([]byte(sid)) - if count == 0 { - return false - } else { - return true - } + count, err := p.c.Exists([]byte(sid)) + return err == nil && count > 0 } // Destory deletes a session by session ID. func (p *LedisProvider) Destory(sid string) error { - _, err := c.Del([]byte(sid)) + _, err := p.c.Del([]byte(sid)) return err } // Regenerate regenerates a session store from old session ID to new one. 
-func (p *LedisProvider) Regenerate(oldsid, sid string) (session.RawStore, error) { - count, _ := c.Exists([]byte(sid)) - if count == 0 { - // oldsid doesn't exists, set the new sid directly - // ignore error here, since if it return error - // the existed value will be 0 - c.Set([]byte(sid), []byte("")) - c.Expire([]byte(sid), p.maxlifetime) - } else { - data, _ := c.Get([]byte(oldsid)) - c.Set([]byte(sid), data) - c.Expire([]byte(sid), p.maxlifetime) +func (p *LedisProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { + if p.Exist(sid) { + return nil, fmt.Errorf("new sid '%s' already exists", sid) } - kvs, err := c.Get([]byte(sid)) + + kvs := make([]byte, 0) + if p.Exist(oldsid) { + if kvs, err = p.c.Get([]byte(oldsid)); err != nil { + return nil, err + } else if _, err = p.c.Del([]byte(oldsid)); err != nil { + return nil, err + } + } + if err = p.c.SetEX([]byte(sid), p.expire, kvs); err != nil { + return nil, err + } + var kv map[interface{}]interface{} if len(kvs) == 0 { kv = make(map[interface{}]interface{}) @@ -165,18 +202,20 @@ func (p *LedisProvider) Regenerate(oldsid, sid string) (session.RawStore, error) return nil, err } } - ls := &LedisSessionStore{sid: sid, data: kv, maxlifetime: p.maxlifetime} - return ls, nil + + return NewLedisStore(p.c, sid, p.expire, kv), nil } // Count counts and returns number of sessions. func (p *LedisProvider) Count() int { - // FIXME - return 0 + // FIXME: how come this library does not have DbSize() method? + return -1 } // GC calls GC to clean expired sessions. -func (p *LedisProvider) GC() {} +func (p *LedisProvider) GC() { + // FIXME: wtf??? 
+} func init() { session.Register("ledis", &LedisProvider{}) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.goconvey b/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.goconvey new file mode 100644 index 00000000000..8485e986e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go new file mode 100644 index 00000000000..dac42a364b7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go @@ -0,0 +1,105 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/Unknwon/macaron" + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/macaron-contrib/session" +) + +func Test_LedisProvider(t *testing.T) { + Convey("Test ledis session provider", t, func() { + opt := session.Options{ + Provider: "ledis", + ProviderConfig: "data_dir=./tmp.db", + } + + Convey("Basic operation", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + sess.Set("uname", "unknwon") + }) + m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := raw.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + }) + m.Get("/get", func(ctx *macaron.Context, sess session.Store) { + sid := sess.ID() + So(sid, ShouldNotBeEmpty) + + raw, err := sess.Read(sid) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Delete("uname"), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + cookie := resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/reg", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + cookie = resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + Convey("Regenrate empty session", func() { + m.Get("/empty", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/empty", nil) + So(err, ShouldBeNil) + 
req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") + m.ServeHTTP(resp, req) + }) + }) + }) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go index e06895202f0..b4fcdde62bd 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go @@ -16,6 +16,7 @@ package session import ( + "fmt" "strings" "sync" @@ -24,20 +25,35 @@ import ( "github.com/macaron-contrib/session" ) -var ( - client *memcache.Client -) +// MemcacheStore represents a memcache session store implementation. +type MemcacheStore struct { + c *memcache.Client + sid string + expire int32 + lock sync.RWMutex + data map[interface{}]interface{} +} -// MemcacheSessionStore represents a memcache session store implementation. -type MemcacheSessionStore struct { - sid string - lock sync.RWMutex - data map[interface{}]interface{} - maxlifetime int64 +// NewMemcacheStore creates and returns a memcache session store. +func NewMemcacheStore(c *memcache.Client, sid string, expire int32, kv map[interface{}]interface{}) *MemcacheStore { + return &MemcacheStore{ + c: c, + sid: sid, + expire: expire, + data: kv, + } +} + +func NewItem(sid string, data []byte, expire int32) *memcache.Item { + return &memcache.Item{ + Key: sid, + Value: data, + Expiration: expire, + } } // Set sets value to given key in session. -func (s *MemcacheSessionStore) Set(key, val interface{}) error { +func (s *MemcacheStore) Set(key, val interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -46,7 +62,7 @@ func (s *MemcacheSessionStore) Set(key, val interface{}) error { } // Get gets value by given key in session. 
-func (s *MemcacheSessionStore) Get(key interface{}) interface{} { +func (s *MemcacheStore) Get(key interface{}) interface{} { s.lock.RLock() defer s.lock.RUnlock() @@ -54,7 +70,7 @@ func (s *MemcacheSessionStore) Get(key interface{}) interface{} { } // Delete delete a key from session. -func (s *MemcacheSessionStore) Delete(key interface{}) error { +func (s *MemcacheStore) Delete(key interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -63,26 +79,22 @@ func (s *MemcacheSessionStore) Delete(key interface{}) error { } // ID returns current session ID. -func (s *MemcacheSessionStore) ID() string { +func (s *MemcacheStore) ID() string { return s.sid } // Release releases resource and save data to provider. -func (s *MemcacheSessionStore) Release() error { +func (s *MemcacheStore) Release() error { data, err := session.EncodeGob(s.data) if err != nil { return err } - return client.Set(&memcache.Item{ - Key: s.sid, - Value: data, - Expiration: int32(s.maxlifetime), - }) + return s.c.Set(NewItem(s.sid, data, s.expire)) } // Flush deletes all session data. -func (s *MemcacheSessionStore) Flush() error { +func (s *MemcacheStore) Flush() error { s.lock.Lock() defer s.lock.Unlock() @@ -90,41 +102,75 @@ func (s *MemcacheSessionStore) Flush() error { return nil } -// MemProvider represents a memcache session provider implementation. -type MemProvider struct { - maxlifetime int64 - conninfo []string - poolsize int - password string +// MemcacheProvider represents a memcache session provider implementation. +type MemcacheProvider struct { + c *memcache.Client + expire int32 } -// Init initializes memory session provider. -// connStrs can be multiple connection strings separate by ; -// e.g. 127.0.0.1:9090 -func (p *MemProvider) Init(maxlifetime int64, connStrs string) error { - p.maxlifetime = maxlifetime - p.conninfo = strings.Split(connStrs, ";") - client = memcache.New(p.conninfo...) 
- return nil -} - -func (p *MemProvider) connectInit() error { - client = memcache.New(p.conninfo...) +// Init initializes memcache session provider. +// connStrs: 127.0.0.1:9090;127.0.0.1:9091 +func (p *MemcacheProvider) Init(expire int64, connStrs string) error { + p.expire = int32(expire) + p.c = memcache.New(strings.Split(connStrs, ";")...) return nil } // Read returns raw session store by session ID. -func (p *MemProvider) Read(sid string) (session.RawStore, error) { - if client == nil { - if err := p.connectInit(); err != nil { +func (p *MemcacheProvider) Read(sid string) (session.RawStore, error) { + if !p.Exist(sid) { + if err := p.c.Set(NewItem(sid, []byte(""), p.expire)); err != nil { return nil, err } } - item, err := client.Get(sid) + var kv map[interface{}]interface{} + item, err := p.c.Get(sid) if err != nil { return nil, err } + if len(item.Value) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(item.Value) + if err != nil { + return nil, err + } + } + + return NewMemcacheStore(p.c, sid, p.expire, kv), nil +} + +// Exist returns true if session with given ID exists. +func (p *MemcacheProvider) Exist(sid string) bool { + _, err := p.c.Get(sid) + return err == nil +} + +// Destory deletes a session by session ID. +func (p *MemcacheProvider) Destory(sid string) error { + return p.c.Delete(sid) +} + +// Regenerate regenerates a session store from old session ID to new one. 
+func (p *MemcacheProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { + if p.Exist(sid) { + return nil, fmt.Errorf("new sid '%s' already exists", sid) + } + + item := NewItem(sid, []byte(""), p.expire) + if p.Exist(oldsid) { + item, err = p.c.Get(oldsid) + if err != nil { + return nil, err + } else if err = p.c.Delete(oldsid); err != nil { + return nil, err + } + item.Key = sid + } + if err = p.c.Set(item); err != nil { + return nil, err + } var kv map[interface{}]interface{} if len(item.Value) == 0 { @@ -136,86 +182,18 @@ func (p *MemProvider) Read(sid string) (session.RawStore, error) { } } - rs := &MemcacheSessionStore{sid: sid, data: kv, maxlifetime: p.maxlifetime} - return rs, nil -} - -// Exist returns true if session with given ID exists. -func (p *MemProvider) Exist(sid string) bool { - if client == nil { - if err := p.connectInit(); err != nil { - return false - } - } - - if item, err := client.Get(sid); err != nil || len(item.Value) == 0 { - return false - } else { - return true - } -} - -// Destory deletes a session by session ID. -func (p *MemProvider) Destory(sid string) error { - if client == nil { - if err := p.connectInit(); err != nil { - return err - } - } - - return client.Delete(sid) -} - -// Regenerate regenerates a session store from old session ID to new one. 
-func (p *MemProvider) Regenerate(oldsid, sid string) (session.RawStore, error) { - if client == nil { - if err := p.connectInit(); err != nil { - return nil, err - } - } - - var contain []byte - if item, err := client.Get(sid); err != nil || len(item.Value) == 0 { - // oldsid doesn't exists, set the new sid directly - // ignore error here, since if it return error - // the existed value will be 0 - item.Key = sid - item.Value = []byte("") - item.Expiration = int32(p.maxlifetime) - client.Set(item) - } else { - client.Delete(oldsid) - item.Key = sid - item.Value = item.Value - item.Expiration = int32(p.maxlifetime) - client.Set(item) - contain = item.Value - } - - var kv map[interface{}]interface{} - if len(contain) == 0 { - kv = make(map[interface{}]interface{}) - } else { - var err error - kv, err = session.DecodeGob(contain) - if err != nil { - return nil, err - } - } - - rs := &MemcacheSessionStore{sid: sid, data: kv, maxlifetime: p.maxlifetime} - return rs, nil + return NewMemcacheStore(p.c, sid, p.expire, kv), nil } // Count counts and returns number of sessions. -func (p *MemProvider) Count() int { - // FIXME - return 0 +func (p *MemcacheProvider) Count() int { + // FIXME: how come this library does not have Stats method? + return -1 } // GC calls GC to clean expired sessions. 
-func (p *MemProvider) GC() {} +func (p *MemcacheProvider) GC() {} func init() { - session.Register("memcache", &MemProvider{}) + session.Register("memcache", &MemcacheProvider{}) } diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.goconvey b/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.goconvey new file mode 100644 index 00000000000..8485e986e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go new file mode 100644 index 00000000000..beb272d52b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go @@ -0,0 +1,107 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/Unknwon/macaron" + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/macaron-contrib/session" +) + +func Test_MemcacheProvider(t *testing.T) { + Convey("Test memcache session provider", t, func() { + opt := session.Options{ + Provider: "memcache", + ProviderConfig: "127.0.0.1:9090", + } + + Convey("Basic operation", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + sess.Set("uname", "unknwon") + }) + m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := raw.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + }) + m.Get("/get", func(ctx *macaron.Context, sess session.Store) { + sid := sess.ID() + So(sid, ShouldNotBeEmpty) + + raw, err := sess.Read(sid) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Delete("uname"), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + cookie := resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/reg", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + cookie = resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + }) + + Convey("Regenrate empty session", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + }) + + resp := httptest.NewRecorder() + req, err := 
http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") + m.ServeHTTP(resp, req) + }) + }) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go index 040b8e697dc..e717635b951 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go @@ -22,17 +22,17 @@ import ( "time" ) -// MemSessionStore represents a in-memory session store implementation. -type MemSessionStore struct { +// MemStore represents a in-memory session store implementation. +type MemStore struct { sid string lock sync.RWMutex data map[interface{}]interface{} lastAccess time.Time } -// NewMemSessionStore creates and returns a memory session store. -func NewMemSessionStore(sid string) *MemSessionStore { - return &MemSessionStore{ +// NewMemStore creates and returns a memory session store. +func NewMemStore(sid string) *MemStore { + return &MemStore{ sid: sid, data: make(map[interface{}]interface{}), lastAccess: time.Now(), @@ -40,7 +40,7 @@ func NewMemSessionStore(sid string) *MemSessionStore { } // Set sets value to given key in session. -func (s *MemSessionStore) Set(key, val interface{}) error { +func (s *MemStore) Set(key, val interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -49,15 +49,15 @@ func (s *MemSessionStore) Set(key, val interface{}) error { } // Get gets value by given key in session. -func (s *MemSessionStore) Get(key interface{}) interface{} { +func (s *MemStore) Get(key interface{}) interface{} { s.lock.RLock() defer s.lock.RUnlock() return s.data[key] } -// Delete delete a key from session. -func (s *MemSessionStore) Delete(key interface{}) error { +// Delete deletes a key from session. 
+func (s *MemStore) Delete(key interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -66,17 +66,17 @@ func (s *MemSessionStore) Delete(key interface{}) error { } // ID returns current session ID. -func (s *MemSessionStore) ID() string { +func (s *MemStore) ID() string { return s.sid } // Release releases resource and save data to provider. -func (_ *MemSessionStore) Release() error { +func (_ *MemStore) Release() error { return nil } // Flush deletes all session data. -func (s *MemSessionStore) Flush() error { +func (s *MemStore) Flush() error { s.lock.Lock() defer s.lock.Unlock() @@ -105,7 +105,7 @@ func (p *MemProvider) update(sid string) error { defer p.lock.Unlock() if e, ok := p.data[sid]; ok { - e.Value.(*MemSessionStore).lastAccess = time.Now() + e.Value.(*MemStore).lastAccess = time.Now() p.list.MoveToFront(e) return nil } @@ -122,14 +122,14 @@ func (p *MemProvider) Read(sid string) (_ RawStore, err error) { if err = p.update(sid); err != nil { return nil, err } - return e.Value.(*MemSessionStore), nil + return e.Value.(*MemStore), nil } // Create a new session. 
p.lock.Lock() defer p.lock.Unlock() - s := NewMemSessionStore(sid) + s := NewMemStore(sid) p.data[sid] = p.list.PushBack(s) return s, nil } @@ -173,7 +173,7 @@ func (p *MemProvider) Regenerate(oldsid, sid string) (RawStore, error) { return nil, err } - s.(*MemSessionStore).sid = sid + s.(*MemStore).sid = sid p.data[sid] = p.list.PushBack(s) return s, nil } @@ -193,11 +193,11 @@ func (p *MemProvider) GC() { break } - if (e.Value.(*MemSessionStore).lastAccess.Unix() + p.maxLifetime) < time.Now().Unix() { + if (e.Value.(*MemStore).lastAccess.Unix() + p.maxLifetime) < time.Now().Unix() { p.lock.RUnlock() p.lock.Lock() p.list.Remove(e) - delete(p.data, e.Value.(*MemSessionStore).sid) + delete(p.data, e.Value.(*MemStore).sid) p.lock.Unlock() p.lock.RLock() } else { diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go index 908de4f1b52..7997e03c0d8 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go @@ -17,6 +17,8 @@ package session import ( "database/sql" + "fmt" + "log" "sync" "time" @@ -25,16 +27,25 @@ import ( "github.com/macaron-contrib/session" ) -// MysqlSessionStore represents a mysql session store implementation. -type MysqlSessionStore struct { +// MysqlStore represents a mysql session store implementation. +type MysqlStore struct { c *sql.DB sid string lock sync.RWMutex data map[interface{}]interface{} } +// NewMysqlStore creates and returns a mysql session store. +func NewMysqlStore(c *sql.DB, sid string, kv map[interface{}]interface{}) *MysqlStore { + return &MysqlStore{ + c: c, + sid: sid, + data: kv, + } +} + // Set sets value to given key in session. 
-func (s *MysqlSessionStore) Set(key, val interface{}) error { +func (s *MysqlStore) Set(key, val interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -43,7 +54,7 @@ func (s *MysqlSessionStore) Set(key, val interface{}) error { } // Get gets value by given key in session. -func (s *MysqlSessionStore) Get(key interface{}) interface{} { +func (s *MysqlStore) Get(key interface{}) interface{} { s.lock.RLock() defer s.lock.RUnlock() @@ -51,7 +62,7 @@ func (s *MysqlSessionStore) Get(key interface{}) interface{} { } // Delete delete a key from session. -func (s *MysqlSessionStore) Delete(key interface{}) error { +func (s *MysqlStore) Delete(key interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -60,24 +71,24 @@ func (s *MysqlSessionStore) Delete(key interface{}) error { } // ID returns current session ID. -func (s *MysqlSessionStore) ID() string { +func (s *MysqlStore) ID() string { return s.sid } // Release releases resource and save data to provider. -func (s *MysqlSessionStore) Release() error { - defer s.c.Close() +func (s *MysqlStore) Release() error { data, err := session.EncodeGob(s.data) if err != nil { return err } - _, err = s.c.Exec("UPDATE session set `session_data`=?, `session_expiry`=? where session_key=?", + + _, err = s.c.Exec("UPDATE session SET data=?, expiry=? WHERE `key`=?", data, time.Now().Unix(), s.sid) return err } // Flush deletes all session data. -func (s *MysqlSessionStore) Flush() error { +func (s *MysqlStore) Flush() error { s.lock.Lock() defer s.lock.Unlock() @@ -87,113 +98,96 @@ func (s *MysqlSessionStore) Flush() error { // MysqlProvider represents a mysql session provider implementation. type MysqlProvider struct { - maxlifetime int64 - connStr string + c *sql.DB + expire int64 } -func (p *MysqlProvider) connectInit() *sql.DB { - db, e := sql.Open("mysql", p.connStr) - if e != nil { - return nil +// Init initializes mysql session provider. 
+// connStr: username:password@protocol(address)/dbname?param=value +func (p *MysqlProvider) Init(expire int64, connStr string) (err error) { + p.expire = expire + + p.c, err = sql.Open("mysql", connStr) + if err != nil { + return err } - return db -} - -// Init initializes memory session provider. -func (p *MysqlProvider) Init(maxlifetime int64, connStr string) error { - p.maxlifetime = maxlifetime - p.connStr = connStr - return nil + return p.c.Ping() } // Read returns raw session store by session ID. func (p *MysqlProvider) Read(sid string) (session.RawStore, error) { - c := p.connectInit() - row := c.QueryRow("select session_data from session where session_key=?", sid) - var sessiondata []byte - err := row.Scan(&sessiondata) + var data []byte + err := p.c.QueryRow("SELECT data FROM session WHERE `key`=?", sid).Scan(&data) if err == sql.ErrNoRows { - c.Exec("insert into session(`session_key`,`session_data`,`session_expiry`) values(?,?,?)", + _, err = p.c.Exec("INSERT INTO session(`key`,data,expiry) VALUES(?,?,?)", sid, "", time.Now().Unix()) } + if err != nil { + return nil, err + } + var kv map[interface{}]interface{} - if len(sessiondata) == 0 { + if len(data) == 0 { kv = make(map[interface{}]interface{}) } else { - kv, err = session.DecodeGob(sessiondata) + kv, err = session.DecodeGob(data) if err != nil { return nil, err } } - rs := &MysqlSessionStore{c: c, sid: sid, data: kv} - return rs, nil + + return NewMysqlStore(p.c, sid, kv), nil } // Exist returns true if session with given ID exists. 
func (p *MysqlProvider) Exist(sid string) bool { - c := p.connectInit() - defer c.Close() - - row := c.QueryRow("select session_data from session where session_key=?", sid) - var sessiondata []byte - err := row.Scan(&sessiondata) - if err == sql.ErrNoRows { - return false - } else { - return true + var data []byte + err := p.c.QueryRow("SELECT data FROM session WHERE `key`=?", sid).Scan(&data) + if err != nil && err != sql.ErrNoRows { + panic("session/mysql: error checking existence: " + err.Error()) } + return err != sql.ErrNoRows } // Destory deletes a session by session ID. -func (p *MysqlProvider) Destory(sid string) (err error) { - c := p.connectInit() - if _, err = c.Exec("DELETE FROM session where session_key=?", sid); err != nil { - return err - } - return c.Close() +func (p *MysqlProvider) Destory(sid string) error { + _, err := p.c.Exec("DELETE FROM session WHERE `key`=?", sid) + return err } // Regenerate regenerates a session store from old session ID to new one. -func (p *MysqlProvider) Regenerate(oldsid, sid string) (session.RawStore, error) { - c := p.connectInit() - row := c.QueryRow("select session_data from session where session_key=?", oldsid) - var sessiondata []byte - err := row.Scan(&sessiondata) - if err == sql.ErrNoRows { - c.Exec("insert into session(`session_key`,`session_data`,`session_expiry`) values(?,?,?)", oldsid, "", time.Now().Unix()) +func (p *MysqlProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { + if p.Exist(sid) { + return nil, fmt.Errorf("new sid '%s' already exists", sid) } - c.Exec("update session set `session_key`=? 
where session_key=?", sid, oldsid) - var kv map[interface{}]interface{} - if len(sessiondata) == 0 { - kv = make(map[interface{}]interface{}) - } else { - kv, err = session.DecodeGob(sessiondata) - if err != nil { + + if !p.Exist(oldsid) { + if _, err = p.c.Exec("INSERT INTO session(`key`,data,expiry) VALUES(?,?,?)", + oldsid, "", time.Now().Unix()); err != nil { return nil, err } } - rs := &MysqlSessionStore{c: c, sid: sid, data: kv} - return rs, nil + + if _, err = p.c.Exec("UPDATE session SET `key`=? WHERE `key`=?", sid, oldsid); err != nil { + return nil, err + } + + return p.Read(sid) } // Count counts and returns number of sessions. -func (p *MysqlProvider) Count() int { - c := p.connectInit() - defer c.Close() - - var total int - err := c.QueryRow("SELECT count(*) as num from session").Scan(&total) - if err != nil { - return 0 +func (p *MysqlProvider) Count() (total int) { + if err := p.c.QueryRow("SELECT COUNT(*) AS NUM FROM session").Scan(&total); err != nil { + panic("session/mysql: error counting records: " + err.Error()) } return total } // GC calls GC to clean expired sessions. 
-func (mp *MysqlProvider) GC() { - c := mp.connectInit() - c.Exec("DELETE from session where session_expiry < ?", time.Now().Unix()-mp.maxlifetime) - c.Close() +func (p *MysqlProvider) GC() { + if _, err := p.c.Exec("DELETE FROM session WHERE UNIX_TIMESTAMP(NOW()) - expiry > ?", p.expire); err != nil { + log.Printf("session/mysql: error garbage collecting: %v", err) + } } func init() { diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.goconvey b/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.goconvey new file mode 100644 index 00000000000..8485e986e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go new file mode 100644 index 00000000000..15b3996a228 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go @@ -0,0 +1,138 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Unknwon/macaron" + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/macaron-contrib/session" +) + +func Test_MysqlProvider(t *testing.T) { + Convey("Test mysql session provider", t, func() { + opt := session.Options{ + Provider: "mysql", + ProviderConfig: "root:@tcp(localhost:3306)/macaron?charset=utf8", + } + + Convey("Basic operation", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + sess.Set("uname", "unknwon") + }) + m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := raw.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + }) + m.Get("/get", func(ctx *macaron.Context, sess session.Store) { + sid := sess.ID() + So(sid, ShouldNotBeEmpty) + + raw, err := sess.Read(sid) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + So(raw.Release(), ShouldBeNil) + + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Delete("uname"), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + cookie := resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/reg", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + cookie = resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + }) + + Convey("Regenrate empty session", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + 
So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf48; Path=/;") + m.ServeHTTP(resp, req) + }) + + Convey("GC session", func() { + m := macaron.New() + opt2 := opt + opt2.Gclifetime = 1 + m.Use(session.Sessioner(opt2)) + + m.Get("/", func(sess session.Store) { + sess.Set("uname", "unknwon") + So(sess.ID(), ShouldNotBeEmpty) + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Flush(), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + time.Sleep(2 * time.Second) + sess.GC() + So(sess.Count(), ShouldEqual, 0) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + }) + }) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go new file mode 100644 index 00000000000..7f017bf0457 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go @@ -0,0 +1,203 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "fmt" + "sync" + + "github.com/lunny/nodb" + "github.com/lunny/nodb/config" + + "github.com/macaron-contrib/session" +) + +// NodbStore represents a nodb session store implementation. 
+type NodbStore struct { + c *nodb.DB + sid string + expire int64 + lock sync.RWMutex + data map[interface{}]interface{} +} + +// NewNodbStore creates and returns a ledis session store. +func NewNodbStore(c *nodb.DB, sid string, expire int64, kv map[interface{}]interface{}) *NodbStore { + return &NodbStore{ + c: c, + expire: expire, + sid: sid, + data: kv, + } +} + +// Set sets value to given key in session. +func (s *NodbStore) Set(key, val interface{}) error { + s.lock.Lock() + defer s.lock.Unlock() + + s.data[key] = val + return nil +} + +// Get gets value by given key in session. +func (s *NodbStore) Get(key interface{}) interface{} { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.data[key] +} + +// Delete delete a key from session. +func (s *NodbStore) Delete(key interface{}) error { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.data, key) + return nil +} + +// ID returns current session ID. +func (s *NodbStore) ID() string { + return s.sid +} + +// Release releases resource and save data to provider. +func (s *NodbStore) Release() error { + data, err := session.EncodeGob(s.data) + if err != nil { + return err + } + + if err = s.c.Set([]byte(s.sid), data); err != nil { + return err + } + _, err = s.c.Expire([]byte(s.sid), s.expire) + return err +} + +// Flush deletes all session data. +func (s *NodbStore) Flush() error { + s.lock.Lock() + defer s.lock.Unlock() + + s.data = make(map[interface{}]interface{}) + return nil +} + +// NodbProvider represents a ledis session provider implementation. +type NodbProvider struct { + c *nodb.DB + expire int64 +} + +// Init initializes nodb session provider. +func (p *NodbProvider) Init(expire int64, configs string) error { + p.expire = expire + + cfg := new(config.Config) + cfg.DataDir = configs + dbs, err := nodb.Open(cfg) + if err != nil { + return fmt.Errorf("session/nodb: error opening db: %v", err) + } + + p.c, err = dbs.Select(0) + return err +} + +// Read returns raw session store by session ID. 
+func (p *NodbProvider) Read(sid string) (session.RawStore, error) { + if !p.Exist(sid) { + if err := p.c.Set([]byte(sid), []byte("")); err != nil { + return nil, err + } + } + + var kv map[interface{}]interface{} + kvs, err := p.c.Get([]byte(sid)) + if err != nil { + return nil, err + } + if len(kvs) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(kvs) + if err != nil { + return nil, err + } + } + + return NewNodbStore(p.c, sid, p.expire, kv), nil +} + +// Exist returns true if session with given ID exists. +func (p *NodbProvider) Exist(sid string) bool { + count, err := p.c.Exists([]byte(sid)) + return err == nil && count > 0 +} + +// Destory deletes a session by session ID. +func (p *NodbProvider) Destory(sid string) error { + _, err := p.c.Del([]byte(sid)) + return err +} + +// Regenerate regenerates a session store from old session ID to new one. +func (p *NodbProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { + if p.Exist(sid) { + return nil, fmt.Errorf("new sid '%s' already exists", sid) + } + + kvs := make([]byte, 0) + if p.Exist(oldsid) { + if kvs, err = p.c.Get([]byte(oldsid)); err != nil { + return nil, err + } else if _, err = p.c.Del([]byte(oldsid)); err != nil { + return nil, err + } + } + + if err = p.c.Set([]byte(sid), kvs); err != nil { + return nil, err + } else if _, err = p.c.Expire([]byte(sid), p.expire); err != nil { + return nil, err + } + + var kv map[interface{}]interface{} + if len(kvs) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob([]byte(kvs)) + if err != nil { + return nil, err + } + } + + return NewNodbStore(p.c, sid, p.expire, kv), nil +} + +// Count counts and returns number of sessions. +func (p *NodbProvider) Count() int { + // FIXME: how come this library does not have DbSize() method? + return -1 +} + +// GC calls GC to clean expired sessions. 
+func (p *NodbProvider) GC() {} + +func init() { + session.Register("nodb", &NodbProvider{}) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.goconvey b/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.goconvey new file mode 100644 index 00000000000..8485e986e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go new file mode 100644 index 00000000000..c86ba98ded5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go @@ -0,0 +1,105 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/Unknwon/macaron" + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/macaron-contrib/session" +) + +func Test_LedisProvider(t *testing.T) { + Convey("Test nodb session provider", t, func() { + opt := session.Options{ + Provider: "nodb", + ProviderConfig: "./tmp.db", + } + + Convey("Basic operation", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + sess.Set("uname", "unknwon") + }) + m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := raw.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + }) + m.Get("/get", func(ctx *macaron.Context, sess session.Store) { + sid := sess.ID() + So(sid, ShouldNotBeEmpty) + + raw, err := sess.Read(sid) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Delete("uname"), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + cookie := resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/reg", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + cookie = resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + Convey("Regenrate empty session", func() { + m.Get("/empty", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/empty", nil) + So(err, ShouldBeNil) + 
req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") + m.ServeHTTP(resp, req) + }) + }) + }) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go new file mode 100644 index 00000000000..5cb4c82ea84 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go @@ -0,0 +1,196 @@ +// Copyright 2013 Beego Authors +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "database/sql" + "fmt" + "log" + "sync" + "time" + + _ "github.com/lib/pq" + + "github.com/macaron-contrib/session" +) + +// PostgresStore represents a postgres session store implementation. +type PostgresStore struct { + c *sql.DB + sid string + lock sync.RWMutex + data map[interface{}]interface{} +} + +// NewPostgresStore creates and returns a postgres session store. +func NewPostgresStore(c *sql.DB, sid string, kv map[interface{}]interface{}) *PostgresStore { + return &PostgresStore{ + c: c, + sid: sid, + data: kv, + } +} + +// Set sets value to given key in session. +func (s *PostgresStore) Set(key, value interface{}) error { + s.lock.Lock() + defer s.lock.Unlock() + + s.data[key] = value + return nil +} + +// Get gets value by given key in session. 
+func (s *PostgresStore) Get(key interface{}) interface{} { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.data[key] +} + +// Delete delete a key from session. +func (s *PostgresStore) Delete(key interface{}) error { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.data, key) + return nil +} + +// ID returns current session ID. +func (s *PostgresStore) ID() string { + return s.sid +} + +// save postgres session values to database. +// must call this method to save values to database. +func (s *PostgresStore) Release() error { + data, err := session.EncodeGob(s.data) + if err != nil { + return err + } + + _, err = s.c.Exec("UPDATE session SET data=$1, expiry=$2 WHERE key=$3", + data, time.Now().Unix(), s.sid) + return err +} + +// Flush deletes all session data. +func (s *PostgresStore) Flush() error { + s.lock.Lock() + defer s.lock.Unlock() + + s.data = make(map[interface{}]interface{}) + return nil +} + +// PostgresProvider represents a postgres session provider implementation. +type PostgresProvider struct { + c *sql.DB + maxlifetime int64 +} + +// Init initializes postgres session provider. +// connStr: user=a password=b host=localhost port=5432 dbname=c sslmode=disable +func (p *PostgresProvider) Init(maxlifetime int64, connStr string) (err error) { + p.maxlifetime = maxlifetime + + p.c, err = sql.Open("postgres", connStr) + if err != nil { + return err + } + return p.c.Ping() +} + +// Read returns raw session store by session ID. 
+func (p *PostgresProvider) Read(sid string) (session.RawStore, error) { + var data []byte + err := p.c.QueryRow("SELECT data FROM session WHERE key=$1", sid).Scan(&data) + if err == sql.ErrNoRows { + _, err = p.c.Exec("INSERT INTO session(key,data,expiry) VALUES($1,$2,$3)", + sid, "", time.Now().Unix()) + } + if err != nil { + return nil, err + } + + var kv map[interface{}]interface{} + if len(data) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(data) + if err != nil { + return nil, err + } + } + + return NewPostgresStore(p.c, sid, kv), nil +} + +// Exist returns true if session with given ID exists. +func (p *PostgresProvider) Exist(sid string) bool { + var data []byte + err := p.c.QueryRow("SELECT data FROM session WHERE key=$1", sid).Scan(&data) + if err != nil && err != sql.ErrNoRows { + panic("session/postgres: error checking existence: " + err.Error()) + } + return err != sql.ErrNoRows +} + +// Destory deletes a session by session ID. +func (p *PostgresProvider) Destory(sid string) error { + _, err := p.c.Exec("DELETE FROM session WHERE key=$1", sid) + return err +} + +// Regenerate regenerates a session store from old session ID to new one. +func (p *PostgresProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { + if p.Exist(sid) { + return nil, fmt.Errorf("new sid '%s' already exists", sid) + } + + if !p.Exist(oldsid) { + if _, err = p.c.Exec("INSERT INTO session(key,data,expiry) VALUES($1,$2,$3)", + oldsid, "", time.Now().Unix()); err != nil { + return nil, err + } + } + + if _, err = p.c.Exec("UPDATE session SET key=$1 WHERE key=$2", sid, oldsid); err != nil { + return nil, err + } + + return p.Read(sid) +} + +// Count counts and returns number of sessions. 
+func (p *PostgresProvider) Count() (total int) { + if err := p.c.QueryRow("SELECT COUNT(*) AS NUM FROM session").Scan(&total); err != nil { + panic("session/postgres: error counting records: " + err.Error()) + } + return total +} + +// GC calls GC to clean expired sessions. +func (p *PostgresProvider) GC() { + if _, err := p.c.Exec("DELETE FROM session WHERE EXTRACT(EPOCH FROM NOW()) - expiry > $1", p.maxlifetime); err != nil { + log.Printf("session/postgres: error garbage collecting: %v", err) + } +} + +func init() { + session.Register("postgres", &PostgresProvider{}) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.goconvey b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.goconvey new file mode 100644 index 00000000000..8485e986e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go new file mode 100644 index 00000000000..ea212c729f6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go @@ -0,0 +1,138 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package session + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Unknwon/macaron" + . "github.com/smartystreets/goconvey/convey" + + "github.com/macaron-contrib/session" +) + +func Test_PostgresProvider(t *testing.T) { + Convey("Test postgres session provider", t, func() { + opt := session.Options{ + Provider: "postgres", + ProviderConfig: "user=jiahuachen dbname=macaron port=5432 sslmode=disable", + } + + Convey("Basic operation", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + sess.Set("uname", "unknwon") + }) + m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := raw.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + }) + m.Get("/get", func(ctx *macaron.Context, sess session.Store) { + sid := sess.ID() + So(sid, ShouldNotBeEmpty) + + raw, err := sess.Read(sid) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + So(raw.Release(), ShouldBeNil) + + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Delete("uname"), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + cookie := resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/reg", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + cookie = resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + }) + + Convey("Regenrate empty session", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + m.Get("/", 
func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf48; Path=/;") + m.ServeHTTP(resp, req) + }) + + Convey("GC session", func() { + m := macaron.New() + opt2 := opt + opt2.Gclifetime = 1 + m.Use(session.Sessioner(opt2)) + + m.Get("/", func(sess session.Store) { + sess.Set("uname", "unknwon") + So(sess.ID(), ShouldNotBeEmpty) + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Flush(), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + time.Sleep(2 * time.Second) + sess.GC() + So(sess.Count(), ShouldEqual, 0) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + }) + }) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgresql.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgresql.go deleted file mode 100644 index bfe87c69ee0..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgresql.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2013 Beego Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package session - -import ( - "database/sql" - "sync" - "time" - - _ "github.com/lib/pq" - - "github.com/macaron-contrib/session" -) - -// PostgresqlSessionStore represents a postgresql session store implementation. -type PostgresqlSessionStore struct { - c *sql.DB - sid string - lock sync.RWMutex - data map[interface{}]interface{} -} - -// Set sets value to given key in session. -func (s *PostgresqlSessionStore) Set(key, value interface{}) error { - s.lock.Lock() - defer s.lock.Unlock() - - s.data[key] = value - return nil -} - -// Get gets value by given key in session. -func (s *PostgresqlSessionStore) Get(key interface{}) interface{} { - s.lock.RLock() - defer s.lock.RUnlock() - - return s.data[key] -} - -// Delete delete a key from session. -func (s *PostgresqlSessionStore) Delete(key interface{}) error { - s.lock.Lock() - defer s.lock.Unlock() - - delete(s.data, key) - return nil -} - -// ID returns current session ID. -func (s *PostgresqlSessionStore) ID() string { - return s.sid -} - -// save postgresql session values to database. -// must call this method to save values to database. -func (s *PostgresqlSessionStore) Release() error { - defer s.c.Close() - - data, err := session.EncodeGob(s.data) - if err != nil { - return err - } - - _, err = s.c.Exec("UPDATE session set session_data=$1, session_expiry=$2 where session_key=$3", - data, time.Now().Format(time.RFC3339), s.sid) - return err -} - -// Flush deletes all session data. -func (s *PostgresqlSessionStore) Flush() error { - s.lock.Lock() - defer s.lock.Unlock() - - s.data = make(map[interface{}]interface{}) - return nil -} - -// PostgresqlProvider represents a postgresql session provider implementation. -type PostgresqlProvider struct { - maxlifetime int64 - connStr string -} - -func (p *PostgresqlProvider) connectInit() *sql.DB { - db, e := sql.Open("postgres", p.connStr) - if e != nil { - return nil - } - return db -} - -// Init initializes memory session provider. 
-func (p *PostgresqlProvider) Init(maxlifetime int64, connStr string) error { - p.maxlifetime = maxlifetime - p.connStr = connStr - return nil -} - -// Read returns raw session store by session ID. -func (p *PostgresqlProvider) Read(sid string) (session.RawStore, error) { - c := p.connectInit() - row := c.QueryRow("select session_data from session where session_key=$1", sid) - var sessiondata []byte - err := row.Scan(&sessiondata) - if err == sql.ErrNoRows { - _, err = c.Exec("insert into session(session_key,session_data,session_expiry) values($1,$2,$3)", - sid, "", time.Now().Format(time.RFC3339)) - - if err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - - var kv map[interface{}]interface{} - if len(sessiondata) == 0 { - kv = make(map[interface{}]interface{}) - } else { - kv, err = session.DecodeGob(sessiondata) - if err != nil { - return nil, err - } - } - rs := &PostgresqlSessionStore{c: c, sid: sid, data: kv} - return rs, nil -} - -// Exist returns true if session with given ID exists. -func (p *PostgresqlProvider) Exist(sid string) bool { - c := p.connectInit() - defer c.Close() - row := c.QueryRow("select session_data from session where session_key=$1", sid) - var sessiondata []byte - err := row.Scan(&sessiondata) - - if err == sql.ErrNoRows { - return false - } else { - return true - } -} - -// Destory deletes a session by session ID. -func (p *PostgresqlProvider) Destory(sid string) (err error) { - c := p.connectInit() - if _, err = c.Exec("DELETE FROM session where session_key=$1", sid); err != nil { - return err - } - return c.Close() -} - -// Regenerate regenerates a session store from old session ID to new one. 
-func (p *PostgresqlProvider) Regenerate(oldsid, sid string) (session.RawStore, error) { - c := p.connectInit() - row := c.QueryRow("select session_data from session where session_key=$1", oldsid) - var sessiondata []byte - err := row.Scan(&sessiondata) - if err == sql.ErrNoRows { - c.Exec("insert into session(session_key,session_data,session_expiry) values($1,$2,$3)", - oldsid, "", time.Now().Format(time.RFC3339)) - } - c.Exec("update session set session_key=$1 where session_key=$2", sid, oldsid) - var kv map[interface{}]interface{} - if len(sessiondata) == 0 { - kv = make(map[interface{}]interface{}) - } else { - kv, err = session.DecodeGob(sessiondata) - if err != nil { - return nil, err - } - } - rs := &PostgresqlSessionStore{c: c, sid: sid, data: kv} - return rs, nil -} - -// Count counts and returns number of sessions. -func (p *PostgresqlProvider) Count() int { - c := p.connectInit() - defer c.Close() - var total int - err := c.QueryRow("SELECT count(*) as num from session").Scan(&total) - if err != nil { - return 0 - } - return total -} - -// GC calls GC to clean expired sessions. 
-func (mp *PostgresqlProvider) GC() { - c := mp.connectInit() - c.Exec("DELETE from session where EXTRACT(EPOCH FROM (current_timestamp - session_expiry)) > $1", mp.maxlifetime) - c.Close() -} - -func init() { - session.Register("postgresql", &PostgresqlProvider{}) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go index 211866317f3..6d6a2c464c8 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go @@ -16,31 +16,39 @@ package session import ( - "strconv" + "fmt" "strings" "sync" + "time" - "github.com/beego/redigo/redis" + "github.com/Unknwon/com" + "gopkg.in/ini.v1" + "gopkg.in/redis.v2" "github.com/macaron-contrib/session" ) -// redis max pool size -var MAX_POOL_SIZE = 100 +// RedisStore represents a redis session store implementation. +type RedisStore struct { + c *redis.Client + sid string + duration time.Duration + lock sync.RWMutex + data map[interface{}]interface{} +} -var redisPool chan redis.Conn - -// RedisSessionStore represents a redis session store implementation. -type RedisSessionStore struct { - p *redis.Pool - sid string - lock sync.RWMutex - data map[interface{}]interface{} - maxlifetime int64 +// NewRedisStore creates and returns a redis session store. +func NewRedisStore(c *redis.Client, sid string, dur time.Duration, kv map[interface{}]interface{}) *RedisStore { + return &RedisStore{ + c: c, + sid: sid, + duration: dur, + data: kv, + } } // Set sets value to given key in session. -func (s *RedisSessionStore) Set(key, val interface{}) error { +func (s *RedisStore) Set(key, val interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -49,7 +57,7 @@ func (s *RedisSessionStore) Set(key, val interface{}) error { } // Get gets value by given key in session. 
-func (s *RedisSessionStore) Get(key interface{}) interface{} { +func (s *RedisStore) Get(key interface{}) interface{} { s.lock.RLock() defer s.lock.RUnlock() @@ -57,7 +65,7 @@ func (s *RedisSessionStore) Get(key interface{}) interface{} { } // Delete delete a key from session. -func (s *RedisSessionStore) Delete(key interface{}) error { +func (s *RedisStore) Delete(key interface{}) error { s.lock.Lock() defer s.lock.Unlock() @@ -66,26 +74,22 @@ func (s *RedisSessionStore) Delete(key interface{}) error { } // ID returns current session ID. -func (s *RedisSessionStore) ID() string { +func (s *RedisStore) ID() string { return s.sid } // Release releases resource and save data to provider. -func (s *RedisSessionStore) Release() error { - c := s.p.Get() - defer c.Close() - +func (s *RedisStore) Release() error { data, err := session.EncodeGob(s.data) if err != nil { return err } - _, err = c.Do("SETEX", s.sid, s.maxlifetime, string(data)) - return err + return s.c.SetEx(s.sid, s.duration, string(data)).Err() } // Flush deletes all session data. -func (s *RedisSessionStore) Flush() error { +func (s *RedisStore) Flush() error { s.lock.Lock() defer s.lock.Unlock() @@ -95,59 +99,65 @@ func (s *RedisSessionStore) Flush() error { // RedisProvider represents a redis session provider implementation. type RedisProvider struct { - maxlifetime int64 - connAddr string - poolsize int - password string - poollist *redis.Pool + c *redis.Client + duration time.Duration } -// Init initializes memory session provider. -// connStr: ,, -// e.g. 127.0.0.1:6379,100,macaron -func (p *RedisProvider) Init(maxlifetime int64, connStr string) error { - p.maxlifetime = maxlifetime - configs := strings.Split(connStr, ",") - if len(configs) > 0 { - p.connAddr = configs[0] +// Init initializes redis session provider. 
+// configs: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180 +func (p *RedisProvider) Init(maxlifetime int64, configs string) (err error) { + p.duration, err = time.ParseDuration(fmt.Sprintf("%ds", maxlifetime)) + if err != nil { + return err } - if len(configs) > 1 { - poolsize, err := strconv.Atoi(configs[1]) - if err != nil || poolsize <= 0 { - p.poolsize = MAX_POOL_SIZE - } else { - p.poolsize = poolsize - } - } else { - p.poolsize = MAX_POOL_SIZE - } - if len(configs) > 2 { - p.password = configs[2] - } - p.poollist = redis.NewPool(func() (redis.Conn, error) { - c, err := redis.Dial("tcp", p.connAddr) - if err != nil { - return nil, err - } - if p.password != "" { - if _, err := c.Do("AUTH", p.password); err != nil { - c.Close() - return nil, err - } - } - return c, err - }, p.poolsize) - return p.poollist.Get().Err() + cfg, err := ini.Load([]byte(strings.Replace(configs, ",", "\n", -1))) + if err != nil { + return err + } + + opt := &redis.Options{ + Network: "tcp", + } + for k, v := range cfg.Section("").KeysHash() { + switch k { + case "network": + opt.Network = v + case "addr": + opt.Addr = v + case "password": + opt.Password = v + case "db": + opt.DB = com.StrTo(v).MustInt64() + case "pool_size": + opt.PoolSize = com.StrTo(v).MustInt() + case "idle_timeout": + opt.IdleTimeout, err = time.ParseDuration(v + "s") + if err != nil { + return fmt.Errorf("error parsing idle timeout: %v", err) + } + default: + return fmt.Errorf("session/redis: unsupported option '%s'", k) + } + } + + p.c = redis.NewClient(opt) + return p.c.Ping().Err() } // Read returns raw session store by session ID. 
func (p *RedisProvider) Read(sid string) (session.RawStore, error) { - c := p.poollist.Get() - defer c.Close() + if !p.Exist(sid) { + if err := p.c.Set(sid, "").Err(); err != nil { + return nil, err + } + } - kvs, err := redis.String(c.Do("GET", sid)) var kv map[interface{}]interface{} + kvs, err := p.c.Get(sid).Result() + if err != nil { + return nil, err + } if len(kvs) == 0 { kv = make(map[interface{}]interface{}) } else { @@ -157,48 +167,41 @@ func (p *RedisProvider) Read(sid string) (session.RawStore, error) { } } - rs := &RedisSessionStore{p: p.poollist, sid: sid, data: kv, maxlifetime: p.maxlifetime} - return rs, nil + return NewRedisStore(p.c, sid, p.duration, kv), nil } // Exist returns true if session with given ID exists. func (p *RedisProvider) Exist(sid string) bool { - c := p.poollist.Get() - defer c.Close() - - if existed, err := redis.Int(c.Do("EXISTS", sid)); err != nil || existed == 0 { - return false - } else { - return true - } + has, err := p.c.Exists(sid).Result() + return err == nil && has } // Destory deletes a session by session ID. func (p *RedisProvider) Destory(sid string) error { - c := p.poollist.Get() - defer c.Close() - - _, err := c.Do("DEL", sid) - return err + return p.c.Del(sid).Err() } // Regenerate regenerates a session store from old session ID to new one. 
-func (p *RedisProvider) Regenerate(oldsid, sid string) (session.RawStore, error) { - c := p.poollist.Get() - defer c.Close() - - if existed, _ := redis.Int(c.Do("EXISTS", oldsid)); existed == 0 { - // oldsid doesn't exists, set the new sid directly - // ignore error here, since if it return error - // the existed value will be 0 - c.Do("SET", sid, "", "EX", p.maxlifetime) - } else { - c.Do("RENAME", oldsid, sid) - c.Do("EXPIRE", sid, p.maxlifetime) +func (p *RedisProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { + if p.Exist(sid) { + return nil, fmt.Errorf("new sid '%s' already exists", sid) + } else if !p.Exist(oldsid) { + // Make a fake old session. + if err = p.c.SetEx(oldsid, p.duration, "").Err(); err != nil { + return nil, err + } + } + + if err = p.c.Rename(oldsid, sid).Err(); err != nil { + return nil, err } - kvs, err := redis.String(c.Do("GET", sid)) var kv map[interface{}]interface{} + kvs, err := p.c.Get(sid).Result() + if err != nil { + return nil, err + } + if len(kvs) == 0 { kv = make(map[interface{}]interface{}) } else { @@ -208,14 +211,12 @@ func (p *RedisProvider) Regenerate(oldsid, sid string) (session.RawStore, error) } } - rs := &RedisSessionStore{p: p.poollist, sid: sid, data: kv, maxlifetime: p.maxlifetime} - return rs, nil + return NewRedisStore(p.c, sid, p.duration, kv), nil } // Count counts and returns number of sessions. func (p *RedisProvider) Count() int { - // FIXME - return 0 + return int(p.c.DbSize().Val()) } // GC calls GC to clean expired sessions. 
diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.goconvey b/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.goconvey new file mode 100644 index 00000000000..8485e986e45 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go new file mode 100644 index 00000000000..9fd8e6518f8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go @@ -0,0 +1,107 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/Unknwon/macaron" + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/macaron-contrib/session" +) + +func Test_RedisProvider(t *testing.T) { + Convey("Test redis session provider", t, func() { + opt := session.Options{ + Provider: "redis", + ProviderConfig: "addr=:6379", + } + + Convey("Basic operation", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + sess.Set("uname", "unknwon") + }) + m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := raw.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + }) + m.Get("/get", func(ctx *macaron.Context, sess session.Store) { + sid := sess.ID() + So(sid, ShouldNotBeEmpty) + + raw, err := sess.Read(sid) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + + uname := sess.Get("uname") + So(uname, ShouldNotBeNil) + So(uname, ShouldEqual, "unknwon") + + So(sess.Delete("uname"), ShouldBeNil) + So(sess.Get("uname"), ShouldBeNil) + + So(sess.Destory(ctx), ShouldBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + cookie := resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/reg", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + + cookie = resp.Header().Get("Set-Cookie") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + }) + + Convey("Regenrate empty session", func() { + m := macaron.New() + m.Use(session.Sessioner(opt)) + m.Get("/", func(ctx *macaron.Context, sess session.Store) { + raw, err := sess.RegenerateId(ctx) + So(err, ShouldBeNil) + So(raw, ShouldNotBeNil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", 
"/", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") + m.ServeHTTP(resp, req) + }) + }) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/session.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/session.go index 204c65d7668..9cc1d528749 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/session.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/session.go @@ -13,7 +13,7 @@ // License for the specific language governing permissions and limitations // under the License. -// Package session a middleware that provides the session manager of Macaron. +// Package session a middleware that provides the session management of Macaron. package session // NOTE: last sync 000033e on Nov 4, 2014. @@ -28,7 +28,7 @@ import ( "github.com/Unknwon/macaron" ) -const _VERSION = "0.1.1" +const _VERSION = "0.1.6" func Version() string { return _VERSION @@ -37,11 +37,11 @@ func Version() string { // RawStore is the interface that operates the session data. type RawStore interface { // Set sets value to given key in session. - Set(key, value interface{}) error + Set(interface{}, interface{}) error // Get gets value by given key in session. - Get(key interface{}) interface{} - // Delete delete a key from session. - Delete(key interface{}) error + Get(interface{}) interface{} + // Delete deletes a key from session. + Delete(interface{}) error // ID returns current session ID. ID() string // Release releases session resource and save data to provider. @@ -54,7 +54,7 @@ type RawStore interface { type Store interface { RawStore // Read returns raw session store by session ID. - Read(sid string) (RawStore, error) + Read(string) (RawStore, error) // Destory deletes a session. Destory(*macaron.Context) error // RegenerateId regenerates a session store from old session ID to new one. 
@@ -111,7 +111,7 @@ func prepareOptions(options []Options) Options { if len(opt.Provider) == 0 { opt.Provider = sec.Key("PROVIDER").MustString("memory") } - if len(opt.ProviderConfig) == 0 && opt.Provider == "file" { + if len(opt.ProviderConfig) == 0 { opt.ProviderConfig = sec.Key("PROVIDER_CONFIG").MustString("data/sessions") } if len(opt.CookieName) == 0 { @@ -155,7 +155,7 @@ func Sessioner(options ...Options) macaron.Handler { return func(ctx *macaron.Context) { sess, err := manager.Start(ctx) if err != nil { - panic("session: " + err.Error()) + panic("session(start): " + err.Error()) } // Get flash. @@ -187,8 +187,8 @@ func Sessioner(options ...Options) macaron.Handler { ctx.Next() - if sess.Release() != nil { - panic("session: " + err.Error()) + if err = sess.Release(); err != nil { + panic("session(release): " + err.Error()) } } } @@ -242,17 +242,14 @@ type Manager struct { func NewManager(name string, opt Options) (*Manager, error) { p, ok := providers[name] if !ok { - return nil, fmt.Errorf("session: unknown provider ‘%q’(forgotten import?)", name) + return nil, fmt.Errorf("session: unknown provider '%s'(forgotten import?)", name) } - if err := p.Init(opt.Maxlifetime, opt.ProviderConfig); err != nil { - return nil, err - } - return &Manager{p, opt}, nil + return &Manager{p, opt}, p.Init(opt.Maxlifetime, opt.ProviderConfig) } // sessionId generates a new session ID with rand string, unix nano time, remote addr by hash function. 
func (m *Manager) sessionId() string { - return hex.EncodeToString(generateRandomKey(m.opt.IDLength)) + return hex.EncodeToString(generateRandomKey(m.opt.IDLength / 2)) } // Start starts a session by generating new one @@ -315,16 +312,9 @@ func (m *Manager) Destory(ctx *macaron.Context) error { func (m *Manager) RegenerateId(ctx *macaron.Context) (sess RawStore, err error) { sid := m.sessionId() oldsid := ctx.GetCookie(m.opt.CookieName) - if len(oldsid) == 0 { - sess, err = m.provider.Read(oldsid) - if err != nil { - return nil, err - } - } else { - sess, err = m.provider.Regenerate(oldsid, sid) - if err != nil { - return nil, err - } + sess, err = m.provider.Regenerate(oldsid, sid) + if err != nil { + return nil, err } ck := &http.Cookie{ Name: m.opt.CookieName, diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go index 327c1d0c6a5..82efc277c61 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go @@ -42,7 +42,7 @@ func Test_Sessioner(t *testing.T) { m.ServeHTTP(resp, req) }) - Convey("Register invalid provider that", t, func() { + Convey("Register invalid provider", t, func() { Convey("Provider not exists", func() { defer func() { So(recover(), ShouldNotBeNil) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go index a165de83070..6c9ea495fe5 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go +++ b/Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go @@ -24,39 +24,19 @@ import ( "github.com/Unknwon/com" ) -func init() { - gob.Register([]interface{}{}) - gob.Register(map[int]interface{}{}) - gob.Register(map[string]interface{}{}) - gob.Register(map[interface{}]interface{}{}) - gob.Register(map[string]string{}) - 
gob.Register(map[int]string{}) - gob.Register(map[int]int{}) - gob.Register(map[int]int64{}) -} - func EncodeGob(obj map[interface{}]interface{}) ([]byte, error) { for _, v := range obj { gob.Register(v) } buf := bytes.NewBuffer(nil) - enc := gob.NewEncoder(buf) - err := enc.Encode(obj) - if err != nil { - return []byte(""), err - } - return buf.Bytes(), nil + err := gob.NewEncoder(buf).Encode(obj) + return buf.Bytes(), err } -func DecodeGob(encoded []byte) (map[interface{}]interface{}, error) { +func DecodeGob(encoded []byte) (out map[interface{}]interface{}, err error) { buf := bytes.NewBuffer(encoded) - dec := gob.NewDecoder(buf) - var out map[interface{}]interface{} - err := dec.Decode(&out) - if err != nil { - return nil, err - } - return out, nil + err = gob.NewDecoder(buf).Decode(&out) + return out, err } // generateRandomKey creates a random key with the given strength. From 76ea0f432aff082e1ad3403ca7708ee8ab1d9c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 31 Mar 2015 19:20:34 +0200 Subject: [PATCH 110/274] updated master version to beta2 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 1b1290b0da8..8df5f593006 100644 --- a/package.json +++ b/package.json @@ -4,7 +4,7 @@ "company": "Coding Instinct AB" }, "name": "grafana", - "version": "2.0.0-beta1", + "version": "2.0.0-beta2", "repository": { "type": "git", "url": "http://github.com/torkelo/grafana.git" From 03e336ba9ea2b594c0861efa9efcfd77351d8ae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 31 Mar 2015 21:48:54 +0200 Subject: [PATCH 111/274] Fixed minor issue doing snapshot from home (root) dashboard --- public/app/features/dashboard/shareSnapshotCtrl.js | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/public/app/features/dashboard/shareSnapshotCtrl.js b/public/app/features/dashboard/shareSnapshotCtrl.js index 17a313b2b64..010f074957b 100644 
--- a/public/app/features/dashboard/shareSnapshotCtrl.js +++ b/public/app/features/dashboard/shareSnapshotCtrl.js @@ -66,9 +66,15 @@ function (angular, _) { $scope.snapshotUrl = results.url; $scope.saveExternalSnapshotRef(cmdData, results); } else { - var baseUrl = $location.absUrl().replace($location.url(), ""); - $scope.snapshotUrl = baseUrl + '/dashboard/snapshot/' + results.key; - $scope.deleteUrl = baseUrl + '/api/snapshots-delete/' + results.deleteKey; + var url = $location.url(); + var baseUrl = $location.absUrl(); + + if (url !== '/') { + baseUrl = baseUrl.replace(url, '') + '/'; + } + + $scope.snapshotUrl = baseUrl + 'dashboard/snapshot/' + results.key; + $scope.deleteUrl = baseUrl + 'api/snapshots-delete/' + results.deleteKey; } $scope.step = 2; From 3c2bfbfc1c1b311f5c96c8964f5bf9bd20ce3362 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Tue, 31 Mar 2015 22:06:19 +0200 Subject: [PATCH 112/274] Search: Dashboard results should be sorted alphabetically, Fixes #1685 --- CHANGELOG.md | 1 + pkg/services/sqlstore/dashboard.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e21e94685d4..ace3efcbe62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ **FIxes** - [Issue #1681](https://github.com/grafana/grafana/issues/1681). MySQL session: fixed problem using mysql as session store - [Issue #1671](https://github.com/grafana/grafana/issues/1671). Data sources: Fixed issue with changing default data source (should not require full page load to take effect, now fixed) +- [Issue #1685](https://github.com/grafana/grafana/issues/1685). 
Search: Dashboard results should be sorted alphabetically # 2.0.0-Beta1 (2015-03-30) diff --git a/pkg/services/sqlstore/dashboard.go b/pkg/services/sqlstore/dashboard.go index 0384a5bb6e6..7dbebd94e4c 100644 --- a/pkg/services/sqlstore/dashboard.go +++ b/pkg/services/sqlstore/dashboard.go @@ -140,7 +140,7 @@ func SearchDashboards(query *m.SearchDashboardsQuery) error { query.Limit = 300 } - sql.WriteString(fmt.Sprintf(" LIMIT %d", query.Limit)) + sql.WriteString(fmt.Sprintf(" ORDER BY dashboard.title ASC LIMIT %d", query.Limit)) var res []DashboardSearchProjection err := x.Sql(sql.String(), params...).Find(&res) From 4ca125da412b824d348fb02fb3a86d11cea15141 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 08:24:03 +0200 Subject: [PATCH 113/274] Delete snapshot fix: the action is now shows confirmation directly in the modal (not opening a new tab with api call as in beta1), Fixes #1682 --- public/app/features/dashboard/partials/shareModal.html | 10 +++++++++- public/app/features/dashboard/shareSnapshotCtrl.js | 6 ++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/public/app/features/dashboard/partials/shareModal.html b/public/app/features/dashboard/partials/shareModal.html index 881d30ada97..6b9ff8aef16 100644 --- a/public/app/features/dashboard/partials/shareModal.html +++ b/public/app/features/dashboard/partials/shareModal.html @@ -84,6 +84,13 @@

    + +
    @@ -135,7 +142,8 @@
    - Did you make a mistake? delete snapshot. + Did you make a mistake? delete snapshot.
    +
    diff --git a/public/app/features/dashboard/shareSnapshotCtrl.js b/public/app/features/dashboard/shareSnapshotCtrl.js index 010f074957b..bcdbee57d18 100644 --- a/public/app/features/dashboard/shareSnapshotCtrl.js +++ b/public/app/features/dashboard/shareSnapshotCtrl.js @@ -117,6 +117,12 @@ function (angular, _) { }); }; + $scope.deleteSnapshot = function() { + backendSrv.get($scope.deleteUrl).then(function() { + $scope.step = 3; + }); + }; + $scope.saveExternalSnapshotRef = function(cmdData, results) { // save external in local instance as well cmdData.external = true; From 5422d13607bdee899c75a038241b310ff94caa3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 08:26:02 +0200 Subject: [PATCH 114/274] Postgres fix update --- pkg/services/sqlstore/datasource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/services/sqlstore/datasource.go b/pkg/services/sqlstore/datasource.go index e82d42951e8..ebb2ad977b1 100644 --- a/pkg/services/sqlstore/datasource.go +++ b/pkg/services/sqlstore/datasource.go @@ -90,7 +90,7 @@ func updateIsDefaultFlag(ds *m.DataSource, sess *xorm.Session) error { // Handle is default flag if ds.IsDefault { rawSql := "UPDATE data_source SET is_default=? WHERE org_id=? AND id <> ?" 
- if _, err := sess.Exec(rawSql, False, ds.OrgId, ds.Id); err != nil { + if _, err := sess.Exec(rawSql, false, ds.OrgId, ds.Id); err != nil { return err } } From c3fa68ade85d2087b9aae3490a5e3e105486db8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 09:00:17 +0200 Subject: [PATCH 115/274] Data source proxy: Fixed issue with Gzip enabled and data source proxy, Fixes #1675 --- CHANGELOG.md | 1 + pkg/cmd/web.go | 3 ++- pkg/middleware/util.go | 20 ++++++++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 pkg/middleware/util.go diff --git a/CHANGELOG.md b/CHANGELOG.md index ace3efcbe62..165f7b40253 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # 2.0.0-RC1 (unreleased) **FIxes** +- [Issue #1675](https://github.com/grafana/grafana/issues/1675). Data source proxy: Fixed issue with Gzip enabled and data source proxy - [Issue #1681](https://github.com/grafana/grafana/issues/1681). MySQL session: fixed problem using mysql as session store - [Issue #1671](https://github.com/grafana/grafana/issues/1671). Data sources: Fixed issue with changing default data source (should not require full page load to take effect, now fixed) - [Issue #1685](https://github.com/grafana/grafana/issues/1685). 
Search: Dashboard results should be sorted alphabetically diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index 1fc6e8a999c..8d7697b9871 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -42,8 +42,9 @@ func newMacaron() *macaron.Macaron { m := macaron.New() m.Use(middleware.Logger()) m.Use(macaron.Recovery()) + if setting.EnableGzip { - m.Use(macaron.Gziper()) + m.Use(middleware.Gziper()) } mapStatic(m, "", "public") diff --git a/pkg/middleware/util.go b/pkg/middleware/util.go new file mode 100644 index 00000000000..2d7d9739ead --- /dev/null +++ b/pkg/middleware/util.go @@ -0,0 +1,20 @@ +package middleware + +import ( + "strings" + + "github.com/Unknwon/macaron" +) + +func Gziper() macaron.Handler { + macaronGziper := macaron.Gziper() + + return func(ctx *macaron.Context) { + requestPath := ctx.Req.URL.RequestURI() + if strings.HasPrefix(requestPath, "/api/datasources/proxy") { + return + } + + ctx.Invoke(macaronGziper) + } +} From cf877e6567860ce7ef4b991c688d25eb18677f32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 09:45:42 +0200 Subject: [PATCH 116/274] added postgres and redis session options --- Godeps/Godeps.json | 14 +- .../src/gopkg.in/bufio.v1/.travis.yml | 11 + .../_workspace/src/gopkg.in/bufio.v1/LICENSE | 27 + .../_workspace/src/gopkg.in/bufio.v1/Makefile | 2 + .../src/gopkg.in/bufio.v1/README.md | 4 + .../src/gopkg.in/bufio.v1/buffer.go | 413 ++ .../src/gopkg.in/bufio.v1/buffer_test.go | 527 +++ .../_workspace/src/gopkg.in/bufio.v1/bufio.go | 728 ++++ .../src/gopkg.in/bufio.v1/bufio_test.go | 1418 +++++++ .../src/gopkg.in/bufio.v1/export_test.go | 9 + .../src/gopkg.in/redis.v2/.travis.yml | 19 + .../_workspace/src/gopkg.in/redis.v2/LICENSE | 27 + .../_workspace/src/gopkg.in/redis.v2/Makefile | 3 + .../src/gopkg.in/redis.v2/README.md | 46 + .../src/gopkg.in/redis.v2/command.go | 597 +++ .../src/gopkg.in/redis.v2/commands.go | 1246 ++++++ .../_workspace/src/gopkg.in/redis.v2/doc.go | 4 + 
.../_workspace/src/gopkg.in/redis.v2/error.go | 23 + .../src/gopkg.in/redis.v2/example_test.go | 180 + .../src/gopkg.in/redis.v2/export_test.go | 5 + .../_workspace/src/gopkg.in/redis.v2/multi.go | 138 + .../src/gopkg.in/redis.v2/parser.go | 262 ++ .../src/gopkg.in/redis.v2/parser_test.go | 54 + .../src/gopkg.in/redis.v2/pipeline.go | 91 + .../_workspace/src/gopkg.in/redis.v2/pool.go | 405 ++ .../src/gopkg.in/redis.v2/pubsub.go | 134 + .../src/gopkg.in/redis.v2/rate_limit.go | 53 + .../src/gopkg.in/redis.v2/rate_limit_test.go | 31 + .../_workspace/src/gopkg.in/redis.v2/redis.go | 231 ++ .../src/gopkg.in/redis.v2/redis_test.go | 3333 +++++++++++++++++ .../src/gopkg.in/redis.v2/script.go | 52 + .../src/gopkg.in/redis.v2/sentinel.go | 291 ++ .../src/gopkg.in/redis.v2/sentinel_test.go | 185 + .../gopkg.in/redis.v2/testdata/sentinel.conf | 6 + pkg/cmd/web.go | 1 + pkg/setting/setting.go | 2 +- 36 files changed, 10569 insertions(+), 3 deletions(-) create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/README.md create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/Makefile create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/README.md create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/command.go create mode 100644 
Godeps/_workspace/src/gopkg.in/redis.v2/commands.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/doc.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/error.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/multi.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/parser.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/pool.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/redis.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/script.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ce0a9ea142e..e423a574cd2 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { - "ImportPath": "github.com/torkelo/grafana-pro", - "GoVersion": "go1.3", + "ImportPath": "github.com/grafana/grafana", + "GoVersion": "go1.4.2", "Packages": [ "./pkg/..." 
], @@ -70,10 +70,20 @@ "ImportPath": "golang.org/x/oauth2", "Rev": "e5909d4679a1926c774c712b343f10b8298687a3" }, + { + "ImportPath": "gopkg.in/bufio.v1", + "Comment": "v1", + "Rev": "567b2bfa514e796916c4747494d6ff5132a1dfce" + }, { "ImportPath": "gopkg.in/ini.v1", "Comment": "v0-16-g1772191", "Rev": "177219109c97e7920c933e21c9b25f874357b237" + }, + { + "ImportPath": "gopkg.in/redis.v2", + "Comment": "v2.3.2", + "Rev": "e6179049628164864e6e84e973cfb56335748dea" } ] } diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml new file mode 100644 index 00000000000..ccca6bb4a61 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip + +install: + - go get launchpad.net/gocheck + - go get gopkg.in/bufio.v1 diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE new file mode 100644 index 00000000000..07a316cbf47 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The bufio Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile b/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile new file mode 100644 index 00000000000..038ed47e941 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile @@ -0,0 +1,2 @@ +all: + go test gopkg.in/bufio.v1 diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/README.md b/Godeps/_workspace/src/gopkg.in/bufio.v1/README.md new file mode 100644 index 00000000000..bfb85ee544f --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/README.md @@ -0,0 +1,4 @@ +bufio +===== + +This is a fork of the http://golang.org/pkg/bufio/ package. It adds `ReadN` method that allows reading next `n` bytes from the internal buffer without allocating intermediate buffer. This method works just like the [Buffer.Next](http://golang.org/pkg/bytes/#Buffer.Next) method, but has slightly different signature. diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go new file mode 100644 index 00000000000..8b915605b64 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go @@ -0,0 +1,413 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufio + +// Simple byte buffer for marshaling data. + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +// A Buffer is a variable-sized buffer of bytes with Read and Write methods. +// The zero value for Buffer is an empty buffer ready to use. +type Buffer struct { + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune + bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation. + lastRead readOp // last read operation, so that Unread* can work correctly. +} + +// The readOp constants describe the last action performed on +// the buffer, so that UnreadRune and UnreadByte can +// check for invalid usage. +type readOp int + +const ( + opInvalid readOp = iota // Non-read operation. + opReadRune // Read rune. + opRead // Any other read operation. +) + +// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. +var ErrTooLarge = errors.New("bytes.Buffer: too large") + +// Bytes returns a slice of the contents of the unread portion of the buffer; +// len(b.Bytes()) == b.Len(). If the caller changes the contents of the +// returned slice, the contents of the buffer will change provided there +// are no intervening method calls on the Buffer. +func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } + +// String returns the contents of the unread portion of the buffer +// as a string. If the Buffer is a nil pointer, it returns "". +func (b *Buffer) String() string { + if b == nil { + // Special case, useful in debugging. + return "" + } + return string(b.buf[b.off:]) +} + +// Len returns the number of bytes of the unread portion of the buffer; +// b.Len() == len(b.Bytes()). 
+func (b *Buffer) Len() int { return len(b.buf) - b.off } + +// Truncate discards all but the first n unread bytes from the buffer. +// It panics if n is negative or greater than the length of the buffer. +func (b *Buffer) Truncate(n int) { + b.lastRead = opInvalid + switch { + case n < 0 || n > b.Len(): + panic("bytes.Buffer: truncation out of range") + case n == 0: + // Reuse buffer space. + b.off = 0 + } + b.buf = b.buf[0 : b.off+n] +} + +// Reset resets the buffer so it has no content. +// b.Reset() is the same as b.Truncate(0). +func (b *Buffer) Reset() { b.Truncate(0) } + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with ErrTooLarge. +func (b *Buffer) grow(n int) int { + m := b.Len() + // If buffer is empty, reset to recover space. + if m == 0 && b.off != 0 { + b.Truncate(0) + } + if len(b.buf)+n > cap(b.buf) { + var buf []byte + if b.buf == nil && n <= len(b.bootstrap) { + buf = b.bootstrap[0:] + } else if m+n <= cap(b.buf)/2 { + // We can slide things down instead of allocating a new + // slice. We only need m+n <= cap(b.buf) to slide, but + // we instead let capacity get twice as large so we + // don't spend all our time copying. + copy(b.buf[:], b.buf[b.off:]) + buf = b.buf[:m] + } else { + // not enough space anywhere + buf = makeSlice(2*cap(b.buf) + n) + copy(buf, b.buf[b.off:]) + } + b.buf = buf + b.off = 0 + } + b.buf = b.buf[0 : b.off+m+n] + return b.off + m +} + +// Grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After Grow(n), at least n bytes can be written to the +// buffer without another allocation. +// If n is negative, Grow will panic. +// If the buffer can't grow it will panic with ErrTooLarge. 
+func (b *Buffer) Grow(n int) { + if n < 0 { + panic("bytes.Buffer.Grow: negative count") + } + m := b.grow(n) + b.buf = b.buf[0:m] +} + +// Write appends the contents of p to the buffer, growing the buffer as +// needed. The return value n is the length of p; err is always nil. If the +// buffer becomes too large, Write will panic with ErrTooLarge. +func (b *Buffer) Write(p []byte) (n int, err error) { + b.lastRead = opInvalid + m := b.grow(len(p)) + return copy(b.buf[m:], p), nil +} + +// WriteString appends the contents of s to the buffer, growing the buffer as +// needed. The return value n is the length of s; err is always nil. If the +// buffer becomes too large, WriteString will panic with ErrTooLarge. +func (b *Buffer) WriteString(s string) (n int, err error) { + b.lastRead = opInvalid + m := b.grow(len(s)) + return copy(b.buf[m:], s), nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads data from r until EOF and appends it to the buffer, growing +// the buffer as needed. The return value n is the number of bytes read. Any +// error except io.EOF encountered during the read is also returned. If the +// buffer becomes too large, ReadFrom will panic with ErrTooLarge. +func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { + b.lastRead = opInvalid + // If buffer is empty, reset to recover space. 
+ if b.off >= len(b.buf) { + b.Truncate(0) + } + for { + if free := cap(b.buf) - len(b.buf); free < MinRead { + // not enough space at end + newBuf := b.buf + if b.off+free < MinRead { + // not enough space using beginning of buffer; + // double buffer capacity + newBuf = makeSlice(2*cap(b.buf) + MinRead) + } + copy(newBuf, b.buf[b.off:]) + b.buf = newBuf[:len(b.buf)-b.off] + b.off = 0 + } + m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) + b.buf = b.buf[0 : len(b.buf)+m] + n += int64(m) + if e == io.EOF { + break + } + if e != nil { + return n, e + } + } + return n, nil // err is EOF, so return nil explicitly +} + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(ErrTooLarge) + } + }() + return make([]byte, n) +} + +// WriteTo writes data to w until the buffer is drained or an error occurs. +// The return value n is the number of bytes written; it always fits into an +// int, but it is int64 to match the io.WriterTo interface. Any error +// encountered during the write is also returned. +func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { + b.lastRead = opInvalid + if b.off < len(b.buf) { + nBytes := b.Len() + m, e := w.Write(b.buf[b.off:]) + if m > nBytes { + panic("bytes.Buffer.WriteTo: invalid Write count") + } + b.off += m + n = int64(m) + if e != nil { + return n, e + } + // all bytes should have been written, by definition of + // Write method in io.Writer + if m != nBytes { + return n, io.ErrShortWrite + } + } + // Buffer is now empty; reset. + b.Truncate(0) + return +} + +// WriteByte appends the byte c to the buffer, growing the buffer as needed. +// The returned error is always nil, but is included to match bufio.Writer's +// WriteByte. If the buffer becomes too large, WriteByte will panic with +// ErrTooLarge. 
+func (b *Buffer) WriteByte(c byte) error { + b.lastRead = opInvalid + m := b.grow(1) + b.buf[m] = c + return nil +} + +// WriteRune appends the UTF-8 encoding of Unicode code point r to the +// buffer, returning its length and an error, which is always nil but is +// included to match bufio.Writer's WriteRune. The buffer is grown as needed; +// if it becomes too large, WriteRune will panic with ErrTooLarge. +func (b *Buffer) WriteRune(r rune) (n int, err error) { + if r < utf8.RuneSelf { + b.WriteByte(byte(r)) + return 1, nil + } + n = utf8.EncodeRune(b.runeBytes[0:], r) + b.Write(b.runeBytes[0:n]) + return n, nil +} + +// Read reads the next len(p) bytes from the buffer or until the buffer +// is drained. The return value n is the number of bytes read. If the +// buffer has no data to return, err is io.EOF (unless len(p) is zero); +// otherwise it is nil. +func (b *Buffer) Read(p []byte) (n int, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + if len(p) == 0 { + return + } + return 0, io.EOF + } + n = copy(p, b.buf[b.off:]) + b.off += n + if n > 0 { + b.lastRead = opRead + } + return +} + +// Next returns a slice containing the next n bytes from the buffer, +// advancing the buffer as if the bytes had been returned by Read. +// If there are fewer than n bytes in the buffer, Next returns the entire buffer. +// The slice is only valid until the next call to a read or write method. +func (b *Buffer) Next(n int) []byte { + b.lastRead = opInvalid + m := b.Len() + if n > m { + n = m + } + data := b.buf[b.off : b.off+n] + b.off += n + if n > 0 { + b.lastRead = opRead + } + return data +} + +// ReadByte reads and returns the next byte from the buffer. +// If no byte is available, it returns error io.EOF. +func (b *Buffer) ReadByte() (c byte, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. 
+ b.Truncate(0) + return 0, io.EOF + } + c = b.buf[b.off] + b.off++ + b.lastRead = opRead + return c, nil +} + +// ReadRune reads and returns the next UTF-8-encoded +// Unicode code point from the buffer. +// If no bytes are available, the error returned is io.EOF. +// If the bytes are an erroneous UTF-8 encoding, it +// consumes one byte and returns U+FFFD, 1. +func (b *Buffer) ReadRune() (r rune, size int, err error) { + b.lastRead = opInvalid + if b.off >= len(b.buf) { + // Buffer is empty, reset to recover space. + b.Truncate(0) + return 0, 0, io.EOF + } + b.lastRead = opReadRune + c := b.buf[b.off] + if c < utf8.RuneSelf { + b.off++ + return rune(c), 1, nil + } + r, n := utf8.DecodeRune(b.buf[b.off:]) + b.off += n + return r, n, nil +} + +// UnreadRune unreads the last rune returned by ReadRune. +// If the most recent read or write operation on the buffer was +// not a ReadRune, UnreadRune returns an error. (In this regard +// it is stricter than UnreadByte, which will unread the last byte +// from any read operation.) +func (b *Buffer) UnreadRune() error { + if b.lastRead != opReadRune { + return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune") + } + b.lastRead = opInvalid + if b.off > 0 { + _, n := utf8.DecodeLastRune(b.buf[0:b.off]) + b.off -= n + } + return nil +} + +// UnreadByte unreads the last byte returned by the most recent +// read operation. If write has happened since the last read, UnreadByte +// returns an error. +func (b *Buffer) UnreadByte() error { + if b.lastRead != opReadRune && b.lastRead != opRead { + return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read") + } + b.lastRead = opInvalid + if b.off > 0 { + b.off-- + } + return nil +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. 
+// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { + slice, err := b.readSlice(delim) + // return a copy of slice. The buffer's backing array may + // be overwritten by later calls. + line = append(line, slice...) + return +} + +// readSlice is like ReadBytes but returns a reference to internal buffer data. +func (b *Buffer) readSlice(delim byte) (line []byte, err error) { + i := bytes.IndexByte(b.buf[b.off:], delim) + end := b.off + i + 1 + if i < 0 { + end = len(b.buf) + err = io.EOF + } + line = b.buf[b.off:end] + b.off = end + b.lastRead = opRead + return line, err +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadString returns err != nil if and only if the returned data does not end +// in delim. +func (b *Buffer) ReadString(delim byte) (line string, err error) { + slice, err := b.readSlice(delim) + return string(slice), err +} + +// NewBuffer creates and initializes a new Buffer using buf as its initial +// contents. It is intended to prepare a Buffer to read existing data. It +// can also be used to size the internal buffer for writing. To do that, +// buf should have the desired capacity but a length of zero. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } + +// NewBufferString creates and initializes a new Buffer using string s as its +// initial contents. 
It is intended to prepare a buffer to read an existing +// string. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is +// sufficient to initialize a Buffer. +func NewBufferString(s string) *Buffer { + return &Buffer{buf: []byte(s)} +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go new file mode 100644 index 00000000000..ca1ac210513 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go @@ -0,0 +1,527 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufio + +import ( + "bytes" + "io" + "math/rand" + "runtime" + "testing" + "unicode/utf8" +) + +const N = 10000 // make this bigger for a larger (and slower) test +var data string // test data for write tests +var testBytes []byte // test data; same as data but as a slice. + +func init() { + testBytes = make([]byte, N) + for i := 0; i < N; i++ { + testBytes[i] = 'a' + byte(i%26) + } + data = string(testBytes) +} + +// Verify that contents of buf match the string s. +func check(t *testing.T, testname string, buf *Buffer, s string) { + bytes := buf.Bytes() + str := buf.String() + if buf.Len() != len(bytes) { + t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) + } + + if buf.Len() != len(str) { + t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) + } + + if buf.Len() != len(s) { + t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) + } + + if string(bytes) != s { + t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) + } +} + +// Fill buf through n writes of string fus. +// The initial contents of buf corresponds to the string s; +// the result is the final contents of buf returned as a string. 
+func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string { + check(t, testname+" (fill 1)", buf, s) + for ; n > 0; n-- { + m, err := buf.WriteString(fus) + if m != len(fus) { + t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus)) + } + if err != nil { + t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) + } + s += fus + check(t, testname+" (fill 4)", buf, s) + } + return s +} + +// Fill buf through n writes of byte slice fub. +// The initial contents of buf corresponds to the string s; +// the result is the final contents of buf returned as a string. +func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { + check(t, testname+" (fill 1)", buf, s) + for ; n > 0; n-- { + m, err := buf.Write(fub) + if m != len(fub) { + t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) + } + if err != nil { + t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) + } + s += string(fub) + check(t, testname+" (fill 4)", buf, s) + } + return s +} + +func TestNewBuffer(t *testing.T) { + buf := NewBuffer(testBytes) + check(t, "NewBuffer", buf, data) +} + +func TestNewBufferString(t *testing.T) { + buf := NewBufferString(data) + check(t, "NewBufferString", buf, data) +} + +// Empty buf through repeated reads into fub. +// The initial contents of buf corresponds to the string s. 
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) { + check(t, testname+" (empty 1)", buf, s) + + for { + n, err := buf.Read(fub) + if n == 0 { + break + } + if err != nil { + t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err) + } + s = s[n:] + check(t, testname+" (empty 3)", buf, s) + } + + check(t, testname+" (empty 4)", buf, "") +} + +func TestBasicOperations(t *testing.T) { + var buf Buffer + + for i := 0; i < 5; i++ { + check(t, "TestBasicOperations (1)", &buf, "") + + buf.Reset() + check(t, "TestBasicOperations (2)", &buf, "") + + buf.Truncate(0) + check(t, "TestBasicOperations (3)", &buf, "") + + n, err := buf.Write([]byte(data[0:1])) + if n != 1 { + t.Errorf("wrote 1 byte, but n == %d", n) + } + if err != nil { + t.Errorf("err should always be nil, but err == %s", err) + } + check(t, "TestBasicOperations (4)", &buf, "a") + + buf.WriteByte(data[1]) + check(t, "TestBasicOperations (5)", &buf, "ab") + + n, err = buf.Write([]byte(data[2:26])) + if n != 24 { + t.Errorf("wrote 25 bytes, but n == %d", n) + } + check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) + + buf.Truncate(26) + check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) + + buf.Truncate(20) + check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) + + empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) + empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) + + buf.WriteByte(data[1]) + c, err := buf.ReadByte() + if err != nil { + t.Error("ReadByte unexpected eof") + } + if c != data[1] { + t.Errorf("ReadByte wrong value c=%v", c) + } + c, err = buf.ReadByte() + if err == nil { + t.Error("ReadByte unexpected not eof") + } + } +} + +func TestLargeStringWrites(t *testing.T) { + var buf Buffer + limit := 30 + if testing.Short() { + limit = 9 + } + for i := 3; i < limit; i += 3 { + s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data) + empty(t, "TestLargeStringWrites (2)", 
&buf, s, make([]byte, len(data)/i)) + } + check(t, "TestLargeStringWrites (3)", &buf, "") +} + +func TestLargeByteWrites(t *testing.T) { + var buf Buffer + limit := 30 + if testing.Short() { + limit = 9 + } + for i := 3; i < limit; i += 3 { + s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) + empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) + } + check(t, "TestLargeByteWrites (3)", &buf, "") +} + +func TestLargeStringReads(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i]) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + } + check(t, "TestLargeStringReads (3)", &buf, "") +} + +func TestLargeByteReads(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + } + check(t, "TestLargeByteReads (3)", &buf, "") +} + +func TestMixedReadsAndWrites(t *testing.T) { + var buf Buffer + s := "" + for i := 0; i < 50; i++ { + wlen := rand.Intn(len(data)) + if i%2 == 0 { + s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen]) + } else { + s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) + } + + rlen := rand.Intn(len(data)) + fub := make([]byte, rlen) + n, _ := buf.Read(fub) + s = s[n:] + } + empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len())) +} + +func TestNil(t *testing.T) { + var b *Buffer + if b.String() != "" { + t.Errorf("expected ; got %q", b.String()) + } +} + +func TestReadFrom(t *testing.T) { + var buf Buffer + for i := 3; i < 30; i += 3 { + s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + b.ReadFrom(&buf) + empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data))) + } +} + +func TestWriteTo(t *testing.T) { + var buf Buffer + for i := 3; i < 
30; i += 3 { + s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) + var b Buffer + buf.WriteTo(&b) + empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) + } +} + +func TestRuneIO(t *testing.T) { + const NRune = 1000 + // Built a test slice while we write the data + b := make([]byte, utf8.UTFMax*NRune) + var buf Buffer + n := 0 + for r := rune(0); r < NRune; r++ { + size := utf8.EncodeRune(b[n:], r) + nbytes, err := buf.WriteRune(r) + if err != nil { + t.Fatalf("WriteRune(%U) error: %s", r, err) + } + if nbytes != size { + t.Fatalf("WriteRune(%U) expected %d, got %d", r, size, nbytes) + } + n += size + } + b = b[0:n] + + // Check the resulting bytes + if !bytes.Equal(buf.Bytes(), b) { + t.Fatalf("incorrect result from WriteRune: %q not %q", buf.Bytes(), b) + } + + p := make([]byte, utf8.UTFMax) + // Read it back with ReadRune + for r := rune(0); r < NRune; r++ { + size := utf8.EncodeRune(p, r) + nr, nbytes, err := buf.ReadRune() + if nr != r || nbytes != size || err != nil { + t.Fatalf("ReadRune(%U) got %U,%d not %U,%d (err=%s)", r, nr, nbytes, r, size, err) + } + } + + // Check that UnreadRune works + buf.Reset() + buf.Write(b) + for r := rune(0); r < NRune; r++ { + r1, size, _ := buf.ReadRune() + if err := buf.UnreadRune(); err != nil { + t.Fatalf("UnreadRune(%U) got error %q", r, err) + } + r2, nbytes, err := buf.ReadRune() + if r1 != r2 || r1 != r || nbytes != size || err != nil { + t.Fatalf("ReadRune(%U) after UnreadRune got %U,%d not %U,%d (err=%s)", r, r2, nbytes, r, size, err) + } + } +} + +func TestNext(t *testing.T) { + b := []byte{0, 1, 2, 3, 4} + tmp := make([]byte, 5) + for i := 0; i <= 5; i++ { + for j := i; j <= 5; j++ { + for k := 0; k <= 6; k++ { + // 0 <= i <= j <= 5; 0 <= k <= 6 + // Check that if we start with a buffer + // of length j at offset i and ask for + // Next(k), we get the right bytes. 
+ buf := NewBuffer(b[0:j]) + n, _ := buf.Read(tmp[0:i]) + if n != i { + t.Fatalf("Read %d returned %d", i, n) + } + bb := buf.Next(k) + want := k + if want > j-i { + want = j - i + } + if len(bb) != want { + t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) + } + for l, v := range bb { + if v != byte(l+i) { + t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) + } + } + } + } + } +} + +var readBytesTests = []struct { + buffer string + delim byte + expected []string + err error +}{ + {"", 0, []string{""}, io.EOF}, + {"a\x00", 0, []string{"a\x00"}, nil}, + {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, + {"hello\x01world", 1, []string{"hello\x01"}, nil}, + {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, + {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, + {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, +} + +func TestReadBytes(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBufferString(test.buffer) + var err error + for _, expected := range test.expected { + var bytes []byte + bytes, err = buf.ReadBytes(test.delim) + if string(bytes) != expected { + t.Errorf("expected %q, got %q", expected, bytes) + } + if err != nil { + break + } + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + } +} + +func TestReadString(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBufferString(test.buffer) + var err error + for _, expected := range test.expected { + var s string + s, err = buf.ReadString(test.delim) + if s != expected { + t.Errorf("expected %q, got %q", expected, s) + } + if err != nil { + break + } + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + } +} + +func BenchmarkReadString(b *testing.B) { + const n = 32 << 10 + + data := make([]byte, n) + data[n-1] = 'x' + b.SetBytes(int64(n)) + for i := 0; i < b.N; i++ { + buf := NewBuffer(data) + _, err := 
buf.ReadString('x') + if err != nil { + b.Fatal(err) + } + } +} + +func TestGrow(t *testing.T) { + x := []byte{'x'} + y := []byte{'y'} + tmp := make([]byte, 72) + for _, startLen := range []int{0, 100, 1000, 10000, 100000} { + xBytes := bytes.Repeat(x, startLen) + for _, growLen := range []int{0, 100, 1000, 10000, 100000} { + buf := NewBuffer(xBytes) + // If we read, this affects buf.off, which is good to test. + readBytes, _ := buf.Read(tmp) + buf.Grow(growLen) + yBytes := bytes.Repeat(y, growLen) + // Check no allocation occurs in write, as long as we're single-threaded. + var m1, m2 runtime.MemStats + runtime.ReadMemStats(&m1) + buf.Write(yBytes) + runtime.ReadMemStats(&m2) + if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { + t.Errorf("allocation occurred during write") + } + // Check that buffer has correct data. + if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { + t.Errorf("bad initial data at %d %d", startLen, growLen) + } + if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { + t.Errorf("bad written data at %d %d", startLen, growLen) + } + } + } +} + +// Was a bug: used to give EOF reading empty slice at EOF. +func TestReadEmptyAtEOF(t *testing.T) { + b := new(Buffer) + slice := make([]byte, 0) + n, err := b.Read(slice) + if err != nil { + t.Errorf("read error: %v", err) + } + if n != 0 { + t.Errorf("wrong count; got %d want 0", n) + } +} + +func TestBufferUnreadByte(t *testing.T) { + b := new(Buffer) + b.WriteString("abcdefghijklmnopqrstuvwxyz") + + _, err := b.ReadBytes('m') + if err != nil { + t.Fatalf("ReadBytes: %v", err) + } + + err = b.UnreadByte() + if err != nil { + t.Fatalf("UnreadByte: %v", err) + } + c, err := b.ReadByte() + if err != nil { + t.Fatalf("ReadByte: %v", err) + } + if c != 'm' { + t.Errorf("ReadByte = %q; want %q", c, 'm') + } +} + +// Tests that we occasionally compact. Issue 5154. 
+func TestBufferGrowth(t *testing.T) { + var b Buffer + buf := make([]byte, 1024) + b.Write(buf[0:1]) + var cap0 int + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + if i == 0 { + cap0 = b.Cap() + } + } + cap1 := b.Cap() + // (*Buffer).grow allows for 2x capacity slop before sliding, + // so set our error threshold at 3x. + if cap1 > cap0*3 { + t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) + } +} + +// From Issue 5154. +func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf[0:1]) + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + } + } +} + +// Check that we don't compact too often. From Issue 5154. +func BenchmarkBufferFullSmallReads(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf) + for b.Len()+20 < b.Cap() { + b.Write(buf[:10]) + } + for i := 0; i < 5<<10; i++ { + b.Read(buf[:1]) + b.Write(buf[:1]) + } + } +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go new file mode 100644 index 00000000000..8f5cdc084d4 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go @@ -0,0 +1,728 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer +// object, creating another object (Reader or Writer) that also implements +// the interface but provides buffering and some help for textual I/O. 
+package bufio + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +const ( + defaultBufSize = 4096 +) + +var ( + ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte") + ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune") + ErrBufferFull = errors.New("bufio: buffer full") + ErrNegativeCount = errors.New("bufio: negative count") +) + +// Buffered input. + +// Reader implements buffering for an io.Reader object. +type Reader struct { + buf []byte + rd io.Reader + r, w int + err error + lastByte int + lastRuneSize int +} + +const minReadBufferSize = 16 +const maxConsecutiveEmptyReads = 100 + +// NewReaderSize returns a new Reader whose buffer has at least the specified +// size. If the argument io.Reader is already a Reader with large enough +// size, it returns the underlying Reader. +func NewReaderSize(rd io.Reader, size int) *Reader { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok && len(b.buf) >= size { + return b + } + if size < minReadBufferSize { + size = minReadBufferSize + } + r := new(Reader) + r.reset(make([]byte, size), rd) + return r +} + +// NewReader returns a new Reader whose buffer has the default size. +func NewReader(rd io.Reader) *Reader { + return NewReaderSize(rd, defaultBufSize) +} + +// Reset discards any buffered data, resets all state, and switches +// the buffered reader to read from r. +func (b *Reader) Reset(r io.Reader) { + b.reset(b.buf, r) +} + +func (b *Reader) reset(buf []byte, r io.Reader) { + *b = Reader{ + buf: buf, + rd: r, + lastByte: -1, + lastRuneSize: -1, + } +} + +var errNegativeRead = errors.New("bufio: reader returned negative count from Read") + +// fill reads a new chunk into the buffer. +func (b *Reader) fill() { + // Slide existing data to beginning. + if b.r > 0 { + copy(b.buf, b.buf[b.r:b.w]) + b.w -= b.r + b.r = 0 + } + + if b.w >= len(b.buf) { + panic("bufio: tried to fill full buffer") + } + + // Read new data: try a limited number of times. 
+ for i := maxConsecutiveEmptyReads; i > 0; i-- { + n, err := b.rd.Read(b.buf[b.w:]) + if n < 0 { + panic(errNegativeRead) + } + b.w += n + if err != nil { + b.err = err + return + } + if n > 0 { + return + } + } + b.err = io.ErrNoProgress +} + +func (b *Reader) readErr() error { + err := b.err + b.err = nil + return err +} + +// Peek returns the next n bytes without advancing the reader. The bytes stop +// being valid at the next read call. If Peek returns fewer than n bytes, it +// also returns an error explaining why the read is short. The error is +// ErrBufferFull if n is larger than b's buffer size. +func (b *Reader) Peek(n int) ([]byte, error) { + if n < 0 { + return nil, ErrNegativeCount + } + if n > len(b.buf) { + return nil, ErrBufferFull + } + // 0 <= n <= len(b.buf) + for b.w-b.r < n && b.err == nil { + b.fill() // b.w-b.r < len(b.buf) => buffer is not full + } + m := b.w - b.r + if m > n { + m = n + } + var err error + if m < n { + err = b.readErr() + if err == nil { + err = ErrBufferFull + } + } + return b.buf[b.r : b.r+m], err +} + +// Read reads data into p. +// It returns the number of bytes read into p. +// It calls Read at most once on the underlying Reader, +// hence n may be less than len(p). +// At EOF, the count will be zero and err will be io.EOF. +func (b *Reader) Read(p []byte) (n int, err error) { + n = len(p) + if n == 0 { + return 0, b.readErr() + } + if b.r == b.w { + if b.err != nil { + return 0, b.readErr() + } + if len(p) >= len(b.buf) { + // Large read, empty buffer. + // Read directly into p to avoid copy. 
+ n, b.err = b.rd.Read(p) + if n < 0 { + panic(errNegativeRead) + } + if n > 0 { + b.lastByte = int(p[n-1]) + b.lastRuneSize = -1 + } + return n, b.readErr() + } + b.fill() // buffer is empty + if b.w == b.r { + return 0, b.readErr() + } + } + + if n > b.w-b.r { + n = b.w - b.r + } + copy(p[0:n], b.buf[b.r:]) + b.r += n + b.lastByte = int(b.buf[b.r-1]) + b.lastRuneSize = -1 + return n, nil +} + +// ReadByte reads and returns a single byte. +// If no byte is available, returns an error. +func (b *Reader) ReadByte() (c byte, err error) { + b.lastRuneSize = -1 + for b.r == b.w { + if b.err != nil { + return 0, b.readErr() + } + b.fill() // buffer is empty + } + c = b.buf[b.r] + b.r++ + b.lastByte = int(c) + return c, nil +} + +// UnreadByte unreads the last byte. Only the most recently read byte can be unread. +func (b *Reader) UnreadByte() error { + if b.lastByte < 0 || b.r == 0 && b.w > 0 { + return ErrInvalidUnreadByte + } + // b.r > 0 || b.w == 0 + if b.r > 0 { + b.r-- + } else { + // b.r == 0 && b.w == 0 + b.w = 1 + } + b.buf[b.r] = byte(b.lastByte) + b.lastByte = -1 + b.lastRuneSize = -1 + return nil +} + +// ReadRune reads a single UTF-8 encoded Unicode character and returns the +// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte +// and returns unicode.ReplacementChar (U+FFFD) with a size of 1. +func (b *Reader) ReadRune() (r rune, size int, err error) { + for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) { + b.fill() // b.w-b.r < len(buf) => buffer is not full + } + b.lastRuneSize = -1 + if b.r == b.w { + return 0, 0, b.readErr() + } + r, size = rune(b.buf[b.r]), 1 + if r >= 0x80 { + r, size = utf8.DecodeRune(b.buf[b.r:b.w]) + } + b.r += size + b.lastByte = int(b.buf[b.r-1]) + b.lastRuneSize = size + return r, size, nil +} + +// UnreadRune unreads the last rune. If the most recent read operation on +// the buffer was not a ReadRune, UnreadRune returns an error. 
(In this +// regard it is stricter than UnreadByte, which will unread the last byte +// from any read operation.) +func (b *Reader) UnreadRune() error { + if b.lastRuneSize < 0 || b.r < b.lastRuneSize { + return ErrInvalidUnreadRune + } + b.r -= b.lastRuneSize + b.lastByte = -1 + b.lastRuneSize = -1 + return nil +} + +// Buffered returns the number of bytes that can be read from the current buffer. +func (b *Reader) Buffered() int { return b.w - b.r } + +// ReadSlice reads until the first occurrence of delim in the input, +// returning a slice pointing at the bytes in the buffer. +// The bytes stop being valid at the next read. +// If ReadSlice encounters an error before finding a delimiter, +// it returns all the data in the buffer and the error itself (often io.EOF). +// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim. +// Because the data returned from ReadSlice will be overwritten +// by the next I/O operation, most clients should use +// ReadBytes or ReadString instead. +// ReadSlice returns err != nil if and only if line does not end in delim. +func (b *Reader) ReadSlice(delim byte) (line []byte, err error) { + for { + // Search buffer. + if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 { + line = b.buf[b.r : b.r+i+1] + b.r += i + 1 + break + } + + // Pending error? + if b.err != nil { + line = b.buf[b.r:b.w] + b.r = b.w + err = b.readErr() + break + } + + // Buffer full? + if n := b.Buffered(); n >= len(b.buf) { + b.r = b.w + line = b.buf + err = ErrBufferFull + break + } + + b.fill() // buffer is not full + } + + // Handle last byte, if any. + if i := len(line) - 1; i >= 0 { + b.lastByte = int(line[i]) + } + + return +} + +// ReadN tries to read exactly n bytes. +// The bytes stop being valid at the next read call. +// If ReadN encounters an error before reading n bytes, +// it returns all the data in the buffer and the error itself (often io.EOF). 
+// ReadN fails with error ErrBufferFull
+// if the buffer fills without reading n bytes.
+ if b.r == 0 { + // should be unreachable + panic("bufio: tried to rewind past start of buffer") + } + b.r-- + line = line[:len(line)-1] + } + return line, true, nil + } + + if len(line) == 0 { + if err != nil { + line = nil + } + return + } + err = nil + + if line[len(line)-1] == '\n' { + drop := 1 + if len(line) > 1 && line[len(line)-2] == '\r' { + drop = 2 + } + line = line[:len(line)-drop] + } + return +} + +// ReadBytes reads until the first occurrence of delim in the input, +// returning a slice containing the data up to and including the delimiter. +// If ReadBytes encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). +// ReadBytes returns err != nil if and only if the returned data does not end in +// delim. +// For simple uses, a Scanner may be more convenient. +func (b *Reader) ReadBytes(delim byte) (line []byte, err error) { + // Use ReadSlice to look for array, + // accumulating full buffers. + var frag []byte + var full [][]byte + err = nil + + for { + var e error + frag, e = b.ReadSlice(delim) + if e == nil { // got final fragment + break + } + if e != ErrBufferFull { // unexpected error + err = e + break + } + + // Make a copy of the buffer. + buf := make([]byte, len(frag)) + copy(buf, frag) + full = append(full, buf) + } + + // Allocate new buffer to hold the full pieces and the fragment. + n := 0 + for i := range full { + n += len(full[i]) + } + n += len(frag) + + // Copy full pieces and fragment in. + buf := make([]byte, n) + n = 0 + for i := range full { + n += copy(buf[n:], full[i]) + } + copy(buf[n:], frag) + return buf, err +} + +// ReadString reads until the first occurrence of delim in the input, +// returning a string containing the data up to and including the delimiter. +// If ReadString encounters an error before finding a delimiter, +// it returns the data read before the error and the error itself (often io.EOF). 
+// ReadString returns err != nil if and only if the returned data does not end in +// delim. +// For simple uses, a Scanner may be more convenient. +func (b *Reader) ReadString(delim byte) (line string, err error) { + bytes, err := b.ReadBytes(delim) + line = string(bytes) + return line, err +} + +// WriteTo implements io.WriterTo. +func (b *Reader) WriteTo(w io.Writer) (n int64, err error) { + n, err = b.writeBuf(w) + if err != nil { + return + } + + if r, ok := b.rd.(io.WriterTo); ok { + m, err := r.WriteTo(w) + n += m + return n, err + } + + if w, ok := w.(io.ReaderFrom); ok { + m, err := w.ReadFrom(b.rd) + n += m + return n, err + } + + if b.w-b.r < len(b.buf) { + b.fill() // buffer not full + } + + for b.r < b.w { + // b.r < b.w => buffer is not empty + m, err := b.writeBuf(w) + n += m + if err != nil { + return n, err + } + b.fill() // buffer is empty + } + + if b.err == io.EOF { + b.err = nil + } + + return n, b.readErr() +} + +// writeBuf writes the Reader's buffer to the writer. +func (b *Reader) writeBuf(w io.Writer) (int64, error) { + n, err := w.Write(b.buf[b.r:b.w]) + if n < b.r-b.w { + panic(errors.New("bufio: writer did not write all data")) + } + b.r += n + return int64(n), err +} + +// buffered output + +// Writer implements buffering for an io.Writer object. +// If an error occurs writing to a Writer, no more data will be +// accepted and all subsequent writes will return the error. +// After all data has been written, the client should call the +// Flush method to guarantee all data has been forwarded to +// the underlying io.Writer. +type Writer struct { + err error + buf []byte + n int + wr io.Writer +} + +// NewWriterSize returns a new Writer whose buffer has at least the specified +// size. If the argument io.Writer is already a Writer with large enough +// size, it returns the underlying Writer. +func NewWriterSize(w io.Writer, size int) *Writer { + // Is it already a Writer? 
+ b, ok := w.(*Writer) + if ok && len(b.buf) >= size { + return b + } + if size <= 0 { + size = defaultBufSize + } + return &Writer{ + buf: make([]byte, size), + wr: w, + } +} + +// NewWriter returns a new Writer whose buffer has the default size. +func NewWriter(w io.Writer) *Writer { + return NewWriterSize(w, defaultBufSize) +} + +// Reset discards any unflushed buffered data, clears any error, and +// resets b to write its output to w. +func (b *Writer) Reset(w io.Writer) { + b.err = nil + b.n = 0 + b.wr = w +} + +// Flush writes any buffered data to the underlying io.Writer. +func (b *Writer) Flush() error { + err := b.flush() + return err +} + +func (b *Writer) flush() error { + if b.err != nil { + return b.err + } + if b.n == 0 { + return nil + } + n, err := b.wr.Write(b.buf[0:b.n]) + if n < b.n && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if n > 0 && n < b.n { + copy(b.buf[0:b.n-n], b.buf[n:b.n]) + } + b.n -= n + b.err = err + return err + } + b.n = 0 + return nil +} + +// Available returns how many bytes are unused in the buffer. +func (b *Writer) Available() int { return len(b.buf) - b.n } + +// Buffered returns the number of bytes that have been written into the current buffer. +func (b *Writer) Buffered() int { return b.n } + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (b *Writer) Write(p []byte) (nn int, err error) { + for len(p) > b.Available() && b.err == nil { + var n int + if b.Buffered() == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, b.err = b.wr.Write(p) + } else { + n = copy(b.buf[b.n:], p) + b.n += n + b.flush() + } + nn += n + p = p[n:] + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], p) + b.n += n + nn += n + return nn, nil +} + +// WriteByte writes a single byte. 
+func (b *Writer) WriteByte(c byte) error { + if b.err != nil { + return b.err + } + if b.Available() <= 0 && b.flush() != nil { + return b.err + } + b.buf[b.n] = c + b.n++ + return nil +} + +// WriteRune writes a single Unicode code point, returning +// the number of bytes written and any error. +func (b *Writer) WriteRune(r rune) (size int, err error) { + if r < utf8.RuneSelf { + err = b.WriteByte(byte(r)) + if err != nil { + return 0, err + } + return 1, nil + } + if b.err != nil { + return 0, b.err + } + n := b.Available() + if n < utf8.UTFMax { + if b.flush(); b.err != nil { + return 0, b.err + } + n = b.Available() + if n < utf8.UTFMax { + // Can only happen if buffer is silly small. + return b.WriteString(string(r)) + } + } + size = utf8.EncodeRune(b.buf[b.n:], r) + b.n += size + return size, nil +} + +// WriteString writes a string. +// It returns the number of bytes written. +// If the count is less than len(s), it also returns an error explaining +// why the write is short. +func (b *Writer) WriteString(s string) (int, error) { + nn := 0 + for len(s) > b.Available() && b.err == nil { + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + s = s[n:] + b.flush() + } + if b.err != nil { + return nn, b.err + } + n := copy(b.buf[b.n:], s) + b.n += n + nn += n + return nn, nil +} + +// ReadFrom implements io.ReaderFrom. +func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) { + if b.Buffered() == 0 { + if w, ok := b.wr.(io.ReaderFrom); ok { + return w.ReadFrom(r) + } + } + var m int + for { + if b.Available() == 0 { + if err1 := b.flush(); err1 != nil { + return n, err1 + } + } + nr := 0 + for nr < maxConsecutiveEmptyReads { + m, err = r.Read(b.buf[b.n:]) + if m != 0 || err != nil { + break + } + nr++ + } + if nr == maxConsecutiveEmptyReads { + return n, io.ErrNoProgress + } + b.n += m + n += int64(m) + if err != nil { + break + } + } + if err == io.EOF { + // If we filled the buffer exactly, flush pre-emptively. 
+ if b.Available() == 0 { + err = b.flush() + } else { + err = nil + } + } + return n, err +} + +// buffered input and output + +// ReadWriter stores pointers to a Reader and a Writer. +// It implements io.ReadWriter. +type ReadWriter struct { + *Reader + *Writer +} + +// NewReadWriter allocates a new ReadWriter that dispatches to r and w. +func NewReadWriter(r *Reader, w *Writer) *ReadWriter { + return &ReadWriter{r, w} +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go new file mode 100644 index 00000000000..f19d9bd282a --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go @@ -0,0 +1,1418 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bufio_test + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + "testing/iotest" + "time" + "unicode/utf8" + + . "gopkg.in/bufio.v1" +) + +// Reads from a reader and rot13s the result. 
+type rot13Reader struct { + r io.Reader +} + +func newRot13Reader(r io.Reader) *rot13Reader { + r13 := new(rot13Reader) + r13.r = r + return r13 +} + +func (r13 *rot13Reader) Read(p []byte) (int, error) { + n, err := r13.r.Read(p) + if err != nil { + return n, err + } + for i := 0; i < n; i++ { + c := p[i] | 0x20 // lowercase byte + if 'a' <= c && c <= 'm' { + p[i] += 13 + } else if 'n' <= c && c <= 'z' { + p[i] -= 13 + } + } + return n, nil +} + +// Call ReadByte to accumulate the text of a file +func readBytes(buf *Reader) string { + var b [1000]byte + nb := 0 + for { + c, err := buf.ReadByte() + if err == io.EOF { + break + } + if err == nil { + b[nb] = c + nb++ + } else if err != iotest.ErrTimeout { + panic("Data: " + err.Error()) + } + } + return string(b[0:nb]) +} + +func TestReaderSimple(t *testing.T) { + data := "hello world" + b := NewReader(strings.NewReader(data)) + if s := readBytes(b); s != "hello world" { + t.Errorf("simple hello world test failed: got %q", s) + } + + b = NewReader(newRot13Reader(strings.NewReader(data))) + if s := readBytes(b); s != "uryyb jbeyq" { + t.Errorf("rot13 hello world test failed: got %q", s) + } +} + +type readMaker struct { + name string + fn func(io.Reader) io.Reader +} + +var readMakers = []readMaker{ + {"full", func(r io.Reader) io.Reader { return r }}, + {"byte", iotest.OneByteReader}, + {"half", iotest.HalfReader}, + {"data+err", iotest.DataErrReader}, + {"timeout", iotest.TimeoutReader}, +} + +// Call ReadString (which ends up calling everything else) +// to accumulate the text of a file. 
+func readLines(b *Reader) string { + s := "" + for { + s1, err := b.ReadString('\n') + if err == io.EOF { + break + } + if err != nil && err != iotest.ErrTimeout { + panic("GetLines: " + err.Error()) + } + s += s1 + } + return s +} + +// Call Read to accumulate the text of a file +func reads(buf *Reader, m int) string { + var b [1000]byte + nb := 0 + for { + n, err := buf.Read(b[nb : nb+m]) + nb += n + if err == io.EOF { + break + } + } + return string(b[0:nb]) +} + +type bufReader struct { + name string + fn func(*Reader) string +} + +var bufreaders = []bufReader{ + {"1", func(b *Reader) string { return reads(b, 1) }}, + {"2", func(b *Reader) string { return reads(b, 2) }}, + {"3", func(b *Reader) string { return reads(b, 3) }}, + {"4", func(b *Reader) string { return reads(b, 4) }}, + {"5", func(b *Reader) string { return reads(b, 5) }}, + {"7", func(b *Reader) string { return reads(b, 7) }}, + {"bytes", readBytes}, + {"lines", readLines}, +} + +const minReadBufferSize = 16 + +var bufsizes = []int{ + 0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096, +} + +func TestReader(t *testing.T) { + var texts [31]string + str := "" + all := "" + for i := 0; i < len(texts)-1; i++ { + texts[i] = str + "\n" + all += texts[i] + str += string(i%26 + 'a') + } + texts[len(texts)-1] = all + + for h := 0; h < len(texts); h++ { + text := texts[h] + for i := 0; i < len(readMakers); i++ { + for j := 0; j < len(bufreaders); j++ { + for k := 0; k < len(bufsizes); k++ { + readmaker := readMakers[i] + bufreader := bufreaders[j] + bufsize := bufsizes[k] + read := readmaker.fn(strings.NewReader(text)) + buf := NewReaderSize(read, bufsize) + s := bufreader.fn(buf) + if s != text { + t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q", + readmaker.name, bufreader.name, bufsize, text, s) + } + } + } + } + } +} + +type zeroReader struct{} + +func (zeroReader) Read(p []byte) (int, error) { + return 0, nil +} + +func TestZeroReader(t *testing.T) { + var z zeroReader + r := NewReader(z) 
+ + c := make(chan error) + go func() { + _, err := r.ReadByte() + c <- err + }() + + select { + case err := <-c: + if err == nil { + t.Error("error expected") + } else if err != io.ErrNoProgress { + t.Error("unexpected error:", err) + } + case <-time.After(time.Second): + t.Error("test timed out (endless loop in ReadByte?)") + } +} + +// A StringReader delivers its data one string segment at a time via Read. +type StringReader struct { + data []string + step int +} + +func (r *StringReader) Read(p []byte) (n int, err error) { + if r.step < len(r.data) { + s := r.data[r.step] + n = copy(p, s) + r.step++ + } else { + err = io.EOF + } + return +} + +func readRuneSegments(t *testing.T, segments []string) { + got := "" + want := strings.Join(segments, "") + r := NewReader(&StringReader{data: segments}) + for { + r, _, err := r.ReadRune() + if err != nil { + if err != io.EOF { + return + } + break + } + got += string(r) + } + if got != want { + t.Errorf("segments=%v got=%s want=%s", segments, got, want) + } +} + +var segmentList = [][]string{ + {}, + {""}, + {"日", "本語"}, + {"\u65e5", "\u672c", "\u8a9e"}, + {"\U000065e5", "\U0000672c", "\U00008a9e"}, + {"\xe6", "\x97\xa5\xe6", "\x9c\xac\xe8\xaa\x9e"}, + {"Hello", ", ", "World", "!"}, + {"Hello", ", ", "", "World", "!"}, +} + +func TestReadRune(t *testing.T) { + for _, s := range segmentList { + readRuneSegments(t, s) + } +} + +func TestUnreadRune(t *testing.T) { + segments := []string{"Hello, world:", "日本語"} + r := NewReader(&StringReader{data: segments}) + got := "" + want := strings.Join(segments, "") + // Normal execution. + for { + r1, _, err := r.ReadRune() + if err != nil { + if err != io.EOF { + t.Error("unexpected error on ReadRune:", err) + } + break + } + got += string(r1) + // Put it back and read it again. 
+ if err = r.UnreadRune(); err != nil { + t.Fatal("unexpected error on UnreadRune:", err) + } + r2, _, err := r.ReadRune() + if err != nil { + t.Fatal("unexpected error reading after unreading:", err) + } + if r1 != r2 { + t.Fatalf("incorrect rune after unread: got %c, want %c", r1, r2) + } + } + if got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestReaderUnreadByte(t *testing.T) { + segments := []string{"Hello, ", "world"} + r := NewReader(&StringReader{data: segments}) + got := "" + want := strings.Join(segments, "") + // Normal execution. + for { + b1, err := r.ReadByte() + if err != nil { + if err != io.EOF { + t.Error("unexpected error on ReadByte:", err) + } + break + } + got += string(b1) + // Put it back and read it again. + if err = r.UnreadByte(); err != nil { + t.Fatal("unexpected error on UnreadByte:", err) + } + b2, err := r.ReadByte() + if err != nil { + t.Fatal("unexpected error reading after unreading:", err) + } + if b1 != b2 { + t.Fatalf("incorrect byte after unread: got %q, want %q", b1, b2) + } + } + if got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestUnreadByteMultiple(t *testing.T) { + segments := []string{"Hello, ", "world"} + data := strings.Join(segments, "") + for n := 0; n <= len(data); n++ { + r := NewReader(&StringReader{data: segments}) + // Read n bytes. + for i := 0; i < n; i++ { + b, err := r.ReadByte() + if err != nil { + t.Fatalf("n = %d: unexpected error on ReadByte: %v", n, err) + } + if b != data[i] { + t.Fatalf("n = %d: incorrect byte returned from ReadByte: got %q, want %q", n, b, data[i]) + } + } + // Unread one byte if there is one. + if n > 0 { + if err := r.UnreadByte(); err != nil { + t.Errorf("n = %d: unexpected error on UnreadByte: %v", n, err) + } + } + // Test that we cannot unread any further. 
+ if err := r.UnreadByte(); err == nil { + t.Errorf("n = %d: expected error on UnreadByte", n) + } + } +} + +func TestUnreadByteOthers(t *testing.T) { + // A list of readers to use in conjunction with UnreadByte. + var readers = []func(*Reader, byte) ([]byte, error){ + (*Reader).ReadBytes, + (*Reader).ReadSlice, + func(r *Reader, delim byte) ([]byte, error) { + data, err := r.ReadString(delim) + return []byte(data), err + }, + // ReadLine doesn't fit the data/pattern easily + // so we leave it out. It should be covered via + // the ReadSlice test since ReadLine simply calls + // ReadSlice, and it's that function that handles + // the last byte. + } + + // Try all readers with UnreadByte. + for rno, read := range readers { + // Some input data that is longer than the minimum reader buffer size. + const n = 10 + var buf bytes.Buffer + for i := 0; i < n; i++ { + buf.WriteString("abcdefg") + } + + r := NewReaderSize(&buf, minReadBufferSize) + readTo := func(delim byte, want string) { + data, err := read(r, delim) + if err != nil { + t.Fatalf("#%d: unexpected error reading to %c: %v", rno, delim, err) + } + if got := string(data); got != want { + t.Fatalf("#%d: got %q, want %q", rno, got, want) + } + } + + // Read the data with occasional UnreadByte calls. + for i := 0; i < n; i++ { + readTo('d', "abcd") + for j := 0; j < 3; j++ { + if err := r.UnreadByte(); err != nil { + t.Fatalf("#%d: unexpected error on UnreadByte: %v", rno, err) + } + readTo('d', "d") + } + readTo('g', "efg") + } + + // All data should have been read. + _, err := r.ReadByte() + if err != io.EOF { + t.Errorf("#%d: got error %v; want EOF", rno, err) + } + } +} + +// Test that UnreadRune fails if the preceding operation was not a ReadRune. 
+func TestUnreadRuneError(t *testing.T) { + buf := make([]byte, 3) // All runes in this test are 3 bytes long + r := NewReader(&StringReader{data: []string{"日本語日本語日本語"}}) + if r.UnreadRune() == nil { + t.Error("expected error on UnreadRune from fresh buffer") + } + _, _, err := r.ReadRune() + if err != nil { + t.Error("unexpected error on ReadRune (1):", err) + } + if err = r.UnreadRune(); err != nil { + t.Error("unexpected error on UnreadRune (1):", err) + } + if r.UnreadRune() == nil { + t.Error("expected error after UnreadRune (1)") + } + // Test error after Read. + _, _, err = r.ReadRune() // reset state + if err != nil { + t.Error("unexpected error on ReadRune (2):", err) + } + _, err = r.Read(buf) + if err != nil { + t.Error("unexpected error on Read (2):", err) + } + if r.UnreadRune() == nil { + t.Error("expected error after Read (2)") + } + // Test error after ReadByte. + _, _, err = r.ReadRune() // reset state + if err != nil { + t.Error("unexpected error on ReadRune (2):", err) + } + for _ = range buf { + _, err = r.ReadByte() + if err != nil { + t.Error("unexpected error on ReadByte (2):", err) + } + } + if r.UnreadRune() == nil { + t.Error("expected error after ReadByte") + } + // Test error after UnreadByte. 
+ _, _, err = r.ReadRune() // reset state + if err != nil { + t.Error("unexpected error on ReadRune (3):", err) + } + _, err = r.ReadByte() + if err != nil { + t.Error("unexpected error on ReadByte (3):", err) + } + err = r.UnreadByte() + if err != nil { + t.Error("unexpected error on UnreadByte (3):", err) + } + if r.UnreadRune() == nil { + t.Error("expected error after UnreadByte (3)") + } +} + +func TestUnreadRuneAtEOF(t *testing.T) { + // UnreadRune/ReadRune should error at EOF (was a bug; used to panic) + r := NewReader(strings.NewReader("x")) + r.ReadRune() + r.ReadRune() + r.UnreadRune() + _, _, err := r.ReadRune() + if err == nil { + t.Error("expected error at EOF") + } else if err != io.EOF { + t.Error("expected EOF; got", err) + } +} + +func TestReadWriteRune(t *testing.T) { + const NRune = 1000 + byteBuf := new(bytes.Buffer) + w := NewWriter(byteBuf) + // Write the runes out using WriteRune + buf := make([]byte, utf8.UTFMax) + for r := rune(0); r < NRune; r++ { + size := utf8.EncodeRune(buf, r) + nbytes, err := w.WriteRune(r) + if err != nil { + t.Fatalf("WriteRune(0x%x) error: %s", r, err) + } + if nbytes != size { + t.Fatalf("WriteRune(0x%x) expected %d, got %d", r, size, nbytes) + } + } + w.Flush() + + r := NewReader(byteBuf) + // Read them back with ReadRune + for r1 := rune(0); r1 < NRune; r1++ { + size := utf8.EncodeRune(buf, r1) + nr, nbytes, err := r.ReadRune() + if nr != r1 || nbytes != size || err != nil { + t.Fatalf("ReadRune(0x%x) got 0x%x,%d not 0x%x,%d (err=%s)", r1, nr, nbytes, r1, size, err) + } + } +} + +func TestWriter(t *testing.T) { + var data [8192]byte + + for i := 0; i < len(data); i++ { + data[i] = byte(' ' + i%('~'-' ')) + } + w := new(bytes.Buffer) + for i := 0; i < len(bufsizes); i++ { + for j := 0; j < len(bufsizes); j++ { + nwrite := bufsizes[i] + bs := bufsizes[j] + + // Write nwrite bytes using buffer size bs. + // Check that the right amount makes it out + // and that the data is correct. 
+ + w.Reset() + buf := NewWriterSize(w, bs) + context := fmt.Sprintf("nwrite=%d bufsize=%d", nwrite, bs) + n, e1 := buf.Write(data[0:nwrite]) + if e1 != nil || n != nwrite { + t.Errorf("%s: buf.Write %d = %d, %v", context, nwrite, n, e1) + continue + } + if e := buf.Flush(); e != nil { + t.Errorf("%s: buf.Flush = %v", context, e) + } + + written := w.Bytes() + if len(written) != nwrite { + t.Errorf("%s: %d bytes written", context, len(written)) + } + for l := 0; l < len(written); l++ { + if written[i] != data[i] { + t.Errorf("wrong bytes written") + t.Errorf("want=%q", data[0:len(written)]) + t.Errorf("have=%q", written) + } + } + } + } +} + +// Check that write errors are returned properly. + +type errorWriterTest struct { + n, m int + err error + expect error +} + +func (w errorWriterTest) Write(p []byte) (int, error) { + return len(p) * w.n / w.m, w.err +} + +var errorWriterTests = []errorWriterTest{ + {0, 1, nil, io.ErrShortWrite}, + {1, 2, nil, io.ErrShortWrite}, + {1, 1, nil, nil}, + {0, 1, io.ErrClosedPipe, io.ErrClosedPipe}, + {1, 2, io.ErrClosedPipe, io.ErrClosedPipe}, + {1, 1, io.ErrClosedPipe, io.ErrClosedPipe}, +} + +func TestWriteErrors(t *testing.T) { + for _, w := range errorWriterTests { + buf := NewWriter(w) + _, e := buf.Write([]byte("hello world")) + if e != nil { + t.Errorf("Write hello to %v: %v", w, e) + continue + } + // Two flushes, to verify the error is sticky. + for i := 0; i < 2; i++ { + e = buf.Flush() + if e != w.expect { + t.Errorf("Flush %d/2 %v: got %v, wanted %v", i+1, w, e, w.expect) + } + } + } +} + +func TestNewReaderSizeIdempotent(t *testing.T) { + const BufSize = 1000 + b := NewReaderSize(strings.NewReader("hello world"), BufSize) + // Does it recognize itself? + b1 := NewReaderSize(b, BufSize) + if b1 != b { + t.Error("NewReaderSize did not detect underlying Reader") + } + // Does it wrap if existing buffer is too small? 
+ b2 := NewReaderSize(b, 2*BufSize) + if b2 == b { + t.Error("NewReaderSize did not enlarge buffer") + } +} + +func TestNewWriterSizeIdempotent(t *testing.T) { + const BufSize = 1000 + b := NewWriterSize(new(bytes.Buffer), BufSize) + // Does it recognize itself? + b1 := NewWriterSize(b, BufSize) + if b1 != b { + t.Error("NewWriterSize did not detect underlying Writer") + } + // Does it wrap if existing buffer is too small? + b2 := NewWriterSize(b, 2*BufSize) + if b2 == b { + t.Error("NewWriterSize did not enlarge buffer") + } +} + +func TestWriteString(t *testing.T) { + const BufSize = 8 + buf := new(bytes.Buffer) + b := NewWriterSize(buf, BufSize) + b.WriteString("0") // easy + b.WriteString("123456") // still easy + b.WriteString("7890") // easy after flush + b.WriteString("abcdefghijklmnopqrstuvwxy") // hard + b.WriteString("z") + if err := b.Flush(); err != nil { + t.Error("WriteString", err) + } + s := "01234567890abcdefghijklmnopqrstuvwxyz" + if string(buf.Bytes()) != s { + t.Errorf("WriteString wants %q gets %q", s, string(buf.Bytes())) + } +} + +func TestBufferFull(t *testing.T) { + const longString = "And now, hello, world! It is the time for all good men to come to the aid of their party" + buf := NewReaderSize(strings.NewReader(longString), minReadBufferSize) + line, err := buf.ReadSlice('!') + if string(line) != "And now, hello, " || err != ErrBufferFull { + t.Errorf("first ReadSlice(,) = %q, %v", line, err) + } + line, err = buf.ReadSlice('!') + if string(line) != "world!" || err != nil { + t.Errorf("second ReadSlice(,) = %q, %v", line, err) + } +} + +func TestPeek(t *testing.T) { + p := make([]byte, 10) + // string is 16 (minReadBufferSize) long. 
+ buf := NewReaderSize(strings.NewReader("abcdefghijklmnop"), minReadBufferSize) + if s, err := buf.Peek(1); string(s) != "a" || err != nil { + t.Fatalf("want %q got %q, err=%v", "a", string(s), err) + } + if s, err := buf.Peek(4); string(s) != "abcd" || err != nil { + t.Fatalf("want %q got %q, err=%v", "abcd", string(s), err) + } + if _, err := buf.Peek(-1); err != ErrNegativeCount { + t.Fatalf("want ErrNegativeCount got %v", err) + } + if _, err := buf.Peek(32); err != ErrBufferFull { + t.Fatalf("want ErrBufFull got %v", err) + } + if _, err := buf.Read(p[0:3]); string(p[0:3]) != "abc" || err != nil { + t.Fatalf("want %q got %q, err=%v", "abc", string(p[0:3]), err) + } + if s, err := buf.Peek(1); string(s) != "d" || err != nil { + t.Fatalf("want %q got %q, err=%v", "d", string(s), err) + } + if s, err := buf.Peek(2); string(s) != "de" || err != nil { + t.Fatalf("want %q got %q, err=%v", "de", string(s), err) + } + if _, err := buf.Read(p[0:3]); string(p[0:3]) != "def" || err != nil { + t.Fatalf("want %q got %q, err=%v", "def", string(p[0:3]), err) + } + if s, err := buf.Peek(4); string(s) != "ghij" || err != nil { + t.Fatalf("want %q got %q, err=%v", "ghij", string(s), err) + } + if _, err := buf.Read(p[0:]); string(p[0:]) != "ghijklmnop" || err != nil { + t.Fatalf("want %q got %q, err=%v", "ghijklmnop", string(p[0:minReadBufferSize]), err) + } + if s, err := buf.Peek(0); string(s) != "" || err != nil { + t.Fatalf("want %q got %q, err=%v", "", string(s), err) + } + if _, err := buf.Peek(1); err != io.EOF { + t.Fatalf("want EOF got %v", err) + } + + // Test for issue 3022, not exposing a reader's error on a successful Peek. 
+ buf = NewReaderSize(dataAndEOFReader("abcd"), 32) + if s, err := buf.Peek(2); string(s) != "ab" || err != nil { + t.Errorf(`Peek(2) on "abcd", EOF = %q, %v; want "ab", nil`, string(s), err) + } + if s, err := buf.Peek(4); string(s) != "abcd" || err != nil { + t.Errorf(`Peek(4) on "abcd", EOF = %q, %v; want "abcd", nil`, string(s), err) + } + if n, err := buf.Read(p[0:5]); string(p[0:n]) != "abcd" || err != nil { + t.Fatalf("Read after peek = %q, %v; want abcd, EOF", p[0:n], err) + } + if n, err := buf.Read(p[0:1]); string(p[0:n]) != "" || err != io.EOF { + t.Fatalf(`second Read after peek = %q, %v; want "", EOF`, p[0:n], err) + } +} + +type dataAndEOFReader string + +func (r dataAndEOFReader) Read(p []byte) (int, error) { + return copy(p, r), io.EOF +} + +func TestPeekThenUnreadRune(t *testing.T) { + // This sequence used to cause a crash. + r := NewReader(strings.NewReader("x")) + r.ReadRune() + r.Peek(1) + r.UnreadRune() + r.ReadRune() // Used to panic here +} + +var testOutput = []byte("0123456789abcdefghijklmnopqrstuvwxy") +var testInput = []byte("012\n345\n678\n9ab\ncde\nfgh\nijk\nlmn\nopq\nrst\nuvw\nxy") +var testInputrn = []byte("012\r\n345\r\n678\r\n9ab\r\ncde\r\nfgh\r\nijk\r\nlmn\r\nopq\r\nrst\r\nuvw\r\nxy\r\n\n\r\n") + +// TestReader wraps a []byte and returns reads of a specific length. 
+type testReader struct { + data []byte + stride int +} + +func (t *testReader) Read(buf []byte) (n int, err error) { + n = t.stride + if n > len(t.data) { + n = len(t.data) + } + if n > len(buf) { + n = len(buf) + } + copy(buf, t.data) + t.data = t.data[n:] + if len(t.data) == 0 { + err = io.EOF + } + return +} + +func testReadLine(t *testing.T, input []byte) { + //for stride := 1; stride < len(input); stride++ { + for stride := 1; stride < 2; stride++ { + done := 0 + reader := testReader{input, stride} + l := NewReaderSize(&reader, len(input)+1) + for { + line, isPrefix, err := l.ReadLine() + if len(line) > 0 && err != nil { + t.Errorf("ReadLine returned both data and error: %s", err) + } + if isPrefix { + t.Errorf("ReadLine returned prefix") + } + if err != nil { + if err != io.EOF { + t.Fatalf("Got unknown error: %s", err) + } + break + } + if want := testOutput[done : done+len(line)]; !bytes.Equal(want, line) { + t.Errorf("Bad line at stride %d: want: %x got: %x", stride, want, line) + } + done += len(line) + } + if done != len(testOutput) { + t.Errorf("ReadLine didn't return everything: got: %d, want: %d (stride: %d)", done, len(testOutput), stride) + } + } +} + +func TestReadLine(t *testing.T) { + testReadLine(t, testInput) + testReadLine(t, testInputrn) +} + +func TestLineTooLong(t *testing.T) { + data := make([]byte, 0) + for i := 0; i < minReadBufferSize*5/2; i++ { + data = append(data, '0'+byte(i%10)) + } + buf := bytes.NewReader(data) + l := NewReaderSize(buf, minReadBufferSize) + line, isPrefix, err := l.ReadLine() + if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil { + t.Errorf("bad result for first line: got %q want %q %v", line, data[:minReadBufferSize], err) + } + data = data[len(line):] + line, isPrefix, err = l.ReadLine() + if !isPrefix || !bytes.Equal(line, data[:minReadBufferSize]) || err != nil { + t.Errorf("bad result for second line: got %q want %q %v", line, data[:minReadBufferSize], err) + } + data = 
data[len(line):] + line, isPrefix, err = l.ReadLine() + if isPrefix || !bytes.Equal(line, data[:minReadBufferSize/2]) || err != nil { + t.Errorf("bad result for third line: got %q want %q %v", line, data[:minReadBufferSize/2], err) + } + line, isPrefix, err = l.ReadLine() + if isPrefix || err == nil { + t.Errorf("expected no more lines: %x %s", line, err) + } +} + +func TestReadAfterLines(t *testing.T) { + line1 := "this is line1" + restData := "this is line2\nthis is line 3\n" + inbuf := bytes.NewReader([]byte(line1 + "\n" + restData)) + outbuf := new(bytes.Buffer) + maxLineLength := len(line1) + len(restData)/2 + l := NewReaderSize(inbuf, maxLineLength) + line, isPrefix, err := l.ReadLine() + if isPrefix || err != nil || string(line) != line1 { + t.Errorf("bad result for first line: isPrefix=%v err=%v line=%q", isPrefix, err, string(line)) + } + n, err := io.Copy(outbuf, l) + if int(n) != len(restData) || err != nil { + t.Errorf("bad result for Read: n=%d err=%v", n, err) + } + if outbuf.String() != restData { + t.Errorf("bad result for Read: got %q; expected %q", outbuf.String(), restData) + } +} + +func TestReadEmptyBuffer(t *testing.T) { + l := NewReaderSize(new(bytes.Buffer), minReadBufferSize) + line, isPrefix, err := l.ReadLine() + if err != io.EOF { + t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err) + } +} + +func TestLinesAfterRead(t *testing.T) { + l := NewReaderSize(bytes.NewReader([]byte("foo")), minReadBufferSize) + _, err := ioutil.ReadAll(l) + if err != nil { + t.Error(err) + return + } + + line, isPrefix, err := l.ReadLine() + if err != io.EOF { + t.Errorf("expected EOF from ReadLine, got '%s' %t %s", line, isPrefix, err) + } +} + +func TestReadLineNonNilLineOrError(t *testing.T) { + r := NewReader(strings.NewReader("line 1\n")) + for i := 0; i < 2; i++ { + l, _, err := r.ReadLine() + if l != nil && err != nil { + t.Fatalf("on line %d/2; ReadLine=%#v, %v; want non-nil line or Error, but not both", + i+1, l, err) + } + } 
+} + +type readLineResult struct { + line []byte + isPrefix bool + err error +} + +var readLineNewlinesTests = []struct { + input string + expect []readLineResult +}{ + {"012345678901234\r\n012345678901234\r\n", []readLineResult{ + {[]byte("012345678901234"), true, nil}, + {nil, false, nil}, + {[]byte("012345678901234"), true, nil}, + {nil, false, nil}, + {nil, false, io.EOF}, + }}, + {"0123456789012345\r012345678901234\r", []readLineResult{ + {[]byte("0123456789012345"), true, nil}, + {[]byte("\r012345678901234"), true, nil}, + {[]byte("\r"), false, nil}, + {nil, false, io.EOF}, + }}, +} + +func TestReadLineNewlines(t *testing.T) { + for _, e := range readLineNewlinesTests { + testReadLineNewlines(t, e.input, e.expect) + } +} + +func testReadLineNewlines(t *testing.T, input string, expect []readLineResult) { + b := NewReaderSize(strings.NewReader(input), minReadBufferSize) + for i, e := range expect { + line, isPrefix, err := b.ReadLine() + if !bytes.Equal(line, e.line) { + t.Errorf("%q call %d, line == %q, want %q", input, i, line, e.line) + return + } + if isPrefix != e.isPrefix { + t.Errorf("%q call %d, isPrefix == %v, want %v", input, i, isPrefix, e.isPrefix) + return + } + if err != e.err { + t.Errorf("%q call %d, err == %v, want %v", input, i, err, e.err) + return + } + } +} + +func createTestInput(n int) []byte { + input := make([]byte, n) + for i := range input { + // 101 and 251 are arbitrary prime numbers. + // The idea is to create an input sequence + // which doesn't repeat too frequently. 
+ input[i] = byte(i % 251) + if i%101 == 0 { + input[i] ^= byte(i / 101) + } + } + return input +} + +func TestReaderWriteTo(t *testing.T) { + input := createTestInput(8192) + r := NewReader(onlyReader{bytes.NewReader(input)}) + w := new(bytes.Buffer) + if n, err := r.WriteTo(w); err != nil || n != int64(len(input)) { + t.Fatalf("r.WriteTo(w) = %d, %v, want %d, nil", n, err, len(input)) + } + + for i, val := range w.Bytes() { + if val != input[i] { + t.Errorf("after write: out[%d] = %#x, want %#x", i, val, input[i]) + } + } +} + +type errorWriterToTest struct { + rn, wn int + rerr, werr error + expected error +} + +func (r errorWriterToTest) Read(p []byte) (int, error) { + return len(p) * r.rn, r.rerr +} + +func (w errorWriterToTest) Write(p []byte) (int, error) { + return len(p) * w.wn, w.werr +} + +var errorWriterToTests = []errorWriterToTest{ + {1, 0, nil, io.ErrClosedPipe, io.ErrClosedPipe}, + {0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe}, + {0, 0, io.ErrUnexpectedEOF, io.ErrClosedPipe, io.ErrClosedPipe}, + {0, 1, io.EOF, nil, nil}, +} + +func TestReaderWriteToErrors(t *testing.T) { + for i, rw := range errorWriterToTests { + r := NewReader(rw) + if _, err := r.WriteTo(rw); err != rw.expected { + t.Errorf("r.WriteTo(errorWriterToTests[%d]) = _, %v, want _,%v", i, err, rw.expected) + } + } +} + +func TestWriterReadFrom(t *testing.T) { + ws := []func(io.Writer) io.Writer{ + func(w io.Writer) io.Writer { return onlyWriter{w} }, + func(w io.Writer) io.Writer { return w }, + } + + rs := []func(io.Reader) io.Reader{ + iotest.DataErrReader, + func(r io.Reader) io.Reader { return r }, + } + + for ri, rfunc := range rs { + for wi, wfunc := range ws { + input := createTestInput(8192) + b := new(bytes.Buffer) + w := NewWriter(wfunc(b)) + r := rfunc(bytes.NewReader(input)) + if n, err := w.ReadFrom(r); err != nil || n != int64(len(input)) { + t.Errorf("ws[%d],rs[%d]: w.ReadFrom(r) = %d, %v, want %d, nil", wi, ri, n, err, len(input)) + continue + } + if err := w.Flush(); 
err != nil { + t.Errorf("Flush returned %v", err) + continue + } + if got, want := b.String(), string(input); got != want { + t.Errorf("ws[%d], rs[%d]:\ngot %q\nwant %q\n", wi, ri, got, want) + } + } + } +} + +type errorReaderFromTest struct { + rn, wn int + rerr, werr error + expected error +} + +func (r errorReaderFromTest) Read(p []byte) (int, error) { + return len(p) * r.rn, r.rerr +} + +func (w errorReaderFromTest) Write(p []byte) (int, error) { + return len(p) * w.wn, w.werr +} + +var errorReaderFromTests = []errorReaderFromTest{ + {0, 1, io.EOF, nil, nil}, + {1, 1, io.EOF, nil, nil}, + {0, 1, io.ErrClosedPipe, nil, io.ErrClosedPipe}, + {0, 0, io.ErrClosedPipe, io.ErrShortWrite, io.ErrClosedPipe}, + {1, 0, nil, io.ErrShortWrite, io.ErrShortWrite}, +} + +func TestWriterReadFromErrors(t *testing.T) { + for i, rw := range errorReaderFromTests { + w := NewWriter(rw) + if _, err := w.ReadFrom(rw); err != rw.expected { + t.Errorf("w.ReadFrom(errorReaderFromTests[%d]) = _, %v, want _,%v", i, err, rw.expected) + } + } +} + +// TestWriterReadFromCounts tests that using io.Copy to copy into a +// bufio.Writer does not prematurely flush the buffer. For example, when +// buffering writes to a network socket, excessive network writes should be +// avoided. 
+func TestWriterReadFromCounts(t *testing.T) { + var w0 writeCountingDiscard + b0 := NewWriterSize(&w0, 1234) + b0.WriteString(strings.Repeat("x", 1000)) + if w0 != 0 { + t.Fatalf("write 1000 'x's: got %d writes, want 0", w0) + } + b0.WriteString(strings.Repeat("x", 200)) + if w0 != 0 { + t.Fatalf("write 1200 'x's: got %d writes, want 0", w0) + } + io.Copy(b0, onlyReader{strings.NewReader(strings.Repeat("x", 30))}) + if w0 != 0 { + t.Fatalf("write 1230 'x's: got %d writes, want 0", w0) + } + io.Copy(b0, onlyReader{strings.NewReader(strings.Repeat("x", 9))}) + if w0 != 1 { + t.Fatalf("write 1239 'x's: got %d writes, want 1", w0) + } + + var w1 writeCountingDiscard + b1 := NewWriterSize(&w1, 1234) + b1.WriteString(strings.Repeat("x", 1200)) + b1.Flush() + if w1 != 1 { + t.Fatalf("flush 1200 'x's: got %d writes, want 1", w1) + } + b1.WriteString(strings.Repeat("x", 89)) + if w1 != 1 { + t.Fatalf("write 1200 + 89 'x's: got %d writes, want 1", w1) + } + io.Copy(b1, onlyReader{strings.NewReader(strings.Repeat("x", 700))}) + if w1 != 1 { + t.Fatalf("write 1200 + 789 'x's: got %d writes, want 1", w1) + } + io.Copy(b1, onlyReader{strings.NewReader(strings.Repeat("x", 600))}) + if w1 != 2 { + t.Fatalf("write 1200 + 1389 'x's: got %d writes, want 2", w1) + } + b1.Flush() + if w1 != 3 { + t.Fatalf("flush 1200 + 1389 'x's: got %d writes, want 3", w1) + } +} + +// A writeCountingDiscard is like ioutil.Discard and counts the number of times +// Write is called on it. +type writeCountingDiscard int + +func (w *writeCountingDiscard) Write(p []byte) (int, error) { + *w++ + return len(p), nil +} + +type negativeReader int + +func (r *negativeReader) Read([]byte) (int, error) { return -1, nil } + +func TestNegativeRead(t *testing.T) { + // should panic with a description pointing at the reader, not at itself. + // (should NOT panic with slice index error, for example.) 
+ b := NewReader(new(negativeReader)) + defer func() { + switch err := recover().(type) { + case nil: + t.Fatal("read did not panic") + case error: + if !strings.Contains(err.Error(), "reader returned negative count from Read") { + t.Fatalf("wrong panic: %v", err) + } + default: + t.Fatalf("unexpected panic value: %T(%v)", err, err) + } + }() + b.Read(make([]byte, 100)) +} + +var errFake = errors.New("fake error") + +type errorThenGoodReader struct { + didErr bool + nread int +} + +func (r *errorThenGoodReader) Read(p []byte) (int, error) { + r.nread++ + if !r.didErr { + r.didErr = true + return 0, errFake + } + return len(p), nil +} + +func TestReaderClearError(t *testing.T) { + r := &errorThenGoodReader{} + b := NewReader(r) + buf := make([]byte, 1) + if _, err := b.Read(nil); err != nil { + t.Fatalf("1st nil Read = %v; want nil", err) + } + if _, err := b.Read(buf); err != errFake { + t.Fatalf("1st Read = %v; want errFake", err) + } + if _, err := b.Read(nil); err != nil { + t.Fatalf("2nd nil Read = %v; want nil", err) + } + if _, err := b.Read(buf); err != nil { + t.Fatalf("3rd Read with buffer = %v; want nil", err) + } + if r.nread != 2 { + t.Errorf("num reads = %d; want 2", r.nread) + } +} + +// Test for golang.org/issue/5947 +func TestWriterReadFromWhileFull(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriterSize(buf, 10) + + // Fill buffer exactly. + n, err := w.Write([]byte("0123456789")) + if n != 10 || err != nil { + t.Fatalf("Write returned (%v, %v), want (10, nil)", n, err) + } + + // Use ReadFrom to read in some data. 
+ n2, err := w.ReadFrom(strings.NewReader("abcdef")) + if n2 != 6 || err != nil { + t.Fatalf("ReadFrom returned (%v, %v), want (6, nil)", n2, err) + } +} + +type emptyThenNonEmptyReader struct { + r io.Reader + n int +} + +func (r *emptyThenNonEmptyReader) Read(p []byte) (int, error) { + if r.n <= 0 { + return r.r.Read(p) + } + r.n-- + return 0, nil +} + +// Test for golang.org/issue/7611 +func TestWriterReadFromUntilEOF(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriterSize(buf, 5) + + // Partially fill buffer + n, err := w.Write([]byte("0123")) + if n != 4 || err != nil { + t.Fatalf("Write returned (%v, %v), want (4, nil)", n, err) + } + + // Use ReadFrom to read in some data. + r := &emptyThenNonEmptyReader{r: strings.NewReader("abcd"), n: 3} + n2, err := w.ReadFrom(r) + if n2 != 4 || err != nil { + t.Fatalf("ReadFrom returned (%v, %v), want (4, nil)", n2, err) + } + w.Flush() + if got, want := string(buf.Bytes()), "0123abcd"; got != want { + t.Fatalf("buf.Bytes() returned %q, want %q", got, want) + } +} + +func TestWriterReadFromErrNoProgress(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriterSize(buf, 5) + + // Partially fill buffer + n, err := w.Write([]byte("0123")) + if n != 4 || err != nil { + t.Fatalf("Write returned (%v, %v), want (4, nil)", n, err) + } + + // Use ReadFrom to read in some data. 
+ r := &emptyThenNonEmptyReader{r: strings.NewReader("abcd"), n: 100} + n2, err := w.ReadFrom(r) + if n2 != 0 || err != io.ErrNoProgress { + t.Fatalf("buf.Bytes() returned (%v, %v), want (0, io.ErrNoProgress)", n2, err) + } +} + +func TestReaderReset(t *testing.T) { + r := NewReader(strings.NewReader("foo foo")) + buf := make([]byte, 3) + r.Read(buf) + if string(buf) != "foo" { + t.Errorf("buf = %q; want foo", buf) + } + r.Reset(strings.NewReader("bar bar")) + all, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(all) != "bar bar" { + t.Errorf("ReadAll = %q; want bar bar", all) + } +} + +func TestWriterReset(t *testing.T) { + var buf1, buf2 bytes.Buffer + w := NewWriter(&buf1) + w.WriteString("foo") + w.Reset(&buf2) // and not flushed + w.WriteString("bar") + w.Flush() + if buf1.String() != "" { + t.Errorf("buf1 = %q; want empty", buf1.String()) + } + if buf2.String() != "bar" { + t.Errorf("buf2 = %q; want bar", buf2.String()) + } +} + +// An onlyReader only implements io.Reader, no matter what other methods the underlying implementation may have. +type onlyReader struct { + io.Reader +} + +// An onlyWriter only implements io.Writer, no matter what other methods the underlying implementation may have. 
+type onlyWriter struct { + io.Writer +} + +func BenchmarkReaderCopyOptimal(b *testing.B) { + // Optimal case is where the underlying reader implements io.WriterTo + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := NewReader(srcBuf) + dstBuf := new(bytes.Buffer) + dst := onlyWriter{dstBuf} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + src.Reset(srcBuf) + dstBuf.Reset() + io.Copy(dst, src) + } +} + +func BenchmarkReaderCopyUnoptimal(b *testing.B) { + // Unoptimal case is where the underlying reader doesn't implement io.WriterTo + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := NewReader(onlyReader{srcBuf}) + dstBuf := new(bytes.Buffer) + dst := onlyWriter{dstBuf} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + src.Reset(onlyReader{srcBuf}) + dstBuf.Reset() + io.Copy(dst, src) + } +} + +func BenchmarkReaderCopyNoWriteTo(b *testing.B) { + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + srcReader := NewReader(srcBuf) + src := onlyReader{srcReader} + dstBuf := new(bytes.Buffer) + dst := onlyWriter{dstBuf} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + srcReader.Reset(srcBuf) + dstBuf.Reset() + io.Copy(dst, src) + } +} + +func BenchmarkReaderWriteToOptimal(b *testing.B) { + const bufSize = 16 << 10 + buf := make([]byte, bufSize) + r := bytes.NewReader(buf) + srcReader := NewReaderSize(onlyReader{r}, 1<<10) + if _, ok := ioutil.Discard.(io.ReaderFrom); !ok { + b.Fatal("ioutil.Discard doesn't support ReaderFrom") + } + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + srcReader.Reset(onlyReader{r}) + n, err := srcReader.WriteTo(ioutil.Discard) + if err != nil { + b.Fatal(err) + } + if n != bufSize { + b.Fatalf("n = %d; want %d", n, bufSize) + } + } +} + +func BenchmarkWriterCopyOptimal(b *testing.B) { + // Optimal case is where the underlying writer implements io.ReaderFrom + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := onlyReader{srcBuf} + dstBuf := new(bytes.Buffer) + dst := NewWriter(dstBuf) + for i := 0; i < b.N; i++ { + srcBuf.Reset() + 
dstBuf.Reset() + dst.Reset(dstBuf) + io.Copy(dst, src) + } +} + +func BenchmarkWriterCopyUnoptimal(b *testing.B) { + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := onlyReader{srcBuf} + dstBuf := new(bytes.Buffer) + dst := NewWriter(onlyWriter{dstBuf}) + for i := 0; i < b.N; i++ { + srcBuf.Reset() + dstBuf.Reset() + dst.Reset(onlyWriter{dstBuf}) + io.Copy(dst, src) + } +} + +func BenchmarkWriterCopyNoReadFrom(b *testing.B) { + srcBuf := bytes.NewBuffer(make([]byte, 8192)) + src := onlyReader{srcBuf} + dstBuf := new(bytes.Buffer) + dstWriter := NewWriter(dstBuf) + dst := onlyWriter{dstWriter} + for i := 0; i < b.N; i++ { + srcBuf.Reset() + dstBuf.Reset() + dstWriter.Reset(dstBuf) + io.Copy(dst, src) + } +} + +func BenchmarkReaderEmpty(b *testing.B) { + b.ReportAllocs() + str := strings.Repeat("x", 16<<10) + for i := 0; i < b.N; i++ { + br := NewReader(strings.NewReader(str)) + n, err := io.Copy(ioutil.Discard, br) + if err != nil { + b.Fatal(err) + } + if n != int64(len(str)) { + b.Fatal("wrong length") + } + } +} + +func BenchmarkWriterEmpty(b *testing.B) { + b.ReportAllocs() + str := strings.Repeat("x", 1<<10) + bs := []byte(str) + for i := 0; i < b.N; i++ { + bw := NewWriter(ioutil.Discard) + bw.Flush() + bw.WriteByte('a') + bw.Flush() + bw.WriteRune('B') + bw.Flush() + bw.Write(bs) + bw.Flush() + bw.WriteString(str) + bw.Flush() + } +} + +func BenchmarkWriterFlush(b *testing.B) { + b.ReportAllocs() + bw := NewWriter(ioutil.Discard) + str := strings.Repeat("x", 50) + for i := 0; i < b.N; i++ { + bw.WriteString(str) + bw.Flush() + } +} diff --git a/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go b/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go new file mode 100644 index 00000000000..16629d0224c --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/bufio.v1/export_test.go @@ -0,0 +1,9 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bufio + +func (b *Buffer) Cap() int { + return cap(b.buf) +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml b/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml new file mode 100644 index 00000000000..c3cf4b8a6e3 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml @@ -0,0 +1,19 @@ +language: go + +services: +- redis-server + +go: + - 1.1 + - 1.2 + - 1.3 + - tip + +install: + - go get gopkg.in/bufio.v1 + - go get gopkg.in/check.v1 + - mkdir -p $HOME/gopath/src/gopkg.in + - ln -s `pwd` $HOME/gopath/src/gopkg.in/redis.v2 + +before_script: + - redis-server testdata/sentinel.conf --sentinel & diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE new file mode 100644 index 00000000000..6855a95feb9 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Redis Go Client Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile b/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile new file mode 100644 index 00000000000..b250d9bfa96 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/Makefile @@ -0,0 +1,3 @@ +all: + go test gopkg.in/redis.v2 -cpu=1,2,4 + go test gopkg.in/redis.v2 -short -race diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/README.md b/Godeps/_workspace/src/gopkg.in/redis.v2/README.md new file mode 100644 index 00000000000..ddf875f9a19 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/README.md @@ -0,0 +1,46 @@ +Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis) +======================= + +Supports: + +- Redis 2.8 commands except QUIT, MONITOR, SLOWLOG and SYNC. +- Pub/sub. +- Transactions. +- Pipelining. +- Connection pool. +- TLS connections. +- Thread safety. +- Timeouts. +- Redis Sentinel. + +API docs: http://godoc.org/gopkg.in/redis.v2. +Examples: http://godoc.org/gopkg.in/redis.v2#pkg-examples. 
+ +Installation +------------ + +Install: + + go get gopkg.in/redis.v2 + +Look and feel +------------- + +Some corner cases: + + SORT list LIMIT 0 2 ASC + vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + + ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 + vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, + }).Result() + + ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM + vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() + + EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" + vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result() diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/command.go b/Godeps/_workspace/src/gopkg.in/redis.v2/command.go new file mode 100644 index 00000000000..d7c76cf92a9 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/command.go @@ -0,0 +1,597 @@ +package redis + +import ( + "fmt" + "strconv" + "strings" + "time" + + "gopkg.in/bufio.v1" +) + +var ( + _ Cmder = (*Cmd)(nil) + _ Cmder = (*SliceCmd)(nil) + _ Cmder = (*StatusCmd)(nil) + _ Cmder = (*IntCmd)(nil) + _ Cmder = (*DurationCmd)(nil) + _ Cmder = (*BoolCmd)(nil) + _ Cmder = (*StringCmd)(nil) + _ Cmder = (*FloatCmd)(nil) + _ Cmder = (*StringSliceCmd)(nil) + _ Cmder = (*BoolSliceCmd)(nil) + _ Cmder = (*StringStringMapCmd)(nil) + _ Cmder = (*ZSliceCmd)(nil) + _ Cmder = (*ScanCmd)(nil) +) + +type Cmder interface { + args() []string + parseReply(*bufio.Reader) error + setErr(error) + + writeTimeout() *time.Duration + readTimeout() *time.Duration + + Err() error + String() string +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + cmd.setErr(e) + } +} + +func cmdString(cmd Cmder, val interface{}) string { + s := strings.Join(cmd.args(), " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + 
return s + ": " + fmt.Sprint(val) + } + return s + +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []string + + err error + + _writeTimeout, _readTimeout *time.Duration +} + +func newBaseCmd(args ...string) *baseCmd { + return &baseCmd{ + _args: args, + } +} + +func (cmd *baseCmd) Err() error { + if cmd.err != nil { + return cmd.err + } + return nil +} + +func (cmd *baseCmd) args() []string { + return cmd._args +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) writeTimeout() *time.Duration { + return cmd._writeTimeout +} + +func (cmd *baseCmd) setWriteTimeout(d time.Duration) { + cmd._writeTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + *baseCmd + + val interface{} +} + +func NewCmd(args ...string) *Cmd { + return &Cmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) parseReply(rd *bufio.Reader) error { + cmd.val, cmd.err = parseReply(rd, parseSlice) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + *baseCmd + + val []interface{} +} + +func NewSliceCmd(args ...string) *SliceCmd { + return &SliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error { + 
v, err := parseReply(rd, parseSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + *baseCmd + + val string +} + +func NewStatusCmd(args ...string) *StatusCmd { + return &StatusCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(string) + return nil +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + *baseCmd + + val int64 +} + +func NewIntCmd(args ...string) *IntCmd { + return &IntCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(int64) + return nil +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + *baseCmd + + val time.Duration + precision time.Duration +} + +func NewDurationCmd(precision time.Duration, args ...string) *DurationCmd { + return &DurationCmd{ + baseCmd: newBaseCmd(args...), + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + 
+func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = time.Duration(v.(int64)) * cmd.precision + return nil +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + *baseCmd + + val bool +} + +func NewBoolCmd(args ...string) *BoolCmd { + return &BoolCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(int64) == 1 + return nil +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + *baseCmd + + val string +} + +func NewStringCmd(args ...string) *StringCmd { + return &StringCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.val, 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.val, 10, 64) +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.val, 64) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(string) + return nil +} + 
+//------------------------------------------------------------------------------ + +type FloatCmd struct { + *baseCmd + + val float64 +} + +func NewFloatCmd(args ...string) *FloatCmd { + return &FloatCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, nil) + if err != nil { + cmd.err = err + return err + } + cmd.val, cmd.err = strconv.ParseFloat(v.(string), 64) + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + *baseCmd + + val []string +} + +func NewStringSliceCmd(args ...string) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseStringSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]string) + return nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + *baseCmd + + val []bool +} + +func NewBoolSliceCmd(args ...string) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseBoolSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]bool) + 
return nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + *baseCmd + + val map[string]string +} + +func NewStringStringMapCmd(args ...string) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseStringStringMap) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.(map[string]string) + return nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + *baseCmd + + val []Z +} + +func NewZSliceCmd(args ...string) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error { + v, err := parseReply(rd, parseZSlice) + if err != nil { + cmd.err = err + return err + } + cmd.val = v.([]Z) + return nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + *baseCmd + + cursor int64 + keys []string +} + +func NewScanCmd(args ...string) *ScanCmd { + return &ScanCmd{ + baseCmd: newBaseCmd(args...), + } +} + +func (cmd *ScanCmd) Val() (int64, []string) { + return cmd.cursor, cmd.keys +} + +func (cmd *ScanCmd) Result() (int64, []string, error) { + return cmd.cursor, cmd.keys, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.keys) +} + +func (cmd *ScanCmd) 
parseReply(rd *bufio.Reader) error { + vi, err := parseReply(rd, parseSlice) + if err != nil { + cmd.err = err + return cmd.err + } + v := vi.([]interface{}) + + cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64) + if cmd.err != nil { + return cmd.err + } + + keys := v[1].([]interface{}) + for _, keyi := range keys { + cmd.keys = append(cmd.keys, keyi.(string)) + } + + return nil +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go b/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go new file mode 100644 index 00000000000..6068bab17e1 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/commands.go @@ -0,0 +1,1246 @@ +package redis + +import ( + "io" + "strconv" + "time" +) + +func formatFloat(f float64) string { + return strconv.FormatFloat(f, 'f', -1, 64) +} + +func readTimeout(sec int64) time.Duration { + if sec == 0 { + return 0 + } + return time.Duration(sec+1) * time.Second +} + +//------------------------------------------------------------------------------ + +func (c *Client) Auth(password string) *StatusCmd { + cmd := NewStatusCmd("AUTH", password) + c.Process(cmd) + return cmd +} + +func (c *Client) Echo(message string) *StringCmd { + cmd := NewStringCmd("ECHO", message) + c.Process(cmd) + return cmd +} + +func (c *Client) Ping() *StatusCmd { + cmd := NewStatusCmd("PING") + c.Process(cmd) + return cmd +} + +func (c *Client) Quit() *StatusCmd { + panic("not implemented") +} + +func (c *Client) Select(index int64) *StatusCmd { + cmd := NewStatusCmd("SELECT", strconv.FormatInt(index, 10)) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) Del(keys ...string) *IntCmd { + args := append([]string{"DEL"}, keys...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) Dump(key string) *StringCmd { + cmd := NewStringCmd("DUMP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Exists(key string) *BoolCmd { + cmd := NewBoolCmd("EXISTS", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Expire(key string, dur time.Duration) *BoolCmd { + cmd := NewBoolCmd("EXPIRE", key, strconv.FormatInt(int64(dur/time.Second), 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) ExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd("EXPIREAT", key, strconv.FormatInt(tm.Unix(), 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) Keys(pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd("KEYS", pattern) + c.Process(cmd) + return cmd +} + +func (c *Client) Migrate(host, port, key string, db, timeout int64) *StatusCmd { + cmd := NewStatusCmd( + "MIGRATE", + host, + port, + key, + strconv.FormatInt(db, 10), + strconv.FormatInt(timeout, 10), + ) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) Move(key string, db int64) *BoolCmd { + cmd := NewBoolCmd("MOVE", key, strconv.FormatInt(db, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) ObjectRefCount(keys ...string) *IntCmd { + args := append([]string{"OBJECT", "REFCOUNT"}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ObjectEncoding(keys ...string) *StringCmd { + args := append([]string{"OBJECT", "ENCODING"}, keys...) + cmd := NewStringCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ObjectIdleTime(keys ...string) *DurationCmd { + args := append([]string{"OBJECT", "IDLETIME"}, keys...) + cmd := NewDurationCmd(time.Second, args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) Persist(key string) *BoolCmd { + cmd := NewBoolCmd("PERSIST", key) + c.Process(cmd) + return cmd +} + +func (c *Client) PExpire(key string, dur time.Duration) *BoolCmd { + cmd := NewBoolCmd("PEXPIRE", key, strconv.FormatInt(int64(dur/time.Millisecond), 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) PExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + "PEXPIREAT", + key, + strconv.FormatInt(tm.UnixNano()/int64(time.Millisecond), 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) PTTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Millisecond, "PTTL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) RandomKey() *StringCmd { + cmd := NewStringCmd("RANDOMKEY") + c.Process(cmd) + return cmd +} + +func (c *Client) Rename(key, newkey string) *StatusCmd { + cmd := NewStatusCmd("RENAME", key, newkey) + c.Process(cmd) + return cmd +} + +func (c *Client) RenameNX(key, newkey string) *BoolCmd { + cmd := NewBoolCmd("RENAMENX", key, newkey) + c.Process(cmd) + return cmd +} + +func (c *Client) Restore(key string, ttl int64, value string) *StatusCmd { + cmd := NewStatusCmd( + "RESTORE", + key, + strconv.FormatInt(ttl, 10), + value, + ) + c.Process(cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count float64 + Get []string + Order string + IsAlpha bool + Store string +} + +func (c *Client) Sort(key string, sort Sort) *StringSliceCmd { + args := []string{"SORT", key} + if sort.By != "" { + args = append(args, "BY", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "LIMIT", formatFloat(sort.Offset), formatFloat(sort.Count)) + } + for _, get := range sort.Get { + args = append(args, "GET", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.IsAlpha { + args = append(args, "ALPHA") + } + if sort.Store != "" { + args = append(args, "STORE", sort.Store) + } + cmd := NewStringSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) TTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "TTL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Type(key string) *StatusCmd { + cmd := NewStatusCmd("TYPE", key) + c.Process(cmd) + return cmd +} + +func (c *Client) Scan(cursor int64, match string, count int64) *ScanCmd { + args := []string{"SCAN", strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []string{"SSCAN", key, strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) HScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []string{"HSCAN", key, strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZScan(key string, cursor int64, match string, count int64) *ScanCmd { + args := []string{"ZSCAN", key, strconv.FormatInt(cursor, 10)} + if match != "" { + args = append(args, "MATCH", match) + } + if count > 0 { + args = append(args, "COUNT", strconv.FormatInt(count, 10)) + } + cmd := NewScanCmd(args...) 
+ c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) Append(key, value string) *IntCmd { + cmd := NewIntCmd("APPEND", key, value) + c.Process(cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c *Client) BitCount(key string, bitCount *BitCount) *IntCmd { + args := []string{"BITCOUNT", key} + if bitCount != nil { + args = append( + args, + strconv.FormatInt(bitCount.Start, 10), + strconv.FormatInt(bitCount.End, 10), + ) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) bitOp(op, destKey string, keys ...string) *IntCmd { + args := []string{"BITOP", op, destKey} + args = append(args, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) BitOpAnd(destKey string, keys ...string) *IntCmd { + return c.bitOp("AND", destKey, keys...) +} + +func (c *Client) BitOpOr(destKey string, keys ...string) *IntCmd { + return c.bitOp("OR", destKey, keys...) +} + +func (c *Client) BitOpXor(destKey string, keys ...string) *IntCmd { + return c.bitOp("XOR", destKey, keys...) 
+} + +func (c *Client) BitOpNot(destKey string, key string) *IntCmd { + return c.bitOp("NOT", destKey, key) +} + +func (c *Client) Decr(key string) *IntCmd { + cmd := NewIntCmd("DECR", key) + c.Process(cmd) + return cmd +} + +func (c *Client) DecrBy(key string, decrement int64) *IntCmd { + cmd := NewIntCmd("DECRBY", key, strconv.FormatInt(decrement, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) Get(key string) *StringCmd { + cmd := NewStringCmd("GET", key) + c.Process(cmd) + return cmd +} + +func (c *Client) GetBit(key string, offset int64) *IntCmd { + cmd := NewIntCmd("GETBIT", key, strconv.FormatInt(offset, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) GetRange(key string, start, end int64) *StringCmd { + cmd := NewStringCmd( + "GETRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(end, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) GetSet(key, value string) *StringCmd { + cmd := NewStringCmd("GETSET", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) Incr(key string) *IntCmd { + cmd := NewIntCmd("INCR", key) + c.Process(cmd) + return cmd +} + +func (c *Client) IncrBy(key string, value int64) *IntCmd { + cmd := NewIntCmd("INCRBY", key, strconv.FormatInt(value, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) IncrByFloat(key string, value float64) *FloatCmd { + cmd := NewFloatCmd("INCRBYFLOAT", key, formatFloat(value)) + c.Process(cmd) + return cmd +} + +func (c *Client) MGet(keys ...string) *SliceCmd { + args := append([]string{"MGET"}, keys...) + cmd := NewSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) MSet(pairs ...string) *StatusCmd { + args := append([]string{"MSET"}, pairs...) + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) MSetNX(pairs ...string) *BoolCmd { + args := append([]string{"MSETNX"}, pairs...) + cmd := NewBoolCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) PSetEx(key string, dur time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "PSETEX", + key, + strconv.FormatInt(int64(dur/time.Millisecond), 10), + value, + ) + c.Process(cmd) + return cmd +} + +func (c *Client) Set(key, value string) *StatusCmd { + cmd := NewStatusCmd("SET", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) SetBit(key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + "SETBIT", + key, + strconv.FormatInt(offset, 10), + strconv.FormatInt(int64(value), 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) SetEx(key string, dur time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd("SETEX", key, strconv.FormatInt(int64(dur/time.Second), 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) SetNX(key, value string) *BoolCmd { + cmd := NewBoolCmd("SETNX", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) SetRange(key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd("SETRANGE", key, strconv.FormatInt(offset, 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) StrLen(key string) *IntCmd { + cmd := NewIntCmd("STRLEN", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) HDel(key string, fields ...string) *IntCmd { + args := append([]string{"HDEL", key}, fields...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) HExists(key, field string) *BoolCmd { + cmd := NewBoolCmd("HEXISTS", key, field) + c.Process(cmd) + return cmd +} + +func (c *Client) HGet(key, field string) *StringCmd { + cmd := NewStringCmd("HGET", key, field) + c.Process(cmd) + return cmd +} + +func (c *Client) HGetAll(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HGETALL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HGetAllMap(key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd("HGETALL", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HIncrBy(key, field string, incr int64) *IntCmd { + cmd := NewIntCmd("HINCRBY", key, field, strconv.FormatInt(incr, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) HIncrByFloat(key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd("HINCRBYFLOAT", key, field, formatFloat(incr)) + c.Process(cmd) + return cmd +} + +func (c *Client) HKeys(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HKEYS", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HLen(key string) *IntCmd { + cmd := NewIntCmd("HLEN", key) + c.Process(cmd) + return cmd +} + +func (c *Client) HMGet(key string, fields ...string) *SliceCmd { + args := append([]string{"HMGET", key}, fields...) + cmd := NewSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) HMSet(key, field, value string, pairs ...string) *StatusCmd { + args := append([]string{"HMSET", key, field, value}, pairs...) + cmd := NewStatusCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) HSet(key, field, value string) *BoolCmd { + cmd := NewBoolCmd("HSET", key, field, value) + c.Process(cmd) + return cmd +} + +func (c *Client) HSetNX(key, field, value string) *BoolCmd { + cmd := NewBoolCmd("HSETNX", key, field, value) + c.Process(cmd) + return cmd +} + +func (c *Client) HVals(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("HVALS", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) BLPop(timeout int64, keys ...string) *StringSliceCmd { + args := append([]string{"BLPOP"}, keys...) + args = append(args, strconv.FormatInt(timeout, 10)) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) BRPop(timeout int64, keys ...string) *StringSliceCmd { + args := append([]string{"BRPOP"}, keys...) + args = append(args, strconv.FormatInt(timeout, 10)) + cmd := NewStringSliceCmd(args...) 
+ cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) BRPopLPush(source, destination string, timeout int64) *StringCmd { + cmd := NewStringCmd( + "BRPOPLPUSH", + source, + destination, + strconv.FormatInt(timeout, 10), + ) + cmd.setReadTimeout(readTimeout(timeout)) + c.Process(cmd) + return cmd +} + +func (c *Client) LIndex(key string, index int64) *StringCmd { + cmd := NewStringCmd("LINDEX", key, strconv.FormatInt(index, 10)) + c.Process(cmd) + return cmd +} + +func (c *Client) LInsert(key, op, pivot, value string) *IntCmd { + cmd := NewIntCmd("LINSERT", key, op, pivot, value) + c.Process(cmd) + return cmd +} + +func (c *Client) LLen(key string) *IntCmd { + cmd := NewIntCmd("LLEN", key) + c.Process(cmd) + return cmd +} + +func (c *Client) LPop(key string) *StringCmd { + cmd := NewStringCmd("LPOP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) LPush(key string, values ...string) *IntCmd { + args := append([]string{"LPUSH", key}, values...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) LPushX(key, value string) *IntCmd { + cmd := NewIntCmd("LPUSHX", key, value) + c.Process(cmd) + return cmd +} + +func (c *Client) LRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + "LRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) LRem(key string, count int64, value string) *IntCmd { + cmd := NewIntCmd("LREM", key, strconv.FormatInt(count, 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) LSet(key string, index int64, value string) *StatusCmd { + cmd := NewStatusCmd("LSET", key, strconv.FormatInt(index, 10), value) + c.Process(cmd) + return cmd +} + +func (c *Client) LTrim(key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + "LTRIM", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) RPop(key string) *StringCmd { + cmd := NewStringCmd("RPOP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) RPopLPush(source, destination string) *StringCmd { + cmd := NewStringCmd("RPOPLPUSH", source, destination) + c.Process(cmd) + return cmd +} + +func (c *Client) RPush(key string, values ...string) *IntCmd { + args := append([]string{"RPUSH", key}, values...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) RPushX(key string, value string) *IntCmd { + cmd := NewIntCmd("RPUSHX", key, value) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) SAdd(key string, members ...string) *IntCmd { + args := append([]string{"SADD", key}, members...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) SCard(key string) *IntCmd { + cmd := NewIntCmd("SCARD", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SDiff(keys ...string) *StringSliceCmd { + args := append([]string{"SDIFF"}, keys...) + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SDiffStore(destination string, keys ...string) *IntCmd { + args := append([]string{"SDIFFSTORE", destination}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SInter(keys ...string) *StringSliceCmd { + args := append([]string{"SINTER"}, keys...) + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SInterStore(destination string, keys ...string) *IntCmd { + args := append([]string{"SINTERSTORE", destination}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SIsMember(key, member string) *BoolCmd { + cmd := NewBoolCmd("SISMEMBER", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) SMembers(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("SMEMBERS", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SMove(source, destination, member string) *BoolCmd { + cmd := NewBoolCmd("SMOVE", source, destination, member) + c.Process(cmd) + return cmd +} + +func (c *Client) SPop(key string) *StringCmd { + cmd := NewStringCmd("SPOP", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SRandMember(key string) *StringCmd { + cmd := NewStringCmd("SRANDMEMBER", key) + c.Process(cmd) + return cmd +} + +func (c *Client) SRem(key string, members ...string) *IntCmd { + args := append([]string{"SREM", key}, members...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) SUnion(keys ...string) *StringSliceCmd { + args := append([]string{"SUNION"}, keys...) + cmd := NewStringSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) SUnionStore(destination string, keys ...string) *IntCmd { + args := append([]string{"SUNIONSTORE", destination}, keys...) + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type Z struct { + Score float64 + Member string +} + +type ZStore struct { + Weights []int64 + Aggregate string +} + +func (c *Client) ZAdd(key string, members ...Z) *IntCmd { + args := []string{"ZADD", key} + for _, m := range members { + args = append(args, formatFloat(m.Score), m.Member) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZCard(key string) *IntCmd { + cmd := NewIntCmd("ZCARD", key) + c.Process(cmd) + return cmd +} + +func (c *Client) ZCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("ZCOUNT", key, min, max) + c.Process(cmd) + return cmd +} + +func (c *Client) ZIncrBy(key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd("ZINCRBY", key, formatFloat(increment), member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZInterStore( + destination string, + store ZStore, + keys ...string, +) *IntCmd { + args := []string{"ZINTERSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)} + args = append(args, keys...) + if len(store.Weights) > 0 { + args = append(args, "WEIGHTS") + for _, weight := range store.Weights { + args = append(args, strconv.FormatInt(weight, 10)) + } + } + if store.Aggregate != "" { + args = append(args, "AGGREGATE", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { + args := []string{ + "ZRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + } + if withScores { + args = append(args, "WITHSCORES") + } + cmd := NewStringSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) ZRange(key string, start, stop int64) *StringSliceCmd { + return c.zRange(key, start, stop, false) +} + +func (c *Client) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { + args := []string{ + "ZRANGE", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + "WITHSCORES", + } + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +type ZRangeByScore struct { + Min, Max string + + Offset, Count int64 +} + +func (c *Client) zRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd { + args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max} + if withScores { + args = append(args, "WITHSCORES") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd { + return c.zRangeByScore(key, opt, false) +} + +func (c *Client) ZRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd { + args := []string{"ZRANGEBYSCORE", key, opt.Min, opt.Max, "WITHSCORES"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRank(key, member string) *IntCmd { + cmd := NewIntCmd("ZRANK", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRem(key string, members ...string) *IntCmd { + args := append([]string{"ZREM", key}, members...) + cmd := NewIntCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) ZRemRangeByRank(key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + "ZREMRANGEBYRANK", + key, + strconv.FormatInt(start, 10), + strconv.FormatInt(stop, 10), + ) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRemRangeByScore(key, min, max string) *IntCmd { + cmd := NewIntCmd("ZREMRANGEBYSCORE", key, min, max) + c.Process(cmd) + return cmd +} + +func (c *Client) zRevRange(key, start, stop string, withScores bool) *StringSliceCmd { + args := []string{"ZREVRANGE", key, start, stop} + if withScores { + args = append(args, "WITHSCORES") + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRevRange(key, start, stop string) *StringSliceCmd { + return c.zRevRange(key, start, stop, false) +} + +func (c *Client) ZRevRangeWithScores(key, start, stop string) *ZSliceCmd { + args := []string{"ZREVRANGE", key, start, stop, "WITHSCORES"} + cmd := NewZSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) zRevRangeByScore(key string, opt ZRangeByScore, withScores bool) *StringSliceCmd { + args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min} + if withScores { + args = append(args, "WITHSCORES") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ZRevRangeByScore(key string, opt ZRangeByScore) *StringSliceCmd { + return c.zRevRangeByScore(key, opt, false) +} + +func (c *Client) ZRevRangeByScoreWithScores(key string, opt ZRangeByScore) *ZSliceCmd { + args := []string{"ZREVRANGEBYSCORE", key, opt.Max, opt.Min, "WITHSCORES"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "LIMIT", + strconv.FormatInt(opt.Offset, 10), + strconv.FormatInt(opt.Count, 10), + ) + } + cmd := NewZSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) ZRevRank(key, member string) *IntCmd { + cmd := NewIntCmd("ZREVRANK", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZScore(key, member string) *FloatCmd { + cmd := NewFloatCmd("ZSCORE", key, member) + c.Process(cmd) + return cmd +} + +func (c *Client) ZUnionStore( + destination string, + store ZStore, + keys ...string, +) *IntCmd { + args := []string{"ZUNIONSTORE", destination, strconv.FormatInt(int64(len(keys)), 10)} + args = append(args, keys...) + if len(store.Weights) > 0 { + args = append(args, "WEIGHTS") + for _, weight := range store.Weights { + args = append(args, strconv.FormatInt(weight, 10)) + } + } + if store.Aggregate != "" { + args = append(args, "AGGREGATE", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) BgRewriteAOF() *StatusCmd { + cmd := NewStatusCmd("BGREWRITEAOF") + c.Process(cmd) + return cmd +} + +func (c *Client) BgSave() *StatusCmd { + cmd := NewStatusCmd("BGSAVE") + c.Process(cmd) + return cmd +} + +func (c *Client) ClientKill(ipPort string) *StatusCmd { + cmd := NewStatusCmd("CLIENT", "KILL", ipPort) + c.Process(cmd) + return cmd +} + +func (c *Client) ClientList() *StringCmd { + cmd := NewStringCmd("CLIENT", "LIST") + c.Process(cmd) + return cmd +} + +func (c *Client) ConfigGet(parameter string) *SliceCmd { + cmd := NewSliceCmd("CONFIG", "GET", parameter) + c.Process(cmd) + return cmd +} + +func (c *Client) ConfigResetStat() *StatusCmd { + cmd := NewStatusCmd("CONFIG", "RESETSTAT") + c.Process(cmd) + return cmd +} + +func (c *Client) ConfigSet(parameter, value string) *StatusCmd { + cmd := NewStatusCmd("CONFIG", "SET", parameter, value) + c.Process(cmd) + return cmd +} + +func (c *Client) DbSize() *IntCmd { + cmd := NewIntCmd("DBSIZE") + c.Process(cmd) + return cmd +} + +func (c *Client) FlushAll() *StatusCmd { + cmd := 
NewStatusCmd("FLUSHALL") + c.Process(cmd) + return cmd +} + +func (c *Client) FlushDb() *StatusCmd { + cmd := NewStatusCmd("FLUSHDB") + c.Process(cmd) + return cmd +} + +func (c *Client) Info() *StringCmd { + cmd := NewStringCmd("INFO") + c.Process(cmd) + return cmd +} + +func (c *Client) LastSave() *IntCmd { + cmd := NewIntCmd("LASTSAVE") + c.Process(cmd) + return cmd +} + +func (c *Client) Save() *StatusCmd { + cmd := NewStatusCmd("SAVE") + c.Process(cmd) + return cmd +} + +func (c *Client) shutdown(modifier string) *StatusCmd { + var args []string + if modifier == "" { + args = []string{"SHUTDOWN"} + } else { + args = []string{"SHUTDOWN", modifier} + } + cmd := NewStatusCmd(args...) + c.Process(cmd) + if err := cmd.Err(); err != nil { + if err == io.EOF { + // Server quit as expected. + cmd.err = nil + } + } else { + // Server did not quit. String reply contains the reason. + cmd.err = errorf(cmd.val) + cmd.val = "" + } + return cmd +} + +func (c *Client) Shutdown() *StatusCmd { + return c.shutdown("") +} + +func (c *Client) ShutdownSave() *StatusCmd { + return c.shutdown("SAVE") +} + +func (c *Client) ShutdownNoSave() *StatusCmd { + return c.shutdown("NOSAVE") +} + +func (c *Client) SlaveOf(host, port string) *StatusCmd { + cmd := NewStatusCmd("SLAVEOF", host, port) + c.Process(cmd) + return cmd +} + +func (c *Client) SlowLog() { + panic("not implemented") +} + +func (c *Client) Sync() { + panic("not implemented") +} + +func (c *Client) Time() *StringSliceCmd { + cmd := NewStringSliceCmd("TIME") + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) Eval(script string, keys []string, args []string) *Cmd { + cmdArgs := []string{"EVAL", script, strconv.FormatInt(int64(len(keys)), 10)} + cmdArgs = append(cmdArgs, keys...) + cmdArgs = append(cmdArgs, args...) + cmd := NewCmd(cmdArgs...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) EvalSha(sha1 string, keys []string, args []string) *Cmd { + cmdArgs := []string{"EVALSHA", sha1, strconv.FormatInt(int64(len(keys)), 10)} + cmdArgs = append(cmdArgs, keys...) + cmdArgs = append(cmdArgs, args...) + cmd := NewCmd(cmdArgs...) + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptExists(scripts ...string) *BoolSliceCmd { + args := append([]string{"SCRIPT", "EXISTS"}, scripts...) + cmd := NewBoolSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptFlush() *StatusCmd { + cmd := NewStatusCmd("SCRIPT", "FLUSH") + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptKill() *StatusCmd { + cmd := NewStatusCmd("SCRIPT", "KILL") + c.Process(cmd) + return cmd +} + +func (c *Client) ScriptLoad(script string) *StringCmd { + cmd := NewStringCmd("SCRIPT", "LOAD", script) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) DebugObject(key string) *StringCmd { + cmd := NewStringCmd("DEBUG", "OBJECT", key) + c.Process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *Client) PubSubChannels(pattern string) *StringSliceCmd { + args := []string{"PUBSUB", "CHANNELS"} + if pattern != "*" { + args = append(args, pattern) + } + cmd := NewStringSliceCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Client) PubSubNumSub(channels ...string) *SliceCmd { + args := []string{"PUBSUB", "NUMSUB"} + args = append(args, channels...) + cmd := NewSliceCmd(args...) 
+ c.Process(cmd) + return cmd +} + +func (c *Client) PubSubNumPat() *IntCmd { + cmd := NewIntCmd("PUBSUB", "NUMPAT") + c.Process(cmd) + return cmd +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go b/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go new file mode 100644 index 00000000000..55262533a63 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/doc.go @@ -0,0 +1,4 @@ +/* +Package redis implements a Redis client. +*/ +package redis diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/error.go b/Godeps/_workspace/src/gopkg.in/redis.v2/error.go new file mode 100644 index 00000000000..667fffdc682 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/error.go @@ -0,0 +1,23 @@ +package redis + +import ( + "fmt" +) + +// Redis nil reply. +var Nil = errorf("redis: nil") + +// Redis transaction failed. +var TxFailedErr = errorf("redis: transaction failed") + +type redisError struct { + s string +} + +func errorf(s string, args ...interface{}) redisError { + return redisError{s: fmt.Sprintf(s, args...)} +} + +func (err redisError) Error() string { + return err.s +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go new file mode 100644 index 00000000000..dbc95131033 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go @@ -0,0 +1,180 @@ +package redis_test + +import ( + "fmt" + "strconv" + + "gopkg.in/redis.v2" +) + +var client *redis.Client + +func init() { + client = redis.NewTCPClient(&redis.Options{ + Addr: ":6379", + }) + client.FlushDb() +} + +func ExampleNewTCPClient() { + client := redis.NewTCPClient(&redis.Options{ + Addr: "localhost:6379", + Password: "", // no password set + DB: 0, // use default DB + }) + + pong, err := client.Ping().Result() + fmt.Println(pong, err) + // Output: PONG +} + +func ExampleNewFailoverClient() { + client := redis.NewFailoverClient(&redis.FailoverOptions{ + MasterName: "master", + SentinelAddrs: 
[]string{":26379"}, + }) + + pong, err := client.Ping().Result() + fmt.Println(pong, err) + // Output: PONG +} + +func ExampleClient() { + if err := client.Set("foo", "bar").Err(); err != nil { + panic(err) + } + + v, err := client.Get("hello").Result() + fmt.Printf("%q %q %v", v, err, err == redis.Nil) + // Output: "" "redis: nil" true +} + +func ExampleClient_Incr() { + if err := client.Incr("counter").Err(); err != nil { + panic(err) + } + + n, err := client.Get("counter").Int64() + fmt.Println(n, err) + // Output: 1 +} + +func ExampleClient_Pipelined() { + cmds, err := client.Pipelined(func(c *redis.Pipeline) error { + c.Set("key1", "hello1") + c.Get("key1") + return nil + }) + fmt.Println(err) + set := cmds[0].(*redis.StatusCmd) + fmt.Println(set) + get := cmds[1].(*redis.StringCmd) + fmt.Println(get) + // Output: + // SET key1 hello1: OK + // GET key1: hello1 +} + +func ExamplePipeline() { + pipeline := client.Pipeline() + set := pipeline.Set("key1", "hello1") + get := pipeline.Get("key1") + cmds, err := pipeline.Exec() + fmt.Println(cmds, err) + fmt.Println(set) + fmt.Println(get) + // Output: [SET key1 hello1: OK GET key1: hello1] + // SET key1 hello1: OK + // GET key1: hello1 +} + +func ExampleMulti() { + incr := func(tx *redis.Multi) ([]redis.Cmder, error) { + s, err := tx.Get("key").Result() + if err != nil && err != redis.Nil { + return nil, err + } + n, _ := strconv.ParseInt(s, 10, 64) + + return tx.Exec(func() error { + tx.Set("key", strconv.FormatInt(n+1, 10)) + return nil + }) + } + + client.Del("key") + + tx := client.Multi() + defer tx.Close() + + watch := tx.Watch("key") + _ = watch.Err() + + for { + cmds, err := incr(tx) + if err == redis.TxFailedErr { + continue + } else if err != nil { + panic(err) + } + fmt.Println(cmds, err) + break + } + + // Output: [SET key 1: OK] +} + +func ExamplePubSub() { + pubsub := client.PubSub() + defer pubsub.Close() + + err := pubsub.Subscribe("mychannel") + _ = err + + msg, err := pubsub.Receive() + 
fmt.Println(msg, err) + + pub := client.Publish("mychannel", "hello") + _ = pub.Err() + + msg, err = pubsub.Receive() + fmt.Println(msg, err) + + // Output: subscribe: mychannel + // Message +} + +func ExampleScript() { + setnx := redis.NewScript(` + if redis.call("get", KEYS[1]) == false then + redis.call("set", KEYS[1], ARGV[1]) + return 1 + end + return 0 + `) + + v1, err := setnx.Run(client, []string{"keynx"}, []string{"foo"}).Result() + fmt.Println(v1.(int64), err) + + v2, err := setnx.Run(client, []string{"keynx"}, []string{"bar"}).Result() + fmt.Println(v2.(int64), err) + + get := client.Get("keynx") + fmt.Println(get) + + // Output: 1 + // 0 + // GET keynx: foo +} + +func Example_customCommand() { + Get := func(client *redis.Client, key string) *redis.StringCmd { + cmd := redis.NewStringCmd("GET", key) + client.Process(cmd) + return cmd + } + + v, err := Get(client, "key_does_not_exist").Result() + fmt.Printf("%q %s", v, err) + // Output: "" redis: nil +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go new file mode 100644 index 00000000000..7f7fa67972b --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/export_test.go @@ -0,0 +1,5 @@ +package redis + +func (c *baseClient) Pool() pool { + return c.connPool +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go b/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go new file mode 100644 index 00000000000..bff38dfaaa4 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/multi.go @@ -0,0 +1,138 @@ +package redis + +import ( + "errors" + "fmt" +) + +var errDiscard = errors.New("redis: Discard can be used only inside Exec") + +// Not thread-safe. 
+type Multi struct { + *Client +} + +func (c *Client) Multi() *Multi { + return &Multi{ + Client: &Client{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, true), + }, + }, + } +} + +func (c *Multi) Close() error { + if err := c.Unwatch().Err(); err != nil { + return err + } + return c.Client.Close() +} + +func (c *Multi) Watch(keys ...string) *StatusCmd { + args := append([]string{"WATCH"}, keys...) + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Multi) Unwatch(keys ...string) *StatusCmd { + args := append([]string{"UNWATCH"}, keys...) + cmd := NewStatusCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *Multi) Discard() error { + if c.cmds == nil { + return errDiscard + } + c.cmds = c.cmds[:1] + return nil +} + +// Exec always returns list of commands. If transaction fails +// TxFailedErr is returned. Otherwise Exec returns error of the first +// failed command or nil. +func (c *Multi) Exec(f func() error) ([]Cmder, error) { + c.cmds = []Cmder{NewStatusCmd("MULTI")} + if err := f(); err != nil { + return nil, err + } + c.cmds = append(c.cmds, NewSliceCmd("EXEC")) + + cmds := c.cmds + c.cmds = nil + + if len(cmds) == 2 { + return []Cmder{}, nil + } + + cn, err := c.conn() + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return cmds[1 : len(cmds)-1], err + } + + err = c.execCmds(cn, cmds) + if err != nil { + c.freeConn(cn, err) + return cmds[1 : len(cmds)-1], err + } + + c.putConn(cn) + return cmds[1 : len(cmds)-1], nil +} + +func (c *Multi) execCmds(cn *conn, cmds []Cmder) error { + err := c.writeCmd(cn, cmds...) + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + + statusCmd := NewStatusCmd() + + // Omit last command (EXEC). + cmdsLen := len(cmds) - 1 + + // Parse queued replies. + for i := 0; i < cmdsLen; i++ { + if err := statusCmd.parseReply(cn.rd); err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + } + + // Parse number of replies. 
+ line, err := readLine(cn.rd) + if err != nil { + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + if line[0] != '*' { + err := fmt.Errorf("redis: expected '*', but got line %q", line) + setCmdsErr(cmds[1:len(cmds)-1], err) + return err + } + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr) + return TxFailedErr + } + + var firstCmdErr error + + // Parse replies. + // Loop starts from 1 to omit MULTI cmd. + for i := 1; i < cmdsLen; i++ { + cmd := cmds[i] + if err := cmd.parseReply(cn.rd); err != nil { + if firstCmdErr == nil { + firstCmdErr = err + } + } + } + + return firstCmdErr +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go b/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go new file mode 100644 index 00000000000..b4c380c7644 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/parser.go @@ -0,0 +1,262 @@ +package redis + +import ( + "errors" + "fmt" + "strconv" + + "gopkg.in/bufio.v1" +) + +type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error) + +var ( + errReaderTooSmall = errors.New("redis: reader is too small") +) + +//------------------------------------------------------------------------------ + +func appendArgs(buf []byte, args []string) []byte { + buf = append(buf, '*') + buf = strconv.AppendUint(buf, uint64(len(args)), 10) + buf = append(buf, '\r', '\n') + for _, arg := range args { + buf = append(buf, '$') + buf = strconv.AppendUint(buf, uint64(len(arg)), 10) + buf = append(buf, '\r', '\n') + buf = append(buf, arg...) 
+ buf = append(buf, '\r', '\n') + } + return buf +} + +//------------------------------------------------------------------------------ + +func readLine(rd *bufio.Reader) ([]byte, error) { + line, isPrefix, err := rd.ReadLine() + if err != nil { + return line, err + } + if isPrefix { + return line, errReaderTooSmall + } + return line, nil +} + +func readN(rd *bufio.Reader, n int) ([]byte, error) { + b, err := rd.ReadN(n) + if err == bufio.ErrBufferFull { + tmp := make([]byte, n) + r := copy(tmp, b) + b = tmp + + for { + nn, err := rd.Read(b[r:]) + r += nn + if r >= n { + // Ignore error if we read enough. + break + } + if err != nil { + return nil, err + } + } + } else if err != nil { + return nil, err + } + return b, nil +} + +//------------------------------------------------------------------------------ + +func parseReq(rd *bufio.Reader) ([]string, error) { + line, err := readLine(rd) + if err != nil { + return nil, err + } + + if line[0] != '*' { + return []string{string(line)}, nil + } + numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64) + if err != nil { + return nil, err + } + + args := make([]string, 0, numReplies) + for i := int64(0); i < numReplies; i++ { + line, err = readLine(rd) + if err != nil { + return nil, err + } + if line[0] != '$' { + return nil, fmt.Errorf("redis: expected '$', but got %q", line) + } + + argLen, err := strconv.ParseInt(string(line[1:]), 10, 32) + if err != nil { + return nil, err + } + + arg, err := readN(rd, int(argLen)+2) + if err != nil { + return nil, err + } + args = append(args, string(arg[:argLen])) + } + return args, nil +} + +//------------------------------------------------------------------------------ + +func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) { + line, err := readLine(rd) + if err != nil { + return nil, err + } + + switch line[0] { + case '-': + return nil, errorf(string(line[1:])) + case '+': + return string(line[1:]), nil + case ':': + v, err := 
strconv.ParseInt(string(line[1:]), 10, 64) + if err != nil { + return nil, err + } + return v, nil + case '$': + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + return nil, Nil + } + + replyLen, err := strconv.Atoi(string(line[1:])) + if err != nil { + return nil, err + } + + b, err := readN(rd, replyLen+2) + if err != nil { + return nil, err + } + return string(b[:replyLen]), nil + case '*': + if len(line) == 3 && line[1] == '-' && line[2] == '1' { + return nil, Nil + } + + repliesNum, err := strconv.ParseInt(string(line[1:]), 10, 64) + if err != nil { + return nil, err + } + + return p(rd, repliesNum) + } + return nil, fmt.Errorf("redis: can't parse %q", line) +} + +func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, 0, n) + for i := int64(0); i < n; i++ { + v, err := parseReply(rd, parseSlice) + if err == Nil { + vals = append(vals, nil) + } else if err != nil { + return nil, err + } else { + vals = append(vals, v) + } + } + return vals, nil +} + +func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]string, 0, n) + for i := int64(0); i < n; i++ { + viface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + v, ok := viface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", viface) + } + vals = append(vals, v) + } + return vals, nil +} + +func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, error) { + vals := make([]bool, 0, n) + for i := int64(0); i < n; i++ { + viface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + v, ok := viface.(int64) + if !ok { + return nil, fmt.Errorf("got %T, expected int64", viface) + } + vals = append(vals, v == 1) + } + return vals, nil +} + +func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) { + m := make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + keyiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + key, ok := 
keyiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", keyiface) + } + + valueiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + value, ok := valueiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", valueiface) + } + + m[key] = value + } + return m, nil +} + +func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) { + zz := make([]Z, n/2) + for i := int64(0); i < n; i += 2 { + z := &zz[i/2] + + memberiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + member, ok := memberiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", memberiface) + } + z.Member = member + + scoreiface, err := parseReply(rd, nil) + if err != nil { + return nil, err + } + scorestr, ok := scoreiface.(string) + if !ok { + return nil, fmt.Errorf("got %T, expected string", scoreiface) + } + score, err := strconv.ParseFloat(scorestr, 64) + if err != nil { + return nil, err + } + z.Score = score + } + return zz, nil +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go new file mode 100644 index 00000000000..1b9e15810a8 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go @@ -0,0 +1,54 @@ +package redis + +import ( + "testing" + + "gopkg.in/bufio.v1" +) + +func BenchmarkParseReplyStatus(b *testing.B) { + benchmarkParseReply(b, "+OK\r\n", nil, false) +} + +func BenchmarkParseReplyInt(b *testing.B) { + benchmarkParseReply(b, ":1\r\n", nil, false) +} + +func BenchmarkParseReplyError(b *testing.B) { + benchmarkParseReply(b, "-Error message\r\n", nil, true) +} + +func BenchmarkParseReplyString(b *testing.B) { + benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false) +} + +func BenchmarkParseReplySlice(b *testing.B) { + benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", parseSlice, false) +} + +func benchmarkParseReply(b *testing.B, reply string, p multiBulkParser, 
wanterr bool) { + b.StopTimer() + + buf := &bufio.Buffer{} + rd := bufio.NewReader(buf) + for i := 0; i < b.N; i++ { + buf.WriteString(reply) + } + + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := parseReply(rd, p) + if !wanterr && err != nil { + panic(err) + } + } +} + +func BenchmarkAppendArgs(b *testing.B) { + buf := make([]byte, 0, 64) + args := []string{"hello", "world", "foo", "bar"} + for i := 0; i < b.N; i++ { + appendArgs(buf, args) + } +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go new file mode 100644 index 00000000000..540d6c51d9b --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go @@ -0,0 +1,91 @@ +package redis + +// Not thread-safe. +type Pipeline struct { + *Client + + closed bool +} + +func (c *Client) Pipeline() *Pipeline { + return &Pipeline{ + Client: &Client{ + baseClient: &baseClient{ + opt: c.opt, + connPool: c.connPool, + + cmds: make([]Cmder, 0), + }, + }, + } +} + +func (c *Client) Pipelined(f func(*Pipeline) error) ([]Cmder, error) { + pc := c.Pipeline() + if err := f(pc); err != nil { + return nil, err + } + cmds, err := pc.Exec() + pc.Close() + return cmds, err +} + +func (c *Pipeline) Close() error { + c.closed = true + return nil +} + +func (c *Pipeline) Discard() error { + if c.closed { + return errClosed + } + c.cmds = c.cmds[:0] + return nil +} + +// Exec always returns list of commands and error of the first failed +// command if any. 
+func (c *Pipeline) Exec() ([]Cmder, error) { + if c.closed { + return nil, errClosed + } + + cmds := c.cmds + c.cmds = make([]Cmder, 0) + + if len(cmds) == 0 { + return []Cmder{}, nil + } + + cn, err := c.conn() + if err != nil { + setCmdsErr(cmds, err) + return cmds, err + } + + if err := c.execCmds(cn, cmds); err != nil { + c.freeConn(cn, err) + return cmds, err + } + + c.putConn(cn) + return cmds, nil +} + +func (c *Pipeline) execCmds(cn *conn, cmds []Cmder) error { + if err := c.writeCmd(cn, cmds...); err != nil { + setCmdsErr(cmds, err) + return err + } + + var firstCmdErr error + for _, cmd := range cmds { + if err := cmd.parseReply(cn.rd); err != nil { + if firstCmdErr == nil { + firstCmdErr = err + } + } + } + + return firstCmdErr +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go new file mode 100644 index 00000000000..bca4d196335 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pool.go @@ -0,0 +1,405 @@ +package redis + +import ( + "container/list" + "errors" + "log" + "net" + "sync" + "time" + + "gopkg.in/bufio.v1" +) + +var ( + errClosed = errors.New("redis: client is closed") + errRateLimited = errors.New("redis: you open connections too fast") +) + +var ( + zeroTime = time.Time{} +) + +type pool interface { + Get() (*conn, bool, error) + Put(*conn) error + Remove(*conn) error + Len() int + Size() int + Close() error + Filter(func(*conn) bool) +} + +//------------------------------------------------------------------------------ + +type conn struct { + netcn net.Conn + rd *bufio.Reader + buf []byte + + inUse bool + usedAt time.Time + + readTimeout time.Duration + writeTimeout time.Duration + + elem *list.Element +} + +func newConnFunc(dial func() (net.Conn, error)) func() (*conn, error) { + return func() (*conn, error) { + netcn, err := dial() + if err != nil { + return nil, err + } + cn := &conn{ + netcn: netcn, + buf: make([]byte, 0, 64), + } + cn.rd = bufio.NewReader(cn) + 
return cn, nil + } +} + +func (cn *conn) Read(b []byte) (int, error) { + if cn.readTimeout != 0 { + cn.netcn.SetReadDeadline(time.Now().Add(cn.readTimeout)) + } else { + cn.netcn.SetReadDeadline(zeroTime) + } + return cn.netcn.Read(b) +} + +func (cn *conn) Write(b []byte) (int, error) { + if cn.writeTimeout != 0 { + cn.netcn.SetWriteDeadline(time.Now().Add(cn.writeTimeout)) + } else { + cn.netcn.SetWriteDeadline(zeroTime) + } + return cn.netcn.Write(b) +} + +func (cn *conn) RemoteAddr() net.Addr { + return cn.netcn.RemoteAddr() +} + +func (cn *conn) Close() error { + return cn.netcn.Close() +} + +//------------------------------------------------------------------------------ + +type connPool struct { + dial func() (*conn, error) + rl *rateLimiter + + opt *options + + cond *sync.Cond + conns *list.List + + idleNum int + closed bool +} + +func newConnPool(dial func() (*conn, error), opt *options) *connPool { + return &connPool{ + dial: dial, + rl: newRateLimiter(time.Second, 2*opt.PoolSize), + + opt: opt, + + cond: sync.NewCond(&sync.Mutex{}), + conns: list.New(), + } +} + +func (p *connPool) new() (*conn, error) { + if !p.rl.Check() { + return nil, errRateLimited + } + return p.dial() +} + +func (p *connPool) Get() (*conn, bool, error) { + p.cond.L.Lock() + + if p.closed { + p.cond.L.Unlock() + return nil, false, errClosed + } + + if p.opt.IdleTimeout > 0 { + for el := p.conns.Front(); el != nil; el = el.Next() { + cn := el.Value.(*conn) + if cn.inUse { + break + } + if time.Since(cn.usedAt) > p.opt.IdleTimeout { + if err := p.remove(cn); err != nil { + log.Printf("remove failed: %s", err) + } + } + } + } + + for p.conns.Len() >= p.opt.PoolSize && p.idleNum == 0 { + p.cond.Wait() + } + + if p.idleNum > 0 { + elem := p.conns.Front() + cn := elem.Value.(*conn) + if cn.inUse { + panic("pool: precondition failed") + } + cn.inUse = true + p.conns.MoveToBack(elem) + p.idleNum-- + + p.cond.L.Unlock() + return cn, false, nil + } + + if p.conns.Len() < p.opt.PoolSize { + 
cn, err := p.new() + if err != nil { + p.cond.L.Unlock() + return nil, false, err + } + + cn.inUse = true + cn.elem = p.conns.PushBack(cn) + + p.cond.L.Unlock() + return cn, true, nil + } + + panic("not reached") +} + +func (p *connPool) Put(cn *conn) error { + if cn.rd.Buffered() != 0 { + b, _ := cn.rd.ReadN(cn.rd.Buffered()) + log.Printf("redis: connection has unread data: %q", b) + return p.Remove(cn) + } + + if p.opt.IdleTimeout > 0 { + cn.usedAt = time.Now() + } + + p.cond.L.Lock() + if p.closed { + p.cond.L.Unlock() + return errClosed + } + cn.inUse = false + p.conns.MoveToFront(cn.elem) + p.idleNum++ + p.cond.Signal() + p.cond.L.Unlock() + + return nil +} + +func (p *connPool) Remove(cn *conn) error { + p.cond.L.Lock() + if p.closed { + // Noop, connection is already closed. + p.cond.L.Unlock() + return nil + } + err := p.remove(cn) + p.cond.Signal() + p.cond.L.Unlock() + return err +} + +func (p *connPool) remove(cn *conn) error { + p.conns.Remove(cn.elem) + cn.elem = nil + if !cn.inUse { + p.idleNum-- + } + return cn.Close() +} + +// Len returns number of idle connections. +func (p *connPool) Len() int { + defer p.cond.L.Unlock() + p.cond.L.Lock() + return p.idleNum +} + +// Size returns number of connections in the pool. 
+func (p *connPool) Size() int { + defer p.cond.L.Unlock() + p.cond.L.Lock() + return p.conns.Len() +} + +func (p *connPool) Filter(f func(*conn) bool) { + p.cond.L.Lock() + for el, next := p.conns.Front(), p.conns.Front(); el != nil; el = next { + next = el.Next() + cn := el.Value.(*conn) + if !f(cn) { + p.remove(cn) + } + } + p.cond.L.Unlock() +} + +func (p *connPool) Close() error { + defer p.cond.L.Unlock() + p.cond.L.Lock() + if p.closed { + return nil + } + p.closed = true + p.rl.Close() + var retErr error + for { + e := p.conns.Front() + if e == nil { + break + } + if err := p.remove(e.Value.(*conn)); err != nil { + log.Printf("cn.Close failed: %s", err) + retErr = err + } + } + return retErr +} + +//------------------------------------------------------------------------------ + +type singleConnPool struct { + pool pool + + cnMtx sync.Mutex + cn *conn + + reusable bool + + closed bool +} + +func newSingleConnPool(pool pool, reusable bool) *singleConnPool { + return &singleConnPool{ + pool: pool, + reusable: reusable, + } +} + +func (p *singleConnPool) SetConn(cn *conn) { + p.cnMtx.Lock() + p.cn = cn + p.cnMtx.Unlock() +} + +func (p *singleConnPool) Get() (*conn, bool, error) { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + + if p.closed { + return nil, false, errClosed + } + if p.cn != nil { + return p.cn, false, nil + } + + cn, isNew, err := p.pool.Get() + if err != nil { + return nil, false, err + } + p.cn = cn + + return p.cn, isNew, nil +} + +func (p *singleConnPool) Put(cn *conn) error { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn != cn { + panic("p.cn != cn") + } + if p.closed { + return errClosed + } + return nil +} + +func (p *singleConnPool) put() error { + err := p.pool.Put(p.cn) + p.cn = nil + return err +} + +func (p *singleConnPool) Remove(cn *conn) error { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn == nil { + panic("p.cn == nil") + } + if p.cn != cn { + panic("p.cn != cn") + } + if p.closed { + return errClosed + } + return 
p.remove() +} + +func (p *singleConnPool) remove() error { + err := p.pool.Remove(p.cn) + p.cn = nil + return err +} + +func (p *singleConnPool) Len() int { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn == nil { + return 0 + } + return 1 +} + +func (p *singleConnPool) Size() int { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.cn == nil { + return 0 + } + return 1 +} + +func (p *singleConnPool) Filter(f func(*conn) bool) { + p.cnMtx.Lock() + if p.cn != nil { + if !f(p.cn) { + p.remove() + } + } + p.cnMtx.Unlock() +} + +func (p *singleConnPool) Close() error { + defer p.cnMtx.Unlock() + p.cnMtx.Lock() + if p.closed { + return nil + } + p.closed = true + var err error + if p.cn != nil { + if p.reusable { + err = p.put() + } else { + err = p.remove() + } + } + return err +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go b/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go new file mode 100644 index 00000000000..6ac130bac45 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go @@ -0,0 +1,134 @@ +package redis + +import ( + "fmt" + "time" +) + +// Not thread-safe. 
+type PubSub struct { + *baseClient +} + +func (c *Client) PubSub() *PubSub { + return &PubSub{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, false), + }, + } +} + +func (c *Client) Publish(channel, message string) *IntCmd { + req := NewIntCmd("PUBLISH", channel, message) + c.Process(req) + return req +} + +type Message struct { + Channel string + Payload string +} + +func (m *Message) String() string { + return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) +} + +type PMessage struct { + Channel string + Pattern string + Payload string +} + +func (m *PMessage) String() string { + return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload) +} + +type Subscription struct { + Kind string + Channel string + Count int +} + +func (m *Subscription) String() string { + return fmt.Sprintf("%s: %s", m.Kind, m.Channel) +} + +func (c *PubSub) Receive() (interface{}, error) { + return c.ReceiveTimeout(0) +} + +func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) { + cn, err := c.conn() + if err != nil { + return nil, err + } + cn.readTimeout = timeout + + cmd := NewSliceCmd() + if err := cmd.parseReply(cn.rd); err != nil { + return nil, err + } + + reply := cmd.Val() + + msgName := reply[0].(string) + switch msgName { + case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": + return &Subscription{ + Kind: msgName, + Channel: reply[1].(string), + Count: int(reply[2].(int64)), + }, nil + case "message": + return &Message{ + Channel: reply[1].(string), + Payload: reply[2].(string), + }, nil + case "pmessage": + return &PMessage{ + Pattern: reply[1].(string), + Channel: reply[2].(string), + Payload: reply[3].(string), + }, nil + } + return nil, fmt.Errorf("redis: unsupported message name: %q", msgName) +} + +func (c *PubSub) subscribe(cmd string, channels ...string) error { + cn, err := c.conn() + if err != nil { + return err + } + + args := append([]string{cmd}, channels...) + req := NewSliceCmd(args...) 
+ return c.writeCmd(cn, req) +} + +func (c *PubSub) Subscribe(channels ...string) error { + return c.subscribe("SUBSCRIBE", channels...) +} + +func (c *PubSub) PSubscribe(patterns ...string) error { + return c.subscribe("PSUBSCRIBE", patterns...) +} + +func (c *PubSub) unsubscribe(cmd string, channels ...string) error { + cn, err := c.conn() + if err != nil { + return err + } + + args := append([]string{cmd}, channels...) + req := NewSliceCmd(args...) + return c.writeCmd(cn, req) +} + +func (c *PubSub) Unsubscribe(channels ...string) error { + return c.unsubscribe("UNSUBSCRIBE", channels...) +} + +func (c *PubSub) PUnsubscribe(patterns ...string) error { + return c.unsubscribe("PUNSUBSCRIBE", patterns...) +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go new file mode 100644 index 00000000000..20d85127077 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go @@ -0,0 +1,53 @@ +package redis + +import ( + "sync/atomic" + "time" +) + +type rateLimiter struct { + v int64 + + _closed int64 +} + +func newRateLimiter(limit time.Duration, bucketSize int) *rateLimiter { + rl := &rateLimiter{ + v: int64(bucketSize), + } + go rl.loop(limit, int64(bucketSize)) + return rl +} + +func (rl *rateLimiter) loop(limit time.Duration, bucketSize int64) { + for { + if rl.closed() { + break + } + if v := atomic.LoadInt64(&rl.v); v < bucketSize { + atomic.AddInt64(&rl.v, 1) + } + time.Sleep(limit) + } +} + +func (rl *rateLimiter) Check() bool { + for { + if v := atomic.LoadInt64(&rl.v); v > 0 { + if atomic.CompareAndSwapInt64(&rl.v, v, v-1) { + return true + } + } else { + return false + } + } +} + +func (rl *rateLimiter) Close() error { + atomic.StoreInt64(&rl._closed, 1) + return nil +} + +func (rl *rateLimiter) closed() bool { + return atomic.LoadInt64(&rl._closed) == 1 +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go 
b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go new file mode 100644 index 00000000000..2f0d41a2eb9 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit_test.go @@ -0,0 +1,31 @@ +package redis + +import ( + "sync" + "testing" + "time" +) + +func TestRateLimiter(t *testing.T) { + var n = 100000 + if testing.Short() { + n = 1000 + } + rl := newRateLimiter(time.Minute, n) + + wg := &sync.WaitGroup{} + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + if !rl.Check() { + panic("check failed") + } + wg.Done() + }() + } + wg.Wait() + + if rl.Check() && rl.Check() { + t.Fatal("check passed") + } +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go b/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go new file mode 100644 index 00000000000..0d15dc8f854 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/redis.go @@ -0,0 +1,231 @@ +package redis + +import ( + "log" + "net" + "time" +) + +type baseClient struct { + connPool pool + opt *options + cmds []Cmder +} + +func (c *baseClient) writeCmd(cn *conn, cmds ...Cmder) error { + buf := cn.buf[:0] + for _, cmd := range cmds { + buf = appendArgs(buf, cmd.args()) + } + + _, err := cn.Write(buf) + return err +} + +func (c *baseClient) conn() (*conn, error) { + cn, isNew, err := c.connPool.Get() + if err != nil { + return nil, err + } + + if isNew { + if err := c.initConn(cn); err != nil { + c.removeConn(cn) + return nil, err + } + } + + return cn, nil +} + +func (c *baseClient) initConn(cn *conn) error { + if c.opt.Password == "" && c.opt.DB == 0 { + return nil + } + + pool := newSingleConnPool(c.connPool, false) + pool.SetConn(cn) + + // Client is not closed because we want to reuse underlying connection. 
+ client := &Client{ + baseClient: &baseClient{ + opt: c.opt, + connPool: pool, + }, + } + + if c.opt.Password != "" { + if err := client.Auth(c.opt.Password).Err(); err != nil { + return err + } + } + + if c.opt.DB > 0 { + if err := client.Select(c.opt.DB).Err(); err != nil { + return err + } + } + + return nil +} + +func (c *baseClient) freeConn(cn *conn, ei error) error { + if cn.rd.Buffered() > 0 { + return c.connPool.Remove(cn) + } + if _, ok := ei.(redisError); ok { + return c.connPool.Put(cn) + } + return c.connPool.Remove(cn) +} + +func (c *baseClient) removeConn(cn *conn) { + if err := c.connPool.Remove(cn); err != nil { + log.Printf("pool.Remove failed: %s", err) + } +} + +func (c *baseClient) putConn(cn *conn) { + if err := c.connPool.Put(cn); err != nil { + log.Printf("pool.Put failed: %s", err) + } +} + +func (c *baseClient) Process(cmd Cmder) { + if c.cmds == nil { + c.run(cmd) + } else { + c.cmds = append(c.cmds, cmd) + } +} + +func (c *baseClient) run(cmd Cmder) { + cn, err := c.conn() + if err != nil { + cmd.setErr(err) + return + } + + if timeout := cmd.writeTimeout(); timeout != nil { + cn.writeTimeout = *timeout + } else { + cn.writeTimeout = c.opt.WriteTimeout + } + + if timeout := cmd.readTimeout(); timeout != nil { + cn.readTimeout = *timeout + } else { + cn.readTimeout = c.opt.ReadTimeout + } + + if err := c.writeCmd(cn, cmd); err != nil { + c.freeConn(cn, err) + cmd.setErr(err) + return + } + + if err := cmd.parseReply(cn.rd); err != nil { + c.freeConn(cn, err) + return + } + + c.putConn(cn) +} + +// Close closes the client, releasing any open resources. 
+func (c *baseClient) Close() error { + return c.connPool.Close() +} + +//------------------------------------------------------------------------------ + +type options struct { + Password string + DB int64 + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + PoolSize int + IdleTimeout time.Duration +} + +type Options struct { + Network string + Addr string + + // Dialer creates new network connection and has priority over + // Network and Addr options. + Dialer func() (net.Conn, error) + + Password string + DB int64 + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + PoolSize int + IdleTimeout time.Duration +} + +func (opt *Options) getPoolSize() int { + if opt.PoolSize == 0 { + return 10 + } + return opt.PoolSize +} + +func (opt *Options) getDialTimeout() time.Duration { + if opt.DialTimeout == 0 { + return 5 * time.Second + } + return opt.DialTimeout +} + +func (opt *Options) options() *options { + return &options{ + DB: opt.DB, + Password: opt.Password, + + DialTimeout: opt.getDialTimeout(), + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.getPoolSize(), + IdleTimeout: opt.IdleTimeout, + } +} + +type Client struct { + *baseClient +} + +func NewClient(clOpt *Options) *Client { + opt := clOpt.options() + dialer := clOpt.Dialer + if dialer == nil { + dialer = func() (net.Conn, error) { + return net.DialTimeout(clOpt.Network, clOpt.Addr, opt.DialTimeout) + } + } + return &Client{ + baseClient: &baseClient{ + opt: opt, + connPool: newConnPool(newConnFunc(dialer), opt), + }, + } +} + +// Deprecated. Use NewClient instead. +func NewTCPClient(opt *Options) *Client { + opt.Network = "tcp" + return NewClient(opt) +} + +// Deprecated. Use NewClient instead. 
+func NewUnixClient(opt *Options) *Client { + opt.Network = "unix" + return NewClient(opt) +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go new file mode 100644 index 00000000000..49f84d0e1e4 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go @@ -0,0 +1,3333 @@ +package redis_test + +import ( + "bytes" + "fmt" + "io" + "net" + "sort" + "strconv" + "sync" + "testing" + "time" + + "gopkg.in/redis.v2" + + . "gopkg.in/check.v1" +) + +const redisAddr = ":6379" + +//------------------------------------------------------------------------------ + +func sortStrings(slice []string) []string { + sort.Strings(slice) + return slice +} + +//------------------------------------------------------------------------------ + +type RedisConnectorTest struct{} + +var _ = Suite(&RedisConnectorTest{}) + +func (t *RedisConnectorTest) TestShutdown(c *C) { + c.Skip("shutdowns server") + + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + shutdown := client.Shutdown() + c.Check(shutdown.Err(), Equals, io.EOF) + c.Check(shutdown.Val(), Equals, "") + + ping := client.Ping() + c.Check(ping.Err(), ErrorMatches, "dial tcp :[0-9]+: connection refused") + c.Check(ping.Val(), Equals, "") +} + +func (t *RedisConnectorTest) TestNewTCPClient(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + ping := client.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestNewUnixClient(c *C) { + c.Skip("not available on Travis CI") + + client := redis.NewUnixClient(&redis.Options{ + Addr: "/tmp/redis.sock", + }) + ping := client.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestDialer(c *C) { + client := redis.NewClient(&redis.Options{ + Dialer: func() (net.Conn, error) { + 
return net.Dial("tcp", redisAddr) + }, + }) + ping := client.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + c.Assert(client.Close(), IsNil) + + ping := client.Ping() + c.Assert(ping.Err(), Not(IsNil)) + c.Assert(ping.Err().Error(), Equals, "redis: client is closed") + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestPubSubClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + pubsub := client.PubSub() + c.Assert(pubsub.Close(), IsNil) + + _, err := pubsub.Receive() + c.Assert(err, Not(IsNil)) + c.Assert(err.Error(), Equals, "redis: client is closed") + + ping := client.Ping() + c.Assert(ping.Err(), IsNil) + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestMultiClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + multi := client.Multi() + c.Assert(multi.Close(), IsNil) + + _, err := multi.Exec(func() error { + multi.Ping() + return nil + }) + c.Assert(err, Not(IsNil)) + c.Assert(err.Error(), Equals, "redis: client is closed") + + ping := client.Ping() + c.Assert(ping.Err(), IsNil) + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestPipelineClose(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + + _, err := client.Pipelined(func(pipeline *redis.Pipeline) error { + c.Assert(pipeline.Close(), IsNil) + pipeline.Ping() + return nil + }) + c.Assert(err, Not(IsNil)) + c.Assert(err.Error(), Equals, "redis: client is closed") + + ping := client.Ping() + c.Assert(ping.Err(), IsNil) + + c.Assert(client.Close(), IsNil) +} + +func (t *RedisConnectorTest) TestIdleTimeout(c *C) { + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + IdleTimeout: time.Nanosecond, + }) + for i := 0; i < 10; i++ { + 
c.Assert(client.Ping().Err(), IsNil) + } +} + +func (t *RedisConnectorTest) TestSelectDb(c *C) { + client1 := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + DB: 1, + }) + c.Assert(client1.Set("key", "db1").Err(), IsNil) + + client2 := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + DB: 2, + }) + c.Assert(client2.Get("key").Err(), Equals, redis.Nil) +} + +//------------------------------------------------------------------------------ + +type RedisConnPoolTest struct { + client *redis.Client +} + +var _ = Suite(&RedisConnPoolTest{}) + +func (t *RedisConnPoolTest) SetUpTest(c *C) { + t.client = redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) +} + +func (t *RedisConnPoolTest) TearDownTest(c *C) { + c.Assert(t.client.FlushDb().Err(), IsNil) + c.Assert(t.client.Close(), IsNil) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSize(c *C) { + wg := &sync.WaitGroup{} + for i := 0; i < 1000; i++ { + wg.Add(1) + go func() { + ping := t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + wg.Done() + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 10) + c.Assert(t.client.Pool().Len(), Equals, 10) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnPipelineClient(c *C) { + const N = 1000 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + pipeline := t.client.Pipeline() + ping := pipeline.Ping() + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + + c.Assert(pipeline.Close(), IsNil) + + wg.Done() + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 10) + c.Assert(t.client.Pool().Len(), Equals, 10) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnMultiClient(c *C) { + const N = 1000 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + multi := t.client.Multi() + var ping *redis.StatusCmd + cmds, err := multi.Exec(func() 
error { + ping = multi.Ping() + return nil + }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + + c.Assert(multi.Close(), IsNil) + + wg.Done() + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 10) + c.Assert(t.client.Pool().Len(), Equals, 10) +} + +func (t *RedisConnPoolTest) TestConnPoolMaxSizeOnPubSub(c *C) { + const N = 10 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func() { + defer wg.Done() + pubsub := t.client.PubSub() + c.Assert(pubsub.Subscribe(), IsNil) + c.Assert(pubsub.Close(), IsNil) + }() + } + wg.Wait() + + c.Assert(t.client.Pool().Size(), Equals, 0) + c.Assert(t.client.Pool().Len(), Equals, 0) +} + +func (t *RedisConnPoolTest) TestConnPoolRemovesBrokenConn(c *C) { + cn, _, err := t.client.Pool().Get() + c.Assert(err, IsNil) + c.Assert(cn.Close(), IsNil) + c.Assert(t.client.Pool().Put(cn), IsNil) + + ping := t.client.Ping() + c.Assert(ping.Err().Error(), Equals, "use of closed network connection") + c.Assert(ping.Val(), Equals, "") + + ping = t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + + c.Assert(t.client.Pool().Size(), Equals, 1) + c.Assert(t.client.Pool().Len(), Equals, 1) +} + +func (t *RedisConnPoolTest) TestConnPoolReusesConn(c *C) { + for i := 0; i < 1000; i++ { + ping := t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") + } + + c.Assert(t.client.Pool().Size(), Equals, 1) + c.Assert(t.client.Pool().Len(), Equals, 1) +} + +//------------------------------------------------------------------------------ + +type RedisTest struct { + client *redis.Client +} + +var _ = Suite(&RedisTest{}) + +func Test(t *testing.T) { TestingT(t) } + +func (t *RedisTest) SetUpTest(c *C) { + t.client = redis.NewTCPClient(&redis.Options{ + Addr: ":6379", + }) + + // This is much faster than Flushall. 
+ c.Assert(t.client.Select(1).Err(), IsNil) + c.Assert(t.client.FlushDb().Err(), IsNil) + c.Assert(t.client.Select(0).Err(), IsNil) + c.Assert(t.client.FlushDb().Err(), IsNil) +} + +func (t *RedisTest) TearDownTest(c *C) { + c.Assert(t.client.Close(), IsNil) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdStringMethod(c *C) { + set := t.client.Set("foo", "bar") + c.Assert(set.String(), Equals, "SET foo bar: OK") + + get := t.client.Get("foo") + c.Assert(get.String(), Equals, "GET foo: bar") +} + +func (t *RedisTest) TestCmdStringMethodError(c *C) { + get2 := t.client.Get("key_does_not_exists") + c.Assert(get2.String(), Equals, "GET key_does_not_exists: redis: nil") +} + +func (t *RedisTest) TestRunWithouthCheckingErrVal(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") + + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") +} + +func (t *RedisTest) TestGetSpecChars(c *C) { + set := t.client.Set("key", "hello1\r\nhello2\r\n") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello1\r\nhello2\r\n") +} + +func (t *RedisTest) TestGetBigVal(c *C) { + val := string(bytes.Repeat([]byte{'*'}, 1<<16)) + + set := t.client.Set("key", val) + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, val) +} + +func (t *RedisTest) TestManyKeys(c *C) { + var n = 100000 + + for i := 0; i < n; i++ { + t.client.Set("keys.key"+strconv.Itoa(i), "hello"+strconv.Itoa(i)) + } + keys := t.client.Keys("keys.*") + c.Assert(keys.Err(), IsNil) + c.Assert(len(keys.Val()), Equals, n) +} + +func (t *RedisTest) TestManyKeys2(c *C) { + var n = 100000 + + keys := 
[]string{"non-existent-key"} + for i := 0; i < n; i++ { + key := "keys.key" + strconv.Itoa(i) + t.client.Set(key, "hello"+strconv.Itoa(i)) + keys = append(keys, key) + } + keys = append(keys, "non-existent-key") + + mget := t.client.MGet(keys...) + c.Assert(mget.Err(), IsNil) + c.Assert(len(mget.Val()), Equals, n+2) + vals := mget.Val() + for i := 0; i < n; i++ { + c.Assert(vals[i+1], Equals, "hello"+strconv.Itoa(i)) + } + c.Assert(vals[0], Equals, nil) + c.Assert(vals[n+1], Equals, nil) +} + +func (t *RedisTest) TestStringCmdHelpers(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + + n, err := t.client.Get("key").Int64() + c.Assert(err, IsNil) + c.Assert(n, Equals, int64(10)) + + un, err := t.client.Get("key").Uint64() + c.Assert(err, IsNil) + c.Assert(un, Equals, uint64(10)) + + f, err := t.client.Get("key").Float64() + c.Assert(err, IsNil) + c.Assert(f, Equals, float64(10)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestAuth(c *C) { + auth := t.client.Auth("password") + c.Assert(auth.Err(), ErrorMatches, "ERR Client sent AUTH, but no password is set") + c.Assert(auth.Val(), Equals, "") +} + +func (t *RedisTest) TestEcho(c *C) { + echo := t.client.Echo("hello") + c.Assert(echo.Err(), IsNil) + c.Assert(echo.Val(), Equals, "hello") +} + +func (t *RedisTest) TestPing(c *C) { + ping := t.client.Ping() + c.Assert(ping.Err(), IsNil) + c.Assert(ping.Val(), Equals, "PONG") +} + +func (t *RedisTest) TestSelect(c *C) { + sel := t.client.Select(1) + c.Assert(sel.Err(), IsNil) + c.Assert(sel.Val(), Equals, "OK") +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdKeysDel(c *C) { + set := t.client.Set("key1", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + set = t.client.Set("key2", "World") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + del := t.client.Del("key1", "key2", 
"key3") + c.Assert(del.Err(), IsNil) + c.Assert(del.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestCmdKeysDump(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + dump := t.client.Dump("key") + c.Assert(dump.Err(), IsNil) + c.Assert(dump.Val(), Equals, "\x00\x05hello\x06\x00\xf5\x9f\xb7\xf6\x90a\x1c\x99") +} + +func (t *RedisTest) TestCmdKeysExists(c *C) { + set := t.client.Set("key1", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + exists := t.client.Exists("key1") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, true) + + exists = t.client.Exists("key2") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, false) +} + +func (t *RedisTest) TestCmdKeysExpire(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expire := t.client.Expire("key", 10*time.Second) + c.Assert(expire.Err(), IsNil) + c.Assert(expire.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 10*time.Second) + + set = t.client.Set("key", "Hello World") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + ttl = t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val() < 0, Equals, true) +} + +func (t *RedisTest) TestCmdKeysExpireAt(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + exists := t.client.Exists("key") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, true) + + expireAt := t.client.ExpireAt("key", time.Now().Add(-time.Hour)) + c.Assert(expireAt.Err(), IsNil) + c.Assert(expireAt.Val(), Equals, true) + + exists = t.client.Exists("key") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, false) +} + +func (t *RedisTest) TestCmdKeysKeys(c *C) { + mset := t.client.MSet("one", "1", "two", "2", "three", "3", "four", "4") + 
c.Assert(mset.Err(), IsNil) + c.Assert(mset.Val(), Equals, "OK") + + keys := t.client.Keys("*o*") + c.Assert(keys.Err(), IsNil) + c.Assert(sortStrings(keys.Val()), DeepEquals, []string{"four", "one", "two"}) + + keys = t.client.Keys("t??") + c.Assert(keys.Err(), IsNil) + c.Assert(keys.Val(), DeepEquals, []string{"two"}) + + keys = t.client.Keys("*") + c.Assert(keys.Err(), IsNil) + c.Assert( + sortStrings(keys.Val()), + DeepEquals, + []string{"four", "one", "three", "two"}, + ) +} + +func (t *RedisTest) TestCmdKeysMigrate(c *C) { + migrate := t.client.Migrate("localhost", "6380", "key", 0, 0) + c.Assert(migrate.Err(), IsNil) + c.Assert(migrate.Val(), Equals, "NOKEY") + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + migrate = t.client.Migrate("localhost", "6380", "key", 0, 0) + c.Assert(migrate.Err(), ErrorMatches, "IOERR error or timeout writing to target instance") + c.Assert(migrate.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdKeysMove(c *C) { + move := t.client.Move("key", 1) + c.Assert(move.Err(), IsNil) + c.Assert(move.Val(), Equals, false) + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + move = t.client.Move("key", 1) + c.Assert(move.Err(), IsNil) + c.Assert(move.Val(), Equals, true) + + get := t.client.Get("key") + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") + + sel := t.client.Select(1) + c.Assert(sel.Err(), IsNil) + c.Assert(sel.Val(), Equals, "OK") + + get = t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysObject(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + refCount := t.client.ObjectRefCount("key") + c.Assert(refCount.Err(), IsNil) + c.Assert(refCount.Val(), Equals, int64(1)) + + enc := t.client.ObjectEncoding("key") + c.Assert(enc.Err(), IsNil) + 
c.Assert(enc.Val(), Equals, "raw") + + idleTime := t.client.ObjectIdleTime("key") + c.Assert(idleTime.Err(), IsNil) + c.Assert(idleTime.Val(), Equals, time.Duration(0)) +} + +func (t *RedisTest) TestCmdKeysPersist(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expire := t.client.Expire("key", 10*time.Second) + c.Assert(expire.Err(), IsNil) + c.Assert(expire.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 10*time.Second) + + persist := t.client.Persist("key") + c.Assert(persist.Err(), IsNil) + c.Assert(persist.Val(), Equals, true) + + ttl = t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val() < 0, Equals, true) +} + +func (t *RedisTest) TestCmdKeysPExpire(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expiration := 900 * time.Millisecond + pexpire := t.client.PExpire("key", expiration) + c.Assert(pexpire.Err(), IsNil) + c.Assert(pexpire.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, time.Second) + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) +} + +func (t *RedisTest) TestCmdKeysPExpireAt(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expiration := 900 * time.Millisecond + pexpireat := t.client.PExpireAt("key", time.Now().Add(expiration)) + c.Assert(pexpireat.Err(), IsNil) + c.Assert(pexpireat.Val(), Equals, true) + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, time.Second) + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) 
+} + +func (t *RedisTest) TestCmdKeysPTTL(c *C) { + set := t.client.Set("key", "Hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expiration := time.Second + expire := t.client.Expire("key", expiration) + c.Assert(expire.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) +} + +func (t *RedisTest) TestCmdKeysRandomKey(c *C) { + randomKey := t.client.RandomKey() + c.Assert(randomKey.Err(), Equals, redis.Nil) + c.Assert(randomKey.Val(), Equals, "") + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + randomKey = t.client.RandomKey() + c.Assert(randomKey.Err(), IsNil) + c.Assert(randomKey.Val(), Equals, "key") +} + +func (t *RedisTest) TestCmdKeysRename(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + status := t.client.Rename("key", "key1") + c.Assert(status.Err(), IsNil) + c.Assert(status.Val(), Equals, "OK") + + get := t.client.Get("key1") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysRenameNX(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + renameNX := t.client.RenameNX("key", "key1") + c.Assert(renameNX.Err(), IsNil) + c.Assert(renameNX.Val(), Equals, true) + + get := t.client.Get("key1") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysRestore(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + dump := t.client.Dump("key") + c.Assert(dump.Err(), IsNil) + + del := t.client.Del("key") + c.Assert(del.Err(), IsNil) + + restore := t.client.Restore("key", 0, dump.Val()) + c.Assert(restore.Err(), IsNil) + 
c.Assert(restore.Val(), Equals, "OK") + + type_ := t.client.Type("key") + c.Assert(type_.Err(), IsNil) + c.Assert(type_.Val(), Equals, "string") + + lRange := t.client.Get("key") + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdKeysSort(c *C) { + lPush := t.client.LPush("list", "1") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(1)) + lPush = t.client.LPush("list", "3") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(2)) + lPush = t.client.LPush("list", "2") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(3)) + + sort := t.client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}) + c.Assert(sort.Err(), IsNil) + c.Assert(sort.Val(), DeepEquals, []string{"1", "2"}) +} + +func (t *RedisTest) TestCmdKeysSortBy(c *C) { + lPush := t.client.LPush("list", "1") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(1)) + lPush = t.client.LPush("list", "3") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(2)) + lPush = t.client.LPush("list", "2") + c.Assert(lPush.Err(), IsNil) + c.Assert(lPush.Val(), Equals, int64(3)) + + set := t.client.Set("weight_1", "5") + c.Assert(set.Err(), IsNil) + set = t.client.Set("weight_2", "2") + c.Assert(set.Err(), IsNil) + set = t.client.Set("weight_3", "8") + c.Assert(set.Err(), IsNil) + + sort := t.client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC", By: "weight_*"}) + c.Assert(sort.Err(), IsNil) + c.Assert(sort.Val(), DeepEquals, []string{"2", "1"}) +} + +func (t *RedisTest) TestCmdKeysTTL(c *C) { + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val() < 0, Equals, true) + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + expire := t.client.Expire("key", 60*time.Second) + c.Assert(expire.Err(), IsNil) + c.Assert(expire.Val(), Equals, true) + + ttl = t.client.TTL("key") + 
c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 60*time.Second) +} + +func (t *RedisTest) TestCmdKeysType(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + type_ := t.client.Type("key") + c.Assert(type_.Err(), IsNil) + c.Assert(type_.Val(), Equals, "string") +} + +func (t *RedisTest) TestCmdScan(c *C) { + for i := 0; i < 1000; i++ { + set := t.client.Set(fmt.Sprintf("key%d", i), "hello") + c.Assert(set.Err(), IsNil) + } + + cursor, keys, err := t.client.Scan(0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdSScan(c *C) { + for i := 0; i < 1000; i++ { + sadd := t.client.SAdd("myset", fmt.Sprintf("member%d", i)) + c.Assert(sadd.Err(), IsNil) + } + + cursor, keys, err := t.client.SScan("myset", 0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdHScan(c *C) { + for i := 0; i < 1000; i++ { + sadd := t.client.HSet("myhash", fmt.Sprintf("key%d", i), "hello") + c.Assert(sadd.Err(), IsNil) + } + + cursor, keys, err := t.client.HScan("myhash", 0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdZScan(c *C) { + for i := 0; i < 1000; i++ { + sadd := t.client.ZAdd("myset", redis.Z{float64(i), fmt.Sprintf("member%d", i)}) + c.Assert(sadd.Err(), IsNil) + } + + cursor, keys, err := t.client.ZScan("myset", 0, "", 0).Result() + c.Assert(err, IsNil) + c.Assert(cursor > 0, Equals, true) + c.Assert(len(keys) > 0, Equals, true) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestStringsAppend(c *C) { + exists := t.client.Exists("key") + c.Assert(exists.Err(), IsNil) + c.Assert(exists.Val(), Equals, false) + + append := t.client.Append("key", "Hello") + 
c.Assert(append.Err(), IsNil) + c.Assert(append.Val(), Equals, int64(5)) + + append = t.client.Append("key", " World") + c.Assert(append.Err(), IsNil) + c.Assert(append.Val(), Equals, int64(11)) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "Hello World") +} + +func (t *RedisTest) TestStringsBitCount(c *C) { + set := t.client.Set("key", "foobar") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitCount := t.client.BitCount("key", nil) + c.Assert(bitCount.Err(), IsNil) + c.Assert(bitCount.Val(), Equals, int64(26)) + + bitCount = t.client.BitCount("key", &redis.BitCount{0, 0}) + c.Assert(bitCount.Err(), IsNil) + c.Assert(bitCount.Val(), Equals, int64(4)) + + bitCount = t.client.BitCount("key", &redis.BitCount{1, 1}) + c.Assert(bitCount.Err(), IsNil) + c.Assert(bitCount.Val(), Equals, int64(6)) +} + +func (t *RedisTest) TestStringsBitOpAnd(c *C) { + set := t.client.Set("key1", "1") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + set = t.client.Set("key2", "0") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpAnd := t.client.BitOpAnd("dest", "key1", "key2") + c.Assert(bitOpAnd.Err(), IsNil) + c.Assert(bitOpAnd.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "0") +} + +func (t *RedisTest) TestStringsBitOpOr(c *C) { + set := t.client.Set("key1", "1") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + set = t.client.Set("key2", "0") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpOr := t.client.BitOpOr("dest", "key1", "key2") + c.Assert(bitOpOr.Err(), IsNil) + c.Assert(bitOpOr.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "1") +} + +func (t *RedisTest) TestStringsBitOpXor(c *C) { + set := t.client.Set("key1", "\xff") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + 
set = t.client.Set("key2", "\x0f") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpXor := t.client.BitOpXor("dest", "key1", "key2") + c.Assert(bitOpXor.Err(), IsNil) + c.Assert(bitOpXor.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "\xf0") +} + +func (t *RedisTest) TestStringsBitOpNot(c *C) { + set := t.client.Set("key1", "\x00") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + bitOpNot := t.client.BitOpNot("dest", "key1") + c.Assert(bitOpNot.Err(), IsNil) + c.Assert(bitOpNot.Val(), Equals, int64(1)) + + get := t.client.Get("dest") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "\xff") +} + +func (t *RedisTest) TestStringsDecr(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + decr := t.client.Decr("key") + c.Assert(decr.Err(), IsNil) + c.Assert(decr.Val(), Equals, int64(9)) + + set = t.client.Set("key", "234293482390480948029348230948") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + decr = t.client.Decr("key") + c.Assert(decr.Err(), ErrorMatches, "ERR value is not an integer or out of range") + c.Assert(decr.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestStringsDecrBy(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + decrBy := t.client.DecrBy("key", 5) + c.Assert(decrBy.Err(), IsNil) + c.Assert(decrBy.Val(), Equals, int64(5)) +} + +func (t *RedisTest) TestStringsGet(c *C) { + get := t.client.Get("_") + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") + + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get = t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestStringsGetBit(c *C) { + setBit := t.client.SetBit("key", 7, 1) + c.Assert(setBit.Err(), 
IsNil) + c.Assert(setBit.Val(), Equals, int64(0)) + + getBit := t.client.GetBit("key", 0) + c.Assert(getBit.Err(), IsNil) + c.Assert(getBit.Val(), Equals, int64(0)) + + getBit = t.client.GetBit("key", 7) + c.Assert(getBit.Err(), IsNil) + c.Assert(getBit.Val(), Equals, int64(1)) + + getBit = t.client.GetBit("key", 100) + c.Assert(getBit.Err(), IsNil) + c.Assert(getBit.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestStringsGetRange(c *C) { + set := t.client.Set("key", "This is a string") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + getRange := t.client.GetRange("key", 0, 3) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "This") + + getRange = t.client.GetRange("key", -3, -1) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "ing") + + getRange = t.client.GetRange("key", 0, -1) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "This is a string") + + getRange = t.client.GetRange("key", 10, 100) + c.Assert(getRange.Err(), IsNil) + c.Assert(getRange.Val(), Equals, "string") +} + +func (t *RedisTest) TestStringsGetSet(c *C) { + incr := t.client.Incr("key") + c.Assert(incr.Err(), IsNil) + c.Assert(incr.Val(), Equals, int64(1)) + + getSet := t.client.GetSet("key", "0") + c.Assert(getSet.Err(), IsNil) + c.Assert(getSet.Val(), Equals, "1") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "0") +} + +func (t *RedisTest) TestStringsIncr(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incr := t.client.Incr("key") + c.Assert(incr.Err(), IsNil) + c.Assert(incr.Val(), Equals, int64(11)) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "11") +} + +func (t *RedisTest) TestStringsIncrBy(c *C) { + set := t.client.Set("key", "10") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incrBy := t.client.IncrBy("key", 5) + 
c.Assert(incrBy.Err(), IsNil) + c.Assert(incrBy.Val(), Equals, int64(15)) +} + +func (t *RedisTest) TestIncrByFloat(c *C) { + set := t.client.Set("key", "10.50") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incrByFloat := t.client.IncrByFloat("key", 0.1) + c.Assert(incrByFloat.Err(), IsNil) + c.Assert(incrByFloat.Val(), Equals, 10.6) + + set = t.client.Set("key", "5.0e3") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + incrByFloat = t.client.IncrByFloat("key", 2.0e2) + c.Assert(incrByFloat.Err(), IsNil) + c.Assert(incrByFloat.Val(), Equals, float64(5200)) +} + +func (t *RedisTest) TestIncrByFloatOverflow(c *C) { + incrByFloat := t.client.IncrByFloat("key", 996945661) + c.Assert(incrByFloat.Err(), IsNil) + c.Assert(incrByFloat.Val(), Equals, float64(996945661)) +} + +func (t *RedisTest) TestStringsMSetMGet(c *C) { + mSet := t.client.MSet("key1", "hello1", "key2", "hello2") + c.Assert(mSet.Err(), IsNil) + c.Assert(mSet.Val(), Equals, "OK") + + mGet := t.client.MGet("key1", "key2", "_") + c.Assert(mGet.Err(), IsNil) + c.Assert(mGet.Val(), DeepEquals, []interface{}{"hello1", "hello2", nil}) +} + +func (t *RedisTest) TestStringsMSetNX(c *C) { + mSetNX := t.client.MSetNX("key1", "hello1", "key2", "hello2") + c.Assert(mSetNX.Err(), IsNil) + c.Assert(mSetNX.Val(), Equals, true) + + mSetNX = t.client.MSetNX("key2", "hello1", "key3", "hello2") + c.Assert(mSetNX.Err(), IsNil) + c.Assert(mSetNX.Val(), Equals, false) +} + +func (t *RedisTest) TestStringsPSetEx(c *C) { + expiration := 50 * time.Millisecond + psetex := t.client.PSetEx("key", expiration, "hello") + c.Assert(psetex.Err(), IsNil) + c.Assert(psetex.Val(), Equals, "OK") + + pttl := t.client.PTTL("key") + c.Assert(pttl.Err(), IsNil) + c.Assert(pttl.Val() <= expiration, Equals, true) + c.Assert(pttl.Val() >= expiration-time.Millisecond, Equals, true) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) 
TestStringsSetGet(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestStringsSetEx(c *C) { + setEx := t.client.SetEx("key", 10*time.Second, "hello") + c.Assert(setEx.Err(), IsNil) + c.Assert(setEx.Val(), Equals, "OK") + + ttl := t.client.TTL("key") + c.Assert(ttl.Err(), IsNil) + c.Assert(ttl.Val(), Equals, 10*time.Second) +} + +func (t *RedisTest) TestStringsSetNX(c *C) { + setNX := t.client.SetNX("key", "hello") + c.Assert(setNX.Err(), IsNil) + c.Assert(setNX.Val(), Equals, true) + + setNX = t.client.SetNX("key", "hello2") + c.Assert(setNX.Err(), IsNil) + c.Assert(setNX.Val(), Equals, false) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestStringsSetRange(c *C) { + set := t.client.Set("key", "Hello World") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + range_ := t.client.SetRange("key", 6, "Redis") + c.Assert(range_.Err(), IsNil) + c.Assert(range_.Val(), Equals, int64(11)) + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "Hello Redis") +} + +func (t *RedisTest) TestStringsStrLen(c *C) { + set := t.client.Set("key", "hello") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + strLen := t.client.StrLen("key") + c.Assert(strLen.Err(), IsNil) + c.Assert(strLen.Val(), Equals, int64(5)) + + strLen = t.client.StrLen("_") + c.Assert(strLen.Err(), IsNil) + c.Assert(strLen.Val(), Equals, int64(0)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdHDel(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + + hDel := t.client.HDel("hash", "key") + c.Assert(hDel.Err(), IsNil) + c.Assert(hDel.Val(), Equals, int64(1)) + + hDel = 
t.client.HDel("hash", "key") + c.Assert(hDel.Err(), IsNil) + c.Assert(hDel.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestCmdHExists(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + + hExists := t.client.HExists("hash", "key") + c.Assert(hExists.Err(), IsNil) + c.Assert(hExists.Val(), Equals, true) + + hExists = t.client.HExists("hash", "key1") + c.Assert(hExists.Err(), IsNil) + c.Assert(hExists.Val(), Equals, false) +} + +func (t *RedisTest) TestCmdHGet(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + + hGet := t.client.HGet("hash", "key") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello") + + hGet = t.client.HGet("hash", "key1") + c.Assert(hGet.Err(), Equals, redis.Nil) + c.Assert(hGet.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdHGetAll(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hGetAll := t.client.HGetAll("hash") + c.Assert(hGetAll.Err(), IsNil) + c.Assert(hGetAll.Val(), DeepEquals, []string{"key1", "hello1", "key2", "hello2"}) +} + +func (t *RedisTest) TestCmdHGetAllMap(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hGetAll := t.client.HGetAllMap("hash") + c.Assert(hGetAll.Err(), IsNil) + c.Assert(hGetAll.Val(), DeepEquals, map[string]string{"key1": "hello1", "key2": "hello2"}) +} + +func (t *RedisTest) TestCmdHIncrBy(c *C) { + hSet := t.client.HSet("hash", "key", "5") + c.Assert(hSet.Err(), IsNil) + + hIncrBy := t.client.HIncrBy("hash", "key", 1) + c.Assert(hIncrBy.Err(), IsNil) + c.Assert(hIncrBy.Val(), Equals, int64(6)) + + hIncrBy = t.client.HIncrBy("hash", "key", -1) + c.Assert(hIncrBy.Err(), IsNil) + c.Assert(hIncrBy.Val(), Equals, int64(5)) + + hIncrBy = t.client.HIncrBy("hash", "key", -10) + 
c.Assert(hIncrBy.Err(), IsNil) + c.Assert(hIncrBy.Val(), Equals, int64(-5)) +} + +func (t *RedisTest) TestCmdHIncrByFloat(c *C) { + hSet := t.client.HSet("hash", "field", "10.50") + c.Assert(hSet.Err(), IsNil) + c.Assert(hSet.Val(), Equals, true) + + hIncrByFloat := t.client.HIncrByFloat("hash", "field", 0.1) + c.Assert(hIncrByFloat.Err(), IsNil) + c.Assert(hIncrByFloat.Val(), Equals, 10.6) + + hSet = t.client.HSet("hash", "field", "5.0e3") + c.Assert(hSet.Err(), IsNil) + c.Assert(hSet.Val(), Equals, false) + + hIncrByFloat = t.client.HIncrByFloat("hash", "field", 2.0e2) + c.Assert(hIncrByFloat.Err(), IsNil) + c.Assert(hIncrByFloat.Val(), Equals, float64(5200)) +} + +func (t *RedisTest) TestCmdHKeys(c *C) { + hkeys := t.client.HKeys("hash") + c.Assert(hkeys.Err(), IsNil) + c.Assert(hkeys.Val(), DeepEquals, []string{}) + + hset := t.client.HSet("hash", "key1", "hello1") + c.Assert(hset.Err(), IsNil) + hset = t.client.HSet("hash", "key2", "hello2") + c.Assert(hset.Err(), IsNil) + + hkeys = t.client.HKeys("hash") + c.Assert(hkeys.Err(), IsNil) + c.Assert(hkeys.Val(), DeepEquals, []string{"key1", "key2"}) +} + +func (t *RedisTest) TestCmdHLen(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hLen := t.client.HLen("hash") + c.Assert(hLen.Err(), IsNil) + c.Assert(hLen.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestCmdHMGet(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hMGet := t.client.HMGet("hash", "key1", "key2", "_") + c.Assert(hMGet.Err(), IsNil) + c.Assert(hMGet.Val(), DeepEquals, []interface{}{"hello1", "hello2", nil}) +} + +func (t *RedisTest) TestCmdHMSet(c *C) { + hMSet := t.client.HMSet("hash", "key1", "hello1", "key2", "hello2") + c.Assert(hMSet.Err(), IsNil) + c.Assert(hMSet.Val(), Equals, "OK") + + hGet := 
t.client.HGet("hash", "key1") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello1") + + hGet = t.client.HGet("hash", "key2") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello2") +} + +func (t *RedisTest) TestCmdHSet(c *C) { + hSet := t.client.HSet("hash", "key", "hello") + c.Assert(hSet.Err(), IsNil) + c.Assert(hSet.Val(), Equals, true) + + hGet := t.client.HGet("hash", "key") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdHSetNX(c *C) { + hSetNX := t.client.HSetNX("hash", "key", "hello") + c.Assert(hSetNX.Err(), IsNil) + c.Assert(hSetNX.Val(), Equals, true) + + hSetNX = t.client.HSetNX("hash", "key", "hello") + c.Assert(hSetNX.Err(), IsNil) + c.Assert(hSetNX.Val(), Equals, false) + + hGet := t.client.HGet("hash", "key") + c.Assert(hGet.Err(), IsNil) + c.Assert(hGet.Val(), Equals, "hello") +} + +func (t *RedisTest) TestCmdHVals(c *C) { + hSet := t.client.HSet("hash", "key1", "hello1") + c.Assert(hSet.Err(), IsNil) + hSet = t.client.HSet("hash", "key2", "hello2") + c.Assert(hSet.Err(), IsNil) + + hVals := t.client.HVals("hash") + c.Assert(hVals.Err(), IsNil) + c.Assert(hVals.Val(), DeepEquals, []string{"hello1", "hello2"}) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdListsBLPop(c *C) { + rPush := t.client.RPush("list1", "a", "b", "c") + c.Assert(rPush.Err(), IsNil) + + bLPop := t.client.BLPop(0, "list1", "list2") + c.Assert(bLPop.Err(), IsNil) + c.Assert(bLPop.Val(), DeepEquals, []string{"list1", "a"}) +} + +func (t *RedisTest) TestCmdListsBLPopBlocks(c *C) { + started := make(chan bool) + done := make(chan bool) + go func() { + started <- true + bLPop := t.client.BLPop(0, "list") + c.Assert(bLPop.Err(), IsNil) + c.Assert(bLPop.Val(), DeepEquals, []string{"list", "a"}) + done <- true + }() + <-started + + select { + case <-done: + c.Error("BLPop is not blocked") + case <-time.After(time.Second): + // ok + 
} + + rPush := t.client.RPush("list", "a") + c.Assert(rPush.Err(), IsNil) + + select { + case <-done: + // ok + case <-time.After(time.Second): + c.Error("BLPop is still blocked") + // ok + } +} + +func (t *RedisTest) TestCmdListsBLPopTimeout(c *C) { + bLPop := t.client.BLPop(1, "list1") + c.Assert(bLPop.Err(), Equals, redis.Nil) + c.Assert(bLPop.Val(), IsNil) +} + +func (t *RedisTest) TestCmdListsBRPop(c *C) { + rPush := t.client.RPush("list1", "a", "b", "c") + c.Assert(rPush.Err(), IsNil) + + bRPop := t.client.BRPop(0, "list1", "list2") + c.Assert(bRPop.Err(), IsNil) + c.Assert(bRPop.Val(), DeepEquals, []string{"list1", "c"}) +} + +func (t *RedisTest) TestCmdListsBRPopBlocks(c *C) { + started := make(chan bool) + done := make(chan bool) + go func() { + started <- true + brpop := t.client.BRPop(0, "list") + c.Assert(brpop.Err(), IsNil) + c.Assert(brpop.Val(), DeepEquals, []string{"list", "a"}) + done <- true + }() + <-started + + select { + case <-done: + c.Error("BRPop is not blocked") + case <-time.After(time.Second): + // ok + } + + rPush := t.client.RPush("list", "a") + c.Assert(rPush.Err(), IsNil) + + select { + case <-done: + // ok + case <-time.After(time.Second): + c.Error("BRPop is still blocked") + // ok + } +} + +func (t *RedisTest) TestCmdListsBRPopLPush(c *C) { + rPush := t.client.RPush("list1", "a", "b", "c") + c.Assert(rPush.Err(), IsNil) + + bRPopLPush := t.client.BRPopLPush("list1", "list2", 0) + c.Assert(bRPopLPush.Err(), IsNil) + c.Assert(bRPopLPush.Val(), Equals, "c") +} + +func (t *RedisTest) TestCmdListsLIndex(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + lPush = t.client.LPush("list", "Hello") + c.Assert(lPush.Err(), IsNil) + + lIndex := t.client.LIndex("list", 0) + c.Assert(lIndex.Err(), IsNil) + c.Assert(lIndex.Val(), Equals, "Hello") + + lIndex = t.client.LIndex("list", -1) + c.Assert(lIndex.Err(), IsNil) + c.Assert(lIndex.Val(), Equals, "World") + + lIndex = t.client.LIndex("list", 3) + 
c.Assert(lIndex.Err(), Equals, redis.Nil) + c.Assert(lIndex.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdListsLInsert(c *C) { + rPush := t.client.RPush("list", "Hello") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "World") + c.Assert(rPush.Err(), IsNil) + + lInsert := t.client.LInsert("list", "BEFORE", "World", "There") + c.Assert(lInsert.Err(), IsNil) + c.Assert(lInsert.Val(), Equals, int64(3)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "There", "World"}) +} + +func (t *RedisTest) TestCmdListsLLen(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + lPush = t.client.LPush("list", "Hello") + c.Assert(lPush.Err(), IsNil) + + lLen := t.client.LLen("list") + c.Assert(lLen.Err(), IsNil) + c.Assert(lLen.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestCmdListsLPop(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lPop := t.client.LPop("list") + c.Assert(lPop.Err(), IsNil) + c.Assert(lPop.Val(), Equals, "one") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"two", "three"}) +} + +func (t *RedisTest) TestCmdListsLPush(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + lPush = t.client.LPush("list", "Hello") + c.Assert(lPush.Err(), IsNil) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestCmdListsLPushX(c *C) { + lPush := t.client.LPush("list", "World") + c.Assert(lPush.Err(), IsNil) + + lPushX := t.client.LPushX("list", "Hello") + c.Assert(lPushX.Err(), IsNil) + c.Assert(lPushX.Val(), Equals, int64(2)) + + lPushX = 
t.client.LPushX("list2", "Hello") + c.Assert(lPushX.Err(), IsNil) + c.Assert(lPushX.Val(), Equals, int64(0)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) + + lRange = t.client.LRange("list2", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{}) +} + +func (t *RedisTest) TestCmdListsLRange(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lRange := t.client.LRange("list", 0, 0) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one"}) + + lRange = t.client.LRange("list", -3, 2) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one", "two", "three"}) + + lRange = t.client.LRange("list", -100, 100) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one", "two", "three"}) + + lRange = t.client.LRange("list", 5, 10) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{}) +} + +func (t *RedisTest) TestCmdListsLRem(c *C) { + rPush := t.client.RPush("list", "hello") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "hello") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "key") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "hello") + c.Assert(rPush.Err(), IsNil) + + lRem := t.client.LRem("list", -2, "hello") + c.Assert(lRem.Err(), IsNil) + c.Assert(lRem.Val(), Equals, int64(2)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"hello", "key"}) +} + +func (t *RedisTest) TestCmdListsLSet(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush 
= t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lSet := t.client.LSet("list", 0, "four") + c.Assert(lSet.Err(), IsNil) + c.Assert(lSet.Val(), Equals, "OK") + + lSet = t.client.LSet("list", -2, "five") + c.Assert(lSet.Err(), IsNil) + c.Assert(lSet.Val(), Equals, "OK") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"four", "five", "three"}) +} + +func (t *RedisTest) TestCmdListsLTrim(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + lTrim := t.client.LTrim("list", 1, -1) + c.Assert(lTrim.Err(), IsNil) + c.Assert(lTrim.Val(), Equals, "OK") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"two", "three"}) +} + +func (t *RedisTest) TestCmdListsRPop(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + rPop := t.client.RPop("list") + c.Assert(rPop.Err(), IsNil) + c.Assert(rPop.Val(), Equals, "three") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"one", "two"}) +} + +func (t *RedisTest) TestCmdListsRPopLPush(c *C) { + rPush := t.client.RPush("list", "one") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "two") + c.Assert(rPush.Err(), IsNil) + rPush = t.client.RPush("list", "three") + c.Assert(rPush.Err(), IsNil) + + rPopLPush := t.client.RPopLPush("list", "list2") + c.Assert(rPopLPush.Err(), IsNil) + c.Assert(rPopLPush.Val(), Equals, "three") + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, 
[]string{"one", "two"}) + + lRange = t.client.LRange("list2", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"three"}) +} + +func (t *RedisTest) TestCmdListsRPush(c *C) { + rPush := t.client.RPush("list", "Hello") + c.Assert(rPush.Err(), IsNil) + c.Assert(rPush.Val(), Equals, int64(1)) + + rPush = t.client.RPush("list", "World") + c.Assert(rPush.Err(), IsNil) + c.Assert(rPush.Val(), Equals, int64(2)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestCmdListsRPushX(c *C) { + rPush := t.client.RPush("list", "Hello") + c.Assert(rPush.Err(), IsNil) + c.Assert(rPush.Val(), Equals, int64(1)) + + rPushX := t.client.RPushX("list", "World") + c.Assert(rPushX.Err(), IsNil) + c.Assert(rPushX.Val(), Equals, int64(2)) + + rPushX = t.client.RPushX("list2", "World") + c.Assert(rPushX.Err(), IsNil) + c.Assert(rPushX.Val(), Equals, int64(0)) + + lRange := t.client.LRange("list", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{"Hello", "World"}) + + lRange = t.client.LRange("list2", 0, -1) + c.Assert(lRange.Err(), IsNil) + c.Assert(lRange.Val(), DeepEquals, []string{}) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestSAdd(c *C) { + sAdd := t.client.SAdd("set", "Hello") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(1)) + + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(1)) + + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(0)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestSCard(c *C) { + sAdd := t.client.SAdd("set", "Hello") + c.Assert(sAdd.Err(), IsNil) + 
c.Assert(sAdd.Val(), Equals, int64(1)) + + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + c.Assert(sAdd.Val(), Equals, int64(1)) + + sCard := t.client.SCard("set") + c.Assert(sCard.Err(), IsNil) + c.Assert(sCard.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestSDiff(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sDiff := t.client.SDiff("set1", "set2") + c.Assert(sDiff.Err(), IsNil) + c.Assert(sortStrings(sDiff.Val()), DeepEquals, []string{"a", "b"}) +} + +func (t *RedisTest) TestSDiffStore(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sDiffStore := t.client.SDiffStore("set", "set1", "set2") + c.Assert(sDiffStore.Err(), IsNil) + c.Assert(sDiffStore.Val(), Equals, int64(2)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"a", "b"}) +} + +func (t *RedisTest) TestSInter(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") 
+ c.Assert(sAdd.Err(), IsNil) + + sInter := t.client.SInter("set1", "set2") + c.Assert(sInter.Err(), IsNil) + c.Assert(sInter.Val(), DeepEquals, []string{"c"}) +} + +func (t *RedisTest) TestSInterStore(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sInterStore := t.client.SInterStore("set", "set1", "set2") + c.Assert(sInterStore.Err(), IsNil) + c.Assert(sInterStore.Val(), Equals, int64(1)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), DeepEquals, []string{"c"}) +} + +func (t *RedisTest) TestIsMember(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + + sIsMember := t.client.SIsMember("set", "one") + c.Assert(sIsMember.Err(), IsNil) + c.Assert(sIsMember.Val(), Equals, true) + + sIsMember = t.client.SIsMember("set", "two") + c.Assert(sIsMember.Err(), IsNil) + c.Assert(sIsMember.Val(), Equals, false) +} + +func (t *RedisTest) TestSMembers(c *C) { + sAdd := t.client.SAdd("set", "Hello") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "World") + c.Assert(sAdd.Err(), IsNil) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"Hello", "World"}) +} + +func (t *RedisTest) TestSMove(c *C) { + sAdd := t.client.SAdd("set1", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "two") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "three") + c.Assert(sAdd.Err(), IsNil) + + sMove := t.client.SMove("set1", "set2", "two") + c.Assert(sMove.Err(), IsNil) + c.Assert(sMove.Val(), Equals, true) + + sMembers := 
t.client.SMembers("set1") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), DeepEquals, []string{"one"}) + + sMembers = t.client.SMembers("set2") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sortStrings(sMembers.Val()), DeepEquals, []string{"three", "two"}) +} + +func (t *RedisTest) TestSPop(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "two") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "three") + c.Assert(sAdd.Err(), IsNil) + + sPop := t.client.SPop("set") + c.Assert(sPop.Err(), IsNil) + c.Assert(sPop.Val(), Not(Equals), "") + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), HasLen, 2) +} + +func (t *RedisTest) TestSRandMember(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "two") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "three") + c.Assert(sAdd.Err(), IsNil) + + sRandMember := t.client.SRandMember("set") + c.Assert(sRandMember.Err(), IsNil) + c.Assert(sRandMember.Val(), Not(Equals), "") + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), HasLen, 3) +} + +func (t *RedisTest) TestSRem(c *C) { + sAdd := t.client.SAdd("set", "one") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "two") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set", "three") + c.Assert(sAdd.Err(), IsNil) + + sRem := t.client.SRem("set", "one") + c.Assert(sRem.Err(), IsNil) + c.Assert(sRem.Val(), Equals, int64(1)) + + sRem = t.client.SRem("set", "four") + c.Assert(sRem.Err(), IsNil) + c.Assert(sRem.Val(), Equals, int64(0)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert( + sortStrings(sMembers.Val()), + DeepEquals, + []string{"three", "two"}, + ) +} + +func (t *RedisTest) TestSUnion(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = 
t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sUnion := t.client.SUnion("set1", "set2") + c.Assert(sUnion.Err(), IsNil) + c.Assert(sUnion.Val(), HasLen, 5) +} + +func (t *RedisTest) TestSUnionStore(c *C) { + sAdd := t.client.SAdd("set1", "a") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "b") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set1", "c") + c.Assert(sAdd.Err(), IsNil) + + sAdd = t.client.SAdd("set2", "c") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "d") + c.Assert(sAdd.Err(), IsNil) + sAdd = t.client.SAdd("set2", "e") + c.Assert(sAdd.Err(), IsNil) + + sUnionStore := t.client.SUnionStore("set", "set1", "set2") + c.Assert(sUnionStore.Err(), IsNil) + c.Assert(sUnionStore.Val(), Equals, int64(5)) + + sMembers := t.client.SMembers("set") + c.Assert(sMembers.Err(), IsNil) + c.Assert(sMembers.Val(), HasLen, 5) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestZAdd(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(1)) + + zAdd = t.client.ZAdd("zset", redis.Z{1, "uno"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(1)) + + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(1)) + + zAdd = t.client.ZAdd("zset", redis.Z{3, "two"}) + c.Assert(zAdd.Err(), IsNil) + c.Assert(zAdd.Val(), Equals, int64(0)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {1, "uno"}, {3, "two"}}) +} + +func (t *RedisTest) TestZCard(c *C) { + zAdd := 
t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zCard := t.client.ZCard("zset") + c.Assert(zCard.Err(), IsNil) + c.Assert(zCard.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestZCount(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zCount := t.client.ZCount("zset", "-inf", "+inf") + c.Assert(zCount.Err(), IsNil) + c.Assert(zCount.Val(), Equals, int64(3)) + + zCount = t.client.ZCount("zset", "(1", "3") + c.Assert(zCount.Err(), IsNil) + c.Assert(zCount.Val(), Equals, int64(2)) +} + +func (t *RedisTest) TestZIncrBy(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zIncrBy := t.client.ZIncrBy("zset", 2, "one") + c.Assert(zIncrBy.Err(), IsNil) + c.Assert(zIncrBy.Val(), Equals, float64(3)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "one"}}) +} + +func (t *RedisTest) TestZInterStore(c *C) { + zAdd := t.client.ZAdd("zset1", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset1", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zAdd = t.client.ZAdd("zset2", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset2", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset3", redis.Z{3, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zInterStore := t.client.ZInterStore( + "out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2") + c.Assert(zInterStore.Err(), IsNil) + c.Assert(zInterStore.Val(), Equals, int64(2)) + + val, err := t.client.ZRangeWithScores("out", 0, 
-1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{5, "one"}, {10, "two"}}) +} + +func (t *RedisTest) TestZRange(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRange := t.client.ZRange("zset", 0, -1) + c.Assert(zRange.Err(), IsNil) + c.Assert(zRange.Val(), DeepEquals, []string{"one", "two", "three"}) + + zRange = t.client.ZRange("zset", 2, 3) + c.Assert(zRange.Err(), IsNil) + c.Assert(zRange.Val(), DeepEquals, []string{"three"}) + + zRange = t.client.ZRange("zset", -2, -1) + c.Assert(zRange.Err(), IsNil) + c.Assert(zRange.Val(), DeepEquals, []string{"two", "three"}) +} + +func (t *RedisTest) TestZRangeWithScores(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}, {3, "three"}}) + + val, err = t.client.ZRangeWithScores("zset", 2, 3).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}}) + + val, err = t.client.ZRangeWithScores("zset", -2, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "three"}}) +} + +func (t *RedisTest) TestZRangeByScore(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRangeByScore := t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "-inf", + Max: "+inf", + }) + c.Assert(zRangeByScore.Err(), 
IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{"one", "two", "three"}) + + zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "1", + Max: "2", + }) + c.Assert(zRangeByScore.Err(), IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{"one", "two"}) + + zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "2", + }) + c.Assert(zRangeByScore.Err(), IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{"two"}) + + zRangeByScore = t.client.ZRangeByScore("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "(2", + }) + c.Assert(zRangeByScore.Err(), IsNil) + c.Assert(zRangeByScore.Val(), DeepEquals, []string{}) +} + +func (t *RedisTest) TestZRangeByScoreWithScoresMap(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "-inf", + Max: "+inf", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}, {3, "three"}}) + + val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "1", + Max: "2", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {2, "two"}}) + + val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "2", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}}) + + val, err = t.client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{ + Min: "(1", + Max: "(2", + }).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{}) +} + +func (t *RedisTest) TestZRank(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) 
+ zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRank := t.client.ZRank("zset", "three") + c.Assert(zRank.Err(), IsNil) + c.Assert(zRank.Val(), Equals, int64(2)) + + zRank = t.client.ZRank("zset", "four") + c.Assert(zRank.Err(), Equals, redis.Nil) + c.Assert(zRank.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestZRem(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRem := t.client.ZRem("zset", "two") + c.Assert(zRem.Err(), IsNil) + c.Assert(zRem.Val(), Equals, int64(1)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}, {3, "three"}}) +} + +func (t *RedisTest) TestZRemRangeByRank(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRemRangeByRank := t.client.ZRemRangeByRank("zset", 0, 1) + c.Assert(zRemRangeByRank.Err(), IsNil) + c.Assert(zRemRangeByRank.Val(), Equals, int64(2)) + + val, err := t.client.ZRangeWithScores("zset", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}}) +} + +func (t *RedisTest) TestZRemRangeByScore(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRemRangeByScore := t.client.ZRemRangeByScore("zset", "-inf", "(2") + c.Assert(zRemRangeByScore.Err(), IsNil) + c.Assert(zRemRangeByScore.Val(), Equals, int64(1)) + + val, err := t.client.ZRangeWithScores("zset", 0, 
-1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {3, "three"}}) +} + +func (t *RedisTest) TestZRevRange(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRevRange := t.client.ZRevRange("zset", "0", "-1") + c.Assert(zRevRange.Err(), IsNil) + c.Assert(zRevRange.Val(), DeepEquals, []string{"three", "two", "one"}) + + zRevRange = t.client.ZRevRange("zset", "2", "3") + c.Assert(zRevRange.Err(), IsNil) + c.Assert(zRevRange.Val(), DeepEquals, []string{"one"}) + + zRevRange = t.client.ZRevRange("zset", "-2", "-1") + c.Assert(zRevRange.Err(), IsNil) + c.Assert(zRevRange.Val(), DeepEquals, []string{"two", "one"}) +} + +func (t *RedisTest) TestZRevRangeWithScoresMap(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRevRangeWithScores("zset", "0", "-1").Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}) + + val, err = t.client.ZRevRangeWithScores("zset", "2", "3").Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{1, "one"}}) + + val, err = t.client.ZRevRangeWithScores("zset", "-2", "-1").Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}, {1, "one"}}) +} + +func (t *RedisTest) TestZRevRangeByScore(c *C) { + zadd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zadd.Err(), IsNil) + + vals, err := t.client.ZRevRangeByScore( + "zset", 
redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []string{"three", "two", "one"}) + + vals, err = t.client.ZRevRangeByScore( + "zset", redis.ZRangeByScore{Max: "2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []string{"two"}) + + vals, err = t.client.ZRevRangeByScore( + "zset", redis.ZRangeByScore{Max: "(2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []string{}) +} + +func (t *RedisTest) TestZRevRangeByScoreWithScores(c *C) { + zadd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zadd.Err(), IsNil) + zadd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zadd.Err(), IsNil) + + vals, err := t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result() + c.Assert(err, IsNil) + c.Assert(vals, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}) +} + +func (t *RedisTest) TestZRevRangeByScoreWithScoresMap(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + val, err := t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "+inf", Min: "-inf"}).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{3, "three"}, {2, "two"}, {1, "one"}}) + + val, err = t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{2, "two"}}) + + val, err = t.client.ZRevRangeByScoreWithScores( + "zset", redis.ZRangeByScore{Max: "(2", Min: "(1"}).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{}) +} + +func (t *RedisTest) TestZRevRank(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1, 
"one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zRevRank := t.client.ZRevRank("zset", "one") + c.Assert(zRevRank.Err(), IsNil) + c.Assert(zRevRank.Val(), Equals, int64(2)) + + zRevRank = t.client.ZRevRank("zset", "four") + c.Assert(zRevRank.Err(), Equals, redis.Nil) + c.Assert(zRevRank.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestZScore(c *C) { + zAdd := t.client.ZAdd("zset", redis.Z{1.001, "one"}) + c.Assert(zAdd.Err(), IsNil) + + zScore := t.client.ZScore("zset", "one") + c.Assert(zScore.Err(), IsNil) + c.Assert(zScore.Val(), Equals, float64(1.001)) +} + +func (t *RedisTest) TestZUnionStore(c *C) { + zAdd := t.client.ZAdd("zset1", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset1", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + + zAdd = t.client.ZAdd("zset2", redis.Z{1, "one"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset2", redis.Z{2, "two"}) + c.Assert(zAdd.Err(), IsNil) + zAdd = t.client.ZAdd("zset2", redis.Z{3, "three"}) + c.Assert(zAdd.Err(), IsNil) + + zUnionStore := t.client.ZUnionStore( + "out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2") + c.Assert(zUnionStore.Err(), IsNil) + c.Assert(zUnionStore.Val(), Equals, int64(3)) + + val, err := t.client.ZRangeWithScores("out", 0, -1).Result() + c.Assert(err, IsNil) + c.Assert(val, DeepEquals, []redis.Z{{5, "one"}, {9, "three"}, {10, "two"}}) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestPatternPubSub(c *C) { + pubsub := t.client.PubSub() + defer func() { + c.Assert(pubsub.Close(), IsNil) + }() + + c.Assert(pubsub.PSubscribe("mychannel*"), IsNil) + + pub := t.client.Publish("mychannel1", "hello") + c.Assert(pub.Err(), IsNil) + c.Assert(pub.Val(), Equals, int64(1)) + + c.Assert(pubsub.PUnsubscribe("mychannel*"), IsNil) + + { + 
msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "psubscribe") + c.Assert(subscr.Channel, Equals, "mychannel*") + c.Assert(subscr.Count, Equals, 1) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.PMessage) + c.Assert(subscr.Channel, Equals, "mychannel1") + c.Assert(subscr.Pattern, Equals, "mychannel*") + c.Assert(subscr.Payload, Equals, "hello") + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "punsubscribe") + c.Assert(subscr.Channel, Equals, "mychannel*") + c.Assert(subscr.Count, Equals, 0) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err.(net.Error).Timeout(), Equals, true) + c.Assert(msgi, IsNil) + } +} + +func (t *RedisTest) TestPubSub(c *C) { + pubsub := t.client.PubSub() + defer func() { + c.Assert(pubsub.Close(), IsNil) + }() + + c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil) + + pub := t.client.Publish("mychannel", "hello") + c.Assert(pub.Err(), IsNil) + c.Assert(pub.Val(), Equals, int64(1)) + + pub = t.client.Publish("mychannel2", "hello2") + c.Assert(pub.Err(), IsNil) + c.Assert(pub.Val(), Equals, int64(1)) + + c.Assert(pubsub.Unsubscribe("mychannel", "mychannel2"), IsNil) + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "subscribe") + c.Assert(subscr.Channel, Equals, "mychannel") + c.Assert(subscr.Count, Equals, 1) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "subscribe") + c.Assert(subscr.Channel, Equals, "mychannel2") + c.Assert(subscr.Count, Equals, 2) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := 
msgi.(*redis.Message) + c.Assert(subscr.Channel, Equals, "mychannel") + c.Assert(subscr.Payload, Equals, "hello") + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + msg := msgi.(*redis.Message) + c.Assert(msg.Channel, Equals, "mychannel2") + c.Assert(msg.Payload, Equals, "hello2") + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "unsubscribe") + c.Assert(subscr.Channel, Equals, "mychannel") + c.Assert(subscr.Count, Equals, 1) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err, IsNil) + subscr := msgi.(*redis.Subscription) + c.Assert(subscr.Kind, Equals, "unsubscribe") + c.Assert(subscr.Channel, Equals, "mychannel2") + c.Assert(subscr.Count, Equals, 0) + } + + { + msgi, err := pubsub.ReceiveTimeout(time.Second) + c.Assert(err.(net.Error).Timeout(), Equals, true) + c.Assert(msgi, IsNil) + } +} + +func (t *RedisTest) TestPubSubChannels(c *C) { + channels, err := t.client.PubSubChannels("mychannel*").Result() + c.Assert(err, IsNil) + c.Assert(channels, HasLen, 0) + c.Assert(channels, Not(IsNil)) + + pubsub := t.client.PubSub() + defer pubsub.Close() + + c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil) + + channels, err = t.client.PubSubChannels("mychannel*").Result() + c.Assert(err, IsNil) + c.Assert(sortStrings(channels), DeepEquals, []string{"mychannel", "mychannel2"}) + + channels, err = t.client.PubSubChannels("").Result() + c.Assert(err, IsNil) + c.Assert(channels, HasLen, 0) + + channels, err = t.client.PubSubChannels("*").Result() + c.Assert(err, IsNil) + c.Assert(len(channels) >= 2, Equals, true) +} + +func (t *RedisTest) TestPubSubNumSub(c *C) { + pubsub := t.client.PubSub() + defer pubsub.Close() + + c.Assert(pubsub.Subscribe("mychannel", "mychannel2"), IsNil) + + channels, err := t.client.PubSubNumSub("mychannel", "mychannel2", "mychannel3").Result() + c.Assert(err, IsNil) + c.Assert( + 
channels, + DeepEquals, + []interface{}{"mychannel", int64(1), "mychannel2", int64(1), "mychannel3", int64(0)}, + ) +} + +func (t *RedisTest) TestPubSubNumPat(c *C) { + num, err := t.client.PubSubNumPat().Result() + c.Assert(err, IsNil) + c.Assert(num, Equals, int64(0)) + + pubsub := t.client.PubSub() + defer pubsub.Close() + + c.Assert(pubsub.PSubscribe("mychannel*"), IsNil) + + num, err = t.client.PubSubNumPat().Result() + c.Assert(err, IsNil) + c.Assert(num, Equals, int64(1)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestPipeline(c *C) { + set := t.client.Set("key2", "hello2") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + pipeline := t.client.Pipeline() + defer func() { + c.Assert(pipeline.Close(), IsNil) + }() + + set = pipeline.Set("key1", "hello1") + get := pipeline.Get("key2") + incr := pipeline.Incr("key3") + getNil := pipeline.Get("key4") + + cmds, err := pipeline.Exec() + c.Assert(err, Equals, redis.Nil) + c.Assert(cmds, HasLen, 4) + + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello2") + + c.Assert(incr.Err(), IsNil) + c.Assert(incr.Val(), Equals, int64(1)) + + c.Assert(getNil.Err(), Equals, redis.Nil) + c.Assert(getNil.Val(), Equals, "") +} + +func (t *RedisTest) TestPipelineDiscardQueued(c *C) { + pipeline := t.client.Pipeline() + + pipeline.Get("key") + pipeline.Discard() + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) + + c.Assert(pipeline.Close(), IsNil) +} + +func (t *RedisTest) TestPipelined(c *C) { + var get *redis.StringCmd + cmds, err := t.client.Pipelined(func(pipe *redis.Pipeline) error { + get = pipe.Get("foo") + return nil + }) + c.Assert(err, Equals, redis.Nil) + c.Assert(cmds, HasLen, 1) + c.Assert(cmds[0], Equals, get) + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") +} + +func (t *RedisTest) 
TestPipelineErrValNotSet(c *C) { + pipeline := t.client.Pipeline() + defer func() { + c.Assert(pipeline.Close(), IsNil) + }() + + get := pipeline.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "") +} + +func (t *RedisTest) TestPipelineRunQueuedOnEmptyQueue(c *C) { + pipeline := t.client.Pipeline() + defer func() { + c.Assert(pipeline.Close(), IsNil) + }() + + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) +} + +// TODO: make thread safe? +func (t *RedisTest) TestPipelineIncr(c *C) { + const N = 20000 + key := "TestPipelineIncr" + + pipeline := t.client.Pipeline() + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + pipeline.Incr(key) + wg.Done() + } + wg.Wait() + + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(len(cmds), Equals, 20000) + for _, cmd := range cmds { + if cmd.Err() != nil { + c.Errorf("got %v, expected nil", cmd.Err()) + } + } + + get := t.client.Get(key) + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, strconv.Itoa(N)) + + c.Assert(pipeline.Close(), IsNil) +} + +func (t *RedisTest) TestPipelineEcho(c *C) { + const N = 1000 + + wg := &sync.WaitGroup{} + wg.Add(N) + for i := 0; i < N; i++ { + go func(i int) { + pipeline := t.client.Pipeline() + + msg1 := "echo" + strconv.Itoa(i) + msg2 := "echo" + strconv.Itoa(i+1) + + echo1 := pipeline.Echo(msg1) + echo2 := pipeline.Echo(msg2) + + cmds, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 2) + + c.Assert(echo1.Err(), IsNil) + c.Assert(echo1.Val(), Equals, msg1) + + c.Assert(echo2.Err(), IsNil) + c.Assert(echo2.Val(), Equals, msg2) + + c.Assert(pipeline.Close(), IsNil) + + wg.Done() + }(i) + } + wg.Wait() +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestMultiExec(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + var ( + set *redis.StatusCmd + get *redis.StringCmd + ) + cmds, err 
:= multi.Exec(func() error { + set = multi.Set("key", "hello") + get = multi.Get("key") + return nil + }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 2) + + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello") +} + +func (t *RedisTest) TestMultiExecDiscard(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { + multi.Set("key1", "hello1") + multi.Discard() + multi.Set("key2", "hello2") + return nil + }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + + get := t.client.Get("key1") + c.Assert(get.Err(), Equals, redis.Nil) + c.Assert(get.Val(), Equals, "") + + get = t.client.Get("key2") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "hello2") +} + +func (t *RedisTest) TestMultiExecEmpty(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { return nil }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) + + ping := multi.Ping() + c.Check(ping.Err(), IsNil) + c.Check(ping.Val(), Equals, "PONG") +} + +func (t *RedisTest) TestMultiExecOnEmptyQueue(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { return nil }) + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 0) +} + +func (t *RedisTest) TestMultiExecIncr(c *C) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + cmds, err := multi.Exec(func() error { + for i := int64(0); i < 20000; i++ { + multi.Incr("key") + } + return nil + }) + c.Assert(err, IsNil) + c.Assert(len(cmds), Equals, 20000) + for _, cmd := range cmds { + if cmd.Err() != nil { + c.Errorf("got %v, expected nil", cmd.Err()) + } + } + + get := t.client.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Equals, "20000") +} + +func (t *RedisTest) transactionalIncr(c *C) 
([]redis.Cmder, error) { + multi := t.client.Multi() + defer func() { + c.Assert(multi.Close(), IsNil) + }() + + watch := multi.Watch("key") + c.Assert(watch.Err(), IsNil) + c.Assert(watch.Val(), Equals, "OK") + + get := multi.Get("key") + c.Assert(get.Err(), IsNil) + c.Assert(get.Val(), Not(Equals), redis.Nil) + + v, err := strconv.ParseInt(get.Val(), 10, 64) + c.Assert(err, IsNil) + + return multi.Exec(func() error { + multi.Set("key", strconv.FormatInt(v+1, 10)) + return nil + }) +} + +func (t *RedisTest) TestWatchUnwatch(c *C) { + var n = 10000 + if testing.Short() { + n = 1000 + } + + set := t.client.Set("key", "0") + c.Assert(set.Err(), IsNil) + + wg := &sync.WaitGroup{} + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + cmds, err := t.transactionalIncr(c) + if err == redis.TxFailedErr { + continue + } + c.Assert(err, IsNil) + c.Assert(cmds, HasLen, 1) + c.Assert(cmds[0].Err(), IsNil) + break + } + }() + } + wg.Wait() + + val, err := t.client.Get("key").Int64() + c.Assert(err, IsNil) + c.Assert(val, Equals, int64(n)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestRaceEcho(c *C) { + var n = 10000 + if testing.Short() { + n = 1000 + } + + wg := &sync.WaitGroup{} + wg.Add(n) + for i := 0; i < n; i++ { + go func(i int) { + msg := "echo" + strconv.Itoa(i) + echo := t.client.Echo(msg) + c.Assert(echo.Err(), IsNil) + c.Assert(echo.Val(), Equals, msg) + wg.Done() + }(i) + } + wg.Wait() +} + +func (t *RedisTest) TestRaceIncr(c *C) { + var n = 10000 + if testing.Short() { + n = 1000 + } + + wg := &sync.WaitGroup{} + wg.Add(n) + for i := 0; i < n; i++ { + go func() { + incr := t.client.Incr("TestRaceIncr") + if err := incr.Err(); err != nil { + panic(err) + } + wg.Done() + }() + } + wg.Wait() + + val, err := t.client.Get("TestRaceIncr").Result() + c.Assert(err, IsNil) + c.Assert(val, Equals, strconv.Itoa(n)) +} + 
+//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdBgRewriteAOF(c *C) { + r := t.client.BgRewriteAOF() + c.Assert(r.Err(), IsNil) + c.Assert(r.Val(), Equals, "Background append only file rewriting started") +} + +func (t *RedisTest) TestCmdBgSave(c *C) { + // workaround for "ERR Can't BGSAVE while AOF log rewriting is in progress" + time.Sleep(time.Second) + + r := t.client.BgSave() + c.Assert(r.Err(), IsNil) + c.Assert(r.Val(), Equals, "Background saving started") +} + +func (t *RedisTest) TestCmdClientKill(c *C) { + r := t.client.ClientKill("1.1.1.1:1111") + c.Assert(r.Err(), ErrorMatches, "ERR No such client") + c.Assert(r.Val(), Equals, "") +} + +func (t *RedisTest) TestCmdConfigGet(c *C) { + r := t.client.ConfigGet("*") + c.Assert(r.Err(), IsNil) + c.Assert(len(r.Val()) > 0, Equals, true) +} + +func (t *RedisTest) TestCmdConfigResetStat(c *C) { + r := t.client.ConfigResetStat() + c.Assert(r.Err(), IsNil) + c.Assert(r.Val(), Equals, "OK") +} + +func (t *RedisTest) TestCmdConfigSet(c *C) { + configGet := t.client.ConfigGet("maxmemory") + c.Assert(configGet.Err(), IsNil) + c.Assert(configGet.Val(), HasLen, 2) + c.Assert(configGet.Val()[0], Equals, "maxmemory") + + configSet := t.client.ConfigSet("maxmemory", configGet.Val()[1].(string)) + c.Assert(configSet.Err(), IsNil) + c.Assert(configSet.Val(), Equals, "OK") +} + +func (t *RedisTest) TestCmdDbSize(c *C) { + dbSize := t.client.DbSize() + c.Assert(dbSize.Err(), IsNil) + c.Assert(dbSize.Val(), Equals, int64(0)) +} + +func (t *RedisTest) TestCmdFlushAll(c *C) { + // TODO +} + +func (t *RedisTest) TestCmdFlushDb(c *C) { + // TODO +} + +func (t *RedisTest) TestCmdInfo(c *C) { + info := t.client.Info() + c.Assert(info.Err(), IsNil) + c.Assert(info.Val(), Not(Equals), "") +} + +func (t *RedisTest) TestCmdLastSave(c *C) { + lastSave := t.client.LastSave() + c.Assert(lastSave.Err(), IsNil) + c.Assert(lastSave.Val(), Not(Equals), 0) +} + +func (t *RedisTest) 
TestCmdSave(c *C) { + save := t.client.Save() + c.Assert(save.Err(), IsNil) + c.Assert(save.Val(), Equals, "OK") +} + +func (t *RedisTest) TestSlaveOf(c *C) { + slaveOf := t.client.SlaveOf("localhost", "8888") + c.Assert(slaveOf.Err(), IsNil) + c.Assert(slaveOf.Val(), Equals, "OK") + + slaveOf = t.client.SlaveOf("NO", "ONE") + c.Assert(slaveOf.Err(), IsNil) + c.Assert(slaveOf.Val(), Equals, "OK") +} + +func (t *RedisTest) TestTime(c *C) { + time := t.client.Time() + c.Assert(time.Err(), IsNil) + c.Assert(time.Val(), HasLen, 2) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestScriptingEval(c *C) { + eval := t.client.Eval( + "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", + []string{"key1", "key2"}, + []string{"first", "second"}, + ) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), DeepEquals, []interface{}{"key1", "key2", "first", "second"}) + + eval = t.client.Eval( + "return redis.call('set',KEYS[1],'bar')", + []string{"foo"}, + []string{}, + ) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), Equals, "OK") + + eval = t.client.Eval("return 10", []string{}, []string{}) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), Equals, int64(10)) + + eval = t.client.Eval("return {1,2,{3,'Hello World!'}}", []string{}, []string{}) + c.Assert(eval.Err(), IsNil) + // DeepEquals can't compare nested slices. 
+ c.Assert( + fmt.Sprintf("%#v", eval.Val()), + Equals, + `[]interface {}{1, 2, []interface {}{3, "Hello World!"}}`, + ) +} + +func (t *RedisTest) TestScriptingEvalSha(c *C) { + set := t.client.Set("foo", "bar") + c.Assert(set.Err(), IsNil) + c.Assert(set.Val(), Equals, "OK") + + eval := t.client.Eval("return redis.call('get','foo')", nil, nil) + c.Assert(eval.Err(), IsNil) + c.Assert(eval.Val(), Equals, "bar") + + evalSha := t.client.EvalSha("6b1bf486c81ceb7edf3c093f4c48582e38c0e791", nil, nil) + c.Assert(evalSha.Err(), IsNil) + c.Assert(evalSha.Val(), Equals, "bar") + + evalSha = t.client.EvalSha("ffffffffffffffffffffffffffffffffffffffff", nil, nil) + c.Assert(evalSha.Err(), ErrorMatches, "NOSCRIPT No matching script. Please use EVAL.") + c.Assert(evalSha.Val(), Equals, nil) +} + +func (t *RedisTest) TestScriptingScriptExists(c *C) { + scriptLoad := t.client.ScriptLoad("return 1") + c.Assert(scriptLoad.Err(), IsNil) + c.Assert(scriptLoad.Val(), Equals, "e0e1f9fabfc9d4800c877a703b823ac0578ff8db") + + scriptExists := t.client.ScriptExists( + "e0e1f9fabfc9d4800c877a703b823ac0578ff8db", + "ffffffffffffffffffffffffffffffffffffffff", + ) + c.Assert(scriptExists.Err(), IsNil) + c.Assert(scriptExists.Val(), DeepEquals, []bool{true, false}) +} + +func (t *RedisTest) TestScriptingScriptFlush(c *C) { + scriptFlush := t.client.ScriptFlush() + c.Assert(scriptFlush.Err(), IsNil) + c.Assert(scriptFlush.Val(), Equals, "OK") +} + +func (t *RedisTest) TestScriptingScriptKill(c *C) { + scriptKill := t.client.ScriptKill() + c.Assert(scriptKill.Err(), ErrorMatches, ".*No scripts in execution right now.") + c.Assert(scriptKill.Val(), Equals, "") +} + +func (t *RedisTest) TestScriptingScriptLoad(c *C) { + scriptLoad := t.client.ScriptLoad("return redis.call('get','foo')") + c.Assert(scriptLoad.Err(), IsNil) + c.Assert(scriptLoad.Val(), Equals, "6b1bf486c81ceb7edf3c093f4c48582e38c0e791") +} + +func (t *RedisTest) TestScriptingNewScript(c *C) { + s := redis.NewScript("return 1") + run := 
s.Run(t.client, nil, nil) + c.Assert(run.Err(), IsNil) + c.Assert(run.Val(), Equals, int64(1)) +} + +func (t *RedisTest) TestScriptingEvalAndPipeline(c *C) { + pipeline := t.client.Pipeline() + s := redis.NewScript("return 1") + run := s.Eval(pipeline, nil, nil) + _, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(run.Err(), IsNil) + c.Assert(run.Val(), Equals, int64(1)) +} + +func (t *RedisTest) TestScriptingEvalShaAndPipeline(c *C) { + s := redis.NewScript("return 1") + c.Assert(s.Load(t.client).Err(), IsNil) + + pipeline := t.client.Pipeline() + run := s.Eval(pipeline, nil, nil) + _, err := pipeline.Exec() + c.Assert(err, IsNil) + c.Assert(run.Err(), IsNil) + c.Assert(run.Val(), Equals, int64(1)) +} + +//------------------------------------------------------------------------------ + +func (t *RedisTest) TestCmdDebugObject(c *C) { + { + debug := t.client.DebugObject("foo") + c.Assert(debug.Err(), Not(IsNil)) + c.Assert(debug.Err().Error(), Equals, "ERR no such key") + } + + { + t.client.Set("foo", "bar") + debug := t.client.DebugObject("foo") + c.Assert(debug.Err(), IsNil) + c.Assert(debug.Val(), FitsTypeOf, "") + c.Assert(debug.Val(), Not(Equals), "") + } +} + +//------------------------------------------------------------------------------ + +func BenchmarkRedisPing(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Ping().Err(); err != nil { + panic(err) + } + } +} + +func BenchmarkRedisSet(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Set("key", "hello").Err(); err != nil { + panic(err) + } + } +} + +func BenchmarkRedisGetNil(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + if err := client.FlushDb().Err(); err != nil { + b.Fatal(err) + } + b.StartTimer() 
+ + for i := 0; i < b.N; i++ { + if err := client.Get("key").Err(); err != redis.Nil { + b.Fatal(err) + } + } +} + +func BenchmarkRedisGet(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + if err := client.Set("key", "hello").Err(); err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Get("key").Err(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRedisMGet(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + if err := client.MSet("key1", "hello1", "key2", "hello2").Err(); err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.MGet("key1", "key2").Err(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkSetExpire(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + if err := client.Set("key", "hello").Err(); err != nil { + b.Fatal(err) + } + if err := client.Expire("key", time.Second).Err(); err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkPipeline(b *testing.B) { + b.StopTimer() + client := redis.NewTCPClient(&redis.Options{ + Addr: redisAddr, + }) + b.StartTimer() + + for i := 0; i < b.N; i++ { + _, err := client.Pipelined(func(pipe *redis.Pipeline) error { + pipe.Set("key", "hello") + pipe.Expire("key", time.Second) + return nil + }) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/script.go b/Godeps/_workspace/src/gopkg.in/redis.v2/script.go new file mode 100644 index 00000000000..96c35f5149e --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/script.go @@ -0,0 +1,52 @@ +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +type scripter interface { + Eval(script string, keys []string, args []string) *Cmd + EvalSha(sha1 string, keys []string, args []string) *Cmd 
+ ScriptExists(scripts ...string) *BoolSliceCmd + ScriptLoad(script string) *StringCmd +} + +type Script struct { + src, hash string +} + +func NewScript(src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{ + src: src, + hash: hex.EncodeToString(h.Sum(nil)), + } +} + +func (s *Script) Load(c scripter) *StringCmd { + return c.ScriptLoad(s.src) +} + +func (s *Script) Exists(c scripter) *BoolSliceCmd { + return c.ScriptExists(s.src) +} + +func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd { + return c.Eval(s.src, keys, args) +} + +func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd { + return c.EvalSha(s.hash, keys, args) +} + +func (s *Script) Run(c *Client, keys []string, args []string) *Cmd { + r := s.EvalSha(c, keys, args) + if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { + return s.Eval(c, keys, args) + } + return r +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go new file mode 100644 index 00000000000..d3ffeca9a59 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go @@ -0,0 +1,291 @@ +package redis + +import ( + "errors" + "log" + "net" + "strings" + "sync" + "time" +) + +//------------------------------------------------------------------------------ + +type FailoverOptions struct { + MasterName string + SentinelAddrs []string + + Password string + DB int64 + + PoolSize int + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + IdleTimeout time.Duration +} + +func (opt *FailoverOptions) getPoolSize() int { + if opt.PoolSize == 0 { + return 10 + } + return opt.PoolSize +} + +func (opt *FailoverOptions) getDialTimeout() time.Duration { + if opt.DialTimeout == 0 { + return 5 * time.Second + } + return opt.DialTimeout +} + +func (opt *FailoverOptions) options() *options { + return &options{ + DB: opt.DB, + Password: opt.Password, + + 
DialTimeout: opt.getDialTimeout(), + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.getPoolSize(), + IdleTimeout: opt.IdleTimeout, + } +} + +func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + opt := failoverOpt.options() + failover := &sentinelFailover{ + masterName: failoverOpt.MasterName, + sentinelAddrs: failoverOpt.SentinelAddrs, + + opt: opt, + } + return &Client{ + baseClient: &baseClient{ + opt: opt, + connPool: failover.Pool(), + }, + } +} + +//------------------------------------------------------------------------------ + +type sentinelClient struct { + *baseClient +} + +func newSentinel(clOpt *Options) *sentinelClient { + opt := clOpt.options() + opt.Password = "" + opt.DB = 0 + dialer := func() (net.Conn, error) { + return net.DialTimeout("tcp", clOpt.Addr, opt.DialTimeout) + } + return &sentinelClient{ + baseClient: &baseClient{ + opt: opt, + connPool: newConnPool(newConnFunc(dialer), opt), + }, + } +} + +func (c *sentinelClient) PubSub() *PubSub { + return &PubSub{ + baseClient: &baseClient{ + opt: c.opt, + connPool: newSingleConnPool(c.connPool, false), + }, + } +} + +func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd { + cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name) + c.Process(cmd) + return cmd +} + +func (c *sentinelClient) Sentinels(name string) *SliceCmd { + cmd := NewSliceCmd("SENTINEL", "sentinels", name) + c.Process(cmd) + return cmd +} + +type sentinelFailover struct { + masterName string + sentinelAddrs []string + + opt *options + + pool pool + poolOnce sync.Once + + lock sync.RWMutex + _sentinel *sentinelClient +} + +func (d *sentinelFailover) dial() (net.Conn, error) { + addr, err := d.MasterAddr() + if err != nil { + return nil, err + } + return net.DialTimeout("tcp", addr, d.opt.DialTimeout) +} + +func (d *sentinelFailover) Pool() pool { + d.poolOnce.Do(func() { + d.pool = newConnPool(newConnFunc(d.dial), d.opt) + }) + return d.pool +} + 
+func (d *sentinelFailover) MasterAddr() (string, error) { + defer d.lock.Unlock() + d.lock.Lock() + + // Try last working sentinel. + if d._sentinel != nil { + addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) + d.resetSentinel() + } else { + addr := net.JoinHostPort(addr[0], addr[1]) + log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr) + return addr, nil + } + } + + for i, sentinelAddr := range d.sentinelAddrs { + sentinel := newSentinel(&Options{ + Addr: sentinelAddr, + + DB: d.opt.DB, + Password: d.opt.Password, + + DialTimeout: d.opt.DialTimeout, + ReadTimeout: d.opt.ReadTimeout, + WriteTimeout: d.opt.WriteTimeout, + + PoolSize: d.opt.PoolSize, + IdleTimeout: d.opt.IdleTimeout, + }) + masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err) + sentinel.Close() + continue + } + + // Push working sentinel to the top. 
+ d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0] + + d.setSentinel(sentinel) + addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) + log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr) + return addr, nil + } + + return "", errors.New("redis: all sentinels are unreachable") +} + +func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) { + d.discoverSentinels(sentinel) + d._sentinel = sentinel + go d.listen() +} + +func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) { + sentinels, err := sentinel.Sentinels(d.masterName).Result() + if err != nil { + log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err) + return + } + for _, sentinel := range sentinels { + vals := sentinel.([]interface{}) + for i := 0; i < len(vals); i += 2 { + key := vals[i].(string) + if key == "name" { + sentinelAddr := vals[i+1].(string) + if !contains(d.sentinelAddrs, sentinelAddr) { + log.Printf( + "redis-sentinel: discovered new %q sentinel: %s", + d.masterName, sentinelAddr, + ) + d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr) + } + } + } + } +} + +func (d *sentinelFailover) listen() { + var pubsub *PubSub + for { + if pubsub == nil { + pubsub = d._sentinel.PubSub() + if err := pubsub.Subscribe("+switch-master"); err != nil { + log.Printf("redis-sentinel: Subscribe failed: %s", err) + d.lock.Lock() + d.resetSentinel() + d.lock.Unlock() + return + } + } + + msgIface, err := pubsub.Receive() + if err != nil { + log.Printf("redis-sentinel: Receive failed: %s", err) + pubsub.Close() + return + } + + switch msg := msgIface.(type) { + case *Message: + switch msg.Channel { + case "+switch-master": + parts := strings.Split(msg.Payload, " ") + if parts[0] != d.masterName { + log.Printf("redis-sentinel: ignore new %s addr", parts[0]) + continue + } + addr := net.JoinHostPort(parts[3], parts[4]) + log.Printf( + "redis-sentinel: new %q addr is %s", + d.masterName, addr, + ) + d.pool.Filter(func(cn *conn) 
bool { + if cn.RemoteAddr().String() != addr { + log.Printf( + "redis-sentinel: closing connection to old master %s", + cn.RemoteAddr(), + ) + return false + } + return true + }) + default: + log.Printf("redis-sentinel: unsupported message: %s", msg) + } + case *Subscription: + // Ignore. + default: + log.Printf("redis-sentinel: unsupported message: %s", msgIface) + } + } +} + +func (d *sentinelFailover) resetSentinel() { + d._sentinel.Close() + d._sentinel = nil +} + +func contains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go new file mode 100644 index 00000000000..ede59bd51e2 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/sentinel_test.go @@ -0,0 +1,185 @@ +package redis_test + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + "text/template" + "time" + + "gopkg.in/redis.v2" +) + +func startRedis(port string) (*exec.Cmd, error) { + cmd := exec.Command("redis-server", "--port", port) + if false { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + if err := cmd.Start(); err != nil { + return nil, err + } + return cmd, nil +} + +func startRedisSlave(port, slave string) (*exec.Cmd, error) { + cmd := exec.Command("redis-server", "--port", port, "--slaveof", "127.0.0.1", slave) + if false { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + if err := cmd.Start(); err != nil { + return nil, err + } + return cmd, nil +} + +func startRedisSentinel(port, masterName, masterPort string) (*exec.Cmd, error) { + dir, err := ioutil.TempDir("", "sentinel") + if err != nil { + return nil, err + } + + sentinelConfFilepath := filepath.Join(dir, "sentinel.conf") + tpl, err := template.New("sentinel.conf").Parse(sentinelConf) + if err != nil { + return nil, err + } + + data := struct { + Port string + MasterName string + MasterPort string 
+ }{ + Port: port, + MasterName: masterName, + MasterPort: masterPort, + } + if err := writeTemplateToFile(sentinelConfFilepath, tpl, data); err != nil { + return nil, err + } + + cmd := exec.Command("redis-server", sentinelConfFilepath, "--sentinel") + if true { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + } + if err := cmd.Start(); err != nil { + return nil, err + } + + return cmd, nil +} + +func writeTemplateToFile(path string, t *template.Template, data interface{}) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + return t.Execute(f, data) +} + +func TestSentinel(t *testing.T) { + masterName := "mymaster" + masterPort := "8123" + slavePort := "8124" + sentinelPort := "8125" + + masterCmd, err := startRedis(masterPort) + if err != nil { + t.Fatal(err) + } + defer masterCmd.Process.Kill() + + // Wait for master to start. + time.Sleep(200 * time.Millisecond) + + master := redis.NewTCPClient(&redis.Options{ + Addr: ":" + masterPort, + }) + if err := master.Ping().Err(); err != nil { + t.Fatal(err) + } + + slaveCmd, err := startRedisSlave(slavePort, masterPort) + if err != nil { + t.Fatal(err) + } + defer slaveCmd.Process.Kill() + + // Wait for slave to start. + time.Sleep(200 * time.Millisecond) + + slave := redis.NewTCPClient(&redis.Options{ + Addr: ":" + slavePort, + }) + if err := slave.Ping().Err(); err != nil { + t.Fatal(err) + } + + sentinelCmd, err := startRedisSentinel(sentinelPort, masterName, masterPort) + if err != nil { + t.Fatal(err) + } + defer sentinelCmd.Process.Kill() + + // Wait for sentinel to start. 
+ time.Sleep(200 * time.Millisecond) + + sentinel := redis.NewTCPClient(&redis.Options{ + Addr: ":" + sentinelPort, + }) + if err := sentinel.Ping().Err(); err != nil { + t.Fatal(err) + } + defer sentinel.Shutdown() + + client := redis.NewFailoverClient(&redis.FailoverOptions{ + MasterName: masterName, + SentinelAddrs: []string{":" + sentinelPort}, + }) + + if err := client.Set("foo", "master").Err(); err != nil { + t.Fatal(err) + } + + val, err := master.Get("foo").Result() + if err != nil { + t.Fatal(err) + } + if val != "master" { + t.Fatalf(`got %q, expected "master"`, val) + } + + // Kill Redis master. + if err := masterCmd.Process.Kill(); err != nil { + t.Fatal(err) + } + if err := master.Ping().Err(); err == nil { + t.Fatalf("master was not killed") + } + + // Wait for Redis sentinel to elect new master. + time.Sleep(5 * time.Second) + + // Check that client picked up new master. + val, err = client.Get("foo").Result() + if err != nil { + t.Fatal(err) + } + if val != "master" { + t.Fatalf(`got %q, expected "master"`, val) + } +} + +var sentinelConf = ` +port {{ .Port }} + +sentinel monitor {{ .MasterName }} 127.0.0.1 {{ .MasterPort }} 1 +sentinel down-after-milliseconds {{ .MasterName }} 1000 +sentinel failover-timeout {{ .MasterName }} 2000 +sentinel parallel-syncs {{ .MasterName }} 1 +` diff --git a/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf b/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf new file mode 100644 index 00000000000..3da90b380f7 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/redis.v2/testdata/sentinel.conf @@ -0,0 +1,6 @@ +port 26379 + +sentinel monitor master 127.0.0.1 6379 1 +sentinel down-after-milliseconds master 2000 +sentinel failover-timeout master 5000 +sentinel parallel-syncs master 4 diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index 8d7697b9871..1b6e9078f61 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -17,6 +17,7 @@ import ( "github.com/macaron-contrib/session" _ 
"github.com/macaron-contrib/session/mysql" _ "github.com/macaron-contrib/session/postgres" + _ "github.com/macaron-contrib/session/redis" "github.com/grafana/grafana/pkg/api" "github.com/grafana/grafana/pkg/api/static" diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index 87d943cdd5c..24701c48d75 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -246,7 +246,7 @@ func NewConfigContext(config string) { func readSessionConfig() { sec := Cfg.Section("session") SessionOptions = session.Options{} - SessionOptions.Provider = sec.Key("provider").In("memory", []string{"memory", "file", "redis", "mysql"}) + SessionOptions.Provider = sec.Key("provider").In("memory", []string{"memory", "file", "redis", "mysql", "postgres"}) SessionOptions.ProviderConfig = strings.Trim(sec.Key("provider_config").String(), "\" ") SessionOptions.CookieName = sec.Key("cookie_name").MustString("grafana_sess") SessionOptions.CookiePath = AppSubUrl From 1f330d77534c403af7c3d579eaf47e69a6f1bb8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 15:23:26 +0200 Subject: [PATCH 117/274] Basic auth: Fixed issue when using basic auth proxy infront of Grafana, Fixes #1673 --- CHANGELOG.md | 1 + pkg/middleware/auth.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 165f7b40253..1c99d6efef3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [Issue #1681](https://github.com/grafana/grafana/issues/1681). MySQL session: fixed problem using mysql as session store - [Issue #1671](https://github.com/grafana/grafana/issues/1671). Data sources: Fixed issue with changing default data source (should not require full page load to take effect, now fixed) - [Issue #1685](https://github.com/grafana/grafana/issues/1685). Search: Dashboard results should be sorted alphabetically +- [Issue #1673](https://github.com/grafana/grafana/issues/1673). 
Basic auth: Fixed issue when using basic auth proxy infront of Grafana # 2.0.0-Beta1 (2015-03-30) diff --git a/pkg/middleware/auth.go b/pkg/middleware/auth.go index 3d73b15c7f3..9fb09a5c395 100644 --- a/pkg/middleware/auth.go +++ b/pkg/middleware/auth.go @@ -28,7 +28,7 @@ func getRequestUserId(c *Context) int64 { func getApiKey(c *Context) string { header := c.Req.Header.Get("Authorization") parts := strings.SplitN(header, " ", 2) - if len(parts) == 2 || parts[0] == "Bearer" { + if len(parts) == 2 && parts[0] == "Bearer" { key := parts[1] return key } From 36110d097774d4db1dc6e5c8895b390d759e8673 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 15:56:39 +0200 Subject: [PATCH 118/274] Updated migration docs --- docs/sources/installation/migrating_to2.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/sources/installation/migrating_to2.md b/docs/sources/installation/migrating_to2.md index 6e561bc0424..b3d4dab62ce 100644 --- a/docs/sources/installation/migrating_to2.md +++ b/docs/sources/installation/migrating_to2.md @@ -8,9 +8,9 @@ page_keywords: grafana, installation, migration, documentation Grafana 2.0 represents a major update to Grafana. It brings new capabilities, many of which are enabled by its new backend server and integrated database. -The new backend lays a solid foundation that we hope to build on over the coming months. For the 2.0 release, it enables authentication as well as server-side sharing and rendering. +The new backend lays a solid foundation that we hope to build on over the coming months. For the 2.0 release, it enables authentication as well as server-side sharing and rendering. -We've attempted to provide a smooth migration path for V1.9 users to migrate to Grafana 2.0. +We've attempted to provide a smooth migration path for V1.9 users to migrate to Grafana 2.0. ## Adding Data sources @@ -18,9 +18,12 @@ The config.js file has been deprecated. 
Data sources are now managed via the UI From here, you can add any Graphite, InfluxDB, elasticsearch, and OpenTSDB datasources that you were using with Grafana 1.x. Grafana 2.0 can be configured to communicate with your datasource using a backend mode which can eliminate many CORS-related issues, as well as provide more secure authentication to your datasources. +> *Note* When you add your data sources please name them exacly as you named them in config.js in Grafana 1.x. That name is referenced by panels +> , annotation and template queries. That way when you import your old dashboard they will work without any changes. + ## Importing your existing dashboards -Grafana 2.0 now has integrated dashboard storage engine that can be configured to use an internal sqlite database, MySQL, or Postgres. This eliminates the need to use Elasticsearch for dashboard storage for Graphite users. Grafana 2.0 does not support storing dashboards in InfluxDB. +Grafana 2.0 now has integrated dashboard storage engine that can be configured to use an internal sqlite database, MySQL, or Postgres. This eliminates the need to use Elasticsearch for dashboard storage for Graphite users. Grafana 2.0 does not support storing dashboards in InfluxDB. You can seamlessly import your existing dashboards. @@ -47,7 +50,7 @@ and click the `Import` button. ![](/img/v2/migrate_dashboards.jpg) -Your dashboards should be automatically imported into the Grafana 2.0 backend. +Your dashboards should be automatically imported into the Grafana 2.0 backend. Dashboards will no longer be stored in your previous elasticsearch or InfluxDB databases. 
From a96e4a343c2853eb371e0a808d6d0464eb860387 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 16:05:42 +0200 Subject: [PATCH 119/274] Only create admin user specified in config file when there are no users in the database, Fixes #1680 --- pkg/services/sqlstore/sqlstore.go | 37 ++++++++++++++++++------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/pkg/services/sqlstore/sqlstore.go b/pkg/services/sqlstore/sqlstore.go index 757953f5b4e..609a4d9fb9e 100644 --- a/pkg/services/sqlstore/sqlstore.go +++ b/pkg/services/sqlstore/sqlstore.go @@ -33,24 +33,29 @@ var ( ) func EnsureAdminUser() { - adminQuery := m.GetUserByLoginQuery{LoginOrEmail: setting.AdminUser} + statsQuery := m.GetSystemStatsQuery{} - if err := bus.Dispatch(&adminQuery); err == m.ErrUserNotFound { - cmd := m.CreateUserCommand{} - cmd.Login = setting.AdminUser - cmd.Email = setting.AdminUser + "@localhost" - cmd.Password = setting.AdminPassword - cmd.IsAdmin = true - - if err = bus.Dispatch(&cmd); err != nil { - log.Error(3, "Failed to create default admin user", err) - return - } - - log.Info("Created default admin user: %v", setting.AdminUser) - } else if err != nil { - log.Error(3, "Could not determine if admin user exists: %v", err) + if err := bus.Dispatch(&statsQuery); err != nil { + log.Fatal(3, "Could not determine if admin user exists: %v", err) + return } + + if statsQuery.Result.UserCount > 0 { + return + } + + cmd := m.CreateUserCommand{} + cmd.Login = setting.AdminUser + cmd.Email = setting.AdminUser + "@localhost" + cmd.Password = setting.AdminPassword + cmd.IsAdmin = true + + if err := bus.Dispatch(&cmd); err != nil { + log.Error(3, "Failed to create default admin user", err) + return + } + + log.Info("Created default admin user: %v", setting.AdminUser) } func NewEngine() { From fde5ba85a015c0be0dbcfb42631294447fe74faa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Wed, 1 Apr 2015 17:24:08 +0200 Subject: 
[PATCH 120/274] Docs: Updated configuration docs with port 80 tips --- docs/sources/installation/configuration.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/sources/installation/configuration.md b/docs/sources/installation/configuration.md index b1bf879cc1d..7d30f61dce5 100644 --- a/docs/sources/installation/configuration.md +++ b/docs/sources/installation/configuration.md @@ -42,7 +42,16 @@ Then you can override that using: The ip address to bind to, if empty will bind to all interfaces ### http_port -The port to bind to, defaults to `3000` +The port to bind to, defaults to `3000`. To use port 80 you need to either give the grafana binary permission for example: + +``` +$ sudo setcap 'cap_net_bind_service=+ep' /opt/grafana/current/grafana +``` + +Or redirect port 80 to the grafana port using: +``` +$ sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 3000 +``` ### domain This setting is only used in as a part of the root_url setting (see below). 
Important if you From 00fa7f5e86c76e8c8704d75f8fcbb76175bb9892 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 2 Apr 2015 07:51:16 +0200 Subject: [PATCH 121/274] Removed unusued config, updated sample config with session config --- conf/defaults.ini | 4 ---- conf/sample.ini | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/conf/defaults.ini b/conf/defaults.ini index d328733abc9..ff159dc8531 100644 --- a/conf/defaults.ini +++ b/conf/defaults.ini @@ -57,10 +57,6 @@ cookie_name = grafana_sess cookie_secure = false ; Session life time, default is 86400 session_life_time = 86400 -; session id hash func, Either "sha1", "sha256" or "md5" default is sha1 -session_id_hashfunc = sha1 -; Session hash key, default is use random string -session_id_hashkey = [security] ; default admin user, created on startup diff --git a/conf/sample.ini b/conf/sample.ini index 3d4f6cf8e57..e33938dbef7 100644 --- a/conf/sample.ini +++ b/conf/sample.ini @@ -42,6 +42,22 @@ ssl_mode = disable ; For "sqlite3" only path = /opt/grafana/data/grafana.db +[session] +; Either "memory", "file", "redis", "mysql", default is "memory" +provider = file +; Provider config options +; memory: not have any config yet +; file: session file path, e.g. `data/sessions` +; redis: config like redis server addr, poolSize, password, e.g. `127.0.0.1:6379,100,grafana` +; mysql: go-sql-driver/mysql dsn config string, e.g. 
`user:password@tcp(127.0.0.1)/database_name` +provider_config = /opt/grafana/data/sessions +; Session cookie name +cookie_name = grafana_sess +; If you use session in https only, default is false +cookie_secure = false +; Session life time, default is 86400 +session_life_time = 86400 + [security] ; default admin user, created on startup admin_user = admin From 3b737999d6fc92662527a74f7eed44ba5edc4819 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 2 Apr 2015 08:08:22 +0200 Subject: [PATCH 122/274] Unsaved changes: Do not show for users with role , Fixes #1703 --- CHANGELOG.md | 1 + public/app/features/dashboard/unsavedChangesSrv.js | 12 ++++++++---- public/views/index.html | 4 ---- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c99d6efef3..fdd384626dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # 2.0.0-RC1 (unreleased) **FIxes** +- [Issue #1703](https://github.com/grafana/grafana/issues/1703). Unsaved changes: Do not show for users with role `Viewer` - [Issue #1675](https://github.com/grafana/grafana/issues/1675). Data source proxy: Fixed issue with Gzip enabled and data source proxy - [Issue #1681](https://github.com/grafana/grafana/issues/1681). MySQL session: fixed problem using mysql as session store - [Issue #1671](https://github.com/grafana/grafana/issues/1671). 
Data sources: Fixed issue with changing default data source (should not require full page load to take effect, now fixed) diff --git a/public/app/features/dashboard/unsavedChangesSrv.js b/public/app/features/dashboard/unsavedChangesSrv.js index 0e8f6b34e0d..ab933094799 100644 --- a/public/app/features/dashboard/unsavedChangesSrv.js +++ b/public/app/features/dashboard/unsavedChangesSrv.js @@ -12,7 +12,7 @@ function(angular, _, config) { var module = angular.module('grafana.services'); - module.service('unsavedChangesSrv', function($rootScope, $modal, $q, $location, $timeout) { + module.service('unsavedChangesSrv', function($rootScope, $modal, $q, $location, $timeout, contextSrv) { var self = this; var modalScope = $rootScope.$new(); @@ -36,7 +36,11 @@ function(angular, _, config) { self.originalPath = $location.path(); }); + this.ignoreChangeCheck = function() { + }; + window.onbeforeunload = function() { + if (contextSrv.hasRole('Viewer')) { return true; } if (self.has_unsaved_changes()) { return "There are unsaved changes to this dashboard"; } @@ -44,9 +48,9 @@ function(angular, _, config) { this.init = function() { $rootScope.$on("$locationChangeStart", function(event, next) { - if (self.originalPath === $location.path()) { - return; - } + // check if we should look for changes + if (self.originalPath === $location.path()) { return true; } + if (contextSrv.hasRole('Viewer')) { return true; } if (self.has_unsaved_changes()) { event.preventDefault(); diff --git a/public/views/index.html b/public/views/index.html index 29a31d1eda3..d5dfab06a64 100644 --- a/public/views/index.html +++ b/public/views/index.html @@ -16,10 +16,6 @@ - - From e6492f7db9bd7332046118ccfd12eb2f31ac9756 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Thu, 2 Apr 2015 09:21:38 +0200 Subject: [PATCH 123/274] Share modal: Override UI theme via URL param for Share link, rendered panel, or embedded panel, Closes #1701 --- CHANGELOG.md | 3 + 
docs/sources/guides/whats-new-in-v2.md | 2 +- pkg/api/index.go | 5 ++ .../dashboard/partials/shareModal.html | 81 ++++++++++++++----- .../app/features/dashboard/shareModalCtrl.js | 8 +- public/css/bootstrap.dark.min.css | 2 +- public/css/bootstrap.light.min.css | 2 +- public/css/grafana.dark.min.css | 2 +- public/css/grafana.light.min.css | 2 +- public/css/less/grafana.less | 17 ++-- public/test/specs/shareModalCtrl-specs.js | 14 +++- 11 files changed, 104 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fdd384626dd..96a7f7ce0c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ # 2.0.0-RC1 (unreleased) +**Enhancements** +- [Issue #1701](https://github.com/grafana/grafana/issues/1701). Share modal: Override UI theme via URL param for Share link, rendered panel, or embedded panel + **FIxes** - [Issue #1703](https://github.com/grafana/grafana/issues/1703). Unsaved changes: Do not show for users with role `Viewer` - [Issue #1675](https://github.com/grafana/grafana/issues/1675). Data source proxy: Fixed issue with Gzip enabled and data source proxy diff --git a/docs/sources/guides/whats-new-in-v2.md b/docs/sources/guides/whats-new-in-v2.md index fc40506ef36..ed27cd9af42 100644 --- a/docs/sources/guides/whats-new-in-v2.md +++ b/docs/sources/guides/whats-new-in-v2.md @@ -68,7 +68,7 @@ You can embed a single panel on another web page using the panel share dialog. B with a graph panel (taken from dashoard snapshot at [snapshot.raintank.io](http://snapshot.raintank.io). Try hovering or zooming on the panel below! 
- + ## New dashboard top header diff --git a/pkg/api/index.go b/pkg/api/index.go index ceafc37b7c1..d9ecf65b699 100644 --- a/pkg/api/index.go +++ b/pkg/api/index.go @@ -28,6 +28,11 @@ func setIndexViewData(c *middleware.Context) error { currentUser.Name = currentUser.Login } + themeUrlParam := c.Query("theme") + if themeUrlParam == "light" { + currentUser.LightTheme = true + } + c.Data["User"] = currentUser c.Data["Settings"] = settings c.Data["AppUrl"] = setting.AppUrl diff --git a/public/app/features/dashboard/partials/shareModal.html b/public/app/features/dashboard/partials/shareModal.html index 6b9ff8aef16..75af7ee4c22 100644 --- a/public/app/features/dashboard/partials/shareModal.html +++ b/public/app/features/dashboard/partials/shareModal.html @@ -16,19 +16,25 @@
    - +
    + +