Merge branch 'master' into feature/add_es_alerting

commit e5e1683840
Marcus Efraimsson 2018-05-28 18:10:34 +02:00
443 changed files with 35788 additions and 17855 deletions


@ -1,3 +1,14 @@
aliases:
# Workflow filters
- &filter-only-release
branches:
ignore: /.*/
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
- &filter-not-release
tags:
ignore: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
version: 2
jobs:
@ -46,7 +57,6 @@ jobs:
command: 'sudo npm install -g yarn --quiet'
- restore_cache:
key: dependency-cache-{{ checksum "yarn.lock" }}
# Could we skip this step if the cache has been restored? `[ -d node_modules ] || yarn install ...` should be able to apply to build step as well
- run:
name: yarn install
command: 'yarn install --pure-lockfile --no-progress'
@ -68,15 +78,27 @@ jobs:
name: build backend and run go tests
command: './scripts/circle-test-backend.sh'
build:
build-all:
docker:
- image: grafana/build-container:v0.1
- image: grafana/build-container:1.0.0
working_directory: /go/src/github.com/grafana/grafana
steps:
- checkout
- run:
name: prepare build tools
command: '/tmp/bootstrap.sh'
- restore_cache:
key: phantomjs-binaries-{{ checksum "scripts/build/download-phantomjs.sh" }}
- run:
name: download phantomjs binaries
command: './scripts/build/download-phantomjs.sh'
- save_cache:
key: phantomjs-binaries-{{ checksum "scripts/build/download-phantomjs.sh" }}
paths:
- /tmp/phantomjs
- run:
name: build and package grafana
command: './scripts/build/build.sh'
command: './scripts/build/build-all.sh'
- run:
name: sign packages
command: './scripts/build/sign_packages.sh'
@ -92,6 +114,8 @@ jobs:
- dist/grafana*
- scripts/*.sh
- scripts/publish
- store_artifacts:
path: dist
build-enterprise:
docker:
@ -154,45 +178,43 @@ workflows:
version: 2
test-and-build:
jobs:
- build-all:
filters: *filter-not-release
- codespell:
filters:
tags:
only: /.*/
filters: *filter-not-release
- gometalinter:
filters:
tags:
only: /.*/
- build:
filters:
tags:
only: /.*/
filters: *filter-not-release
- test-frontend:
filters:
tags:
only: /.*/
filters: *filter-not-release
- test-backend:
filters:
tags:
only: /.*/
filters: *filter-not-release
- deploy-master:
requires:
- build-all
- test-backend
- test-frontend
- build
- codespell
- gometalinter
filters:
branches:
only: master
release:
jobs:
- build-all:
filters: *filter-only-release
- codespell:
filters: *filter-only-release
- gometalinter:
filters: *filter-only-release
- test-frontend:
filters: *filter-only-release
- test-backend:
filters: *filter-only-release
- deploy-release:
requires:
- build-all
- test-backend
- test-frontend
- build
filters:
branches:
ignore: /.*/
tags:
only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
# - build-enterprise:
# filters:
# tags:
# only: /.*/
- codespell
- gometalinter
filters: *filter-only-release

.gitignore vendored

@ -65,4 +65,4 @@ debug.test
/vendor/**/*_test.go
/vendor/**/.editorconfig
/vendor/**/appengine*
*.orig
*.orig


@ -12,6 +12,7 @@
* **Dashboard**: JSON Model under dashboard settings can now be updated & changes saved, [#1429](https://github.com/grafana/grafana/issues/1429), thx [@jereksel](https://github.com/jereksel)
* **Security**: Fix XSS vulnerabilities in dashboard links [#11813](https://github.com/grafana/grafana/pull/11813)
* **Singlestat**: Fix "time of last point" shows local time when dashboard timezone set to UTC [#10338](https://github.com/grafana/grafana/issues/10338)
* **Prometheus**: Add support for passing timeout parameter to Prometheus [#11788](https://github.com/grafana/grafana/pull/11788), thx [@mtanda](https://github.com/mtanda)
# 5.1.3 (2018-05-16)

Gopkg.lock generated

@ -4,8 +4,8 @@
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "767c40d6a2e058483c25fa193e963a22da17236d"
version = "v0.18.0"
revision = "056a55f54a6cc77b440b31a56a5e7c3982d32811"
version = "v0.22.0"
[[projects]]
name = "github.com/BurntSushi/toml"
@ -19,12 +19,6 @@
packages = ["."]
revision = "7677a1d7c1137cd3dd5ba7a076d0c898a1ef4520"
[[projects]]
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
revision = "b2a4d4ae21c789b689dd162deb819665567f481c"
version = "0.10.0"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
@ -44,6 +38,8 @@
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/ec2query",
@ -58,8 +54,8 @@
"service/s3",
"service/sts"
]
revision = "decd990ddc5dcdf2f73309cbcab90d06b996ca28"
version = "v1.12.67"
revision = "c7cd1ebe87257cde9b65112fc876b0339ea0ac30"
version = "v1.13.49"
[[projects]]
branch = "master"
@ -71,7 +67,7 @@
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
branch = "master"
@ -126,14 +122,14 @@
[[projects]]
name = "github.com/fatih/color"
packages = ["."]
revision = "570b54cabe6b8eb0bc2dfce68d964677d63b5260"
version = "v1.5.0"
revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4"
version = "v1.7.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
version = "v1.32.0"
revision = "6529cf7c58879c08d927016dde4477f18a0634cb"
version = "v1.36.0"
[[projects]]
name = "github.com/go-ldap/ldap"
@ -182,10 +178,10 @@
version = "v1.7.0"
[[projects]]
branch = "master"
name = "github.com/go-xorm/builder"
packages = ["."]
revision = "488224409dd8aa2ce7a5baf8d10d55764a913738"
revision = "bad0a612f0d6277b953910822ab5dfb30dd18237"
version = "v0.2.0"
[[projects]]
name = "github.com/go-xorm/core"
@ -209,13 +205,13 @@
"ptypes/duration",
"ptypes/timestamp"
]
revision = "c65a0412e71e8b9b3bfd22925720d23c0f054237"
revision = "927b65914520a8b7d44f5c9057611cfec6b2e2d0"
[[projects]]
branch = "master"
name = "github.com/gopherjs/gopherjs"
packages = ["js"]
revision = "178c176a91fe05e3e6c58fa5c989bad19e6cdcb3"
revision = "8dffc02ea1cb8398bb73f30424697c60fcf8d4c5"
[[projects]]
name = "github.com/gorilla/websocket"
@ -230,33 +226,36 @@
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/grafana/grafana_plugin_model"
packages = ["go/datasource"]
revision = "dfe5dc0a6ce05825ba7fe2d0323d92e631bffa89"
branch = "renderer"
name = "github.com/grafana/grafana-plugin-model"
packages = [
"go/datasource",
"go/renderer"
]
revision = "84176c64269d8060f99e750ee8aba6f062753336"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-hclog"
packages = ["."]
revision = "5bcb0f17e36442247290887cc914a6e507afa5c4"
revision = "69ff559dc25f3b435631604f573a5fa1efdb6433"
[[projects]]
name = "github.com/hashicorp/go-plugin"
packages = ["."]
revision = "3e6d191694b5a3a2b99755f31b47fa209e4bcd09"
revision = "e8d22c780116115ae5624720c9af0c97afe4f551"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-version"
packages = ["."]
revision = "4fe82ae3040f80a03d04d2cccb5606a626b8e1ee"
revision = "23480c0665776210b5fbbac6eaaee40e3e6a96b7"
[[projects]]
branch = "master"
name = "github.com/hashicorp/yamux"
packages = ["."]
revision = "683f49123a33db61abfb241b7ac5e4af4dc54d55"
revision = "2658be15c5f05e76244154714161f17e3e77de2e"
[[projects]]
name = "github.com/inconshreveable/log15"
@ -297,16 +296,16 @@
version = "v1.1"
[[projects]]
branch = "master"
name = "github.com/kr/pretty"
packages = ["."]
revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4"
revision = "73f6ac0b30a98e433b289500d779f50c1a6f0712"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/kr/text"
packages = ["."]
revision = "7cafcd837844e784b526369c9bce262804aebc60"
revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f"
version = "v0.1.0"
[[projects]]
branch = "master"
@ -315,7 +314,7 @@
".",
"oid"
]
revision = "61fe37aa2ee24fabcdbe5c4ac1d4ac566f88f345"
revision = "d34b9ff171c21ad295489235aec8b6626023cd04"
[[projects]]
name = "github.com/mattn/go-colorable"
@ -347,6 +346,12 @@
packages = ["."]
revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"
[[projects]]
name = "github.com/oklog/run"
packages = ["."]
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
version = "v1.0.0"
[[projects]]
name = "github.com/opentracing/opentracing-go"
packages = [
@ -394,7 +399,7 @@
"internal/bitbucket.org/ww/goautoneg",
"model"
]
revision = "89604d197083d4781071d3c65855d24ecfb0a563"
revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c"
[[projects]]
branch = "master"
@ -402,10 +407,10 @@
packages = [
".",
"internal/util",
"nfsd",
"nfs",
"xfs"
]
revision = "85fadb6e89903ef7cca6f6a804474cd5ea85b6e1"
revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e"
[[projects]]
branch = "master"
@ -414,10 +419,10 @@
revision = "cb7f23ec59bec0d61b19c56cd88cee3d0cc1870c"
[[projects]]
branch = "master"
name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"]
revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
version = "v1.0.0"
[[projects]]
name = "github.com/smartystreets/assertions"
@ -426,8 +431,8 @@
"internal/go-render/render",
"internal/oglematchers"
]
revision = "0b37b35ec7434b77e77a4bb29b79677cced992ea"
version = "1.8.1"
revision = "7678a5452ebea5b7090a6b163f844c133f523da2"
version = "1.8.3"
[[projects]]
name = "github.com/smartystreets/goconvey"
@ -453,8 +458,11 @@
"internal/baggage",
"internal/baggage/remote",
"internal/spanlog",
"internal/throttler",
"internal/throttler/remote",
"log",
"rpcmetrics",
"thrift",
"thrift-gen/agent",
"thrift-gen/baggage",
"thrift-gen/jaeger",
@ -462,14 +470,14 @@
"thrift-gen/zipkincore",
"utils"
]
revision = "3ac96c6e679cb60a74589b0d0aa7c70a906183f7"
version = "v2.11.2"
revision = "b043381d944715b469fd6b37addfd30145ca1758"
version = "v2.14.0"
[[projects]]
name = "github.com/uber/jaeger-lib"
packages = ["metrics"]
revision = "7f95f4f7e80028096410abddaae2556e4c61b59f"
version = "v1.3.1"
revision = "ed3a127ec5fef7ae9ea95b01b542c47fbd999ce5"
version = "v1.5.0"
[[projects]]
name = "github.com/yudai/gojsondiff"
@ -493,7 +501,7 @@
"md4",
"pbkdf2"
]
revision = "3d37316aaa6bd9929127ac9a527abf408178ea7b"
revision = "1a580b3eff7814fc9b40602fd35256c63b50f491"
[[projects]]
branch = "master"
@ -501,14 +509,14 @@
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
revision = "2491c5de3490fced2f6cff376127c667efeed857"
[[projects]]
branch = "master"
@ -520,22 +528,21 @@
"jws",
"jwt"
]
revision = "b28fcf2b08a19742b43084fb40ab78ac6c3d8067"
revision = "cdc340f7c179dbbfa4afd43b7614e8fcadde4269"
[[projects]]
branch = "master"
name = "golang.org/x/sync"
packages = ["errgroup"]
revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5"
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "af50095a40f9041b3b38960738837185c26e9419"
revision = "7c87d13f8e835d2fb3a70a2912c811ed0c1d241b"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = [
"collate",
@ -553,7 +560,8 @@
"unicode/norm",
"unicode/rangetable"
]
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
name = "google.golang.org/appengine"
@ -577,7 +585,7 @@
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "a8101f21cf983e773d0c1133ebc5424792003214"
revision = "7bb2a897381c9c5ab2aeb8614f758d7766af68ff"
[[projects]]
name = "google.golang.org/grpc"
@ -590,6 +598,7 @@
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"health",
@ -607,8 +616,8 @@
"tap",
"transport"
]
revision = "6b51017f791ae1cfbec89c52efdf444b13b550ef"
version = "v1.9.2"
revision = "1e2570b1b19ade82d8dbb31bba4e65e9f9ef5b34"
version = "v1.11.1"
[[projects]]
branch = "v3"
@ -631,14 +640,14 @@
[[projects]]
name = "gopkg.in/ini.v1"
packages = ["."]
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
version = "v1.32.0"
revision = "6529cf7c58879c08d927016dde4477f18a0634cb"
version = "v1.36.0"
[[projects]]
name = "gopkg.in/macaron.v1"
packages = ["."]
revision = "75f2e9b42e99652f0d82b28ccb73648f44615faa"
version = "v1.2.4"
revision = "c1be95e6d21e769e44e1ec33cec9da5837861c10"
version = "v1.3.1"
[[projects]]
branch = "v2"
@ -653,14 +662,14 @@
version = "v2.3.2"
[[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "bd54a1a836599d90b36d4ac1af56d716ef9ca5be4865e217bddd49e3d32a1997"
inputs-digest = "6c7ae4bcbe7fa4430d3bdbf204df1b7c59cba88151fbcefa167ce15e6351b6d3"
solver-name = "gps-cdcl"
solver-version = 1


@ -85,11 +85,11 @@ ignored = [
[[constraint]]
name = "github.com/go-xorm/core"
version = "0.5.7"
version = "=0.5.7"
[[constraint]]
name = "github.com/go-xorm/xorm"
version = "0.6.4"
version = "=0.6.4"
[[constraint]]
name = "github.com/gorilla/websocket"
@ -100,13 +100,17 @@ ignored = [
version = "1.1.1"
[[constraint]]
branch = "master"
name = "github.com/grafana/grafana_plugin_model"
branch = "renderer"
name = "github.com/grafana/grafana-plugin-model"
[[constraint]]
branch = "master"
name = "github.com/hashicorp/go-hclog"
[[constraint]]
name = "github.com/hashicorp/go-plugin"
revision = "e8d22c780116115ae5624720c9af0c97afe4f551"
[[constraint]]
branch = "master"
name = "github.com/hashicorp/go-version"


@ -12,6 +12,10 @@ module.exports = function (grunt) {
platform: process.platform.replace('win32', 'windows'),
};
if (grunt.option('platform')) {
config.platform = grunt.option('platform');
}
if (grunt.option('arch')) {
config.arch = grunt.option('arch');
} else {


@ -64,8 +64,6 @@ Run karma tests
npm run karma
```
Run
### Recompile backend on source change
To rebuild on source change.


@ -38,16 +38,3 @@ artifacts:
- path: grafana-*windows-*.*
name: binzip
type: zip
deploy:
- provider: Environment
name: GrafanaReleaseMaster
on:
buildType: master
- provider: Environment
name: GrafanaReleaseRelease
on:
buildType: release


@ -27,8 +27,7 @@ var (
goarch string
goos string
gocc string
gocxx string
cgo string
cgo bool
pkgArch string
version string = "v1"
// deb & rpm does not support semver so have to handle their version a little differently
@ -53,8 +52,7 @@ func main() {
flag.StringVar(&goarch, "goarch", runtime.GOARCH, "GOARCH")
flag.StringVar(&goos, "goos", runtime.GOOS, "GOOS")
flag.StringVar(&gocc, "cc", "", "CC")
flag.StringVar(&gocxx, "cxx", "", "CXX")
flag.StringVar(&cgo, "cgo-enabled", "", "CGO_ENABLED")
flag.BoolVar(&cgo, "cgo-enabled", cgo, "Enable cgo")
flag.StringVar(&pkgArch, "pkg-arch", "", "PKG ARCH")
flag.StringVar(&phjsToRelease, "phjs", "", "PhantomJS binary")
flag.BoolVar(&race, "race", race, "Use race detector")
@ -93,20 +91,24 @@ func main() {
build("grafana-server", "./pkg/cmd/grafana-server", []string{})
case "build":
clean()
//clean()
for _, binary := range binaries {
build(binary, "./pkg/cmd/"+binary, []string{})
}
case "build-frontend":
grunt(gruntBuildArg("build")...)
case "test":
test("./pkg/...")
grunt("test")
case "package":
grunt(gruntBuildArg("release")...)
if runtime.GOOS != "windows" {
createLinuxPackages()
}
grunt(gruntBuildArg("build")...)
packageGrafana()
case "package-only":
packageGrafana()
case "pkg-rpm":
grunt(gruntBuildArg("release")...)
@ -131,6 +133,22 @@ func main() {
}
}
func packageGrafana() {
platformArg := fmt.Sprintf("--platform=%v", goos)
previousPkgArch := pkgArch
if pkgArch == "" {
pkgArch = goarch
}
postProcessArgs := gruntBuildArg("package")
postProcessArgs = append(postProcessArgs, platformArg)
grunt(postProcessArgs...)
pkgArch = previousPkgArch
if goos == "linux" {
createLinuxPackages()
}
}
func makeLatestDistCopies() {
files, err := ioutil.ReadDir("dist")
if err != nil {
@ -138,9 +156,9 @@ func makeLatestDistCopies() {
}
latestMapping := map[string]string{
".deb": "dist/grafana_latest_amd64.deb",
".rpm": "dist/grafana-latest-1.x86_64.rpm",
".tar.gz": "dist/grafana-latest.linux-x64.tar.gz",
"_amd64.deb": "dist/grafana_latest_amd64.deb",
".x86_64.rpm": "dist/grafana-latest-1.x86_64.rpm",
".linux-amd64.tar.gz": "dist/grafana-latest.linux-x64.tar.gz",
}
for _, file := range files {
@ -211,6 +229,10 @@ type linuxPackageOptions struct {
}
func createDebPackages() {
previousPkgArch := pkgArch
if pkgArch == "armv7" {
pkgArch = "armhf"
}
createPackage(linuxPackageOptions{
packageType: "deb",
homeDir: "/usr/share/grafana",
@ -228,9 +250,17 @@ func createDebPackages() {
depends: []string{"adduser", "libfontconfig"},
})
pkgArch = previousPkgArch
}
func createRpmPackages() {
previousPkgArch := pkgArch
switch {
case pkgArch == "armv7":
pkgArch = "armhfp"
case pkgArch == "arm64":
pkgArch = "aarch64"
}
createPackage(linuxPackageOptions{
packageType: "rpm",
homeDir: "/usr/share/grafana",
@ -248,6 +278,7 @@ func createRpmPackages() {
depends: []string{"/sbin/service", "fontconfig", "freetype", "urw-fonts"},
})
pkgArch = previousPkgArch
}
func createLinuxPackages() {
@ -386,7 +417,12 @@ func test(pkg string) {
}
func build(binaryName, pkg string, tags []string) {
binary := "./bin/" + binaryName
binary := fmt.Sprintf("./bin/%s-%s/%s", goos, goarch, binaryName)
if isDev {
//dont include os and arch in output path in dev environment
binary = fmt.Sprintf("./bin/%s", binaryName)
}
if goos == "windows" {
binary += ".exe"
}
@ -408,6 +444,7 @@ func build(binaryName, pkg string, tags []string) {
if !isDev {
setBuildEnv()
runPrint("go", "version")
fmt.Printf("Targeting %s/%s\n", goos, goarch)
}
runPrint("go", args...)
@ -451,6 +488,14 @@ func clean() {
func setBuildEnv() {
os.Setenv("GOOS", goos)
if goos == "windows" {
// require windows >=7
os.Setenv("CGO_CFLAGS", "-D_WIN32_WINNT=0x0601")
}
if goarch != "amd64" || goos != "linux" {
// needed for all other archs
cgo = true
}
if strings.HasPrefix(goarch, "armv") {
os.Setenv("GOARCH", "arm")
os.Setenv("GOARM", goarch[4:])
@ -460,15 +505,12 @@ func setBuildEnv() {
if goarch == "386" {
os.Setenv("GO386", "387")
}
if cgo != "" {
os.Setenv("CGO_ENABLED", cgo)
if cgo {
os.Setenv("CGO_ENABLED", "1")
}
if gocc != "" {
os.Setenv("CC", gocc)
}
if gocxx != "" {
os.Setenv("CXX", gocxx)
}
}
func getGitSha() string {


@ -237,6 +237,9 @@ disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy
disable_signout_menu = false
# URL to redirect the user to after sign out
signout_redirect_url =
#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access


@ -217,6 +217,9 @@ log_queries =
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
;signout_redirect_url =
#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access


@ -70,7 +70,7 @@ JSON Body schema:
Content-Type: application/json
{
"deleteKey":"XXXXXXX",
"deleteUrl":"myurl/dashboard/snapshot/XXXXXXX",
"deleteUrl":"myurl/api/snapshots-delete/XXXXXXX",
"key":"YYYYYYY",
"url":"myurl/dashboard/snapshot/YYYYYYY"
}
@ -81,7 +81,46 @@ Keys:
- **deleteKey** Key generated to delete the snapshot
- **key** Key generated to share the dashboard
## Get Snapshot by Id
## Get list of Snapshots
`GET /api/dashboard/snapshots`
Query parameters:
- **query** Search Query
- **limit** Limit the number of returned results
**Example Request**:
```http
GET /api/dashboard/snapshots HTTP/1.1
Accept: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```
**Example Response**:
```http
HTTP/1.1 200
Content-Type: application/json
[
{
"id":8,
"name":"Home",
"key":"YYYYYYY",
"orgId":1,
"userId":1,
"external":false,
"externalUrl":"",
"expires":"2200-13-32T25:23:23+02:00",
"created":"2200-13-32T28:24:23+02:00",
"updated":"2200-13-32T28:24:23+02:00"
}
]
```
## Get Snapshot by Key
`GET /api/snapshots/:key`
@ -90,7 +129,6 @@ Keys:
```http
GET /api/snapshots/YYYYYYY HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```
@ -140,16 +178,15 @@ Content-Type: application/json
}
```
## Delete Snapshot by deleteKey
## Delete Snapshot by Key
`GET /api/snapshots-delete/:deleteKey`
`DELETE /api/snapshots/:key`
**Example Request**:
```http
GET /api/snapshots/YYYYYYY HTTP/1.1
DELETE /api/snapshots/YYYYYYY HTTP/1.1
Accept: application/json
Content-Type: application/json
Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
```
@ -159,5 +196,27 @@ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
HTTP/1.1 200
Content-Type: application/json
{"message":"Snapshot deleted. It might take an hour before it's cleared from a CDN cache."}
{"message":"Snapshot deleted. It might take an hour before it's cleared from any CDN caches."}
```
## Delete Snapshot by deleteKey
This API call can be used without authentication by using the secret delete key for the snapshot.
`GET /api/snapshots-delete/:deleteKey`
**Example Request**:
```http
GET /api/snapshots-delete/XXXXXXX HTTP/1.1
Accept: application/json
```
**Example Response**:
```http
HTTP/1.1 200
Content-Type: application/json
{"message":"Snapshot deleted. It might take an hour before it's cleared from any CDN caches."}
```
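For illustration only (not part of the documented API examples), a minimal Go client sketch of the unauthenticated delete-by-deleteKey call described above; the base URL and delete key are placeholders.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// No Authorization header is needed: possession of the secret delete key
	// authorizes the request, as documented above.
	resp, err := http.Get("http://localhost:3000/api/snapshots-delete/XXXXXXX")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // expect 200 and a "Snapshot deleted..." message
}
```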


@ -18,10 +18,9 @@
"@types/react-dom": "^16.0.3",
"angular-mocks": "^1.6.6",
"autoprefixer": "^6.4.0",
"awesome-typescript-loader": "^3.2.3",
"awesome-typescript-loader": "^4.0.0",
"axios": "^0.17.1",
"babel-core": "^6.26.0",
"babel-loader": "^7.1.2",
"babel-plugin-syntax-dynamic-import": "^6.18.0",
"babel-preset-es2015": "^6.24.1",
"clean-webpack-plugin": "^0.1.19",
@ -34,7 +33,7 @@
"expect.js": "~0.2.0",
"expose-loader": "^0.7.3",
"extract-text-webpack-plugin": "^3.0.0",
"file-loader": "^0.11.2",
"file-loader": "^1.1.11",
"gaze": "^1.1.2",
"glob": "~7.0.0",
"grunt": "1.0.1",
@ -61,7 +60,6 @@
"husky": "^0.14.3",
"jest": "^22.0.4",
"jshint-stylish": "~2.2.1",
"json-loader": "^0.5.7",
"karma": "1.7.0",
"karma-chrome-launcher": "~2.2.0",
"karma-expect": "~1.1.3",
@ -83,16 +81,15 @@
"postcss-loader": "^2.0.6",
"postcss-reporter": "^5.0.0",
"prettier": "1.9.2",
"react-hot-loader": "^4.0.1",
"react-hot-loader": "^4.2.0",
"react-test-renderer": "^16.0.0",
"sass-lint": "^1.10.2",
"sass-loader": "^6.0.6",
"sass-loader": "^7.0.1",
"sinon": "1.17.6",
"style-loader": "^0.20.3",
"style-loader": "^0.21.0",
"systemjs": "0.20.19",
"systemjs-plugin-css": "^0.1.36",
"ts-jest": "^22.0.0",
"ts-loader": "^3.2.0",
"tslint": "^5.8.0",
"tslint-loader": "^3.5.3",
"typescript": "^2.6.2",
@ -105,7 +102,7 @@
},
"scripts": {
"dev": "webpack --progress --colors --config scripts/webpack/webpack.dev.js",
"start": "webpack-dev-server --progress --colors --config scripts/webpack/webpack.dev.js",
"start": "webpack-dev-server --progress --colors --config scripts/webpack/webpack.hot.js",
"watch": "webpack --progress --colors --watch --config scripts/webpack/webpack.dev.js",
"build": "grunt build",
"test": "grunt test",
@ -183,4 +180,4 @@
"resolutions": {
"caniuse-db": "1.0.30000772"
}
}
}


@ -107,7 +107,8 @@ func (hs *HTTPServer) registerRoutes() {
r.Post("/api/snapshots/", bind(m.CreateDashboardSnapshotCommand{}), CreateDashboardSnapshot)
r.Get("/api/snapshot/shared-options/", GetSharingOptions)
r.Get("/api/snapshots/:key", GetDashboardSnapshot)
r.Get("/api/snapshots-delete/:key", reqEditorRole, wrap(DeleteDashboardSnapshot))
r.Get("/api/snapshots-delete/:deleteKey", wrap(DeleteDashboardSnapshotByDeleteKey))
r.Delete("/api/snapshots/:key", reqEditorRole, wrap(DeleteDashboardSnapshot))
// api renew session based on remember cookie
r.Get("/api/login/ping", quota("session"), LoginAPIPing)
@ -374,7 +375,7 @@ func (hs *HTTPServer) registerRoutes() {
}, reqGrafanaAdmin)
// rendering
r.Get("/render/*", reqSignedIn, RenderToPng)
r.Get("/render/*", reqSignedIn, hs.RenderToPng)
// grafana.net proxy
r.Any("/api/gnet/*", reqSignedIn, ProxyGnetRequest)


@ -46,6 +46,31 @@ func loggedInUserScenarioWithRole(desc string, method string, url string, routeP
})
}
func anonymousUserScenario(desc string, method string, url string, routePattern string, fn scenarioFunc) {
Convey(desc+" "+url, func() {
defer bus.ClearBusHandlers()
sc := setupScenarioContext(url)
sc.defaultHandler = wrap(func(c *m.ReqContext) Response {
sc.context = c
if sc.handlerFunc != nil {
return sc.handlerFunc(sc.context)
}
return nil
})
switch method {
case "GET":
sc.m.Get(routePattern, sc.defaultHandler)
case "DELETE":
sc.m.Delete(routePattern, sc.defaultHandler)
}
fn(sc)
})
}
func (sc *scenarioContext) fakeReq(method, url string) *scenarioContext {
sc.resp = httptest.NewRecorder()
req, err := http.NewRequest(method, url, nil)


@ -91,11 +91,31 @@ func GetDashboardSnapshot(c *m.ReqContext) {
c.JSON(200, dto)
}
// GET /api/snapshots-delete/:key
// GET /api/snapshots-delete/:deleteKey
func DeleteDashboardSnapshotByDeleteKey(c *m.ReqContext) Response {
key := c.Params(":deleteKey")
query := &m.GetDashboardSnapshotQuery{DeleteKey: key}
err := bus.Dispatch(query)
if err != nil {
return Error(500, "Failed to get dashboard snapshot", err)
}
cmd := &m.DeleteDashboardSnapshotCommand{DeleteKey: query.Result.DeleteKey}
if err := bus.Dispatch(cmd); err != nil {
return Error(500, "Failed to delete dashboard snapshot", err)
}
return JSON(200, util.DynMap{"message": "Snapshot deleted. It might take an hour before it's cleared from any CDN caches."})
}
// DELETE /api/snapshots/:key
func DeleteDashboardSnapshot(c *m.ReqContext) Response {
key := c.Params(":key")
query := &m.GetDashboardSnapshotQuery{DeleteKey: key}
query := &m.GetDashboardSnapshotQuery{Key: key}
err := bus.Dispatch(query)
if err != nil {
@ -118,13 +138,13 @@ func DeleteDashboardSnapshot(c *m.ReqContext) Response {
return Error(403, "Access denied to this snapshot", nil)
}
cmd := &m.DeleteDashboardSnapshotCommand{DeleteKey: key}
cmd := &m.DeleteDashboardSnapshotCommand{DeleteKey: query.Result.DeleteKey}
if err := bus.Dispatch(cmd); err != nil {
return Error(500, "Failed to delete dashboard snapshot", err)
}
return JSON(200, util.DynMap{"message": "Snapshot deleted. It might take an hour before it's cleared from a CDN cache."})
return JSON(200, util.DynMap{"message": "Snapshot deleted. It might take an hour before it's cleared from any CDN caches."})
}
// GET /api/dashboard/snapshots
@ -154,7 +174,6 @@ func SearchDashboardSnapshots(c *m.ReqContext) Response {
Id: snapshot.Id,
Name: snapshot.Name,
Key: snapshot.Key,
DeleteKey: snapshot.DeleteKey,
OrgId: snapshot.OrgId,
UserId: snapshot.UserId,
External: snapshot.External,


@ -47,15 +47,30 @@ func TestDashboardSnapshotApiEndpoint(t *testing.T) {
Convey("When user has editor role and is not in the ACL", func() {
Convey("Should not be able to delete snapshot", func() {
loggedInUserScenarioWithRole("When calling GET on", "GET", "/api/snapshots-delete/12345", "/api/snapshots-delete/:key", m.ROLE_EDITOR, func(sc *scenarioContext) {
loggedInUserScenarioWithRole("When calling DELETE on", "DELETE", "/api/snapshots/12345", "/api/snapshots/:key", m.ROLE_EDITOR, func(sc *scenarioContext) {
sc.handlerFunc = DeleteDashboardSnapshot
sc.fakeReqWithParams("GET", sc.url, map[string]string{"key": "12345"}).exec()
sc.fakeReqWithParams("DELETE", sc.url, map[string]string{"key": "12345"}).exec()
So(sc.resp.Code, ShouldEqual, 403)
})
})
})
Convey("When user is anonymous", func() {
Convey("Should be able to delete snapshot by deleteKey", func() {
anonymousUserScenario("When calling GET on", "GET", "/api/snapshots-delete/12345", "/api/snapshots-delete/:deleteKey", func(sc *scenarioContext) {
sc.handlerFunc = DeleteDashboardSnapshotByDeleteKey
sc.fakeReqWithParams("GET", sc.url, map[string]string{"deleteKey": "12345"}).exec()
So(sc.resp.Code, ShouldEqual, 200)
respJSON, err := simplejson.NewJson(sc.resp.Body.Bytes())
So(err, ShouldBeNil)
So(respJSON.Get("message").MustString(), ShouldStartWith, "Snapshot deleted")
})
})
})
Convey("When user is editor and dashboard has default ACL", func() {
aclMockResp = []*m.DashboardAclInfoDTO{
{Role: &viewerRole, Permission: m.PERMISSION_VIEW},
@ -63,9 +78,9 @@ func TestDashboardSnapshotApiEndpoint(t *testing.T) {
}
Convey("Should be able to delete a snapshot", func() {
loggedInUserScenarioWithRole("When calling GET on", "GET", "/api/snapshots-delete/12345", "/api/snapshots-delete/:key", m.ROLE_EDITOR, func(sc *scenarioContext) {
loggedInUserScenarioWithRole("When calling DELETE on", "DELETE", "/api/snapshots/12345", "/api/snapshots/:key", m.ROLE_EDITOR, func(sc *scenarioContext) {
sc.handlerFunc = DeleteDashboardSnapshot
sc.fakeReqWithParams("GET", sc.url, map[string]string{"key": "12345"}).exec()
sc.fakeReqWithParams("DELETE", sc.url, map[string]string{"key": "12345"}).exec()
So(sc.resp.Code, ShouldEqual, 200)
respJSON, err := simplejson.NewJson(sc.resp.Body.Bytes())
@ -81,9 +96,9 @@ func TestDashboardSnapshotApiEndpoint(t *testing.T) {
mockSnapshotResult.UserId = TestUserID
Convey("Should be able to delete a snapshot", func() {
loggedInUserScenarioWithRole("When calling GET on", "GET", "/api/snapshots-delete/12345", "/api/snapshots-delete/:key", m.ROLE_EDITOR, func(sc *scenarioContext) {
loggedInUserScenarioWithRole("When calling DELETE on", "DELETE", "/api/snapshots/12345", "/api/snapshots/:key", m.ROLE_EDITOR, func(sc *scenarioContext) {
sc.handlerFunc = DeleteDashboardSnapshot
sc.fakeReqWithParams("GET", sc.url, map[string]string{"key": "12345"}).exec()
sc.fakeReqWithParams("DELETE", sc.url, map[string]string{"key": "12345"}).exec()
So(sc.resp.Code, ShouldEqual, 200)
respJSON, err := simplejson.NewJson(sc.resp.Body.Bytes())


@ -27,6 +27,7 @@ import (
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/rendering"
"github.com/grafana/grafana/pkg/setting"
)
@ -42,8 +43,10 @@ type HTTPServer struct {
cache *gocache.Cache
httpSrv *http.Server
RouteRegister RouteRegister `inject:""`
Bus bus.Bus `inject:""`
RouteRegister RouteRegister `inject:""`
Bus bus.Bus `inject:""`
RenderService rendering.Service `inject:""`
Cfg *setting.Cfg `inject:""`
}
func (hs *HTTPServer) Init() error {
@ -179,7 +182,7 @@ func (hs *HTTPServer) newMacaron() *macaron.Macaron {
hs.mapStatic(m, setting.StaticRootPath, "robots.txt", "robots.txt")
if setting.ImageUploadProvider == "local" {
hs.mapStatic(m, setting.ImagesDir, "", "/public/img/attachments")
hs.mapStatic(m, hs.Cfg.ImagesDir, "", "/public/img/attachments")
}
m.Use(macaron.Renderer(macaron.RenderOptions{


@ -155,5 +155,9 @@ func Logout(c *m.ReqContext) {
c.SetCookie(setting.CookieUserName, "", -1, setting.AppSubUrl+"/")
c.SetCookie(setting.CookieRememberName, "", -1, setting.AppSubUrl+"/")
c.Session.Destory(c.Context)
c.Redirect(setting.AppSubUrl + "/login")
if setting.SignoutRedirectUrl != "" {
c.Redirect(setting.SignoutRedirectUrl)
} else {
c.Redirect(setting.AppSubUrl + "/login")
}
}


@ -3,35 +3,54 @@ package api
import (
"fmt"
"net/http"
"strconv"
"time"
"github.com/grafana/grafana/pkg/components/renderer"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/rendering"
"github.com/grafana/grafana/pkg/util"
)
func RenderToPng(c *m.ReqContext) {
func (hs *HTTPServer) RenderToPng(c *m.ReqContext) {
queryReader, err := util.NewUrlQueryReader(c.Req.URL)
if err != nil {
c.Handle(400, "Render parameters error", err)
return
}
queryParams := fmt.Sprintf("?%s", c.Req.URL.RawQuery)
renderOpts := &renderer.RenderOpts{
Path: c.Params("*") + queryParams,
Width: queryReader.Get("width", "800"),
Height: queryReader.Get("height", "400"),
Timeout: queryReader.Get("timeout", "60"),
width, err := strconv.Atoi(queryReader.Get("width", "800"))
if err != nil {
c.Handle(400, "Render parameters error", fmt.Errorf("Cannot parse width as int: %s", err))
return
}
height, err := strconv.Atoi(queryReader.Get("height", "400"))
if err != nil {
c.Handle(400, "Render parameters error", fmt.Errorf("Cannot parse height as int: %s", err))
return
}
timeout, err := strconv.Atoi(queryReader.Get("timeout", "60"))
if err != nil {
c.Handle(400, "Render parameters error", fmt.Errorf("Cannot parse timeout as int: %s", err))
return
}
result, err := hs.RenderService.Render(c.Req.Context(), rendering.Opts{
Width: width,
Height: height,
Timeout: time.Duration(timeout) * time.Second,
OrgId: c.OrgId,
UserId: c.UserId,
OrgRole: c.OrgRole,
Path: c.Params("*") + queryParams,
Timezone: queryReader.Get("tz", ""),
Encoding: queryReader.Get("encoding", ""),
}
})
pngPath, err := renderer.RenderToPng(renderOpts)
if err != nil && err == renderer.ErrTimeout {
if err != nil && err == rendering.ErrTimeout {
c.Handle(500, err.Error(), err)
return
}
@ -42,5 +61,5 @@ func RenderToPng(c *m.ReqContext) {
}
c.Resp.Header().Set("Content-Type", "image/png")
http.ServeFile(c.Resp, c.Req.Request, pngPath)
http.ServeFile(c.Resp, c.Req.Request, result.FilePath)
}


@ -33,6 +33,7 @@ import (
_ "github.com/grafana/grafana/pkg/services/cleanup"
_ "github.com/grafana/grafana/pkg/services/notifications"
_ "github.com/grafana/grafana/pkg/services/provisioning"
_ "github.com/grafana/grafana/pkg/services/rendering"
_ "github.com/grafana/grafana/pkg/services/search"
_ "github.com/grafana/grafana/pkg/services/sqlstore"
_ "github.com/grafana/grafana/pkg/tracing"


@ -1,161 +0,0 @@
package renderer
import (
"errors"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
"strconv"
"strings"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
type RenderOpts struct {
Path string
Width string
Height string
Timeout string
OrgId int64
UserId int64
OrgRole models.RoleType
Timezone string
IsAlertContext bool
Encoding string
}
var ErrTimeout = errors.New("Timeout error. You can set timeout in seconds with &timeout url parameter")
var rendererLog log.Logger = log.New("png-renderer")
func isoTimeOffsetToPosixTz(isoOffset string) string {
// invert offset
if strings.HasPrefix(isoOffset, "UTC+") {
return strings.Replace(isoOffset, "UTC+", "UTC-", 1)
}
if strings.HasPrefix(isoOffset, "UTC-") {
return strings.Replace(isoOffset, "UTC-", "UTC+", 1)
}
return isoOffset
}
func appendEnviron(baseEnviron []string, name string, value string) []string {
results := make([]string, 0)
prefix := fmt.Sprintf("%s=", name)
for _, v := range baseEnviron {
if !strings.HasPrefix(v, prefix) {
results = append(results, v)
}
}
return append(results, fmt.Sprintf("%s=%s", name, value))
}
func RenderToPng(params *RenderOpts) (string, error) {
rendererLog.Info("Rendering", "path", params.Path)
var executable = "phantomjs"
if runtime.GOOS == "windows" {
executable = executable + ".exe"
}
localDomain := "localhost"
if setting.HttpAddr != setting.DEFAULT_HTTP_ADDR {
localDomain = setting.HttpAddr
}
// &render=1 signals to the legacy redirect layer to
// avoid redirect these requests.
url := fmt.Sprintf("%s://%s:%s/%s&render=1", setting.Protocol, localDomain, setting.HttpPort, params.Path)
binPath, _ := filepath.Abs(filepath.Join(setting.PhantomDir, executable))
scriptPath, _ := filepath.Abs(filepath.Join(setting.PhantomDir, "render.js"))
pngPath, _ := filepath.Abs(filepath.Join(setting.ImagesDir, util.GetRandomString(20)))
pngPath = pngPath + ".png"
orgRole := params.OrgRole
if params.IsAlertContext {
orgRole = models.ROLE_ADMIN
}
renderKey := middleware.AddRenderAuthKey(params.OrgId, params.UserId, orgRole)
defer middleware.RemoveRenderAuthKey(renderKey)
timeout, err := strconv.Atoi(params.Timeout)
if err != nil {
timeout = 15
}
phantomDebugArg := "--debug=false"
if log.GetLogLevelFor("png-renderer") >= log.LvlDebug {
phantomDebugArg = "--debug=true"
}
cmdArgs := []string{
"--ignore-ssl-errors=true",
"--web-security=false",
phantomDebugArg,
scriptPath,
"url=" + url,
"width=" + params.Width,
"height=" + params.Height,
"png=" + pngPath,
"domain=" + localDomain,
"timeout=" + strconv.Itoa(timeout),
"renderKey=" + renderKey,
}
if params.Encoding != "" {
cmdArgs = append([]string{fmt.Sprintf("--output-encoding=%s", params.Encoding)}, cmdArgs...)
}
cmd := exec.Command(binPath, cmdArgs...)
output, err := cmd.StdoutPipe()
if err != nil {
rendererLog.Error("Could not acquire stdout pipe", err)
return "", err
}
cmd.Stderr = cmd.Stdout
if params.Timezone != "" {
baseEnviron := os.Environ()
cmd.Env = appendEnviron(baseEnviron, "TZ", isoTimeOffsetToPosixTz(params.Timezone))
}
err = cmd.Start()
if err != nil {
rendererLog.Error("Could not start command", err)
return "", err
}
logWriter := log.NewLogWriter(rendererLog, log.LvlDebug, "[phantom] ")
go io.Copy(logWriter, output)
done := make(chan error)
go func() {
if err := cmd.Wait(); err != nil {
rendererLog.Error("failed to render an image", "error", err)
}
close(done)
}()
select {
case <-time.After(time.Duration(timeout) * time.Second):
if err := cmd.Process.Kill(); err != nil {
rendererLog.Error("failed to kill", "error", err)
}
return "", ErrTimeout
case <-done:
}
rendererLog.Debug("Image rendered", "path", pngPath)
return pngPath, nil
}


@ -1,35 +0,0 @@
package renderer
//
// import (
// "io/ioutil"
// "os"
// "testing"
//
// . "github.com/smartystreets/goconvey/convey"
// )
//
// func TestPhantomRender(t *testing.T) {
//
// Convey("Can render url", t, func() {
// tempDir, _ := ioutil.TempDir("", "img")
// ipng, err := RenderToPng("http://www.google.com")
// So(err, ShouldBeNil)
// So(exists(png), ShouldEqual, true)
//
// //_, err = os.Stat(store.getFilePathForDashboard("hello"))
// //So(err, ShouldBeNil)
// })
//
// }
//
// func exists(path string) bool {
// _, err := os.Stat(path)
// if err == nil {
// return true
// }
// if os.IsNotExist(err) {
// return false
// }
// return false
// }


@ -332,6 +332,8 @@ func updateTotalStats() {
M_StatTotal_Orgs.Set(float64(statsQuery.Result.Orgs))
}
var usageStatsURL = "https://stats.grafana.org/grafana-usage-report"
func sendUsageStats() {
if !setting.ReportingEnabled {
return
@ -366,6 +368,12 @@ func sendUsageStats() {
metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
metrics["stats.datasources.count"] = statsQuery.Result.Datasources
metrics["stats.stars.count"] = statsQuery.Result.Stars
metrics["stats.folders.count"] = statsQuery.Result.Folders
metrics["stats.dashboard_permissions.count"] = statsQuery.Result.DashboardPermissions
metrics["stats.folder_permissions.count"] = statsQuery.Result.FolderPermissions
metrics["stats.provisioned_dashboards.count"] = statsQuery.Result.ProvisionedDashboards
metrics["stats.snapshots.count"] = statsQuery.Result.Snapshots
metrics["stats.teams.count"] = statsQuery.Result.Teams
dsStats := models.GetDataSourceStatsQuery{}
if err := bus.Dispatch(&dsStats); err != nil {
@ -386,9 +394,38 @@ func sendUsageStats() {
}
metrics["stats.ds.other.count"] = dsOtherCount
dsAccessStats := models.GetDataSourceAccessStatsQuery{}
if err := bus.Dispatch(&dsAccessStats); err != nil {
metricsLogger.Error("Failed to get datasource access stats", "error", err)
return
}
// send access counters for each data source
// but ignore any custom data sources
// as sending that name could be sensitive information
dsAccessOtherCount := make(map[string]int64)
for _, dsAccessStat := range dsAccessStats.Result {
if dsAccessStat.Access == "" {
continue
}
access := strings.ToLower(dsAccessStat.Access)
if models.IsKnownDataSourcePlugin(dsAccessStat.Type) {
metrics["stats.ds_access."+dsAccessStat.Type+"."+access+".count"] = dsAccessStat.Count
} else {
old := dsAccessOtherCount[access]
dsAccessOtherCount[access] = old + dsAccessStat.Count
}
}
for access, count := range dsAccessOtherCount {
metrics["stats.ds_access.other."+access+".count"] = count
}
out, _ := json.MarshalIndent(report, "", " ")
data := bytes.NewBuffer(out)
client := http.Client{Timeout: 5 * time.Second}
go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
go client.Post(usageStatsURL, "application/json", data)
}

pkg/metrics/metrics_test.go

@ -0,0 +1,222 @@
package metrics
import (
"bytes"
"io/ioutil"
"runtime"
"sync"
"testing"
"time"
"net/http"
"net/http/httptest"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/setting"
. "github.com/smartystreets/goconvey/convey"
)
func TestMetrics(t *testing.T) {
Convey("Test send usage stats", t, func() {
var getSystemStatsQuery *models.GetSystemStatsQuery
bus.AddHandler("test", func(query *models.GetSystemStatsQuery) error {
query.Result = &models.SystemStats{
Dashboards: 1,
Datasources: 2,
Users: 3,
ActiveUsers: 4,
Orgs: 5,
Playlists: 6,
Alerts: 7,
Stars: 8,
Folders: 9,
DashboardPermissions: 10,
FolderPermissions: 11,
ProvisionedDashboards: 12,
Snapshots: 13,
Teams: 14,
}
getSystemStatsQuery = query
return nil
})
var getDataSourceStatsQuery *models.GetDataSourceStatsQuery
bus.AddHandler("test", func(query *models.GetDataSourceStatsQuery) error {
query.Result = []*models.DataSourceStats{
{
Type: models.DS_ES,
Count: 9,
},
{
Type: models.DS_PROMETHEUS,
Count: 10,
},
{
Type: "unknown_ds",
Count: 11,
},
{
Type: "unknown_ds2",
Count: 12,
},
}
getDataSourceStatsQuery = query
return nil
})
var getDataSourceAccessStatsQuery *models.GetDataSourceAccessStatsQuery
bus.AddHandler("test", func(query *models.GetDataSourceAccessStatsQuery) error {
query.Result = []*models.DataSourceAccessStats{
{
Type: models.DS_ES,
Access: "direct",
Count: 1,
},
{
Type: models.DS_ES,
Access: "proxy",
Count: 2,
},
{
Type: models.DS_PROMETHEUS,
Access: "proxy",
Count: 3,
},
{
Type: "unknown_ds",
Access: "proxy",
Count: 4,
},
{
Type: "unknown_ds2",
Access: "",
Count: 5,
},
{
Type: "unknown_ds3",
Access: "direct",
Count: 6,
},
{
Type: "unknown_ds4",
Access: "direct",
Count: 7,
},
{
Type: "unknown_ds5",
Access: "proxy",
Count: 8,
},
}
getDataSourceAccessStatsQuery = query
return nil
})
var wg sync.WaitGroup
var responseBuffer *bytes.Buffer
var req *http.Request
ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
req = r
buf, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("Failed to read response body, err=%v", err)
}
responseBuffer = bytes.NewBuffer(buf)
wg.Done()
}))
usageStatsURL = ts.URL
sendUsageStats()
Convey("Given reporting not enabled and sending usage stats", func() {
setting.ReportingEnabled = false
sendUsageStats()
Convey("Should not gather stats or call http endpoint", func() {
So(getSystemStatsQuery, ShouldBeNil)
So(getDataSourceStatsQuery, ShouldBeNil)
So(getDataSourceAccessStatsQuery, ShouldBeNil)
So(req, ShouldBeNil)
})
})
Convey("Given reporting enabled and sending usage stats", func() {
setting.ReportingEnabled = true
setting.BuildVersion = "5.0.0"
wg.Add(1)
sendUsageStats()
Convey("Should gather stats and call http endpoint", func() {
if waitTimeout(&wg, 2*time.Second) {
t.Fatalf("Timed out waiting for http request")
}
So(getSystemStatsQuery, ShouldNotBeNil)
So(getDataSourceStatsQuery, ShouldNotBeNil)
So(getDataSourceAccessStatsQuery, ShouldNotBeNil)
So(req, ShouldNotBeNil)
So(req.Method, ShouldEqual, http.MethodPost)
So(req.Header.Get("Content-Type"), ShouldEqual, "application/json")
So(responseBuffer, ShouldNotBeNil)
j, err := simplejson.NewFromReader(responseBuffer)
So(err, ShouldBeNil)
So(j.Get("version").MustString(), ShouldEqual, "5_0_0")
So(j.Get("os").MustString(), ShouldEqual, runtime.GOOS)
So(j.Get("arch").MustString(), ShouldEqual, runtime.GOARCH)
metrics := j.Get("metrics")
So(metrics.Get("stats.dashboards.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Dashboards)
So(metrics.Get("stats.users.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Users)
So(metrics.Get("stats.orgs.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Orgs)
So(metrics.Get("stats.playlist.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Playlists)
So(metrics.Get("stats.plugins.apps.count").MustInt(), ShouldEqual, len(plugins.Apps))
So(metrics.Get("stats.plugins.panels.count").MustInt(), ShouldEqual, len(plugins.Panels))
So(metrics.Get("stats.plugins.datasources.count").MustInt(), ShouldEqual, len(plugins.DataSources))
So(metrics.Get("stats.alerts.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Alerts)
So(metrics.Get("stats.active_users.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.ActiveUsers)
So(metrics.Get("stats.datasources.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Datasources)
So(metrics.Get("stats.stars.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Stars)
So(metrics.Get("stats.folders.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Folders)
So(metrics.Get("stats.dashboard_permissions.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.DashboardPermissions)
So(metrics.Get("stats.folder_permissions.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.FolderPermissions)
So(metrics.Get("stats.provisioned_dashboards.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.ProvisionedDashboards)
So(metrics.Get("stats.snapshots.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Snapshots)
So(metrics.Get("stats.teams.count").MustInt(), ShouldEqual, getSystemStatsQuery.Result.Teams)
So(metrics.Get("stats.ds."+models.DS_ES+".count").MustInt(), ShouldEqual, 9)
So(metrics.Get("stats.ds."+models.DS_PROMETHEUS+".count").MustInt(), ShouldEqual, 10)
So(metrics.Get("stats.ds.other.count").MustInt(), ShouldEqual, 11+12)
So(metrics.Get("stats.ds_access."+models.DS_ES+".direct.count").MustInt(), ShouldEqual, 1)
So(metrics.Get("stats.ds_access."+models.DS_ES+".proxy.count").MustInt(), ShouldEqual, 2)
So(metrics.Get("stats.ds_access."+models.DS_PROMETHEUS+".proxy.count").MustInt(), ShouldEqual, 3)
So(metrics.Get("stats.ds_access.other.direct.count").MustInt(), ShouldEqual, 6+7)
So(metrics.Get("stats.ds_access.other.proxy.count").MustInt(), ShouldEqual, 4+8)
})
})
Reset(func() {
ts.Close()
})
})
}
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return false // completed normally
case <-time.After(timeout):
return true // timed out
}
}


@ -49,7 +49,6 @@ func GetContextHandler() macaron.Handler {
c.Map(ctx)
// update last seen at
// update last seen every 5min
if ctx.ShouldUpdateLastSeenAt() {
ctx.Logger.Debug("Updating last user_seen_at", "user_id", ctx.UserId)


@ -2,6 +2,7 @@ package middleware
import (
"sync"
"time"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/util"
@ -28,6 +29,7 @@ func initContextWithRenderAuth(ctx *m.ReqContext) bool {
ctx.IsSignedIn = true
ctx.SignedInUser = renderUser
ctx.IsRenderCall = true
ctx.LastSeenAt = time.Now()
return true
}


@ -29,7 +29,6 @@ type DashboardSnapshotDTO struct {
Id int64 `json:"id"`
Name string `json:"name"`
Key string `json:"key"`
DeleteKey string `json:"deleteKey"`
OrgId int64 `json:"orgId"`
UserId int64 `json:"userId"`
External bool `json:"external"`


@ -1,14 +1,20 @@
package models
type SystemStats struct {
Dashboards int64
Datasources int64
Users int64
ActiveUsers int64
Orgs int64
Playlists int64
Alerts int64
Stars int64
Dashboards int64
Datasources int64
Users int64
ActiveUsers int64
Orgs int64
Playlists int64
Alerts int64
Stars int64
Snapshots int64
Teams int64
DashboardPermissions int64
FolderPermissions int64
Folders int64
ProvisionedDashboards int64
}
type DataSourceStats struct {
@ -24,6 +30,16 @@ type GetDataSourceStatsQuery struct {
Result []*DataSourceStats
}
type DataSourceAccessStats struct {
Type string
Access string
Count int64
}
type GetDataSourceAccessStatsQuery struct {
Result []*DataSourceAccessStats
}
type AdminStats struct {
Users int `json:"users"`
Orgs int `json:"orgs"`
@ -40,3 +56,11 @@ type AdminStats struct {
type GetAdminStatsQuery struct {
Result *AdminStats
}
type SystemUserCountStats struct {
Count int64
}
type GetSystemUserCountStatsQuery struct {
Result *SystemUserCountStats
}


@ -0,0 +1,19 @@
package plugins
import (
"fmt"
"runtime"
"strings"
)
func ComposePluginStartCommmand(executable string) string {
os := strings.ToLower(runtime.GOOS)
arch := runtime.GOARCH
extension := ""
if os == "windows" {
extension = ".exe"
}
return fmt.Sprintf("%s_%s_%s%s", executable, os, strings.ToLower(arch), extension)
}
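As a sketch only (this test file is not part of the commit), the helper's behaviour mirrors the removed composeBinaryName test further down: the result is `<executable>_<goos>_<goarch>`, lower-cased, with a `.exe` suffix on Windows.

```go
package plugins

import (
	"runtime"
	"strings"
	"testing"
)

// Illustrative check: ComposePluginStartCommmand derives the name from the host
// OS/arch, so the expected value is rebuilt here the same way the helper does.
func TestComposePluginStartCommmand(t *testing.T) {
	have := ComposePluginStartCommmand("simple-json")

	want := "simple-json_" + strings.ToLower(runtime.GOOS) + "_" + strings.ToLower(runtime.GOARCH)
	if runtime.GOOS == "windows" {
		want += ".exe"
	}

	if have != want {
		t.Errorf("expected %s got %s", want, have)
	}
}
```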


@ -5,12 +5,12 @@ import (
"errors"
"fmt"
"github.com/grafana/grafana-plugin-model/go/datasource"
"github.com/grafana/grafana/pkg/components/null"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana_plugin_model/go/datasource"
)
func NewDatasourcePluginWrapper(log log.Logger, plugin datasource.DatasourcePlugin) *DatasourcePluginWrapper {


@ -3,9 +3,9 @@ package wrapper
import (
"testing"
"github.com/grafana/grafana-plugin-model/go/datasource"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana_plugin_model/go/datasource"
)
func TestMapTables(t *testing.T) {


@ -3,20 +3,17 @@ package plugins
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/grafana/grafana-plugin-model/go/datasource"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins/datasource/wrapper"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana_plugin_model/go/datasource"
plugin "github.com/hashicorp/go-plugin"
)
@ -66,16 +63,6 @@ var handshakeConfig = plugin.HandshakeConfig{
MagicCookieValue: "datasource",
}
func composeBinaryName(executable, os, arch string) string {
var extension string
os = strings.ToLower(os)
if os == "windows" {
extension = ".exe"
}
return fmt.Sprintf("%s_%s_%s%s", executable, os, strings.ToLower(arch), extension)
}
func (p *DataSourcePlugin) startBackendPlugin(ctx context.Context, log log.Logger) error {
p.log = log.New("plugin-id", p.Id)
@ -88,7 +75,7 @@ func (p *DataSourcePlugin) startBackendPlugin(ctx context.Context, log log.Logge
}
func (p *DataSourcePlugin) spawnSubProcess() error {
cmd := composeBinaryName(p.Executable, runtime.GOOS, runtime.GOARCH)
cmd := ComposePluginStartCommmand(p.Executable)
fullpath := path.Join(p.PluginDir, cmd)
p.client = plugin.NewClient(&plugin.ClientConfig{


@ -1,35 +0,0 @@
package plugins
import (
"testing"
)
func TestComposeBinaryName(t *testing.T) {
tests := []struct {
name string
os string
arch string
expectedPath string
}{
{
name: "simple-json",
os: "linux",
arch: "amd64",
expectedPath: `simple-json_linux_amd64`,
},
{
name: "simple-json",
os: "windows",
arch: "amd64",
expectedPath: `simple-json_windows_amd64.exe`,
},
}
for _, v := range tests {
have := composeBinaryName(v.name, v.os, v.arch)
if have != v.expectedPath {
t.Errorf("expected %s got %s", v.expectedPath, have)
}
}
}


@ -26,6 +26,7 @@ var (
Apps map[string]*AppPlugin
Plugins map[string]*PluginBase
PluginTypes map[string]interface{}
Renderer *RendererPlugin
GrafanaLatestVersion string
GrafanaHasUpdate bool
@ -58,6 +59,7 @@ func (pm *PluginManager) Init() error {
"panel": PanelPlugin{},
"datasource": DataSourcePlugin{},
"app": AppPlugin{},
"renderer": RendererPlugin{},
}
pm.log.Info("Starting plugin search")


@ -0,0 +1,22 @@
package plugins
import "encoding/json"
type RendererPlugin struct {
PluginBase
Executable string `json:"executable,omitempty"`
}
func (r *RendererPlugin) Load(decoder *json.Decoder, pluginDir string) error {
if err := decoder.Decode(&r); err != nil {
return err
}
if err := r.registerPlugin(pluginDir); err != nil {
return err
}
Renderer = r
return nil
}


@ -12,11 +12,14 @@ import (
"github.com/benbjohnson/clock"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/rendering"
"github.com/grafana/grafana/pkg/setting"
"golang.org/x/sync/errgroup"
)
type AlertingService struct {
RenderService rendering.Service `inject:""`
execQueue chan *Job
//clock clock.Clock
ticker *Ticker
@ -48,7 +51,7 @@ func (e *AlertingService) Init() error {
e.evalHandler = NewEvalHandler()
e.ruleReader = NewRuleReader()
e.log = log.New("alerting.engine")
e.resultHandler = NewResultHandler()
e.resultHandler = NewResultHandler(e.RenderService)
return nil
}


@ -9,91 +9,93 @@ import (
. "github.com/smartystreets/goconvey/convey"
)
func TestStateIsUpdatedWhenNeeded(t *testing.T) {
ctx := NewEvalContext(context.TODO(), &Rule{Conditions: []Condition{&conditionStub{firing: true}}})
t.Run("ok -> alerting", func(t *testing.T) {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.State = models.AlertStateAlerting
if !ctx.ShouldUpdateAlertState() {
t.Fatalf("expected should updated to be true")
}
})
t.Run("ok -> ok", func(t *testing.T) {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.State = models.AlertStateOK
if ctx.ShouldUpdateAlertState() {
t.Fatalf("expected should updated to be false")
}
})
}
func TestAlertingEvalContext(t *testing.T) {
Convey("Eval context", t, func() {
Convey("Should compute and replace properly new rule state", t, func() {
ctx := NewEvalContext(context.TODO(), &Rule{Conditions: []Condition{&conditionStub{firing: true}}})
dummieError := fmt.Errorf("dummie error")
Convey("Should update alert state when needed", func() {
Convey("ok -> alerting", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Firing = true
Convey("ok -> alerting", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.State = models.AlertStateAlerting
So(ctx.ShouldUpdateAlertState(), ShouldBeTrue)
})
Convey("ok -> ok", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.State = models.AlertStateOK
So(ctx.ShouldUpdateAlertState(), ShouldBeFalse)
})
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateAlerting)
})
Convey("Should compute and replace properly new rule state", func() {
dummieError := fmt.Errorf("dummie error")
Convey("ok -> error(alerting)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Error = dummieError
ctx.Rule.ExecutionErrorState = models.ExecutionErrorSetAlerting
Convey("ok -> alerting", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Firing = true
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateAlerting)
})
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateAlerting)
})
Convey("ok -> error(keep_last)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Error = dummieError
ctx.Rule.ExecutionErrorState = models.ExecutionErrorKeepState
Convey("ok -> error(alerting)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Error = dummieError
ctx.Rule.ExecutionErrorState = models.ExecutionErrorSetAlerting
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateOK)
})
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateAlerting)
})
Convey("pending -> error(keep_last)", func() {
ctx.PrevAlertState = models.AlertStatePending
ctx.Error = dummieError
ctx.Rule.ExecutionErrorState = models.ExecutionErrorKeepState
Convey("ok -> error(keep_last)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Error = dummieError
ctx.Rule.ExecutionErrorState = models.ExecutionErrorKeepState
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStatePending)
})
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateOK)
})
Convey("ok -> no_data(alerting)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.NoDataState = models.NoDataSetAlerting
ctx.NoDataFound = true
Convey("pending -> error(keep_last)", func() {
ctx.PrevAlertState = models.AlertStatePending
ctx.Error = dummieError
ctx.Rule.ExecutionErrorState = models.ExecutionErrorKeepState
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateAlerting)
})
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStatePending)
})
Convey("ok -> no_data(keep_last)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.NoDataState = models.NoDataKeepState
ctx.NoDataFound = true
Convey("ok -> no_data(alerting)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.NoDataState = models.NoDataSetAlerting
ctx.NoDataFound = true
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateOK)
})
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateAlerting)
})
Convey("pending -> no_data(keep_last)", func() {
ctx.PrevAlertState = models.AlertStatePending
ctx.Rule.NoDataState = models.NoDataKeepState
ctx.NoDataFound = true
Convey("ok -> no_data(keep_last)", func() {
ctx.PrevAlertState = models.AlertStateOK
ctx.Rule.NoDataState = models.NoDataKeepState
ctx.NoDataFound = true
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStateOK)
})
Convey("pending -> no_data(keep_last)", func() {
ctx.PrevAlertState = models.AlertStatePending
ctx.Rule.NoDataState = models.NoDataKeepState
ctx.NoDataFound = true
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStatePending)
})
ctx.Rule.State = ctx.GetNewState()
So(ctx.Rule.State, ShouldEqual, models.AlertStatePending)
})
})
}

View File

@ -3,14 +3,15 @@ package alerting
import (
"errors"
"fmt"
"time"
"golang.org/x/sync/errgroup"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/components/imguploader"
"github.com/grafana/grafana/pkg/components/renderer"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/metrics"
"github.com/grafana/grafana/pkg/services/rendering"
m "github.com/grafana/grafana/pkg/models"
)
@ -27,18 +28,16 @@ type NotificationService interface {
SendIfNeeded(context *EvalContext) error
}
func NewNotificationService() NotificationService {
return newNotificationService()
func NewNotificationService(renderService rendering.Service) NotificationService {
return &notificationService{
log: log.New("alerting.notifier"),
renderService: renderService,
}
}
type notificationService struct {
log log.Logger
}
func newNotificationService() *notificationService {
return &notificationService{
log: log.New("alerting.notifier"),
}
log log.Logger
renderService rendering.Service
}
func (n *notificationService) SendIfNeeded(context *EvalContext) error {
@ -79,26 +78,27 @@ func (n *notificationService) uploadImage(context *EvalContext) (err error) {
return err
}
renderOpts := &renderer.RenderOpts{
Width: "800",
Height: "400",
Timeout: "30",
OrgId: context.Rule.OrgId,
IsAlertContext: true,
renderOpts := rendering.Opts{
Width: 1000,
Height: 500,
Timeout: time.Second * 30,
OrgId: context.Rule.OrgId,
OrgRole: m.ROLE_ADMIN,
}
ref, err := context.GetDashboardUID()
if err != nil {
return err
}
renderOpts.Path = fmt.Sprintf("d-solo/%s/%s?panelId=%d", ref.Uid, ref.Slug, context.Rule.PanelId)
imagePath, err := renderer.RenderToPng(renderOpts)
result, err := n.renderService.Render(context.Ctx, renderOpts)
if err != nil {
return err
}
context.ImageOnDiskPath = imagePath
context.ImageOnDiskPath = result.FilePath
context.ImagePublicUrl, err = uploader.Upload(context.Ctx, context.ImageOnDiskPath)
if err != nil {
return err

View File

@ -9,6 +9,7 @@ import (
"github.com/grafana/grafana/pkg/metrics"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/annotations"
"github.com/grafana/grafana/pkg/services/rendering"
)
type ResultHandler interface {
@ -20,10 +21,10 @@ type DefaultResultHandler struct {
log log.Logger
}
func NewResultHandler() *DefaultResultHandler {
func NewResultHandler(renderService rendering.Service) *DefaultResultHandler {
return &DefaultResultHandler{
log: log.New("alerting.resultHandler"),
notifier: NewNotificationService(),
notifier: NewNotificationService(renderService),
}
}

View File

@ -24,7 +24,7 @@ func init() {
}
func handleNotificationTestCommand(cmd *NotificationTestCommand) error {
notifier := newNotificationService()
notifier := NewNotificationService(nil).(*notificationService)
model := &m.AlertNotification{
Name: cmd.Name,

View File

@ -0,0 +1,68 @@
package rendering
import (
"context"
"io"
"net"
"net/http"
"net/url"
"os"
"strconv"
"time"
)
var netTransport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
DualStack: true,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
}
func (rs *RenderingService) renderViaHttp(ctx context.Context, opts Opts) (*RenderResult, error) {
filePath := rs.getFilePathForNewImage()
var netClient = &http.Client{
Timeout: opts.Timeout,
Transport: netTransport,
}
rendererUrl, err := url.Parse(rs.Cfg.RendererUrl)
if err != nil {
return nil, err
}
queryParams := rendererUrl.Query()
queryParams.Add("url", rs.getURL(opts.Path))
queryParams.Add("renderKey", rs.getRenderKey(opts.UserId, opts.OrgId, opts.OrgRole))
queryParams.Add("width", strconv.Itoa(opts.Width))
queryParams.Add("height", strconv.Itoa(opts.Height))
queryParams.Add("domain", rs.getLocalDomain())
queryParams.Add("timezone", isoTimeOffsetToPosixTz(opts.Timezone))
queryParams.Add("encoding", opts.Encoding)
queryParams.Add("timeout", strconv.Itoa(int(opts.Timeout.Seconds())))
rendererUrl.RawQuery = queryParams.Encode()
req, err := http.NewRequest("GET", rendererUrl.String(), nil)
if err != nil {
return nil, err
}
// make request to renderer server
resp, err := netClient.Do(req)
if err != nil {
return nil, err
}
// save response to file
defer resp.Body.Close()
out, err := os.Create(filePath)
if err != nil {
return nil, err
}
defer out.Close()
if _, err := io.Copy(out, resp.Body); err != nil {
return nil, err
}
return &RenderResult{FilePath: filePath}, nil
}

View File

@ -0,0 +1,34 @@
package rendering
import (
"context"
"errors"
"time"
"github.com/grafana/grafana/pkg/models"
)
var ErrTimeout = errors.New("Timeout error. You can set timeout in seconds with &timeout url parameter")
var ErrNoRenderer = errors.New("No renderer plugin found nor is an external render server configured")
type Opts struct {
Width int
Height int
Timeout time.Duration
OrgId int64
UserId int64
OrgRole models.RoleType
Path string
Encoding string
Timezone string
}
type RenderResult struct {
FilePath string
}
type renderFunc func(ctx context.Context, options Opts) (*RenderResult, error)
type Service interface {
Render(ctx context.Context, opts Opts) (*RenderResult, error)
}
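For orientation, a minimal usage sketch of the new rendering.Service (not part of this diff). It mirrors what the alert notifier above now does; the renderPanel helper, its arguments and the context/time/models/rendering imports are assumptions for illustration only.
// renderPanel is a sketch: render a single panel to a PNG and return its file path.
func renderPanel(ctx context.Context, svc rendering.Service, orgId int64, dashboardPath string) (string, error) {
	result, err := svc.Render(ctx, rendering.Opts{
		Width:   1000,
		Height:  500,
		Timeout: 30 * time.Second,
		OrgId:   orgId,
		OrgRole: models.ROLE_ADMIN,
		Path:    dashboardPath, // e.g. "d-solo/<uid>/<slug>?panelId=2"
	})
	if err != nil {
		return "", err
	}
	return result.FilePath, nil
}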

View File

@ -0,0 +1,104 @@
package rendering
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/middleware"
)
func (rs *RenderingService) renderViaPhantomJS(ctx context.Context, opts Opts) (*RenderResult, error) {
rs.log.Info("Rendering", "path", opts.Path)
var executable = "phantomjs"
if runtime.GOOS == "windows" {
executable = executable + ".exe"
}
url := rs.getURL(opts.Path)
binPath, _ := filepath.Abs(filepath.Join(rs.Cfg.PhantomDir, executable))
scriptPath, _ := filepath.Abs(filepath.Join(rs.Cfg.PhantomDir, "render.js"))
pngPath := rs.getFilePathForNewImage()
renderKey := middleware.AddRenderAuthKey(opts.OrgId, opts.UserId, opts.OrgRole)
defer middleware.RemoveRenderAuthKey(renderKey)
phantomDebugArg := "--debug=false"
if log.GetLogLevelFor("renderer") >= log.LvlDebug {
phantomDebugArg = "--debug=true"
}
cmdArgs := []string{
"--ignore-ssl-errors=true",
"--web-security=false",
phantomDebugArg,
scriptPath,
fmt.Sprintf("url=%v", url),
fmt.Sprintf("width=%v", opts.Width),
fmt.Sprintf("height=%v", opts.Height),
fmt.Sprintf("png=%v", pngPath),
fmt.Sprintf("domain=%v", rs.getLocalDomain()),
fmt.Sprintf("timeout=%v", opts.Timeout.Seconds()),
fmt.Sprintf("renderKey=%v", renderKey),
}
if opts.Encoding != "" {
cmdArgs = append([]string{fmt.Sprintf("--output-encoding=%s", opts.Encoding)}, cmdArgs...)
}
commandCtx, _ := context.WithTimeout(ctx, opts.Timeout+time.Second*2)
cmd := exec.CommandContext(commandCtx, binPath, cmdArgs...)
cmd.Stderr = cmd.Stdout
if opts.Timezone != "" {
baseEnviron := os.Environ()
cmd.Env = appendEnviron(baseEnviron, "TZ", isoTimeOffsetToPosixTz(opts.Timezone))
}
out, err := cmd.Output()
// check for timeout first
if commandCtx.Err() == context.DeadlineExceeded {
rs.log.Info("Rendering timed out")
return nil, ErrTimeout
}
if err != nil {
rs.log.Error("Phantomjs exited with non zero exit code", "error", err)
return nil, err
}
rs.log.Debug("Phantomjs output", "out", string(out))
rs.log.Debug("Image rendered", "path", pngPath)
return &RenderResult{FilePath: pngPath}, nil
}
func isoTimeOffsetToPosixTz(isoOffset string) string {
// invert offset
if strings.HasPrefix(isoOffset, "UTC+") {
return strings.Replace(isoOffset, "UTC+", "UTC-", 1)
}
if strings.HasPrefix(isoOffset, "UTC-") {
return strings.Replace(isoOffset, "UTC-", "UTC+", 1)
}
return isoOffset
}
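// Illustration, not part of this diff: POSIX TZ offsets carry the opposite
// sign of ISO/UTC offsets, which is why the helper above swaps "+" and "-".
// For a browser reporting UTC+02:00 (two hours east of UTC):
//
//   isoTimeOffsetToPosixTz("UTC+02:00")        == "UTC-02:00"        // what the TZ env var expects
//   isoTimeOffsetToPosixTz("Europe/Stockholm") == "Europe/Stockholm" // named zones pass through unchanged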
func appendEnviron(baseEnviron []string, name string, value string) []string {
results := make([]string, 0)
prefix := fmt.Sprintf("%s=", name)
for _, v := range baseEnviron {
if !strings.HasPrefix(v, prefix) {
results = append(results, v)
}
}
return append(results, fmt.Sprintf("%s=%s", name, value))
}

View File

@ -0,0 +1,95 @@
package rendering
import (
"context"
"fmt"
"os/exec"
"path"
"time"
pluginModel "github.com/grafana/grafana-plugin-model/go/renderer"
"github.com/grafana/grafana/pkg/plugins"
plugin "github.com/hashicorp/go-plugin"
)
func (rs *RenderingService) startPlugin(ctx context.Context) error {
cmd := plugins.ComposePluginStartCommmand("plugin_start")
fullpath := path.Join(rs.pluginInfo.PluginDir, cmd)
var handshakeConfig = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "grafana_plugin_type",
MagicCookieValue: "renderer",
}
rs.log.Info("Renderer plugin found, starting", "cmd", cmd)
rs.pluginClient = plugin.NewClient(&plugin.ClientConfig{
HandshakeConfig: handshakeConfig,
Plugins: map[string]plugin.Plugin{
plugins.Renderer.Id: &pluginModel.RendererPluginImpl{},
},
Cmd: exec.Command(fullpath),
AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
Logger: plugins.LogWrapper{Logger: rs.log},
})
rpcClient, err := rs.pluginClient.Client()
if err != nil {
return err
}
raw, err := rpcClient.Dispense(rs.pluginInfo.Id)
if err != nil {
return err
}
rs.grpcPlugin = raw.(pluginModel.RendererPlugin)
return nil
}
func (rs *RenderingService) watchAndRestartPlugin(ctx context.Context) error {
ticker := time.NewTicker(time.Second * 1)
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
if rs.pluginClient.Exited() {
err := rs.startPlugin(ctx)
rs.log.Debug("Render plugin existed, restarting...")
if err != nil {
rs.log.Error("Failed to start render plugin", err)
}
}
}
}
}
func (rs *RenderingService) renderViaPlugin(ctx context.Context, opts Opts) (*RenderResult, error) {
pngPath := rs.getFilePathForNewImage()
rsp, err := rs.grpcPlugin.Render(ctx, &pluginModel.RenderRequest{
Url: rs.getURL(opts.Path),
Width: int32(opts.Width),
Height: int32(opts.Height),
FilePath: pngPath,
Timeout: int32(opts.Timeout.Seconds()),
RenderKey: rs.getRenderKey(opts.UserId, opts.OrgId, opts.OrgRole),
Encoding: opts.Encoding,
Timezone: isoTimeOffsetToPosixTz(opts.Timezone),
Domain: rs.getLocalDomain(),
})
if err != nil {
return nil, err
}
if rsp.Error != "" {
return nil, fmt.Errorf("Rendering failed: %v", rsp.Error)
}
return &RenderResult{FilePath: pngPath}, err
}

View File

@ -0,0 +1,99 @@
package rendering
import (
"context"
"fmt"
"path/filepath"
plugin "github.com/hashicorp/go-plugin"
pluginModel "github.com/grafana/grafana-plugin-model/go/renderer"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
func init() {
registry.RegisterService(&RenderingService{})
}
type RenderingService struct {
log log.Logger
pluginClient *plugin.Client
grpcPlugin pluginModel.RendererPlugin
pluginInfo *plugins.RendererPlugin
renderAction renderFunc
Cfg *setting.Cfg `inject:""`
}
func (rs *RenderingService) Init() error {
rs.log = log.New("rendering")
return nil
}
func (rs *RenderingService) Run(ctx context.Context) error {
if rs.Cfg.RendererUrl != "" {
rs.log.Info("Backend rendering via external http server")
rs.renderAction = rs.renderViaHttp
<-ctx.Done()
return nil
}
if plugins.Renderer == nil {
rs.renderAction = rs.renderViaPhantomJS
<-ctx.Done()
return nil
}
rs.pluginInfo = plugins.Renderer
if err := rs.startPlugin(ctx); err != nil {
return err
}
rs.renderAction = rs.renderViaPlugin
err := rs.watchAndRestartPlugin(ctx)
if rs.pluginClient != nil {
rs.log.Debug("Killing renderer plugin process")
rs.pluginClient.Kill()
}
return err
}
func (rs *RenderingService) Render(ctx context.Context, opts Opts) (*RenderResult, error) {
if rs.renderAction != nil {
return rs.renderAction(ctx, opts)
} else {
return nil, fmt.Errorf("No renderer found")
}
}
func (rs *RenderingService) getFilePathForNewImage() string {
pngPath, _ := filepath.Abs(filepath.Join(rs.Cfg.ImagesDir, util.GetRandomString(20)))
return pngPath + ".png"
}
func (rs *RenderingService) getURL(path string) string {
// &render=1 signals to the legacy redirect layer to
return fmt.Sprintf("%s://%s:%s/%s&render=1", setting.Protocol, rs.getLocalDomain(), setting.HttpPort, path)
}
func (rs *RenderingService) getLocalDomain() string {
if setting.HttpAddr != setting.DEFAULT_HTTP_ADDR {
return setting.HttpAddr
}
return "localhost"
}
func (rs *RenderingService) getRenderKey(orgId, userId int64, orgRole models.RoleType) string {
return middleware.AddRenderAuthKey(orgId, userId, orgRole)
}
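A hedged sketch, not part of this diff, of how another background service could consume the renderer. The thumbnailService name is hypothetical; it assumes the same registry/inject wiring that RenderingService itself uses for its Cfg field.
type thumbnailService struct {
	RenderService rendering.Service `inject:""` // filled in by the service registry
	log           log.Logger
}

func init() {
	registry.RegisterService(&thumbnailService{})
}

func (s *thumbnailService) Init() error {
	s.log = log.New("thumbnails")
	return nil
}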

View File

@ -86,13 +86,13 @@ func (ss *SqlStore) Init() error {
}
func (ss *SqlStore) ensureAdminUser() error {
statsQuery := m.GetSystemStatsQuery{}
systemUserCountQuery := m.GetSystemUserCountStatsQuery{}
if err := bus.Dispatch(&statsQuery); err != nil {
if err := bus.Dispatch(&systemUserCountQuery); err != nil {
fmt.Errorf("Could not determine if admin user exists: %v", err)
}
if statsQuery.Result.Users > 0 {
if systemUserCountQuery.Result.Count > 0 {
return nil
}
@ -277,8 +277,8 @@ func InitTestDB(t *testing.T) *SqlStore {
t.Fatalf("Failed to init test database: %v", err)
}
//// sqlstore.engine.DatabaseTZ = time.UTC
//// sqlstore.engine.TZLocation = time.UTC
sqlstore.engine.DatabaseTZ = time.UTC
sqlstore.engine.TZLocation = time.UTC
return sqlstore
}

View File

@ -10,7 +10,9 @@ import (
func init() {
bus.AddHandler("sql", GetSystemStats)
bus.AddHandler("sql", GetDataSourceStats)
bus.AddHandler("sql", GetDataSourceAccessStats)
bus.AddHandler("sql", GetAdminStats)
bus.AddHandler("sql", GetSystemUserCountStats)
}
var activeUserTimeLimit = time.Hour * 24 * 30
@ -22,43 +24,51 @@ func GetDataSourceStats(query *m.GetDataSourceStatsQuery) error {
return err
}
func GetDataSourceAccessStats(query *m.GetDataSourceAccessStatsQuery) error {
var rawSql = `SELECT COUNT(*) as count, type, access FROM data_source GROUP BY type, access`
query.Result = make([]*m.DataSourceAccessStats, 0)
err := x.SQL(rawSql).Find(&query.Result)
return err
}
func GetSystemStats(query *m.GetSystemStatsQuery) error {
var rawSql = `SELECT
(
SELECT COUNT(*)
FROM ` + dialect.Quote("user") + `
) AS users,
(
SELECT COUNT(*)
FROM ` + dialect.Quote("org") + `
) AS orgs,
(
SELECT COUNT(*)
FROM ` + dialect.Quote("dashboard") + `
) AS dashboards,
(
SELECT COUNT(*)
FROM ` + dialect.Quote("data_source") + `
) AS datasources,
(
SELECT COUNT(*) FROM ` + dialect.Quote("star") + `
) AS stars,
(
SELECT COUNT(*)
FROM ` + dialect.Quote("playlist") + `
) AS playlists,
(
SELECT COUNT(*)
FROM ` + dialect.Quote("alert") + `
) AS alerts,
(
SELECT COUNT(*) FROM ` + dialect.Quote("user") + ` where last_seen_at > ?
) as active_users
`
sb := &SqlBuilder{}
sb.Write("SELECT ")
sb.Write(`(SELECT COUNT(*) FROM ` + dialect.Quote("user") + `) AS users,`)
sb.Write(`(SELECT COUNT(*) FROM ` + dialect.Quote("org") + `) AS orgs,`)
sb.Write(`(SELECT COUNT(*) FROM ` + dialect.Quote("dashboard") + `) AS dashboards,`)
sb.Write(`(SELECT COUNT(*) FROM ` + dialect.Quote("data_source") + `) AS datasources,`)
sb.Write(`(SELECT COUNT(*) FROM ` + dialect.Quote("star") + `) AS stars,`)
sb.Write(`(SELECT COUNT(*) FROM ` + dialect.Quote("playlist") + `) AS playlists,`)
sb.Write(`(SELECT COUNT(*) FROM ` + dialect.Quote("alert") + `) AS alerts,`)
activeUserDeadlineDate := time.Now().Add(-activeUserTimeLimit)
sb.Write(`(SELECT COUNT(*) FROM `+dialect.Quote("user")+` where last_seen_at > ?) AS active_users,`, activeUserDeadlineDate)
sb.Write(`(SELECT COUNT(id) FROM `+dialect.Quote("dashboard")+` where is_folder = ?) AS folders,`, dialect.BooleanStr(true))
sb.Write(`(
SELECT COUNT(acl.id)
FROM `+dialect.Quote("dashboard_acl")+` as acl
inner join `+dialect.Quote("dashboard")+` as d
on d.id = acl.dashboard_id
WHERE d.is_folder = ?
) AS dashboard_permissions,`, dialect.BooleanStr(false))
sb.Write(`(
SELECT COUNT(acl.id)
FROM `+dialect.Quote("dashboard_acl")+` as acl
inner join `+dialect.Quote("dashboard")+` as d
on d.id = acl.dashboard_id
WHERE d.is_folder = ?
) AS folder_permissions,`, dialect.BooleanStr(true))
sb.Write(`(SELECT COUNT(id) FROM ` + dialect.Quote("dashboard_provisioning") + `) AS provisioned_dashboards,`)
sb.Write(`(SELECT COUNT(id) FROM ` + dialect.Quote("dashboard_snapshot") + `) AS snapshots,`)
sb.Write(`(SELECT COUNT(id) FROM ` + dialect.Quote("team") + `) AS teams`)
var stats m.SystemStats
_, err := x.SQL(rawSql, activeUserDeadlineDate).Get(&stats)
_, err := x.SQL(sb.GetSqlString(), sb.params...).Get(&stats)
if err != nil {
return err
}
@ -122,3 +132,16 @@ func GetAdminStats(query *m.GetAdminStatsQuery) error {
query.Result = &stats
return err
}
func GetSystemUserCountStats(query *m.GetSystemUserCountStatsQuery) error {
var rawSql = `SELECT COUNT(id) AS Count FROM ` + dialect.Quote("user")
var stats m.SystemUserCountStats
_, err := x.SQL(rawSql).Get(&stats)
if err != nil {
return err
}
query.Result = &stats
return err
}
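A short usage sketch, not part of this diff, mirroring ensureAdminUser above; hasAnyUsers is a hypothetical helper and assumes the bus, fmt and models (m) imports used throughout the package.
func hasAnyUsers() (bool, error) {
	query := m.GetSystemUserCountStatsQuery{}
	if err := bus.Dispatch(&query); err != nil {
		return false, fmt.Errorf("could not read system user count: %v", err)
	}
	return query.Result.Count > 0, nil
}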

View File

@ -0,0 +1,39 @@
package sqlstore
import (
"testing"
m "github.com/grafana/grafana/pkg/models"
. "github.com/smartystreets/goconvey/convey"
)
func TestStatsDataAccess(t *testing.T) {
Convey("Testing Stats Data Access", t, func() {
InitTestDB(t)
Convey("Get system stats should not results in error", func() {
query := m.GetSystemStatsQuery{}
err := GetSystemStats(&query)
So(err, ShouldBeNil)
})
Convey("Get system user count stats should not results in error", func() {
query := m.GetSystemUserCountStatsQuery{}
err := GetSystemUserCountStats(&query)
So(err, ShouldBeNil)
})
Convey("Get datasource stats should not results in error", func() {
query := m.GetDataSourceStatsQuery{}
err := GetDataSourceStats(&query)
So(err, ShouldBeNil)
})
Convey("Get datasource access stats should not results in error", func() {
query := m.GetDataSourceAccessStatsQuery{}
err := GetDataSourceAccessStats(&query)
So(err, ShouldBeNil)
})
})
}

View File

@ -104,6 +104,7 @@ var (
DefaultTheme string
DisableLoginForm bool
DisableSignoutMenu bool
SignoutRedirectUrl string
ExternalUserMngLinkUrl string
ExternalUserMngLinkName string
ExternalUserMngInfo string
@ -141,10 +142,6 @@ var (
ConfRootPath string
IsWindows bool
// PhantomJs Rendering
ImagesDir string
PhantomDir string
// for logging purposes
configFiles []string
appliedCommandLineProperties []string
@ -193,7 +190,10 @@ type Cfg struct {
// SMTP email settings
Smtp SmtpSettings
// Rendering
ImagesDir string
PhantomDir string
RendererUrl string
DisableBruteForceLoginProtection bool
}
@ -601,6 +601,7 @@ func (cfg *Cfg) Load(args *CommandLineArgs) error {
auth := iniFile.Section("auth")
DisableLoginForm = auth.Key("disable_login_form").MustBool(false)
DisableSignoutMenu = auth.Key("disable_signout_menu").MustBool(false)
SignoutRedirectUrl = auth.Key("signout_redirect_url").String()
// anonymous access
AnonymousEnabled = iniFile.Section("auth.anonymous").Key("enabled").MustBool(false)
@ -631,10 +632,11 @@ func (cfg *Cfg) Load(args *CommandLineArgs) error {
// global plugin settings
PluginAppsSkipVerifyTLS = iniFile.Section("plugins").Key("app_tls_skip_verify_insecure").MustBool(false)
// PhantomJS rendering
// Rendering
renderSec := iniFile.Section("rendering")
cfg.RendererUrl = renderSec.Key("server_url").String()
cfg.ImagesDir = filepath.Join(DataPath, "png")
ImagesDir = cfg.ImagesDir
PhantomDir = filepath.Join(HomePath, "tools/phantomjs")
cfg.PhantomDir = filepath.Join(HomePath, "tools/phantomjs")
analytics := iniFile.Section("analytics")
ReportingEnabled = analytics.Key("reporting_enabled").MustBool(true)

View File

@ -1,44 +0,0 @@
define(['jquery', 'angular', 'lodash'],
function ($, angular, _) {
'use strict';
var $win = $(window);
$.fn.place_tt = (function () {
var defaults = {
offset: 5,
};
return function (x, y, opts) {
opts = $.extend(true, {}, defaults, opts);
return this.each(function () {
var $tooltip = $(this), width, height;
$tooltip.addClass('grafana-tooltip');
$("#tooltip").remove();
$tooltip.appendTo(document.body);
if (opts.compile) {
angular.element(document).injector().invoke(["$compile", "$rootScope", function($compile, $rootScope) {
var tmpScope = $rootScope.$new(true);
_.extend(tmpScope, opts.scopeData);
$compile($tooltip)(tmpScope);
tmpScope.$digest();
tmpScope.$destroy();
}]);
}
width = $tooltip.outerWidth(true);
height = $tooltip.outerHeight(true);
$tooltip.css('left', x + opts.offset + width > $win.width() ? x - opts.offset - width : x + opts.offset);
$tooltip.css('top', y + opts.offset + height > $win.height() ? y - opts.offset - height : y + opts.offset);
});
};
})();
return $;
});

View File

@ -0,0 +1,50 @@
import $ from 'jquery';
import angular from 'angular';
import _ from 'lodash';
var $win = $(window);
$.fn.place_tt = (function() {
var defaults = {
offset: 5,
};
return function(x, y, opts) {
opts = $.extend(true, {}, defaults, opts);
return this.each(function() {
var $tooltip = $(this),
width,
height;
$tooltip.addClass('grafana-tooltip');
$('#tooltip').remove();
$tooltip.appendTo(document.body);
if (opts.compile) {
angular
.element(document)
.injector()
.invoke([
'$compile',
'$rootScope',
function($compile, $rootScope) {
var tmpScope = $rootScope.$new(true);
_.extend(tmpScope, opts.scopeData);
$compile($tooltip)(tmpScope);
tmpScope.$digest();
tmpScope.$destroy();
},
]);
}
width = $tooltip.outerWidth(true);
height = $tooltip.outerHeight(true);
$tooltip.css('left', x + opts.offset + width > $win.width() ? x - opts.offset - width : x + opts.offset);
$tooltip.css('top', y + opts.offset + height > $win.height() ? y - opts.offset - height : y + opts.offset);
});
};
})();

View File

@ -1,32 +0,0 @@
define([
'lodash-src'
],
function () {
'use strict';
var _ = window._;
/*
Mixins :)
*/
_.mixin({
move: function (array, fromIndex, toIndex) {
array.splice(toIndex, 0, array.splice(fromIndex, 1)[0]);
return array;
},
// If variable is value, then return alt. If variable is anything else, return value;
toggle: function (variable, value, alt) {
return variable === value ? alt : value;
},
toggleInOut: function(array,value) {
if(_.includes(array,value)) {
array = _.without(array,value);
} else {
array.push(value);
}
return array;
}
});
return _;
});

View File

@ -0,0 +1,11 @@
import _ from 'lodash';
/*
Mixins :)
*/
_.mixin({
move: function(array, fromIndex, toIndex) {
array.splice(toIndex, 0, array.splice(fromIndex, 1)[0]);
return array;
},
});

View File

@ -9,12 +9,14 @@ export class Profiler {
digestCounter: any;
$rootScope: any;
scopeCount: any;
window: any;
init(config, $rootScope) {
this.enabled = config.buildInfo.env === 'development';
this.timings = {};
this.timings.appStart = { loadStart: new Date().getTime() };
this.$rootScope = $rootScope;
this.window = window;
if (!this.enabled) {
return;
@ -102,7 +104,10 @@ export class Profiler {
// add render counter to root scope
// used by phantomjs render.js to know when panel has rendered
this.panelsRendered = (this.panelsRendered || 0) + 1;
this.$rootScope.panelsRendered = this.panelsRendered;
// this window variable is used by backend rendering tools to know
// all panels have completed rendering
this.window.panelsRendered = this.panelsRendered;
if (this.enabled) {
panelTimings.renderEnd = new Date().getTime();

View File

@ -448,6 +448,7 @@ kbn.valueFormats.currencyISK = kbn.formatBuilders.currency('kr');
kbn.valueFormats.currencyNOK = kbn.formatBuilders.currency('kr');
kbn.valueFormats.currencySEK = kbn.formatBuilders.currency('kr');
kbn.valueFormats.currencyCZK = kbn.formatBuilders.currency('czk');
kbn.valueFormats.currencyCHF = kbn.formatBuilders.currency('CHF');
// Data (Binary)
kbn.valueFormats.bits = kbn.formatBuilders.binarySIPrefix('b');
@ -873,6 +874,7 @@ kbn.getUnitFormats = function() {
{ text: 'Norwegian Krone (kr)', value: 'currencyNOK' },
{ text: 'Swedish Krona (kr)', value: 'currencySEK' },
{ text: 'Czech koruna (czk)', value: 'currencyCZK' },
{ text: 'Swiss franc (CHF)', value: 'currencyCHF' },
],
},
{

View File

@ -1,6 +1,6 @@
import angular from 'angular';
import moment from 'moment';
import config from 'app/core/config';
import moment from 'moment';
export class ShareModalCtrl {
/** @ngInject */
@ -86,9 +86,30 @@ export class ShareModalCtrl {
config.appSubUrl + '/render/dashboard-solo/'
);
$scope.imageUrl = $scope.imageUrl.replace(config.appSubUrl + '/d-solo/', config.appSubUrl + '/render/d-solo/');
$scope.imageUrl += '&width=1000';
$scope.imageUrl += '&height=500';
$scope.imageUrl += '&tz=UTC' + encodeURIComponent(moment().format('Z'));
$scope.imageUrl += '&width=1000&height=500' + $scope.getLocalTimeZone();
};
// This function will try to return the proper full name of the local timezone
// Chrome does not handle the timezone offset (but phantomjs does)
$scope.getLocalTimeZone = function() {
let utcOffset = '&tz=UTC' + encodeURIComponent(moment().format('Z'));
// Older browsers do not support the internationalization API
if (!(<any>window).Intl) {
return utcOffset;
}
const dateFormat = (<any>window).Intl.DateTimeFormat();
if (!dateFormat.resolvedOptions) {
return utcOffset;
}
const options = dateFormat.resolvedOptions();
if (!options.timeZone) {
return utcOffset;
}
return '&tz=' + encodeURIComponent(options.timeZone);
};
$scope.getShareUrl = function() {

View File

@ -15,12 +15,9 @@ export class SnapshotsCtrl {
removeSnapshotConfirmed(snapshot) {
_.remove(this.snapshots, { key: snapshot.key });
this.backendSrv.get('/api/snapshots-delete/' + snapshot.deleteKey).then(
this.backendSrv.delete('/api/snapshots/' + snapshot.key).then(
() => {},
() => {
this.$rootScope.appEvent('alert-success', ['Snapshot deleted', '']);
},
() => {
this.$rootScope.appEvent('alert-error', ['Unable to delete snapshot', '']);
this.snapshots.push(snapshot);
}
);

View File

@ -580,6 +580,7 @@ class SingleStatCtrl extends MetricsPanelCtrl {
lines: {
show: true,
fill: 1,
zero: false,
lineWidth: 1,
fillColor: panel.sparkline.fillColor,
},

scripts/build/build-all.sh Executable file (69 lines)
View File

@ -0,0 +1,69 @@
#!/bin/bash
#
# This script is executed from within the container.
#
CCARMV7=arm-linux-gnueabihf-gcc
CCARM64=aarch64-linux-gnu-gcc
CCOSX64=/tmp/osxcross/target/bin/o64-clang
CCWIN64=x86_64-w64-mingw32-gcc
CCX64=/tmp/x86_64-centos6-linux-gnu/bin/x86_64-centos6-linux-gnu-gcc
GOPATH=/go
REPO_PATH=$GOPATH/src/github.com/grafana/grafana
cd /go/src/github.com/grafana/grafana
echo "current dir: $(pwd)"
if [ "$CIRCLE_TAG" != "" ]; then
echo "Building releases from tag $CIRCLE_TAG"
OPT="-includeBuildNumber=false"
else
echo "Building incremental build for $CIRCLE_BRANCH"
OPT="-buildNumber=${CIRCLE_BUILD_NUM}"
fi
go run build.go -goarch armv7 -cc ${CCARMV7} ${OPT} build
go run build.go -goarch arm64 -cc ${CCARM64} ${OPT} build
go run build.go -goos darwin -cc ${CCOSX64} ${OPT} build
go run build.go -goos windows -cc ${CCWIN64} ${OPT} build
CC=${CCX64} go run build.go ${OPT} build
yarn install --pure-lockfile --no-progress
echo "current dir: $(pwd)"
if [ -d "dist" ]; then
rm -rf dist
fi
if [ "$CIRCLE_TAG" != "" ]; then
echo "Building frontend and packaging from tag $CIRCLE_TAG"
else
echo "Building frontend and packaging incremental build for $CIRCLE_BRANCH"
fi
echo "Building frontend"
go run build.go ${OPT} build-frontend
echo "Packaging"
go run build.go -goos linux -pkg-arch amd64 ${OPT} package-only latest
# remove the amd64 phantomjs binary before packaging for armv7/arm64
rm tools/phantomjs/phantomjs
go run build.go -goos linux -pkg-arch armv7 ${OPT} package-only
go run build.go -goos linux -pkg-arch arm64 ${OPT} package-only
if [ -d '/tmp/phantomjs/darwin' ]; then
cp /tmp/phantomjs/darwin/phantomjs tools/phantomjs/phantomjs
else
echo 'PhantomJS binaries for darwin missing!'
fi
go run build.go -goos darwin -pkg-arch amd64 ${OPT} package-only
if [ -d '/tmp/phantomjs/windows' ]; then
cp /tmp/phantomjs/windows/phantomjs.exe tools/phantomjs/phantomjs.exe
rm tools/phantomjs/phantomjs
else
echo 'PhantomJS binaries for windows missing!'
fi
go run build.go -goos windows -pkg-arch amd64 ${OPT} package-only

View File

@ -4,6 +4,8 @@
# This script is executed from within the container.
#
CCX64=/tmp/x86_64-centos6-linux-gnu/bin/x86_64-centos6-linux-gnu-gcc
GOPATH=/go
REPO_PATH=$GOPATH/src/github.com/grafana/grafana
@ -11,23 +13,29 @@ cd /go/src/github.com/grafana/grafana
echo "current dir: $(pwd)"
if [ "$CIRCLE_TAG" != "" ]; then
echo "Building a release from tag $CIRCLE_TAG"
go run build.go -buildNumber=${CIRCLE_BUILD_NUM} -includeBuildNumber=false build
echo "Building releases from tag $CIRCLE_TAG"
CC=${CCX64} go run build.go -includeBuildNumber=false build
else
echo "Building incremental build for $CIRCLE_BRANCH"
go run build.go -buildNumber=${CIRCLE_BUILD_NUM} build
CC=${CCX64} go run build.go -buildNumber=${CIRCLE_BUILD_NUM} build
fi
yarn install --pure-lockfile --no-progress
source /etc/profile.d/rvm.sh
echo "current dir: $(pwd)"
if [ "$CIRCLE_TAG" != "" ]; then
echo "Packaging a release from tag $CIRCLE_TAG"
go run build.go -buildNumber=${CIRCLE_BUILD_NUM} -includeBuildNumber=false package latest
else
echo "Packaging incremental build for $CIRCLE_BRANCH"
go run build.go -buildNumber=${CIRCLE_BUILD_NUM} package latest
if [ -d "dist" ]; then
rm -rf dist
fi
if [ "$CIRCLE_TAG" != "" ]; then
echo "Building frontend from tag $CIRCLE_TAG"
go run build.go -includeBuildNumber=false build-frontend
echo "Packaging a release from tag $CIRCLE_TAG"
go run build.go -goos linux -pkg-arch amd64 -includeBuildNumber=false package-only latest
else
echo "Building frontend for $CIRCLE_BRANCH"
go run build.go -buildNumber=${CIRCLE_BUILD_NUM} build-frontend
echo "Packaging incremental build for $CIRCLE_BRANCH"
go run build.go -goos linux -pkg-arch amd64 -buildNumber=${CIRCLE_BUILD_NUM} package-only latest
fi

View File

@ -0,0 +1,17 @@
#!/bin/bash -e
if [ ! -d '/tmp/phantomjs' ]; then
_version="2.1.1"
curl -L https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-$_version-windows.zip > /tmp/phantomjs-win.zip
curl -L https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-$_version-macosx.zip > /tmp/phantomjs-mac.zip
cd /tmp
unzip /tmp/phantomjs-win.zip
unzip /tmp/phantomjs-mac.zip
mkdir -p /tmp/phantomjs/windows /tmp/phantomjs/darwin
cp /tmp/phantomjs-$_version-windows/bin/phantomjs.exe /tmp/phantomjs/windows/phantomjs.exe
cp /tmp/phantomjs-$_version-macosx/bin/phantomjs /tmp/phantomjs/darwin/phantomjs
fi

View File

@ -18,8 +18,18 @@ import (
var apiUrl = flag.String("apiUrl", "https://grafana.com/api", "api url")
var apiKey = flag.String("apiKey", "", "api key")
var version = ""
var versionRe = regexp.MustCompile(`grafana-(.*)\.(linux|windows)`)
var versionRe = regexp.MustCompile(`grafana-(.*)(\.|_)(arm64|armhfp|aarch64|armv7|darwin|linux|windows|x86_64)`)
var debVersionRe = regexp.MustCompile(`grafana_(.*)_(arm64|armv7|armhf|amd64)\.deb`)
var builds = []build{}
var architectureMapping = map[string]string{
"armv7":"armv7",
"armhfp":"armv7",
"armhf":"armv7",
"arm64":"arm64",
"aarch64":"arm64",
"amd64":"amd64",
"x86_64":"amd64",
}
func main() {
flag.Parse()
@ -60,17 +70,62 @@ func main() {
}
}
func packageWalker(path string, f os.FileInfo, err error) error {
if f.Name() == "dist" || strings.Contains(f.Name(), "sha256") || strings.Contains(f.Name(), "latest") {
return nil
}
log.Printf("Finding package file %s", f.Name())
result := versionRe.FindSubmatch([]byte(f.Name()))
func mapPackage(path string, name string, shaBytes []byte) (build, error) {
log.Printf("Finding package file %s", name)
result := versionRe.FindSubmatch([]byte(name))
debResult := debVersionRe.FindSubmatch([]byte(name))
if len(result) > 0 {
version = string(result[1])
log.Printf("Version detected: %v", version)
} else if len(debResult) > 0 {
version = string(debResult[1])
} else {
return build{}, fmt.Errorf("Unable to figure out version from '%v'", name)
}
os := ""
if strings.Contains(name, "linux") {
os = "linux"
}
if strings.HasSuffix(name, "windows-amd64.zip") {
os = "win"
}
if strings.HasSuffix(name, "darwin-amd64.tar.gz") {
os = "darwin"
}
if strings.HasSuffix(name, ".rpm") {
os = "rhel"
}
if strings.HasSuffix(name, ".deb") {
os = "deb"
}
if os == "" {
return build{}, fmt.Errorf("Unable to figure out os from '%v'", name)
}
arch := ""
for archListed, archReal := range architectureMapping {
if strings.Contains(name, archListed) {
arch = archReal
break
}
}
if arch == "" {
return build{}, fmt.Errorf("Unable to figure out arch from '%v'", name)
}
return build{
Os: os,
Arch: arch,
Url: "https://s3-us-west-2.amazonaws.com/grafana-releases/master/" + name,
Sha256: string(shaBytes),
}, nil
}
func packageWalker(path string, f os.FileInfo, err error) error {
if f.Name() == "dist" || strings.Contains(f.Name(), "sha256") || strings.Contains(f.Name(), "latest") {
return nil
}
shaBytes, err := ioutil.ReadFile(path + ".sha256")
@ -78,27 +133,14 @@ func packageWalker(path string, f os.FileInfo, err error) error {
log.Fatalf("Failed to read sha256 file %v", err)
}
os := ""
if strings.Contains(f.Name(), "linux-x64.tar.gz") {
os = "linux"
}
if strings.HasSuffix(f.Name(), "windows-x64.zip") {
os = "win"
}
if strings.HasSuffix(f.Name(), ".rpm") {
os = "rhel"
}
if strings.HasSuffix(f.Name(), ".deb") {
os = "deb"
build, err := mapPackage(path, f.Name(), shaBytes)
if err != nil {
log.Printf("Could not map metadata from package: %v", err)
return nil
}
builds = append(builds, build{
Os: os,
Arch: "amd64",
Url: "https://s3-us-west-2.amazonaws.com/grafana-releases/master/" + f.Name(),
Sha256: string(shaBytes),
})
builds = append(builds, build)
return nil
}
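For reference, a hedged sketch of how packageWalker is typically driven; the exact call in main is not visible in this hunk, and the dist directory name is taken from the build scripts above.
// Collect build metadata for every artifact under dist/.
if err := filepath.Walk("dist", packageWalker); err != nil {
	log.Fatalf("Failed to walk dist directory: %v", err)
}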

View File

@ -0,0 +1,110 @@
package main
import (
"testing"
)
type testPackage struct {
path string
version string
os string
arch string
}
var testData = []testPackage{
{
path: "grafana-5.2.0-474pre1.aarch64.rpm",
version: "5.2.0-474pre1",
os: "rhel",
arch: "arm64",
},
{
path: "grafana-5.2.0-474pre1.armhfp.rpm",
version: "5.2.0-474pre1",
os: "rhel",
arch: "armv7",
},
{
path: "grafana-5.2.0-474pre1.darwin-amd64.tar.gz",
version: "5.2.0-474pre1",
os: "darwin",
arch: "amd64",
},
{
path: "grafana-5.2.0-474pre1.linux-amd64.tar.gz",
version: "5.2.0-474pre1",
os: "linux",
arch: "amd64",
},
{
path: "grafana-5.2.0-474pre1.linux-arm64.tar.gz",
version: "5.2.0-474pre1",
os: "linux",
arch: "arm64",
},
{
path: "grafana-5.2.0-474pre1.linux-armv7.tar.gz",
version: "5.2.0-474pre1",
os: "linux",
arch: "armv7",
},
{
path: "grafana-5.2.0-474pre1.windows-amd64.zip",
version: "5.2.0-474pre1",
os: "win",
arch: "amd64",
},
{
path: "grafana-5.2.0-474pre1.x86_64.rpm",
version: "5.2.0-474pre1",
os: "rhel",
arch: "amd64",
},
{
path: "grafana_5.2.0-474pre1_amd64.deb",
version: "5.2.0-474pre1",
os: "deb",
arch: "amd64",
},
{
path: "grafana_5.2.0-474pre1_arm64.deb",
version: "5.2.0-474pre1",
os: "deb",
arch: "arm64",
},
{
path: "grafana_5.2.0-474pre1_armhf.deb",
version: "5.2.0-474pre1",
os: "deb",
arch: "armv7",
},
}
func TestFileWalker(t *testing.T) {
for _, packageInfo := range testData {
version = ""
actualPackageInfo, err := mapPackage(packageInfo.path, packageInfo.path, []byte{})
if err != nil {
t.Error(err)
continue
}
if version != packageInfo.version {
t.Errorf("Testing (%v), expected %v to be %v.", packageInfo.path, version, packageInfo.version)
}
if actualPackageInfo.Os != packageInfo.os {
t.Errorf("Testing (%v), expected %v to be %v.", packageInfo.path, actualPackageInfo.Os, packageInfo.os)
}
if actualPackageInfo.Arch != packageInfo.arch {
t.Errorf("Testing (%v), expected %v to be %v.", packageInfo.path, actualPackageInfo.Arch, packageInfo.arch)
}
}
incorrectPackageName := "grafana_5.2.0-474pre1_armfoo.deb"
_, err := mapPackage(incorrectPackageName, incorrectPackageName, []byte{})
if err == nil {
t.Errorf("Testing (%v), expected to fail due to an unrecognized arch, but signalled no error.", incorrectPackageName)
}
}

View File

@ -3,13 +3,20 @@ var path = require('path');
module.exports = function(grunt) {
"use strict";
// build, then zip and upload to s3
// build then zip
grunt.registerTask('release', [
'build',
'build-post-process',
'compress:release'
]);
// package into archives
grunt.registerTask('package', [
'clean:temp',
'build-post-process',
'compress:release'
]);
grunt.registerTask('build-post-process', function() {
grunt.config('copy.public_to_temp', {
expand: true,
@ -18,7 +25,7 @@ module.exports = function(grunt) {
dest: '<%= tempDir %>/public/',
});
grunt.config('copy.backend_bin', {
cwd: 'bin',
cwd: 'bin/<%= platform %>-<%= arch %>',
expand: true,
src: ['*'],
options: { mode: true},

View File

@ -5,61 +5,29 @@ const common = require('./webpack.common.js');
const path = require('path');
const webpack = require('webpack');
const HtmlWebpackPlugin = require("html-webpack-plugin");
const HtmlWebpackHarddiskPlugin = require('html-webpack-harddisk-plugin');
const ExtractTextPlugin = require("extract-text-webpack-plugin");
const CleanWebpackPlugin = require('clean-webpack-plugin');
const BundleAnalyzerPlugin = require('webpack-bundle-analyzer').BundleAnalyzerPlugin;
const TARGET = process.env.npm_lifecycle_event;
const HOT = TARGET === 'start';
const extractSass = new ExtractTextPlugin({
filename: "grafana.[name].css",
disable: HOT
filename: "grafana.[name].css"
});
const entries = HOT ? {
app: [
'webpack-dev-server/client?http://localhost:3333',
'./public/app/dev.ts',
],
vendor: require('./dependencies'),
} : {
app: './public/app/index.ts',
dark: './public/sass/grafana.dark.scss',
light: './public/sass/grafana.light.scss',
vendor: require('./dependencies'),
};
const output = HOT ? {
path: path.resolve(__dirname, '../../public/build'),
filename: '[name].[hash].js',
publicPath: "/public/build/",
} : {
path: path.resolve(__dirname, '../../public/build'),
filename: '[name].[hash].js',
// Keep publicPath relative for host.com/grafana/ deployments
publicPath: "public/build/",
};
module.exports = merge(common, {
devtool: "cheap-module-source-map",
entry: entries,
output: output,
resolve: {
extensions: ['.scss', '.ts', '.tsx', '.es6', '.js', '.json', '.svg', '.woff2', '.png'],
entry: {
app: './public/app/index.ts',
dark: './public/sass/grafana.dark.scss',
light: './public/sass/grafana.light.scss',
vendor: require('./dependencies'),
},
devServer: {
publicPath: '/public/build/',
hot: HOT,
port: 3333,
proxy: {
'!/public/build': 'http://localhost:3000'
}
output: {
path: path.resolve(__dirname, '../../public/build'),
filename: '[name].[hash].js',
// Keep publicPath relative for host.com/grafana/ deployments
publicPath: "public/build/",
},
module: {
@ -83,33 +51,16 @@ module.exports = merge(common, {
loader: 'awesome-typescript-loader',
options: {
useCache: true,
useBabel: HOT,
babelOptions: {
babelrc: false,
plugins: [
'syntax-dynamic-import',
'react-hot-loader/babel'
]
}
},
}
},
require('./sass.rule.js')({
sourceMap: true, minimize: false, preserveUrl: HOT
sourceMap: true, minimize: false, preserveUrl: false
}, extractSass),
{
test: /\.(ttf|eot|svg|woff(2)?)(\?[a-z0-9=&.]+)?$/,
test: /\.(png|jpg|gif|ttf|eot|svg|woff(2)?)(\?[a-z0-9=&.]+)?$/,
loader: 'file-loader'
},
{
test: /\.(png|jpg|gif)$/,
use: [
{
loader: 'file-loader',
options: {}
}
]
},
]
},
@ -121,13 +72,10 @@ module.exports = merge(common, {
template: path.resolve(__dirname, '../../public/views/index.template.html'),
inject: 'body',
chunks: ['manifest', 'vendor', 'app'],
alwaysWriteToDisk: HOT
}),
new HtmlWebpackHarddiskPlugin(),
new webpack.NamedModulesPlugin(),
new webpack.HotModuleReplacementPlugin(),
new webpack.DefinePlugin({
'GRAFANA_THEME': JSON.stringify(process.env.GRAFANA_THEME || 'dark'),
'process.env': {
'NODE_ENV': JSON.stringify('development')
}

View File

@ -0,0 +1,91 @@
'use strict';
const merge = require('webpack-merge');
const common = require('./webpack.common.js');
const path = require('path');
const webpack = require('webpack');
const HtmlWebpackPlugin = require("html-webpack-plugin");
const HtmlWebpackHarddiskPlugin = require('html-webpack-harddisk-plugin');
const CleanWebpackPlugin = require('clean-webpack-plugin');
module.exports = merge(common, {
entry: {
app: [
'webpack-dev-server/client?http://localhost:3333',
'./public/app/dev.ts',
],
},
output: {
path: path.resolve(__dirname, '../../public/build'),
filename: '[name].[hash].js',
publicPath: "/public/build/",
},
resolve: {
extensions: ['.scss', '.ts', '.tsx', '.es6', '.js', '.json', '.svg', '.woff2', '.png'],
},
devServer: {
publicPath: '/public/build/',
hot: true,
port: 3333,
proxy: {
'!/public/build': 'http://localhost:3000'
}
},
module: {
rules: [
{
test: /\.tsx?$/,
exclude: /node_modules/,
use: {
loader: 'awesome-typescript-loader',
options: {
useCache: true,
useBabel: true,
babelOptions: {
babelrc: false,
plugins: [
'syntax-dynamic-import',
'react-hot-loader/babel'
]
}
},
}
},
{
test: /\.scss$/,
use: [
"style-loader", // creates style nodes from JS strings
"css-loader", // translates CSS into CommonJS
"sass-loader" // compiles Sass to CSS
]
},
{
test: /\.(png|jpg|gif|ttf|eot|svg|woff(2)?)(\?[a-z0-9=&.]+)?$/,
loader: 'file-loader'
},
]
},
plugins: [
new CleanWebpackPlugin('../public/build', { allowExternal: true }),
new HtmlWebpackPlugin({
filename: path.resolve(__dirname, '../../public/views/index.html'),
template: path.resolve(__dirname, '../../public/views/index.template.html'),
inject: 'body',
alwaysWriteToDisk: true
}),
new HtmlWebpackHarddiskPlugin(),
new webpack.NamedModulesPlugin(),
new webpack.HotModuleReplacementPlugin(),
new webpack.DefinePlugin({
'GRAFANA_THEME': JSON.stringify(process.env.GRAFANA_THEME || 'dark'),
'process.env': {
'NODE_ENV': JSON.stringify('development')
}
}),
]
});

View File

@ -50,15 +50,8 @@
function checkIsReady() {
var panelsRendered = page.evaluate(function() {
if (!window.angular) { return false; }
var body = window.angular.element(document.body);
if (!body.injector) { return false; }
if (!body.injector()) { return false; }
var rootScope = body.injector().get('$rootScope');
if (!rootScope) {return false;}
var panels = angular.element('plugin-component').length;
return rootScope.panelsRendered >= panels;
var panelCount = document.querySelectorAll('.panel').length;
return window.panelsRendered >= panelCount;
});
if (panelsRendered || totalWaitMs > timeoutMs) {

View File

@ -22,6 +22,7 @@ David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>

View File

@ -1,239 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------
SOFTWARE DISTRIBUTED WITH THRIFT:
The Apache Thrift software includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for the these subcomponents is subject to the terms and
conditions of the following licenses.
--------------------------------------------------
Portions of the following files are licensed under the MIT License:
lib/erl/src/Makefile.am
Please see doc/otp-base-license.txt for the full terms of this license.
--------------------------------------------------
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
#
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty provided
# the copyright notice and this notice are preserved.
--------------------------------------------------
For the lib/nodejs/lib/thrift/json_parse.js:
/*
json_parse.js
2015-05-02
Public Domain.
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
*/
(By Douglas Crockford <douglas@crockford.com>)
--------------------------------------------------

View File

@ -1,5 +0,0 @@
Apache Thrift
Copyright 2006-2010 The Apache Software Foundation.
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -1,16 +0,0 @@
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.

View File

@ -1,129 +0,0 @@
This package was debianized by Thrift Developer's <dev@thrift.apache.org>.
This package and the Debian packaging is licensed under the Apache License,
see `/usr/share/common-licenses/Apache-2.0'.
The following information was copied from Apache Thrift LICENSE file.
--------------------------------------------------
SOFTWARE DISTRIBUTED WITH THRIFT:
The Apache Thrift software includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for the these subcomponents is subject to the terms and
conditions of the following licenses.
--------------------------------------------------
Portions of the following files are licensed under the MIT License:
lib/erl/src/Makefile.am
Please see doc/otp-base-license.txt for the full terms of this license.
--------------------------------------------------
The following files contain some portions of code contributed under
the Thrift Software License (see doc/old-thrift-license.txt), and relicensed
under the Apache 2.0 License:
compiler/cpp/Makefile.am
compiler/cpp/src/generate/t_cocoa_generator.cc
compiler/cpp/src/generate/t_cpp_generator.cc
compiler/cpp/src/generate/t_csharp_generator.cc
compiler/cpp/src/generate/t_erl_generator.cc
compiler/cpp/src/generate/t_hs_generator.cc
compiler/cpp/src/generate/t_java_generator.cc
compiler/cpp/src/generate/t_ocaml_generator.cc
compiler/cpp/src/generate/t_perl_generator.cc
compiler/cpp/src/generate/t_php_generator.cc
compiler/cpp/src/generate/t_py_generator.cc
compiler/cpp/src/generate/t_rb_generator.cc
compiler/cpp/src/generate/t_st_generator.cc
compiler/cpp/src/generate/t_xsd_generator.cc
compiler/cpp/src/main.cc
compiler/cpp/src/parse/t_field.h
compiler/cpp/src/parse/t_program.h
compiler/cpp/src/platform.h
compiler/cpp/src/thriftl.ll
compiler/cpp/src/thrifty.yy
lib/csharp/src/Protocol/TBinaryProtocol.cs
lib/csharp/src/Protocol/TField.cs
lib/csharp/src/Protocol/TList.cs
lib/csharp/src/Protocol/TMap.cs
lib/csharp/src/Protocol/TMessage.cs
lib/csharp/src/Protocol/TMessageType.cs
lib/csharp/src/Protocol/TProtocol.cs
lib/csharp/src/Protocol/TProtocolException.cs
lib/csharp/src/Protocol/TProtocolFactory.cs
lib/csharp/src/Protocol/TProtocolUtil.cs
lib/csharp/src/Protocol/TSet.cs
lib/csharp/src/Protocol/TStruct.cs
lib/csharp/src/Protocol/TType.cs
lib/csharp/src/Server/TServer.cs
lib/csharp/src/Server/TSimpleServer.cs
lib/csharp/src/Server/TThreadPoolServer.cs
lib/csharp/src/TApplicationException.cs
lib/csharp/src/Thrift.csproj
lib/csharp/src/Thrift.sln
lib/csharp/src/TProcessor.cs
lib/csharp/src/Transport/TServerSocket.cs
lib/csharp/src/Transport/TServerTransport.cs
lib/csharp/src/Transport/TSocket.cs
lib/csharp/src/Transport/TStreamTransport.cs
lib/csharp/src/Transport/TTransport.cs
lib/csharp/src/Transport/TTransportException.cs
lib/csharp/src/Transport/TTransportFactory.cs
lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs
lib/csharp/ThriftMSBuildTask/ThriftBuild.cs
lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj
lib/rb/lib/thrift.rb
lib/st/README
lib/st/thrift.st
test/OptionalRequiredTest.cpp
test/OptionalRequiredTest.thrift
test/ThriftTest.thrift
--------------------------------------------------
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
#
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty provided
# the copyright notice and this notice are preserved.
--------------------------------------------------
For the compiler/cpp/src/md5.[ch] components:
/*
Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved.
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
L. Peter Deutsch
ghost@aladdin.com
*/
---------------------------------------------------
For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki,
lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components:
Copyright (C) 1999 - 2007 Markus Mottl
Licensed under the terms of the GNU Lesser General Public License 2.1
(see doc/lgpl-2.1.txt for the full terms of this license)

View File

@ -1,16 +0,0 @@
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.

View File

@ -1,91 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
)
type TBufferedTransportFactory struct {
size int
}
type TBufferedTransport struct {
bufio.ReadWriter
tp TTransport
}
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) TTransport {
return NewTBufferedTransport(trans, p.size)
}
func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
return &TBufferedTransportFactory{size: bufferSize}
}
func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
return &TBufferedTransport{
ReadWriter: bufio.ReadWriter{
Reader: bufio.NewReaderSize(trans, bufferSize),
Writer: bufio.NewWriterSize(trans, bufferSize),
},
tp: trans,
}
}
func (p *TBufferedTransport) IsOpen() bool {
return p.tp.IsOpen()
}
func (p *TBufferedTransport) Open() (err error) {
return p.tp.Open()
}
func (p *TBufferedTransport) Close() (err error) {
return p.tp.Close()
}
func (p *TBufferedTransport) Read(b []byte) (int, error) {
n, err := p.ReadWriter.Read(b)
if err != nil {
p.ReadWriter.Reader.Reset(p.tp)
}
return n, err
}
func (p *TBufferedTransport) Write(b []byte) (int, error) {
n, err := p.ReadWriter.Write(b)
if err != nil {
p.ReadWriter.Writer.Reset(p.tp)
}
return n, err
}
func (p *TBufferedTransport) Flush() error {
if err := p.ReadWriter.Flush(); err != nil {
p.ReadWriter.Writer.Reset(p.tp)
return err
}
return p.tp.Flush()
}
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
return p.tp.RemainingBytes()
}
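For reference, a minimal sketch of how the TBufferedTransport decorator listed above is typically used. The in-memory base transport and the upstream import path are assumptions for illustration only and are not part of this change.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	// An in-memory transport stands in for a real socket here.
	base := thrift.NewTMemoryBufferLen(1024)

	// Wrap it so reads and writes go through 4 KiB bufio buffers.
	trans := thrift.NewTBufferedTransport(base, 4096)

	if _, err := trans.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// Flush pushes the buffered bytes down to the underlying transport.
	if err := trans.Flush(); err != nil {
		panic(err)
	}

	buf := make([]byte, 5)
	n, err := trans.Read(buf)
	fmt.Println(n, string(buf[:n]), err)
}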

View File

@ -1,269 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"log"
)
type TDebugProtocol struct {
Delegate TProtocol
LogPrefix string
}
type TDebugProtocolFactory struct {
Underlying TProtocolFactory
LogPrefix string
}
func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
return &TDebugProtocolFactory{
Underlying: underlying,
LogPrefix: logPrefix,
}
}
func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return &TDebugProtocol{
Delegate: t.Underlying.GetProtocol(trans),
LogPrefix: t.LogPrefix,
}
}
func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
return err
}
func (tdp *TDebugProtocol) WriteMessageEnd() error {
err := tdp.Delegate.WriteMessageEnd()
log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
err := tdp.Delegate.WriteStructBegin(name)
log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
return err
}
func (tdp *TDebugProtocol) WriteStructEnd() error {
err := tdp.Delegate.WriteStructEnd()
log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldEnd() error {
err := tdp.Delegate.WriteFieldEnd()
log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldStop() error {
err := tdp.Delegate.WriteFieldStop()
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteMapEnd() error {
err := tdp.Delegate.WriteMapEnd()
log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
err := tdp.Delegate.WriteListBegin(elemType, size)
log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteListEnd() error {
err := tdp.Delegate.WriteListEnd()
log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
err := tdp.Delegate.WriteSetBegin(elemType, size)
log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteSetEnd() error {
err := tdp.Delegate.WriteSetEnd()
log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteBool(value bool) error {
err := tdp.Delegate.WriteBool(value)
log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteByte(value int8) error {
err := tdp.Delegate.WriteByte(value)
log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI16(value int16) error {
err := tdp.Delegate.WriteI16(value)
log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI32(value int32) error {
err := tdp.Delegate.WriteI32(value)
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI64(value int64) error {
err := tdp.Delegate.WriteI64(value)
log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteDouble(value float64) error {
err := tdp.Delegate.WriteDouble(value)
log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteString(value string) error {
err := tdp.Delegate.WriteString(value)
log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
err := tdp.Delegate.WriteBinary(value)
log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
return
}
func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
err = tdp.Delegate.ReadMessageEnd()
log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
name, err = tdp.Delegate.ReadStructBegin()
log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
return
}
func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
err = tdp.Delegate.ReadStructEnd()
log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
return
}
func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
err = tdp.Delegate.ReadFieldEnd()
log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
return
}
func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
err = tdp.Delegate.ReadMapEnd()
log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadListBegin()
log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
return
}
func (tdp *TDebugProtocol) ReadListEnd() (err error) {
err = tdp.Delegate.ReadListEnd()
log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadSetBegin()
log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
return
}
func (tdp *TDebugProtocol) ReadSetEnd() (err error) {
err = tdp.Delegate.ReadSetEnd()
log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadBool() (value bool, err error) {
value, err = tdp.Delegate.ReadBool()
log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadByte() (value int8, err error) {
value, err = tdp.Delegate.ReadByte()
log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI16() (value int16, err error) {
value, err = tdp.Delegate.ReadI16()
log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI32() (value int32, err error) {
value, err = tdp.Delegate.ReadI32()
log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI64() (value int64, err error) {
value, err = tdp.Delegate.ReadI64()
log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) {
value, err = tdp.Delegate.ReadDouble()
log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadString() (value string, err error) {
value, err = tdp.Delegate.ReadString()
log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) {
value, err = tdp.Delegate.ReadBinary()
log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
err = tdp.Delegate.Skip(fieldType)
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
return
}
func (tdp *TDebugProtocol) Flush() (err error) {
err = tdp.Delegate.Flush()
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) Transport() TTransport {
return tdp.Delegate.Transport()
}
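A short sketch of wrapping a protocol factory with the TDebugProtocol decorator above so that every protocol call is logged. The memory-buffer transport and the import path are assumptions used only to keep the example self-contained.

package main

import (
	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	trans := thrift.NewTMemoryBufferLen(1024)

	// Decorate the binary protocol so every Write*/Read* call is logged
	// via the standard log package, using the given prefix.
	factory := thrift.NewTDebugProtocolFactory(thrift.NewTBinaryProtocolFactoryDefault(), "client: ")
	proto := factory.GetProtocol(trans)

	if err := proto.WriteMessageBegin("ping", thrift.CALL, 1); err != nil {
		panic(err)
	}
	if err := proto.WriteMessageEnd(); err != nil {
		panic(err)
	}
	if err := proto.Flush(); err != nil {
		panic(err)
	}
}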

View File

@ -1,58 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
type TDeserializer struct {
Transport TTransport
Protocol TProtocol
}
func NewTDeserializer() *TDeserializer {
var transport TTransport
transport = NewTMemoryBufferLen(1024)
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
return &TDeserializer{
transport,
protocol}
}
func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) {
err = nil
if _, err = t.Transport.Write([]byte(s)); err != nil {
return
}
if err = msg.Read(t.Protocol); err != nil {
return
}
return
}
func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) {
err = nil
if _, err = t.Transport.Write(b); err != nil {
return
}
if err = msg.Read(t.Protocol); err != nil {
return
}
return
}

View File

@ -1,79 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Helper class that encapsulates field metadata.
type field struct {
name string
typeId TType
id int
}
func newField(n string, t TType, i int) *field {
return &field{name: n, typeId: t, id: i}
}
func (p *field) Name() string {
if p == nil {
return ""
}
return p.name
}
func (p *field) TypeId() TType {
if p == nil {
return TType(VOID)
}
return p.typeId
}
func (p *field) Id() int {
if p == nil {
return -1
}
return p.id
}
func (p *field) String() string {
if p == nil {
return "<nil>"
}
return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
}
var ANONYMOUS_FIELD *field
type fieldSlice []field
func (p fieldSlice) Len() int {
return len(p)
}
func (p fieldSlice) Less(i, j int) bool {
return p[i].Id() < p[j].Id()
}
func (p fieldSlice) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func init() {
ANONYMOUS_FIELD = newField("", STOP, 0)
}

View File

@ -1,167 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
)
const DEFAULT_MAX_LENGTH = 16384000
type TFramedTransport struct {
transport TTransport
buf bytes.Buffer
reader *bufio.Reader
frameSize uint32 //Current remaining size of the frame. if ==0 read next frame header
buffer [4]byte
maxLength uint32
}
type tFramedTransportFactory struct {
factory TTransportFactory
maxLength uint32
}
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
}
func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
}
func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport {
return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength)
}
func NewTFramedTransport(transport TTransport) *TFramedTransport {
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
}
func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
}
func (p *TFramedTransport) Open() error {
return p.transport.Open()
}
func (p *TFramedTransport) IsOpen() bool {
return p.transport.IsOpen()
}
func (p *TFramedTransport) Close() error {
return p.transport.Close()
}
func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
if p.frameSize == 0 {
p.frameSize, err = p.readFrameHeader()
if err != nil {
return
}
}
if p.frameSize < uint32(len(buf)) {
frameSize := p.frameSize
tmp := make([]byte, p.frameSize)
l, err = p.Read(tmp)
copy(buf, tmp)
if err == nil {
err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
return
}
}
got, err := p.reader.Read(buf)
p.frameSize = p.frameSize - uint32(got)
//sanity check
if p.frameSize < 0 {
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
}
return got, NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) ReadByte() (c byte, err error) {
if p.frameSize == 0 {
p.frameSize, err = p.readFrameHeader()
if err != nil {
return
}
}
if p.frameSize < 1 {
return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
}
c, err = p.reader.ReadByte()
if err == nil {
p.frameSize--
}
return
}
func (p *TFramedTransport) Write(buf []byte) (int, error) {
n, err := p.buf.Write(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) WriteByte(c byte) error {
return p.buf.WriteByte(c)
}
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
return p.buf.WriteString(s)
}
func (p *TFramedTransport) Flush() error {
size := p.buf.Len()
buf := p.buffer[:4]
binary.BigEndian.PutUint32(buf, uint32(size))
_, err := p.transport.Write(buf)
if err != nil {
return NewTTransportExceptionFromError(err)
}
if size > 0 {
if n, err := p.buf.WriteTo(p.transport); err != nil {
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
return NewTTransportExceptionFromError(err)
}
}
err = p.transport.Flush()
return NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) readFrameHeader() (uint32, error) {
buf := p.buffer[:4]
if _, err := io.ReadFull(p.reader, buf); err != nil {
return 0, err
}
size := binary.BigEndian.Uint32(buf)
if size < 0 || size > p.maxLength {
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
}
return size, nil
}
func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
return uint64(p.frameSize)
}
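A minimal sketch of the framed transport above, which length-prefixes everything written between Flush calls as one frame. The in-memory base transport and the import path are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	base := thrift.NewTMemoryBufferLen(1024)

	// Cap incoming frames at 1 MiB instead of the 16 MB default.
	framed := thrift.NewTFramedTransportMaxLength(base, 1024*1024)

	if _, err := framed.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// Flush prepends the 4-byte big-endian frame length, then writes the payload.
	if err := framed.Flush(); err != nil {
		panic(err)
	}

	// Reading parses the frame header first, then returns the payload bytes.
	buf := make([]byte, 5)
	n, err := framed.Read(buf)
	fmt.Println(n, string(buf[:n]), err)
}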

View File

@ -1,258 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
// Default to using the shared http client. Library users are
// free to change this global client or specify one through
// THttpClientOptions.
var DefaultHttpClient *http.Client = http.DefaultClient
type THttpClient struct {
client *http.Client
response *http.Response
url *url.URL
requestBuffer *bytes.Buffer
header http.Header
nsecConnectTimeout int64
nsecReadTimeout int64
}
type THttpClientTransportFactory struct {
options THttpClientOptions
url string
isPost bool
}
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) TTransport {
if trans != nil {
t, ok := trans.(*THttpClient)
if ok && t.url != nil {
if t.requestBuffer != nil {
t2, _ := NewTHttpPostClientWithOptions(t.url.String(), p.options)
return t2
}
t2, _ := NewTHttpClientWithOptions(t.url.String(), p.options)
return t2
}
}
if p.isPost {
s, _ := NewTHttpPostClientWithOptions(p.url, p.options)
return s
}
s, _ := NewTHttpClientWithOptions(p.url, p.options)
return s
}
type THttpClientOptions struct {
// If nil, DefaultHttpClient is used
Client *http.Client
}
func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
}
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
return &THttpClientTransportFactory{url: url, isPost: false, options: options}
}
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
return NewTHttpPostClientTransportFactoryWithOptions(url, THttpClientOptions{})
}
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
return &THttpClientTransportFactory{url: url, isPost: true, options: options}
}
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
parsedURL, err := url.Parse(urlstr)
if err != nil {
return nil, err
}
response, err := http.Get(urlstr)
if err != nil {
return nil, err
}
client := options.Client
if client == nil {
client = DefaultHttpClient
}
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
return &THttpClient{client: client, response: response, url: parsedURL, header: httpHeader}, nil
}
func NewTHttpClient(urlstr string) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
}
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
parsedURL, err := url.Parse(urlstr)
if err != nil {
return nil, err
}
buf := make([]byte, 0, 1024)
client := options.Client
if client == nil {
client = DefaultHttpClient
}
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
}
func NewTHttpPostClient(urlstr string) (TTransport, error) {
return NewTHttpPostClientWithOptions(urlstr, THttpClientOptions{})
}
// Set the HTTP Header for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(*THttpClient)
// httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
func (p *THttpClient) SetHeader(key string, value string) {
p.header.Add(key, value)
}
// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(*THttpClient)
// hdrValue := httpTrans.GetHeader("User-Agent")
func (p *THttpClient) GetHeader(key string) string {
return p.header.Get(key)
}
// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(*THttpClient)
// httpTrans.DelHeader("User-Agent")
func (p *THttpClient) DelHeader(key string) {
p.header.Del(key)
}
func (p *THttpClient) Open() error {
// do nothing
return nil
}
func (p *THttpClient) IsOpen() bool {
return p.response != nil || p.requestBuffer != nil
}
func (p *THttpClient) closeResponse() error {
var err error
if p.response != nil && p.response.Body != nil {
// The docs specify that if keepalive is enabled and the response body is not
// read to completion the connection will never be returned to the pool and
// reused. Errors are being ignored here because if the connection is invalid
// and this fails for some reason, the Close() method will do any remaining
// cleanup.
io.Copy(ioutil.Discard, p.response.Body)
err = p.response.Body.Close()
}
p.response = nil
return err
}
func (p *THttpClient) Close() error {
if p.requestBuffer != nil {
p.requestBuffer.Reset()
p.requestBuffer = nil
}
return p.closeResponse()
}
func (p *THttpClient) Read(buf []byte) (int, error) {
if p.response == nil {
return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
}
n, err := p.response.Body.Read(buf)
if n > 0 && (err == nil || err == io.EOF) {
return n, nil
}
return n, NewTTransportExceptionFromError(err)
}
func (p *THttpClient) ReadByte() (c byte, err error) {
return readByte(p.response.Body)
}
func (p *THttpClient) Write(buf []byte) (int, error) {
n, err := p.requestBuffer.Write(buf)
return n, err
}
func (p *THttpClient) WriteByte(c byte) error {
return p.requestBuffer.WriteByte(c)
}
func (p *THttpClient) WriteString(s string) (n int, err error) {
return p.requestBuffer.WriteString(s)
}
func (p *THttpClient) Flush() error {
// Close any previous response body to avoid leaking connections.
p.closeResponse()
req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
if err != nil {
return NewTTransportExceptionFromError(err)
}
req.Header = p.header
response, err := p.client.Do(req)
if err != nil {
return NewTTransportExceptionFromError(err)
}
if response.StatusCode != http.StatusOK {
// Close the response to avoid leaking file descriptors. closeResponse does
// more than just call Close(), so temporarily assign it and reuse the logic.
p.response = response
p.closeResponse()
// TODO(pomack) log bad response
return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
}
p.response = response
return nil
}
func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
len := p.response.ContentLength
if len >= 0 {
return uint64(len)
}
const maxSize = ^uint64(0)
return maxSize // the truth is, we just don't know unless framed is used
}
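A sketch of constructing the HTTP client transport above with a custom http.Client and a per-request header. The endpoint URL is a placeholder, nothing is sent until Flush is called, and the import path is an assumption.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	// Supply our own http.Client instead of the shared DefaultHttpClient.
	opts := thrift.THttpClientOptions{Client: &http.Client{Timeout: 10 * time.Second}}

	// Placeholder endpoint; no request is made until Flush().
	trans, err := thrift.NewTHttpPostClientWithOptions("http://localhost:9090/thrift", opts)
	if err != nil {
		panic(err)
	}

	// Per-request headers require asserting the concrete *THttpClient type.
	httpTrans := trans.(*thrift.THttpClient)
	httpTrans.SetHeader("User-Agent", "example-client/0.1")
	fmt.Println(httpTrans.GetHeader("User-Agent"))
}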

View File

@ -1,34 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "net/http"
// NewThriftHandlerFunc is a function that creates a ready-to-use Apache Thrift Handler function
func NewThriftHandlerFunc(processor TProcessor,
inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/x-thrift")
transport := NewStreamTransport(r.Body, w)
processor.Process(inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
}
}
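A sketch of mounting the handler returned by NewThriftHandlerFunc on a standard net/http mux. The processor is a nil placeholder standing in for a generated service processor, and the route, port, and import path are assumptions.

package main

import (
	"net/http"

	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	// A real program would use a processor from generated code,
	// e.g. calculator.NewCalculatorProcessor(handler); nil is only a placeholder here.
	var processor thrift.TProcessor

	factory := thrift.NewTJSONProtocolFactory()
	http.HandleFunc("/thrift", thrift.NewThriftHandlerFunc(processor, factory, factory))

	// Uncomment to actually serve requests:
	// http.ListenAndServe(":8080", nil)
}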

View File

@ -1,214 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"io"
)
// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
type StreamTransport struct {
io.Reader
io.Writer
isReadWriter bool
closed bool
}
type StreamTransportFactory struct {
Reader io.Reader
Writer io.Writer
isReadWriter bool
}
func (p *StreamTransportFactory) GetTransport(trans TTransport) TTransport {
if trans != nil {
t, ok := trans.(*StreamTransport)
if ok {
if t.isReadWriter {
return NewStreamTransportRW(t.Reader.(io.ReadWriter))
}
if t.Reader != nil && t.Writer != nil {
return NewStreamTransport(t.Reader, t.Writer)
}
if t.Reader != nil && t.Writer == nil {
return NewStreamTransportR(t.Reader)
}
if t.Reader == nil && t.Writer != nil {
return NewStreamTransportW(t.Writer)
}
return &StreamTransport{}
}
}
if p.isReadWriter {
return NewStreamTransportRW(p.Reader.(io.ReadWriter))
}
if p.Reader != nil && p.Writer != nil {
return NewStreamTransport(p.Reader, p.Writer)
}
if p.Reader != nil && p.Writer == nil {
return NewStreamTransportR(p.Reader)
}
if p.Reader == nil && p.Writer != nil {
return NewStreamTransportW(p.Writer)
}
return &StreamTransport{}
}
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
}
func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
}
func NewStreamTransportR(r io.Reader) *StreamTransport {
return &StreamTransport{Reader: bufio.NewReader(r)}
}
func NewStreamTransportW(w io.Writer) *StreamTransport {
return &StreamTransport{Writer: bufio.NewWriter(w)}
}
func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
}
func (p *StreamTransport) IsOpen() bool {
return !p.closed
}
// implicitly opened on creation, can't be reopened once closed
func (p *StreamTransport) Open() error {
if !p.closed {
return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
} else {
return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
}
}
// Closes both the input and output streams.
func (p *StreamTransport) Close() error {
if p.closed {
return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
}
p.closed = true
closedReader := false
if p.Reader != nil {
c, ok := p.Reader.(io.Closer)
if ok {
e := c.Close()
closedReader = true
if e != nil {
return e
}
}
p.Reader = nil
}
if p.Writer != nil && (!closedReader || !p.isReadWriter) {
c, ok := p.Writer.(io.Closer)
if ok {
e := c.Close()
if e != nil {
return e
}
}
p.Writer = nil
}
return nil
}
// Flushes the underlying output stream if not null.
func (p *StreamTransport) Flush() error {
if p.Writer == nil {
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
}
f, ok := p.Writer.(Flusher)
if ok {
err := f.Flush()
if err != nil {
return NewTTransportExceptionFromError(err)
}
}
return nil
}
func (p *StreamTransport) Read(c []byte) (n int, err error) {
n, err = p.Reader.Read(c)
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) ReadByte() (c byte, err error) {
f, ok := p.Reader.(io.ByteReader)
if ok {
c, err = f.ReadByte()
} else {
c, err = readByte(p.Reader)
}
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) Write(c []byte) (n int, err error) {
n, err = p.Writer.Write(c)
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) WriteByte(c byte) (err error) {
f, ok := p.Writer.(io.ByteWriter)
if ok {
err = f.WriteByte(c)
} else {
err = writeByte(p.Writer, c)
}
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) WriteString(s string) (n int, err error) {
f, ok := p.Writer.(stringWriter)
if ok {
n, err = f.WriteString(s)
} else {
n, err = p.Writer.Write([]byte(s))
}
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
return maxSize // the truth is, we just don't know unless framed is used
}
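A small sketch showing that any io.Reader/io.Writer pair can back the StreamTransport above; a bytes.Buffer keeps it self-contained, and the import path is an assumption.

package main

import (
	"bytes"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	// Any io.ReadWriter works; a bytes.Buffer keeps the example self-contained.
	var rw bytes.Buffer
	trans := thrift.NewStreamTransportRW(&rw)

	if _, err := trans.Write([]byte("hello")); err != nil {
		panic(err)
	}
	// Flush drains the internal bufio writer into the buffer.
	if err := trans.Flush(); err != nil {
		panic(err)
	}

	buf := make([]byte, 5)
	n, err := trans.Read(buf)
	fmt.Println(n, string(buf[:n]), err)
}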

View File

@ -1,583 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"encoding/base64"
"fmt"
)
const (
THRIFT_JSON_PROTOCOL_VERSION = 1
)
// for references to _ParseContext see tsimplejson_protocol.go
// JSON protocol implementation for thrift.
//
// This is the full-featured TJSONProtocol. For a simpler output format
// aimed at scripting languages, see TSimpleJSONProtocol.
//
type TJSONProtocol struct {
*TSimpleJSONProtocol
}
// Constructor
func NewTJSONProtocol(t TTransport) *TJSONProtocol {
v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
return v
}
// Factory
type TJSONProtocolFactory struct{}
func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return NewTJSONProtocol(trans)
}
func NewTJSONProtocolFactory() *TJSONProtocolFactory {
return &TJSONProtocolFactory{}
}
func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
p.resetContextStack() // THRIFT-3735
if e := p.OutputListBegin(); e != nil {
return e
}
if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil {
return e
}
if e := p.WriteString(name); e != nil {
return e
}
if e := p.WriteByte(int8(typeId)); e != nil {
return e
}
if e := p.WriteI32(seqId); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteMessageEnd() error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteStructBegin(name string) error {
if e := p.OutputObjectBegin(); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteStructEnd() error {
return p.OutputObjectEnd()
}
func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
if e := p.WriteI16(id); e != nil {
return e
}
if e := p.OutputObjectBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(typeId)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteFieldEnd() error {
return p.OutputObjectEnd()
}
func (p *TJSONProtocol) WriteFieldStop() error { return nil }
func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(keyType)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
s, e1 = p.TypeIdToString(valueType)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
if e := p.WriteI64(int64(size)); e != nil {
return e
}
return p.OutputObjectBegin()
}
func (p *TJSONProtocol) WriteMapEnd() error {
if e := p.OutputObjectEnd(); e != nil {
return e
}
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
func (p *TJSONProtocol) WriteListEnd() error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
func (p *TJSONProtocol) WriteSetEnd() error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteBool(b bool) error {
if b {
return p.WriteI32(1)
}
return p.WriteI32(0)
}
func (p *TJSONProtocol) WriteByte(b int8) error {
return p.WriteI32(int32(b))
}
func (p *TJSONProtocol) WriteI16(v int16) error {
return p.WriteI32(int32(v))
}
func (p *TJSONProtocol) WriteI32(v int32) error {
return p.OutputI64(int64(v))
}
func (p *TJSONProtocol) WriteI64(v int64) error {
return p.OutputI64(int64(v))
}
func (p *TJSONProtocol) WriteDouble(v float64) error {
return p.OutputF64(v)
}
func (p *TJSONProtocol) WriteString(v string) error {
return p.OutputString(v)
}
func (p *TJSONProtocol) WriteBinary(v []byte) error {
// The JSON library only takes in a string, not an arbitrary byte array.
// To transmit the bytes efficiently we must convert them into a valid JSON
// string, so we use base64 encoding to avoid excessive escaping/quoting.
if e := p.OutputPreValue(); e != nil {
return e
}
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
return NewTProtocolException(e)
}
writer := base64.NewEncoder(base64.StdEncoding, p.writer)
if _, e := writer.Write(v); e != nil {
p.writer.Reset(p.trans) // THRIFT-3735
return NewTProtocolException(e)
}
if e := writer.Close(); e != nil {
return NewTProtocolException(e)
}
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
return NewTProtocolException(e)
}
return p.OutputPostValue()
}
// Reading methods.
func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
p.resetContextStack() // THRIFT-3735
if isNull, err := p.ParseListBegin(); isNull || err != nil {
return name, typeId, seqId, err
}
version, err := p.ReadI32()
if err != nil {
return name, typeId, seqId, err
}
if version != THRIFT_JSON_PROTOCOL_VERSION {
e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
if name, err = p.ReadString(); err != nil {
return name, typeId, seqId, err
}
bTypeId, err := p.ReadByte()
typeId = TMessageType(bTypeId)
if err != nil {
return name, typeId, seqId, err
}
if seqId, err = p.ReadI32(); err != nil {
return name, typeId, seqId, err
}
return name, typeId, seqId, nil
}
func (p *TJSONProtocol) ReadMessageEnd() error {
err := p.ParseListEnd()
return err
}
func (p *TJSONProtocol) ReadStructBegin() (name string, err error) {
_, err = p.ParseObjectStart()
return "", err
}
func (p *TJSONProtocol) ReadStructEnd() error {
return p.ParseObjectEnd()
}
func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
b, _ := p.reader.Peek(1)
if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
return "", STOP, -1, nil
}
fieldId, err := p.ReadI16()
if err != nil {
return "", STOP, fieldId, err
}
if _, err = p.ParseObjectStart(); err != nil {
return "", STOP, fieldId, err
}
sType, err := p.ReadString()
if err != nil {
return "", STOP, fieldId, err
}
fType, err := p.StringToTypeId(sType)
return "", fType, fieldId, err
}
func (p *TJSONProtocol) ReadFieldEnd() error {
return p.ParseObjectEnd()
}
func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, VOID, 0, e
}
// read keyType
sKeyType, e := p.ReadString()
if e != nil {
return keyType, valueType, size, e
}
keyType, e = p.StringToTypeId(sKeyType)
if e != nil {
return keyType, valueType, size, e
}
// read valueType
sValueType, e := p.ReadString()
if e != nil {
return keyType, valueType, size, e
}
valueType, e = p.StringToTypeId(sValueType)
if e != nil {
return keyType, valueType, size, e
}
// read size
iSize, e := p.ReadI64()
if e != nil {
return keyType, valueType, size, e
}
size = int(iSize)
_, e = p.ParseObjectStart()
return keyType, valueType, size, e
}
func (p *TJSONProtocol) ReadMapEnd() error {
e := p.ParseObjectEnd()
if e != nil {
return e
}
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
func (p *TJSONProtocol) ReadListEnd() error {
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
func (p *TJSONProtocol) ReadSetEnd() error {
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadBool() (bool, error) {
value, err := p.ReadI32()
return (value != 0), err
}
func (p *TJSONProtocol) ReadByte() (int8, error) {
v, err := p.ReadI64()
return int8(v), err
}
func (p *TJSONProtocol) ReadI16() (int16, error) {
v, err := p.ReadI64()
return int16(v), err
}
func (p *TJSONProtocol) ReadI32() (int32, error) {
v, err := p.ReadI64()
return int32(v), err
}
func (p *TJSONProtocol) ReadI64() (int64, error) {
v, _, err := p.ParseI64()
return v, err
}
func (p *TJSONProtocol) ReadDouble() (float64, error) {
v, _, err := p.ParseF64()
return v, err
}
func (p *TJSONProtocol) ReadString() (string, error) {
var v string
if err := p.ParsePreValue(); err != nil {
return v, err
}
f, _ := p.reader.Peek(1)
if len(f) > 0 && f[0] == JSON_QUOTE {
p.reader.ReadByte()
value, err := p.ParseStringBody()
v = value
if err != nil {
return v, err
}
} else if len(f) > 0 && f[0] == JSON_NULL[0] {
b := make([]byte, len(JSON_NULL))
_, err := p.reader.Read(b)
if err != nil {
return v, NewTProtocolException(err)
}
if string(b) != string(JSON_NULL) {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
} else {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
return v, p.ParsePostValue()
}
func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
var v []byte
if err := p.ParsePreValue(); err != nil {
return nil, err
}
f, _ := p.reader.Peek(1)
if len(f) > 0 && f[0] == JSON_QUOTE {
p.reader.ReadByte()
value, err := p.ParseBase64EncodedBody()
v = value
if err != nil {
return v, err
}
} else if len(f) > 0 && f[0] == JSON_NULL[0] {
b := make([]byte, len(JSON_NULL))
_, err := p.reader.Read(b)
if err != nil {
return v, NewTProtocolException(err)
}
if string(b) != string(JSON_NULL) {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
} else {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
return v, p.ParsePostValue()
}
func (p *TJSONProtocol) Flush() (err error) {
err = p.writer.Flush()
if err == nil {
err = p.trans.Flush()
}
return NewTProtocolException(err)
}
func (p *TJSONProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
}
func (p *TJSONProtocol) Transport() TTransport {
return p.trans
}
func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(elemType)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
if e := p.WriteI64(int64(size)); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, 0, e
}
sElemType, err := p.ReadString()
if err != nil {
return VOID, size, err
}
elemType, err = p.StringToTypeId(sElemType)
if err != nil {
return elemType, size, err
}
nSize, err2 := p.ReadI64()
size = int(nSize)
return elemType, size, err2
}
func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, 0, e
}
sElemType, err := p.ReadString()
if err != nil {
return VOID, size, err
}
elemType, err = p.StringToTypeId(sElemType)
if err != nil {
return elemType, size, err
}
nSize, err2 := p.ReadI64()
size = int(nSize)
return elemType, size, err2
}
func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(elemType)
if e1 != nil {
return e1
}
if e := p.OutputString(s); e != nil {
return e
}
if e := p.OutputI64(int64(size)); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
switch byte(fieldType) {
case BOOL:
return "tf", nil
case BYTE:
return "i8", nil
case I16:
return "i16", nil
case I32:
return "i32", nil
case I64:
return "i64", nil
case DOUBLE:
return "dbl", nil
case STRING:
return "str", nil
case STRUCT:
return "rec", nil
case MAP:
return "map", nil
case SET:
return "set", nil
case LIST:
return "lst", nil
}
e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
}
func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
switch fieldType {
case "tf":
return TType(BOOL), nil
case "i8":
return TType(BYTE), nil
case "i16":
return TType(I16), nil
case "i32":
return TType(I32), nil
case "i64":
return TType(I64), nil
case "dbl":
return TType(DOUBLE), nil
case "str":
return TType(STRING), nil
case "rec":
return TType(STRUCT), nil
case "map":
return TType(MAP), nil
case "set":
return TType(SET), nil
case "lst":
return TType(LIST), nil
}
e := fmt.Errorf("Unknown type identifier: %s", fieldType)
return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
}
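A round-trip sketch of the TJSONProtocol above over an in-memory transport, writing a message envelope plus one i32 value and reading it back. The call name and values are arbitrary, and the import path is an assumption.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	trans := thrift.NewTMemoryBufferLen(1024)
	proto := thrift.NewTJSONProtocol(trans)

	// Encode a complete message envelope plus one i32 payload value,
	// producing JSON along the lines of [1,"add",1,1,42].
	if err := proto.WriteMessageBegin("add", thrift.CALL, 1); err != nil {
		panic(err)
	}
	if err := proto.WriteI32(42); err != nil {
		panic(err)
	}
	if err := proto.WriteMessageEnd(); err != nil {
		panic(err)
	}
	if err := proto.Flush(); err != nil {
		panic(err)
	}

	// Read the same message back from the buffer.
	name, typeID, seqID, err := proto.ReadMessageBegin()
	if err != nil {
		panic(err)
	}
	value, err := proto.ReadI32()
	if err != nil {
		panic(err)
	}
	if err := proto.ReadMessageEnd(); err != nil {
		panic(err)
	}
	fmt.Println(name, typeID, seqID, value) // add 1 1 42
}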

View File

@ -1,169 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"fmt"
"strings"
)
/*
TMultiplexedProtocol is a protocol-independent concrete decorator
that allows a Thrift client to communicate with a multiplexing Thrift server,
by prepending the service name to the function name during function calls.
NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle requests
from a multiplexing client.
This example uses a single socket transport to invoke two services:
socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
transport := thrift.NewTFramedTransport(socket)
protocol := thrift.NewTBinaryProtocolTransport(transport)
mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
service := Calculator.NewCalculatorClient(mp)
mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
service2 := WeatherReport.NewWeatherReportClient(mp2)
err := transport.Open()
if err != nil {
t.Fatal("Unable to open client socket", err)
}
fmt.Println(service.Add(2,2))
fmt.Println(service2.GetTemperature())
*/
type TMultiplexedProtocol struct {
TProtocol
serviceName string
}
const MULTIPLEXED_SEPARATOR = ":"
func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
return &TMultiplexedProtocol{
TProtocol: protocol,
serviceName: serviceName,
}
}
func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
if typeId == CALL || typeId == ONEWAY {
return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
} else {
return t.TProtocol.WriteMessageBegin(name, typeId, seqid)
}
}
/*
TMultiplexedProcessor is a TProcessor allowing
a single TServer to provide multiple services.
To do so, you instantiate the processor and then register additional
processors with it, as shown in the following example:
var processor = thrift.NewTMultiplexedProcessor()
processor.RegisterProcessor("FirstService", firstProcessor)
processor.RegisterProcessor(
"Calculator",
Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
)
processor.RegisterProcessor(
"WeatherReport",
WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
)
serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
if err != nil {
t.Fatal("Unable to create server socket", err)
}
server := thrift.NewTSimpleServer2(processor, serverTransport)
server.Serve();
*/
type TMultiplexedProcessor struct {
serviceProcessorMap map[string]TProcessor
DefaultProcessor TProcessor
}
func NewTMultiplexedProcessor() *TMultiplexedProcessor {
return &TMultiplexedProcessor{
serviceProcessorMap: make(map[string]TProcessor),
}
}
func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
t.DefaultProcessor = processor
}
func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
if t.serviceProcessorMap == nil {
t.serviceProcessorMap = make(map[string]TProcessor)
}
t.serviceProcessorMap[name] = processor
}
func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) {
name, typeId, seqid, err := in.ReadMessageBegin()
if err != nil {
return false, err
}
if typeId != CALL && typeId != ONEWAY {
return false, fmt.Errorf("Unexpected message type %v", typeId)
}
//extract the service name
v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
if len(v) != 2 {
if t.DefaultProcessor != nil {
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
return t.DefaultProcessor.Process(smb, out)
}
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
}
actualProcessor, ok := t.serviceProcessorMap[v[0]]
if !ok {
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
}
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
return actualProcessor.Process(smb, out)
}
// Protocol that uses the stored message for ReadMessageBegin
type storedMessageProtocol struct {
TProtocol
name string
typeId TMessageType
seqid int32
}
func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
return &storedMessageProtocol{protocol, name, typeId, seqid}
}
func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
return s.name, s.typeId, s.seqid, nil
}
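A compile-level sketch of the client-side TMultiplexedProtocol above. Without generated service clients it only shows how the service name gets prepended to the method name; the transport, service name, and import path are assumptions.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift" // assumed import path; adjust to the vendored location
)

func main() {
	trans := thrift.NewTMemoryBufferLen(1024)
	proto := thrift.NewTBinaryProtocolFactoryDefault().GetProtocol(trans)

	// Each service client gets its own decorator over the shared protocol.
	calc := thrift.NewTMultiplexedProtocol(proto, "Calculator")

	// CALL/ONEWAY messages now go out under the name "Calculator:add".
	if err := calc.WriteMessageBegin("add", thrift.CALL, 1); err != nil {
		panic(err)
	}
	if err := calc.WriteMessageEnd(); err != nil {
		panic(err)
	}
	if err := calc.Flush(); err != nil {
		panic(err)
	}
	fmt.Println("wrote multiplexed call envelope")
}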

View File

@ -1,50 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
///////////////////////////////////////////////////////////////////////////////
// This file is home to helpers that convert from various base types to
// respective pointer types. This is necessary because Go does not permit
// references to constants, nor can a pointer type to base type be allocated
// and initialized in a single expression.
//
// E.g., this is not allowed:
//
// var ip *int = &5
//
// But this *is* allowed:
//
// func IntPtr(i int) *int { return &i }
// var ip *int = IntPtr(5)
//
// Since pointers to base types are commonplace as [optional] fields in
// exported thrift structs, we factor such helpers here.
///////////////////////////////////////////////////////////////////////////////
func Float32Ptr(v float32) *float32 { return &v }
func Float64Ptr(v float64) *float64 { return &v }
func IntPtr(v int) *int { return &v }
func Int32Ptr(v int32) *int32 { return &v }
func Int64Ptr(v int64) *int64 { return &v }
func StringPtr(v string) *string { return &v }
func Uint32Ptr(v uint32) *uint32 { return &v }
func Uint64Ptr(v uint64) *uint64 { return &v }
func BoolPtr(v bool) *bool { return &v }
func ByteSlicePtr(v []byte) *[]byte { return &v }
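Since optional fields in generated structs are pointers, these helpers mostly show up when building request values by hand. A small sketch, where ExampleRequest stands in for a generated struct (both the struct and the import path are assumptions):

package example

import "github.com/apache/thrift/lib/go/thrift"

// ExampleRequest stands in for a generated struct whose optional fields are
// pointers to base types.
type ExampleRequest struct {
    Comment *string
    Limit   *int32
}

// newRequest shows the point of the helpers: &"literal" and &100 are not legal
// Go, but the Ptr helpers let an optional field be set in a single expression.
func newRequest() *ExampleRequest {
    return &ExampleRequest{
        Comment: thrift.StringPtr("generated nightly"),
        Limit:   thrift.Int32Ptr(100),
    }
}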

View File

@ -1,58 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// The default processor factory just returns a singleton
// instance.
type TProcessorFactory interface {
GetProcessor(trans TTransport) TProcessor
}
type tProcessorFactory struct {
processor TProcessor
}
func NewTProcessorFactory(p TProcessor) TProcessorFactory {
return &tProcessorFactory{processor: p}
}
func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
return p.processor
}
/**
* The default processor factory just returns a singleton
* instance.
*/
type TProcessorFunctionFactory interface {
GetProcessorFunction(trans TTransport) TProcessorFunction
}
type tProcessorFunctionFactory struct {
processor TProcessorFunction
}
func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
return &tProcessorFunctionFactory{processor: p}
}
func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
return p.processor
}
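The factory indirection matters when a server needs per-connection state rather than the shared singleton above. A sketch of a non-singleton factory, with newProcessor as a hypothetical constructor hook supplied by the caller:

package example

import "github.com/apache/thrift/lib/go/thrift"

// perConnProcessorFactory builds a fresh processor for every accepted
// transport instead of handing out one shared instance.
type perConnProcessorFactory struct {
    newProcessor func(trans thrift.TTransport) thrift.TProcessor
}

func (f *perConnProcessorFactory) GetProcessor(trans thrift.TTransport) thrift.TProcessor {
    return f.newProcessor(trans)
}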

View File

@ -1,35 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
type TServer interface {
ProcessorFactory() TProcessorFactory
ServerTransport() TServerTransport
InputTransportFactory() TTransportFactory
OutputTransportFactory() TTransportFactory
InputProtocolFactory() TProtocolFactory
OutputProtocolFactory() TProtocolFactory
// Starts the server
Serve() error
// Stops the server. This is optional on a per-implementation basis. Not
// all servers are required to be cleanly stoppable.
Stop() error
}

View File

@ -1,122 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"net"
"sync"
"time"
)
type TServerSocket struct {
listener net.Listener
addr net.Addr
clientTimeout time.Duration
// Protects the interrupted value to make it thread safe.
mu sync.RWMutex
interrupted bool
}
func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
return NewTServerSocketTimeout(listenAddr, 0)
}
func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
if err != nil {
return nil, err
}
return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
}
func (p *TServerSocket) Listen() error {
if p.IsListening() {
return nil
}
l, err := net.Listen(p.addr.Network(), p.addr.String())
if err != nil {
return err
}
p.listener = l
return nil
}
func (p *TServerSocket) Accept() (TTransport, error) {
p.mu.RLock()
interrupted := p.interrupted
p.mu.RUnlock()
if interrupted {
return nil, errTransportInterrupted
}
if p.listener == nil {
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
}
conn, err := p.listener.Accept()
if err != nil {
return nil, NewTTransportExceptionFromError(err)
}
return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
}
// Checks whether the socket is listening.
func (p *TServerSocket) IsListening() bool {
return p.listener != nil
}
// Open creates the listener; it fails if the server socket is already listening.
func (p *TServerSocket) Open() error {
if p.IsListening() {
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
}
if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil {
return err
} else {
p.listener = l
}
return nil
}
func (p *TServerSocket) Addr() net.Addr {
if p.listener != nil {
return p.listener.Addr()
}
return p.addr
}
func (p *TServerSocket) Close() error {
defer func() {
p.listener = nil
}()
if p.IsListening() {
return p.listener.Close()
}
return nil
}
func (p *TServerSocket) Interrupt() error {
p.mu.Lock()
p.interrupted = true
p.Close()
p.mu.Unlock()
return nil
}
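Interrupt both sets the mutex-guarded flag and closes the listener, so a blocked Accept returns instead of hanging. A sketch of driving TServerSocket directly with a shutdown channel (import path and the handle callback are assumptions):

package example

import (
    "time"

    "github.com/apache/thrift/lib/go/thrift"
)

// acceptUntilStopped accepts clients in a loop and unblocks Accept by calling
// Interrupt when the stop channel is closed.
func acceptUntilStopped(addr string, stop <-chan struct{}, handle func(thrift.TTransport)) error {
    sock, err := thrift.NewTServerSocketTimeout(addr, 30*time.Second)
    if err != nil {
        return err
    }
    if err := sock.Listen(); err != nil {
        return err
    }
    go func() {
        <-stop
        sock.Interrupt() // marks the socket interrupted and closes the listener
    }()
    for {
        client, err := sock.Accept()
        if err != nil {
            select {
            case <-stop:
                return nil // Interrupt closed the listener; treat as clean shutdown
            default:
                return err
            }
        }
        go handle(client)
    }
}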

View File

@ -1,34 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Server transport. Object which provides client transports.
type TServerTransport interface {
Listen() error
Accept() (TTransport, error)
Close() error
// Optional method implementation. This signals to the server transport
// that it should break out of any accept() or listen() that it is currently
// blocked on. This method, if implemented, MUST be thread safe, as it may
// be called from a different thread context than the other TServerTransport
// methods.
Interrupt() error
}

View File

@ -1,196 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"log"
"runtime/debug"
"sync"
)
// Simple server intended for testing; each accepted connection is processed in its own goroutine.
type TSimpleServer struct {
quit chan struct{}
processorFactory TProcessorFactory
serverTransport TServerTransport
inputTransportFactory TTransportFactory
outputTransportFactory TTransportFactory
inputProtocolFactory TProtocolFactory
outputProtocolFactory TProtocolFactory
}
func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
}
func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
serverTransport,
transportFactory,
protocolFactory,
)
}
func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
serverTransport,
inputTransportFactory,
outputTransportFactory,
inputProtocolFactory,
outputProtocolFactory,
)
}
func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
return NewTSimpleServerFactory6(processorFactory,
serverTransport,
NewTTransportFactory(),
NewTTransportFactory(),
NewTBinaryProtocolFactoryDefault(),
NewTBinaryProtocolFactoryDefault(),
)
}
func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
return NewTSimpleServerFactory6(processorFactory,
serverTransport,
transportFactory,
transportFactory,
protocolFactory,
protocolFactory,
)
}
func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
return &TSimpleServer{
processorFactory: processorFactory,
serverTransport: serverTransport,
inputTransportFactory: inputTransportFactory,
outputTransportFactory: outputTransportFactory,
inputProtocolFactory: inputProtocolFactory,
outputProtocolFactory: outputProtocolFactory,
quit: make(chan struct{}, 1),
}
}
func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
return p.processorFactory
}
func (p *TSimpleServer) ServerTransport() TServerTransport {
return p.serverTransport
}
func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
return p.inputTransportFactory
}
func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
return p.outputTransportFactory
}
func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
return p.inputProtocolFactory
}
func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
return p.outputProtocolFactory
}
func (p *TSimpleServer) Listen() error {
return p.serverTransport.Listen()
}
func (p *TSimpleServer) AcceptLoop() error {
for {
client, err := p.serverTransport.Accept()
if err != nil {
select {
case <-p.quit:
return nil
default:
}
return err
}
if client != nil {
go func() {
if err := p.processRequests(client); err != nil {
log.Println("error processing request:", err)
}
}()
}
}
}
func (p *TSimpleServer) Serve() error {
err := p.Listen()
if err != nil {
return err
}
p.AcceptLoop()
return nil
}
var once sync.Once
func (p *TSimpleServer) Stop() error {
q := func() {
p.quit <- struct{}{}
p.serverTransport.Interrupt()
}
once.Do(q)
return nil
}
func (p *TSimpleServer) processRequests(client TTransport) error {
processor := p.processorFactory.GetProcessor(client)
inputTransport := p.inputTransportFactory.GetTransport(client)
outputTransport := p.outputTransportFactory.GetTransport(client)
inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)
defer func() {
if e := recover(); e != nil {
log.Printf("panic in processor: %s: %s", e, debug.Stack())
}
}()
if inputTransport != nil {
defer inputTransport.Close()
}
if outputTransport != nil {
defer outputTransport.Close()
}
for {
ok, err := processor.Process(inputProtocol, outputProtocol)
if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {
return nil
} else if err != nil {
log.Printf("error processing request: %s", err)
return err
}
if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD {
continue
}
if !ok {
break
}
}
return nil
}
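The constructors above differ only in how many factories the caller supplies. A sketch of the common four-argument wiring, pairing a framed transport factory with a binary protocol factory (import path assumed):

package example

import "github.com/apache/thrift/lib/go/thrift"

// runSimpleServer wires a processor to a listening socket with framed
// transports and binary protocols for both directions.
func runSimpleServer(addr string, processor thrift.TProcessor) error {
    serverTransport, err := thrift.NewTServerSocket(addr)
    if err != nil {
        return err
    }
    transportFactory := thrift.NewTFramedTransportFactory(thrift.NewTTransportFactory())
    protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
    server := thrift.NewTSimpleServer4(processor, serverTransport, transportFactory, protocolFactory)
    return server.Serve() // blocks in AcceptLoop until Stop is called
}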

View File

@ -1,166 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"net"
"time"
)
type TSocket struct {
conn net.Conn
addr net.Addr
timeout time.Duration
}
// NewTSocket creates a net.Conn-backed TTransport, given a host and port
//
// Example:
// trans, err := thrift.NewTSocket("localhost:9090")
func NewTSocket(hostPort string) (*TSocket, error) {
return NewTSocketTimeout(hostPort, 0)
}
// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port;
// it also accepts a timeout as a time.Duration.
func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) {
//conn, err := net.DialTimeout(network, address, timeout)
addr, err := net.ResolveTCPAddr("tcp", hostPort)
if err != nil {
return nil, err
}
return NewTSocketFromAddrTimeout(addr, timeout), nil
}
// Creates a TSocket from a net.Addr
func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket {
return &TSocket{addr: addr, timeout: timeout}
}
// Creates a TSocket from an existing net.Conn
func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket {
return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout}
}
// Sets the socket timeout
func (p *TSocket) SetTimeout(timeout time.Duration) error {
p.timeout = timeout
return nil
}
func (p *TSocket) pushDeadline(read, write bool) {
var t time.Time
if p.timeout > 0 {
t = time.Now().Add(time.Duration(p.timeout))
}
if read && write {
p.conn.SetDeadline(t)
} else if read {
p.conn.SetReadDeadline(t)
} else if write {
p.conn.SetWriteDeadline(t)
}
}
// Connects the socket, creating a new socket object if necessary.
func (p *TSocket) Open() error {
if p.IsOpen() {
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
}
if p.addr == nil {
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
}
if len(p.addr.Network()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
}
if len(p.addr.String()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
}
var err error
if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
return nil
}
// Retrieve the underlying net.Conn
func (p *TSocket) Conn() net.Conn {
return p.conn
}
// Returns true if the connection is open
func (p *TSocket) IsOpen() bool {
if p.conn == nil {
return false
}
return true
}
// Closes the socket.
func (p *TSocket) Close() error {
// Close the socket
if p.conn != nil {
err := p.conn.Close()
if err != nil {
return err
}
p.conn = nil
}
return nil
}
//Returns the remote address of the socket.
func (p *TSocket) Addr() net.Addr {
return p.addr
}
func (p *TSocket) Read(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(true, false)
n, err := p.conn.Read(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TSocket) Write(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(false, true)
return p.conn.Write(buf)
}
func (p *TSocket) Flush() error {
return nil
}
func (p *TSocket) Interrupt() error {
if !p.IsOpen() {
return nil
}
return p.conn.Close()
}
func (p *TSocket) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
	return maxSize // the truth is, we just don't know unless framed is used
}
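On the client side the usual flow is resolve, Open, then hand the socket to a protocol; the configured timeout is re-applied as a deadline before every Read and Write via pushDeadline. A minimal sketch (import path assumed):

package example

import (
    "time"

    "github.com/apache/thrift/lib/go/thrift"
)

// openClientProtocol dials hostPort with a per-operation timeout and returns a
// binary protocol ready to be passed to a generated client constructor.
func openClientProtocol(hostPort string) (thrift.TProtocol, error) {
    sock, err := thrift.NewTSocketTimeout(hostPort, 10*time.Second)
    if err != nil {
        return nil, err
    }
    if err := sock.Open(); err != nil { // dials; deadlines are pushed on each I/O call
        return nil, err
    }
    return thrift.NewTBinaryProtocolTransport(sock), nil
}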

View File

@ -1,109 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"net"
"time"
"crypto/tls"
)
type TSSLServerSocket struct {
listener net.Listener
addr net.Addr
clientTimeout time.Duration
interrupted bool
cfg *tls.Config
}
func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
}
func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
if err != nil {
return nil, err
}
return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
}
func (p *TSSLServerSocket) Listen() error {
if p.IsListening() {
return nil
}
l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
if err != nil {
return err
}
p.listener = l
return nil
}
func (p *TSSLServerSocket) Accept() (TTransport, error) {
if p.interrupted {
return nil, errTransportInterrupted
}
if p.listener == nil {
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
}
conn, err := p.listener.Accept()
if err != nil {
return nil, NewTTransportExceptionFromError(err)
}
return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
}
// Checks whether the socket is listening.
func (p *TSSLServerSocket) IsListening() bool {
return p.listener != nil
}
// Open creates the listener; it fails if the server socket is already listening.
func (p *TSSLServerSocket) Open() error {
if p.IsListening() {
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
}
if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
return err
} else {
p.listener = l
}
return nil
}
func (p *TSSLServerSocket) Addr() net.Addr {
return p.addr
}
func (p *TSSLServerSocket) Close() error {
defer func() {
p.listener = nil
}()
if p.IsListening() {
return p.listener.Close()
}
return nil
}
func (p *TSSLServerSocket) Interrupt() error {
p.interrupted = true
return nil
}

View File

@ -1,171 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"crypto/tls"
"net"
"time"
)
type TSSLSocket struct {
conn net.Conn
// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
// only valid if addr is nil.
hostPort string
// addr is nil when hostPort is not "", and is only used when the
// TSSLSocket is constructed from a net.Addr.
addr net.Addr
timeout time.Duration
cfg *tls.Config
}
// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration
//
// Example:
// trans, err := thrift.NewTSSLSocket("localhost:9090", nil)
func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
return NewTSSLSocketTimeout(hostPort, cfg, 0)
}
// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port;
// it also accepts a tls.Config and a timeout as a time.Duration.
func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
}
// Creates a TSSLSocket from a net.Addr
func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg}
}
// Creates a TSSLSocket from an existing net.Conn
func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg}
}
// Sets the socket timeout
func (p *TSSLSocket) SetTimeout(timeout time.Duration) error {
p.timeout = timeout
return nil
}
func (p *TSSLSocket) pushDeadline(read, write bool) {
var t time.Time
if p.timeout > 0 {
t = time.Now().Add(time.Duration(p.timeout))
}
if read && write {
p.conn.SetDeadline(t)
} else if read {
p.conn.SetReadDeadline(t)
} else if write {
p.conn.SetWriteDeadline(t)
}
}
// Connects the socket, creating a new socket object if necessary.
func (p *TSSLSocket) Open() error {
var err error
// If we have a hostname, we need to pass the hostname to tls.Dial for
// certificate hostname checks.
if p.hostPort != "" {
if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
} else {
if p.IsOpen() {
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
}
if p.addr == nil {
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
}
if len(p.addr.Network()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
}
if len(p.addr.String()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
}
if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
}
return nil
}
// Retrieve the underlying net.Conn
func (p *TSSLSocket) Conn() net.Conn {
return p.conn
}
// Returns true if the connection is open
func (p *TSSLSocket) IsOpen() bool {
if p.conn == nil {
return false
}
return true
}
// Closes the socket.
func (p *TSSLSocket) Close() error {
// Close the socket
if p.conn != nil {
err := p.conn.Close()
if err != nil {
return err
}
p.conn = nil
}
return nil
}
func (p *TSSLSocket) Read(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(true, false)
n, err := p.conn.Read(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TSSLSocket) Write(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(false, true)
return p.conn.Write(buf)
}
func (p *TSSLSocket) Flush() error {
return nil
}
func (p *TSSLSocket) Interrupt() error {
if !p.IsOpen() {
return nil
}
return p.conn.Close()
}
func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
	return maxSize // the truth is, we just don't know unless framed is used
}
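The TLS variant behaves like TSocket, except that constructing it from a host:port (rather than a net.Addr) lets tls.Dial verify the certificate against the hostname. A short sketch (import path assumed; cfg is the caller's tls.Config):

package example

import (
    "crypto/tls"
    "time"

    "github.com/apache/thrift/lib/go/thrift"
)

// openTLSTransport dials hostPort over TLS with a per-operation timeout and
// returns the open transport.
func openTLSTransport(hostPort string, cfg *tls.Config) (thrift.TTransport, error) {
    sock, err := thrift.NewTSSLSocketTimeout(hostPort, cfg, 10*time.Second)
    if err != nil {
        return nil, err
    }
    if err := sock.Open(); err != nil { // tls.Dial with hostname verification
        return nil, err
    }
    return sock, nil
}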

View File

@ -1,117 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"compress/zlib"
"io"
"log"
)
// TZlibTransportFactory is a factory for TZlibTransport instances
type TZlibTransportFactory struct {
level int
}
// TZlibTransport is a TTransport implementation that makes use of zlib compression.
type TZlibTransport struct {
reader io.ReadCloser
transport TTransport
writer *zlib.Writer
}
// GetTransport constructs a new TZlibTransport wrapping the given transport
func (p *TZlibTransportFactory) GetTransport(trans TTransport) TTransport {
t, _ := NewTZlibTransport(trans, p.level)
return t
}
// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory
func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
return &TZlibTransportFactory{level: level}
}
// NewTZlibTransport constructs a new instance of TZlibTransport
func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
w, err := zlib.NewWriterLevel(trans, level)
if err != nil {
log.Println(err)
return nil, err
}
return &TZlibTransport{
writer: w,
transport: trans,
}, nil
}
// Close closes the reader and writer (flushing any unwritten data) and closes
// the underlying transport.
func (z *TZlibTransport) Close() error {
if z.reader != nil {
if err := z.reader.Close(); err != nil {
return err
}
}
if err := z.writer.Close(); err != nil {
return err
}
return z.transport.Close()
}
// Flush flushes the writer and its underlying transport.
func (z *TZlibTransport) Flush() error {
if err := z.writer.Flush(); err != nil {
return err
}
return z.transport.Flush()
}
// IsOpen returns true if the transport is open
func (z *TZlibTransport) IsOpen() bool {
return z.transport.IsOpen()
}
// Open opens the transport for communication
func (z *TZlibTransport) Open() error {
return z.transport.Open()
}
func (z *TZlibTransport) Read(p []byte) (int, error) {
if z.reader == nil {
r, err := zlib.NewReader(z.transport)
if err != nil {
return 0, NewTTransportExceptionFromError(err)
}
z.reader = r
}
return z.reader.Read(p)
}
// RemainingBytes returns the size in bytes of the data that is still to be
// read.
func (z *TZlibTransport) RemainingBytes() uint64 {
return z.transport.RemainingBytes()
}
func (z *TZlibTransport) Write(p []byte) (int, error) {
return z.writer.Write(p)
}
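The zlib layer can wrap any other transport; the decompressing reader is only created on the first Read, and writers must Flush so both the zlib stream and the underlying transport emit their buffered bytes. A tiny sketch (import path assumed):

package example

import (
    "compress/zlib"

    "github.com/apache/thrift/lib/go/thrift"
)

// wrapWithZlib layers zlib compression over an existing transport at the
// highest compression level; callers must still Flush after writing.
func wrapWithZlib(base thrift.TTransport) (thrift.TTransport, error) {
    return thrift.NewTZlibTransport(base, zlib.BestCompression)
}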

View File

@ -1 +0,0 @@
server.sh

View File

@ -15,6 +15,12 @@ type Config struct {
Endpoint string
SigningRegion string
SigningName string
// States that the signing name did not come from a modeled source but
// was derived based on other data. Used by service client constructors
// to determine if the signing name can be overridden based on metadata the
// service has.
SigningNameDerived bool
}
// ConfigProvider provides a generic way for a service client to receive

View File

@ -1,12 +1,11 @@
package client
import (
"math/rand"
"strconv"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
)
// DefaultRetryer implements basic retry logic using exponential backoff for
@ -31,8 +30,6 @@ func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
@ -53,7 +50,7 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
retryCount = 13
}
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
}
@ -65,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
return *r.Retryable
}
if r.HTTPResponse.StatusCode >= 500 {
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
return true
}
return r.IsErrorRetryable() || d.shouldThrottle(r)
@ -117,22 +114,3 @@ func canUseRetryAfterHeader(r *request.Request) bool {
return true
}
// lockedSource is a thread-safe implementation of rand.Source
type lockedSource struct {
lk sync.Mutex
src rand.Source
}
func (r *lockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
func (r *lockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}
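The retryer change keeps the exponential-backoff-with-jitter shape and only swaps the private locked rand source for the shared one in internal/sdkrand (plus the explicit 501 exclusion). A standalone sketch of the delay formula, with minTime treated as an assumed base in milliseconds:

package example

import (
    "math/rand"
    "time"
)

// retryDelay mirrors the shape of DefaultRetryer.RetryRules: the delay grows
// exponentially with the retry count and is jittered by a random value in
// [minTime, 2*minTime) milliseconds before the shift. minTime must be > 0.
func retryDelay(retryCount, minTime int, rng *rand.Rand) time.Duration {
    if retryCount > 13 { // cap the shift so the delay stays bounded (~five minutes)
        retryCount = 13
    }
    delay := (1 << uint(retryCount)) * (rng.Intn(minTime) + minTime)
    return time.Duration(delay) * time.Millisecond
}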

View File

@ -46,6 +46,7 @@ func (reader *teeReaderCloser) Close() error {
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
bodySeekable := aws.IsReaderSeekable(r.Body)
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
@ -53,6 +54,9 @@ func logRequest(r *request.Request) {
}
if logBody {
if !bodySeekable {
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
}
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.

View File

@ -151,6 +151,15 @@ type Config struct {
// with accelerate.
S3UseAccelerate *bool
// S3DisableContentMD5Validation config option is temporarily disabled,
// For S3 GetObject API calls, #1837.
//
// Set this to `true` to disable the S3 service client from automatically
// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
// will also disable the SDK from performing object ContentMD5 validation
// on GetObject API calls.
S3DisableContentMD5Validation *bool
// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This options is only
@ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config {
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
c.S3UseAccelerate = &enable
return c
}
// WithS3DisableContentMD5Validation sets a config
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
c.S3DisableContentMD5Validation = &enable
return c
}
// WithUseDualStack sets a config UseDualStack value returning a Config
@ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) {
dst.S3UseAccelerate = other.S3UseAccelerate
}
if other.S3DisableContentMD5Validation != nil {
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
}
if other.UseDualStack != nil {
dst.UseDualStack = other.UseDualStack
}
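The new S3DisableContentMD5Validation flag merges like any other config field, so it can be set globally on the session or per client. A short sketch of the per-client form, assuming the usual session and service constructors:

package example

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

// newS3WithoutMD5Validation turns the new flag on for one S3 client while
// leaving the session defaults untouched.
func newS3WithoutMD5Validation(sess *session.Session) *s3.S3 {
    return s3.New(sess, aws.NewConfig().WithS3DisableContentMD5Validation(true))
}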

View File

@ -3,12 +3,10 @@ package corehandlers
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"time"
@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ = strconv.ParseInt(slength, 10, 64)
} else {
switch body := r.Body.(type) {
case nil:
length = 0
case lener:
length = int64(body.Len())
case io.Seeker:
r.BodyStart, _ = body.Seek(0, 1)
end, _ := body.Seek(0, 2)
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
length = end - r.BodyStart
default:
panic("Cannot get length of body, must provide `ContentLength`")
if r.Body != nil {
var err error
length, err = aws.SeekerLen(r.Body)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
return
}
}
}
@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
}
}}
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
Name: "core.SDKVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
runtime.Version(), runtime.GOOS, runtime.GOARCH),
}
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// ValidateReqSigHandler is a request handler to ensure that the request's

Some files were not shown because too many files have changed in this diff.