mirror of
https://github.com/grafana/grafana.git
synced 2025-02-25 18:55:37 -06:00
Tracing: Support configuring Jaeger client from environment (#21103)
* Tracing: Support configuring Jaeger client from environment * Tracing: Replace deprecated Jaeger Client call
This commit is contained in:
parent
7aeba652c9
commit
66b70b50b8
17
go.mod
17
go.mod
@ -4,9 +4,7 @@ go 1.13
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/DataDog/zstd v1.4.4 // indirect
|
||||
github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f
|
||||
github.com/apache/thrift v0.13.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.25.48
|
||||
github.com/beevik/etree v1.1.0 // indirect
|
||||
github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3
|
||||
@ -61,21 +59,20 @@ require (
|
||||
github.com/stretchr/testify v1.4.0
|
||||
github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf
|
||||
github.com/ua-parser/uap-go v0.0.0-20190826212731-daf92ba38329
|
||||
github.com/uber-go/atomic v1.3.2 // indirect
|
||||
github.com/uber/jaeger-client-go v2.16.0+incompatible
|
||||
github.com/uber/jaeger-lib v2.0.0+incompatible // indirect
|
||||
github.com/uber/jaeger-client-go v2.20.1+incompatible
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
|
||||
github.com/unknwon/com v1.0.1
|
||||
github.com/xitongsys/parquet-go v1.4.0 // indirect
|
||||
github.com/xitongsys/parquet-go-source v0.0.0-20191104003508-ecfa341356a6 // indirect
|
||||
github.com/yudai/gojsondiff v1.0.0
|
||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
|
||||
github.com/yudai/pp v2.0.1+incompatible // indirect
|
||||
go.uber.org/atomic v1.3.2 // indirect
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392
|
||||
go.uber.org/atomic v1.5.1 // indirect
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478
|
||||
golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
|
||||
golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
|
||||
gopkg.in/ini.v1 v1.46.0
|
||||
|
36
go.sum
36
go.sum
@ -3,16 +3,12 @@ cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/zstd v1.4.4 h1:+IawcoXhCBylN7ccwdwf8LOH2jKq7NavGpEPanrlTzE=
|
||||
github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f h1:HR5nRmUQgXrwqZOwZ2DAc/aCi3Bu3xENpspW935vxu0=
|
||||
github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f/go.mod h1:f3HiCrHjHBdcm6E83vGaXh1KomZMA2P6aeo3hKx/wg0=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20190716210558-5f564424c71c h1:iHUHzx3S1TU5xt+D7vLb0PAk3e+RfayF9IhR6+hyO/k=
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20190716210558-5f564424c71c/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
|
||||
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk=
|
||||
github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beevik/etree v1.0.1/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
|
||||
@ -280,20 +276,14 @@ github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf h1:Z2X3Os7oRzpdJ7
|
||||
github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0=
|
||||
github.com/ua-parser/uap-go v0.0.0-20190826212731-daf92ba38329 h1:VBsKFh4W1JEMz3eLCmM9zOJKZdDkP5W4b3Y4hc7SbZc=
|
||||
github.com/ua-parser/uap-go v0.0.0-20190826212731-daf92ba38329/go.mod h1:OBcG9bn7sHtXgarhUEb3OfCnNsgtGnkVf41ilSZ3K3E=
|
||||
github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo=
|
||||
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
|
||||
github.com/uber/jaeger-client-go v2.16.0+incompatible h1:Q2Pp6v3QYiocMxomCaJuwQGFt7E53bPYqEgug/AoBtY=
|
||||
github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw=
|
||||
github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/uber/jaeger-client-go v2.20.1+incompatible h1:HgqpYBng0n7tLJIlyT4kPCIv5XgCsF+kai1NnnrJzEU=
|
||||
github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e h1:GSGeB9EAKY2spCABz6xOX5DbxZEXolK+nBSvmsQwRjM=
|
||||
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
|
||||
github.com/unknwon/com v1.0.1 h1:3d1LTxD+Lnf3soQiD4Cp/0BRB+Rsa/+RTvz8GMMzIXs=
|
||||
github.com/unknwon/com v1.0.1/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
|
||||
github.com/xitongsys/parquet-go v1.4.0 h1:+3+QFRRwAilhTdNcJU2hPxslLCAKJ+Tn8C2OhnCVWDo=
|
||||
github.com/xitongsys/parquet-go v1.4.0/go.mod h1:on8bl2K/PEouGNEJqxht0t3K4IyN/ABeFu84Hh3lzrE=
|
||||
github.com/xitongsys/parquet-go-source v0.0.0-20191104003508-ecfa341356a6 h1:KPDKkdchSII+K5KS7iMpE062MVh2OucaM31599ER4U0=
|
||||
github.com/xitongsys/parquet-go-source v0.0.0-20191104003508-ecfa341356a6/go.mod h1:xxCx7Wpym/3QCo6JhujJX51dzSXrwmb0oH6FQb39SEA=
|
||||
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
|
||||
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
|
||||
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
|
||||
@ -303,8 +293,8 @@ github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZ
|
||||
github.com/zenazn/goji v0.9.1-0.20160507202103-64eb34159fe5/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM=
|
||||
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
@ -313,8 +303,15 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49N
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/image v0.0.0-20190507092727-e4e5bf290fec/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -366,10 +363,17 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
|
||||
golang.org/x/tools v0.0.0-20190802220118-1d1727260058/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190805222050-c5a2fd39b72a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191101200257-8dbcdeb83d3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191104213129-fda23558a172 h1:P8RiGjkia45TmO9knol8QoeArkhXZKSxjaUDKngEVtw=
|
||||
golang.org/x/tools v0.0.0-20191104213129-fda23558a172/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935 h1:kJQZhwFzSwJS2BxboKjdZzWczQOZx8VuH7Y8hhuGUtM=
|
||||
golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -62,7 +62,7 @@ func (ts *TracingService) parseSettings() {
|
||||
ts.disableSharedZipkinSpans = section.Key("disable_shared_zipkin_spans").MustBool(false)
|
||||
}
|
||||
|
||||
func (ts *TracingService) initGlobalTracer() error {
|
||||
func (ts *TracingService) initJaegerCfg() (jaegercfg.Configuration, error) {
|
||||
cfg := jaegercfg.Configuration{
|
||||
ServiceName: "grafana",
|
||||
Disabled: !ts.enabled,
|
||||
@ -76,6 +76,19 @@ func (ts *TracingService) initGlobalTracer() error {
|
||||
},
|
||||
}
|
||||
|
||||
_, err := cfg.FromEnv()
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func (ts *TracingService) initGlobalTracer() error {
|
||||
cfg, err := ts.initJaegerCfg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jLogger := &jaegerLogWrapper{logger: log.New("jaeger")}
|
||||
|
||||
options := []jaegercfg.Option{}
|
||||
@ -102,7 +115,7 @@ func (ts *TracingService) initGlobalTracer() error {
|
||||
return err
|
||||
}
|
||||
|
||||
opentracing.InitGlobalTracer(tracer)
|
||||
opentracing.SetGlobalTracer(tracer)
|
||||
|
||||
ts.closer = closer
|
||||
|
||||
|
@ -1,6 +1,11 @@
|
||||
package tracing
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGroupSplit(t *testing.T) {
|
||||
tests := []struct {
|
||||
@ -28,9 +33,62 @@ func TestGroupSplit(t *testing.T) {
|
||||
tags := splitTagSettings(test.input)
|
||||
for k, v := range test.expected {
|
||||
value, exists := tags[k]
|
||||
if !exists || value != v {
|
||||
t.Errorf("tags does not match %v ", test)
|
||||
}
|
||||
assert.Truef(t, exists, "Tag %q not found for input %q", k, test.input)
|
||||
assert.Equalf(t, v, value, "Tag %q has wrong value for input %q", k, test.input)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitJaegerCfg_Default(t *testing.T) {
|
||||
ts := &TracingService{}
|
||||
cfg, err := ts.initJaegerCfg()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, cfg.Disabled)
|
||||
}
|
||||
|
||||
func TestInitJaegerCfg_Enabled(t *testing.T) {
|
||||
ts := &TracingService{enabled: true}
|
||||
cfg, err := ts.initJaegerCfg()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.False(t, cfg.Disabled)
|
||||
assert.Equal(t, "localhost:6831", cfg.Reporter.LocalAgentHostPort)
|
||||
}
|
||||
|
||||
func TestInitJaegerCfg_DisabledViaEnv(t *testing.T) {
|
||||
os.Setenv("JAEGER_DISABLED", "true")
|
||||
defer func() {
|
||||
os.Unsetenv("JAEGER_DISABLED")
|
||||
}()
|
||||
|
||||
ts := &TracingService{enabled: true}
|
||||
cfg, err := ts.initJaegerCfg()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.True(t, cfg.Disabled)
|
||||
}
|
||||
|
||||
func TestInitJaegerCfg_EnabledViaEnv(t *testing.T) {
|
||||
os.Setenv("JAEGER_DISABLED", "false")
|
||||
defer func() {
|
||||
os.Unsetenv("JAEGER_DISABLED")
|
||||
}()
|
||||
|
||||
ts := &TracingService{enabled: false}
|
||||
cfg, err := ts.initJaegerCfg()
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.False(t, cfg.Disabled)
|
||||
}
|
||||
|
||||
func TestInitJaegerCfg_InvalidEnvVar(t *testing.T) {
|
||||
os.Setenv("JAEGER_DISABLED", "totallybogus")
|
||||
defer func() {
|
||||
os.Unsetenv("JAEGER_DISABLED")
|
||||
}()
|
||||
|
||||
ts := &TracingService{}
|
||||
_, err := ts.initJaegerCfg()
|
||||
require.EqualError(t, err, "cannot parse env var JAEGER_DISABLED=totallybogus: strconv.ParseBool: parsing \"totallybogus\": invalid syntax")
|
||||
}
|
||||
|
80
vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
generated
vendored
80
vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
generated
vendored
@ -1,6 +1,86 @@
|
||||
Changes by Version
|
||||
==================
|
||||
|
||||
2.20.1 (2019-11-08)
|
||||
-------------------
|
||||
|
||||
Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468
|
||||
|
||||
- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467
|
||||
- Create `OperationNameLateBinding` sampler option and config option
|
||||
- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc
|
||||
|
||||
|
||||
2.20.0 (2019-11-06)
|
||||
-------------------
|
||||
|
||||
## New Features
|
||||
|
||||
- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
|
||||
|
||||
Sampling state is shared between all spans of the trace that are still in memory.
|
||||
This allows implementation of delayed sampling decisions (see below).
|
||||
|
||||
- Support delayed sampling decisions (#449) -- Yuri Shkuro
|
||||
|
||||
This is a large structural change to how the samplers work.
|
||||
It allows some samplers to be executed multiple times on different
|
||||
span events (like setting a tag) and make a positive sampling decision
|
||||
later in the span life cycle, or even based on children spans.
|
||||
See [README](./README.md#delayed-sampling) for more details.
|
||||
|
||||
There is a related minor change in behavior of the adaptive (per-operation) sampler,
|
||||
which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
|
||||
operation used to make the sampling decision is always the one provided at span creation.
|
||||
|
||||
- Add experimental tag matching sampler (#452) -- Yuri Shkuro
|
||||
|
||||
A sampler that can sample a trace based on a certain tag added to the root
|
||||
span or one of its local (in-process) children. The sampler can be used with
|
||||
another experimental `PrioritySampler` that allows multiple samplers to try
|
||||
to make a sampling decision, in a certain priority order.
|
||||
|
||||
- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
|
||||
- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
|
||||
|
||||
## Minor patches
|
||||
|
||||
- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
|
||||
- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
|
||||
- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
|
||||
- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
|
||||
|
||||
2.19.0 (2019-09-23)
|
||||
-------------------
|
||||
|
||||
- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro
|
||||
|
||||
|
||||
2.18.1 (2019-09-16)
|
||||
-------------------
|
||||
|
||||
- Remove go.mod / go.sum that interfere with `go get` (#432)
|
||||
|
||||
|
||||
2.18.0 (2019-09-09)
|
||||
-------------------
|
||||
|
||||
- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) <Jun Guo>
|
||||
|
||||
|
||||
2.17.0 (2019-08-30)
|
||||
-------------------
|
||||
|
||||
- Add a flag for firehose mode (#419) <Prithvi Raj>
|
||||
- Default sampling server URL to agent (#414) <Bryan Boreham>
|
||||
- Update default sampling rate when sampling strategy is refreshed (#413) <Bryan Boreham>
|
||||
- Support "Self" Span Reference (#411) <dm03514>
|
||||
- Don't complain about blank service name if tracing is Disabled (#410) Yuri <Shkuro>
|
||||
- Use IP address from tag if exist (#402) <NikoKVCS>
|
||||
- Expose span data to custom reporters [fixes #394] (#399) <Curtis Allen>
|
||||
- Fix the span allocation in the pool (#381) <Dmitry Ponomarev>
|
||||
|
||||
|
||||
2.16.0 (2019-03-24)
|
||||
-------------------
|
||||
|
||||
|
10
vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
generated
vendored
10
vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
generated
vendored
@ -19,7 +19,7 @@ file for details.
|
||||
|
||||
## Getting Started
|
||||
|
||||
This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies.
|
||||
This library uses [dep](https://golang.github.io/dep/) to manage dependencies.
|
||||
|
||||
To get started, make sure you clone the Git repository into the correct location
|
||||
`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
|
||||
@ -29,13 +29,13 @@ mkdir -p $GOPATH/src/github.com/uber
|
||||
cd $GOPATH/src/github.com/uber
|
||||
git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
|
||||
cd jaeger-client-go
|
||||
git submodule update --init --recursive
|
||||
```
|
||||
|
||||
Then install dependencies and run the tests:
|
||||
|
||||
```
|
||||
git submodule update --init --recursive
|
||||
glide install
|
||||
make install
|
||||
make test
|
||||
```
|
||||
|
||||
@ -45,13 +45,13 @@ This projects follows the following pattern for grouping imports in Go files:
|
||||
* imports from standard library
|
||||
* imports from other projects
|
||||
* imports from `jaeger-client-go` project
|
||||
|
||||
|
||||
For example:
|
||||
|
||||
```go
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
|
||||
"github.com/uber/jaeger-lib/metrics"
|
||||
"go.uber.org/zap"
|
||||
|
||||
|
182
vendor/github.com/uber/jaeger-client-go/Gopkg.lock
generated
vendored
182
vendor/github.com/uber/jaeger-client-go/Gopkg.lock
generated
vendored
@ -2,12 +2,20 @@
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9f3b30d9f8e0d7040f729b82dcbc8f0dead820a133b3147ce355fc451f32d761"
|
||||
name = "github.com/BurntSushi/toml"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
|
||||
version = "v0.3.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
pruneopts = "UT"
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@ -38,12 +46,12 @@
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:318f1c959a8a740366fce4b1e1eb2fd914036b4af58fbd0a003349b305f118ad"
|
||||
digest = "1:573ca21d3669500ff845bdebee890eb7fc7f0f50c59f2132f2a0c6b03d85086a"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
pruneopts = "UT"
|
||||
revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
|
||||
version = "v1.3.1"
|
||||
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
|
||||
@ -83,12 +91,15 @@
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b6221ec0f8903b556e127c449e7106b63e6867170c2d10a7c058623d086f2081"
|
||||
digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = ["prometheus"]
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/internal",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@ -96,10 +107,10 @@
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
pruneopts = "UT"
|
||||
revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
|
||||
revision = "14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:35cf6bdf68db765988baa9c4f10cc5d7dda1126a54bd62e252dbcd0b1fc8da90"
|
||||
digest = "1:f119e3205d3a1f0f19dbd7038eb37528e2c6f0933269dc344e305951fb87d632"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
@ -107,25 +118,23 @@
|
||||
"model",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
|
||||
version = "v0.2.0"
|
||||
revision = "287d3e634a1e550c9e463dd7e5a75a422c614505"
|
||||
version = "v0.7.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:c31163bd62461e0c5f7ddc7363e39ef8d9e929693e77b5c11c709b05f9cb9219"
|
||||
digest = "1:a210815b437763623ecca8eb91e6a0bf4f2d6773c5a6c9aec0e28f19e5fd6deb"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/fs",
|
||||
"internal/util",
|
||||
"iostats",
|
||||
"nfs",
|
||||
"xfs",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "55ae3d9d557340b5bc24cd8aa5f6fa2c2ab31352"
|
||||
revision = "499c85531f756d1129edd26485a5f73871eeb308"
|
||||
version = "v0.0.5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8ff03ccc603abb0d7cce94d34b613f5f6251a9e1931eba1a3f9888a9029b055c"
|
||||
digest = "1:0496f0e99014b7fd0a560c539f51d0882731137b85494142f47e550e4657176a"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
@ -133,19 +142,11 @@
|
||||
"suite",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
|
||||
version = "v1.3.0"
|
||||
revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
|
||||
version = "v1.4.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d"
|
||||
name = "github.com/uber-go/atomic"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f5c5ad1e08141e18aee1b9c37729d93d06805840421ccfc9d407787ffe969ce6"
|
||||
digest = "1:0ec60ffd594af00ba1660bc746aa0e443d27dd4003dee55f9d08a0b4ff5431a3"
|
||||
name = "github.com/uber/jaeger-lib"
|
||||
packages = [
|
||||
"metrics",
|
||||
@ -153,27 +154,35 @@
|
||||
"metrics/prometheus",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "0e30338a695636fe5bcf7301e8030ce8dd2a8530"
|
||||
version = "v2.0.0"
|
||||
revision = "a87ae9d84fb038a8d79266298970720be7c80fcd"
|
||||
version = "v2.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d"
|
||||
digest = "1:0bdcb0c740d79d400bd3f7946ac22a715c94db62b20bfd2e01cd50693aba0600"
|
||||
name = "go.uber.org/atomic"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
|
||||
version = "v1.3.2"
|
||||
revision = "9dc4df04d0d1c39369750a9f6c32c39560672089"
|
||||
version = "v1.5.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a"
|
||||
digest = "1:002ebc50f3ef475ac325e1904be931d9dcba6dc6d73b5682afce0c63436e3902"
|
||||
name = "go.uber.org/multierr"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
|
||||
version = "v1.1.0"
|
||||
revision = "c3fc3d02ec864719d8e25be2d7dde1e35a36aa27"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c52caf7bd44f92e54627a31b85baf06a68333a196b3d8d241480a774733dcf8b"
|
||||
branch = "master"
|
||||
digest = "1:3032e90a153750ea149f68bf081f97ca738f041fba45c41c80737f572ffdf2f4"
|
||||
name = "go.uber.org/tools"
|
||||
packages = ["update-license"]
|
||||
pruneopts = "UT"
|
||||
revision = "2cfd321de3ee5d5f8a5fda2521d1703478334d98"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6be13632ab4bd5842a097abb3aabac045a8601e19a10da4239e7d8bd83d4b83c"
|
||||
name = "go.uber.org/zap"
|
||||
packages = [
|
||||
".",
|
||||
@ -184,8 +193,19 @@
|
||||
"zapcore",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
|
||||
version = "v1.9.1"
|
||||
revision = "a6015e13fab9b744d96085308ce4e8f11bad1996"
|
||||
version = "v1.12.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:21d7bad9b7da270fd2d50aba8971a041bd691165c95096a2a4c68db823cbc86a"
|
||||
name = "golang.org/x/lint"
|
||||
packages = [
|
||||
".",
|
||||
"golint",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "16217165b5de779cb6a5e4fc81fa9c1166fda457"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@ -196,7 +216,81 @@
|
||||
"context/ctxhttp",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "addf6b3196f61cd44ce5a76657913698c73479d0"
|
||||
revision = "0deb6923b6d97481cb43bc1043fe5b72a0143032"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:5dfb17d45415b7b8927382f53955a66f55f9d9d11557aa82f7f481d642ab247a"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["windows"]
|
||||
pruneopts = "UT"
|
||||
revision = "f43be2a4598cf3a47be9f94f0c28197ed9eae611"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:bae8b3bf837d9d7f601776f37f44e031d46943677beff8fb2eb9c7317d44de2f"
|
||||
name = "golang.org/x/tools"
|
||||
packages = [
|
||||
"go/analysis",
|
||||
"go/analysis/passes/inspect",
|
||||
"go/ast/astutil",
|
||||
"go/ast/inspector",
|
||||
"go/buildutil",
|
||||
"go/gcexportdata",
|
||||
"go/internal/gcimporter",
|
||||
"go/internal/packagesdriver",
|
||||
"go/packages",
|
||||
"go/types/objectpath",
|
||||
"go/types/typeutil",
|
||||
"internal/fastwalk",
|
||||
"internal/gopathwalk",
|
||||
"internal/semver",
|
||||
"internal/span",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "8dbcdeb83d3faec5315146800b375c4962a42fc6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:59f10c1537d2199d9115d946927fe31165959a95190849c82ff11e05803528b0"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "f221b8435cfb71e54062f6c6e99e9ade30b124d5"
|
||||
version = "v2.2.4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:131158a88aad1f94854d0aa21a64af2802d0a470fb0f01cb33c04fafd2047111"
|
||||
name = "honnef.co/go/tools"
|
||||
packages = [
|
||||
"arg",
|
||||
"cmd/staticcheck",
|
||||
"config",
|
||||
"deprecated",
|
||||
"facts",
|
||||
"functions",
|
||||
"go/types/typeutil",
|
||||
"internal/cache",
|
||||
"internal/passes/buildssa",
|
||||
"internal/renameio",
|
||||
"internal/sharedcheck",
|
||||
"lint",
|
||||
"lint/lintdsl",
|
||||
"lint/lintutil",
|
||||
"lint/lintutil/format",
|
||||
"loader",
|
||||
"printf",
|
||||
"simple",
|
||||
"ssa",
|
||||
"ssautil",
|
||||
"staticcheck",
|
||||
"staticcheck/vrp",
|
||||
"stylecheck",
|
||||
"unused",
|
||||
"version",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "afd67930eec2a9ed3e9b19f684d17a062285f16a"
|
||||
version = "2019.2.3"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
@ -212,10 +306,10 @@
|
||||
"github.com/stretchr/testify/assert",
|
||||
"github.com/stretchr/testify/require",
|
||||
"github.com/stretchr/testify/suite",
|
||||
"github.com/uber-go/atomic",
|
||||
"github.com/uber/jaeger-lib/metrics",
|
||||
"github.com/uber/jaeger-lib/metrics/metricstest",
|
||||
"github.com/uber/jaeger-lib/metrics/prometheus",
|
||||
"go.uber.org/atomic",
|
||||
"go.uber.org/zap",
|
||||
"go.uber.org/zap/zapcore",
|
||||
]
|
||||
|
6
vendor/github.com/uber/jaeger-client-go/Gopkg.toml
generated
vendored
6
vendor/github.com/uber/jaeger-client-go/Gopkg.toml
generated
vendored
@ -8,19 +8,19 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
version = "^1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "^1.1.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/uber-go/atomic"
|
||||
name = "go.uber.org/atomic"
|
||||
version = "^1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/uber/jaeger-lib"
|
||||
version = "^2.0"
|
||||
version = "^2.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "go.uber.org/zap"
|
||||
|
29
vendor/github.com/uber/jaeger-client-go/Makefile
generated
vendored
29
vendor/github.com/uber/jaeger-client-go/Makefile
generated
vendored
@ -1,14 +1,14 @@
|
||||
PROJECT_ROOT=github.com/uber/jaeger-client-go
|
||||
PACKAGES := $(shell glide novendor | grep -v -e ./thrift-gen/... -e ./thrift/...)
|
||||
PACKAGES := . $(shell go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
|
||||
# all .go files that don't exist in hidden directories
|
||||
ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
|
||||
-e ".*/\..*" \
|
||||
-e ".*/_.*" \
|
||||
-e ".*/mocks.*")
|
||||
|
||||
-include crossdock/rules.mk
|
||||
USE_DEP := true
|
||||
|
||||
export GO15VENDOREXPERIMENT=1
|
||||
-include crossdock/rules.mk
|
||||
|
||||
RACE=-race
|
||||
GOTEST=go test -v $(RACE)
|
||||
@ -58,19 +58,24 @@ lint:
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
glide --version || go get github.com/Masterminds/glide
|
||||
@echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
|
||||
ifeq ($(USE_DEP),true)
|
||||
dep version || make install-dep
|
||||
dep ensure
|
||||
else
|
||||
endif
|
||||
ifeq ($(USE_GLIDE),true)
|
||||
glide --version || go get github.com/Masterminds/glide
|
||||
glide install
|
||||
endif
|
||||
|
||||
|
||||
.PHONY: cover
|
||||
cover:
|
||||
./scripts/cover.sh $(shell go list $(PACKAGES))
|
||||
go tool cover -html=cover.out -o cover.html
|
||||
$(GOTEST) -cover -coverprofile cover.out $(PACKAGES)
|
||||
|
||||
.PHONY: cover-html
|
||||
cover-html: cover
|
||||
go tool cover -html=cover.out -o cover.html
|
||||
|
||||
# This is not part of the regular test target because we don't want to slow it
|
||||
# down.
|
||||
@ -101,23 +106,23 @@ idl-submodule:
|
||||
thrift-image:
|
||||
$(THRIFT) -version
|
||||
|
||||
.PHONY: install-dep-ci
|
||||
install-dep-ci:
|
||||
.PHONY: install-dep
|
||||
install-dep:
|
||||
- curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
|
||||
- chmod +x $$GOPATH/bin/dep
|
||||
|
||||
.PHONY: install-ci
|
||||
install-ci: install-dep-ci install
|
||||
install-ci: install
|
||||
go get github.com/wadey/gocovmerge
|
||||
go get github.com/mattn/goveralls
|
||||
go get golang.org/x/tools/cmd/cover
|
||||
go get golang.org/x/lint/golint
|
||||
|
||||
.PHONY: test-ci
|
||||
test-ci:
|
||||
@./scripts/cover.sh $(shell go list $(PACKAGES))
|
||||
test-ci: cover
|
||||
ifeq ($(CI_SKIP_LINT),true)
|
||||
echo 'skipping lint'
|
||||
else
|
||||
make lint
|
||||
endif
|
||||
|
||||
|
56
vendor/github.com/uber/jaeger-client-go/README.md
generated
vendored
56
vendor/github.com/uber/jaeger-client-go/README.md
generated
vendored
@ -3,7 +3,7 @@
|
||||
# Jaeger Bindings for Go OpenTracing API
|
||||
|
||||
Instrumentation library that implements an
|
||||
[OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io).
|
||||
[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io).
|
||||
|
||||
**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`, it will not compile. We might revisit this in the next major release.
|
||||
* :white_check_mark: `import "github.com/uber/jaeger-client-go"`
|
||||
@ -15,19 +15,20 @@ Please see [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
|
||||
## Installation
|
||||
|
||||
We recommended using a dependency manager like [glide](https://github.com/Masterminds/glide)
|
||||
We recommended using a dependency manager like [dep](https://golang.github.io/dep/)
|
||||
and [semantic versioning](http://semver.org/) when including this library into an application.
|
||||
For example, Jaeger backend imports this library like this:
|
||||
|
||||
```yaml
|
||||
- package: github.com/uber/jaeger-client-go
|
||||
version: ^2.7.0
|
||||
```toml
|
||||
[[constraint]]
|
||||
name = "github.com/uber/jaeger-client-go"
|
||||
version = "2.17"
|
||||
```
|
||||
|
||||
If you instead want to use the latest version in `master`, you can pull it via `go get`.
|
||||
Note that during `go get` you may see build errors due to incompatible dependencies, which is why
|
||||
we recommend using semantic versions for dependencies. The error may be fixed by running
|
||||
`make install` (it will install `glide` if you don't have it):
|
||||
`make install` (it will install `dep` if you don't have it):
|
||||
|
||||
```shell
|
||||
go get -u github.com/uber/jaeger-client-go/
|
||||
@ -181,6 +182,29 @@ are available:
|
||||
1. `RateLimitingSampler` can be used to allow only a certain fixed
|
||||
number of traces to be sampled per second.
|
||||
|
||||
#### Delayed sampling
|
||||
|
||||
Version 2.20 introduced the ability to delay sampling decisions in the life cycle
|
||||
of the root span. It involves several features and architectural changes:
|
||||
* **Shared sampling state**: the sampling state is shared across all local
|
||||
(i.e. in-process) spans for a given trace.
|
||||
* **New `SamplerV2` API** allows the sampler to be called at multiple points
|
||||
in the life cycle of a span:
|
||||
* on span creation
|
||||
* on overwriting span operation name
|
||||
* on setting span tags
|
||||
* on finishing the span
|
||||
* **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
|
||||
to indicate if the negative sampling decision is final or not (positive sampling
|
||||
decisions are always final). If the decision is not final, the sampler will be
|
||||
called again on further span life cycle events, like setting tags.
|
||||
|
||||
These new features are used in the experimental `x.TagMatchingSampler`, which
|
||||
can sample a trace based on a certain tag added to the root
|
||||
span or one of its local (in-process) children. The sampler can be used with
|
||||
another experimental `x.PrioritySampler` that allows multiple samplers to try
|
||||
to make a sampling decision, in a certain priority order.
|
||||
|
||||
### Baggage Injection
|
||||
|
||||
The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added
|
||||
@ -222,7 +246,7 @@ import (
|
||||
)
|
||||
|
||||
span := opentracing.SpanFromContext(ctx)
|
||||
ext.SamplingPriority.Set(span, 1)
|
||||
ext.SamplingPriority.Set(span, 1)
|
||||
```
|
||||
|
||||
#### Via HTTP Headers
|
||||
@ -253,6 +277,24 @@ by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction wi
|
||||
|
||||
However it is not the default propagation format, see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) how to set it up.
|
||||
|
||||
## SelfRef
|
||||
|
||||
Jaeger Tracer supports an additional [reference](https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans)
|
||||
type call `Self`. This allows a caller to provide an already established `SpanContext`.
|
||||
This allows loading and continuing spans/traces from offline (ie log-based) storage. The `Self` reference
|
||||
bypasses trace and span id generation.
|
||||
|
||||
|
||||
Usage requires passing in a `SpanContext` and the jaeger `Self` reference type:
|
||||
```
|
||||
span := tracer.StartSpan(
|
||||
"continued_span",
|
||||
SelfRef(yourSpanContext),
|
||||
)
|
||||
...
|
||||
defer span.finish()
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
[Apache 2.0 License](LICENSE).
|
||||
|
46
vendor/github.com/uber/jaeger-client-go/config/config.go
generated
vendored
46
vendor/github.com/uber/jaeger-client-go/config/config.go
generated
vendored
@ -76,16 +76,28 @@ type SamplerConfig struct {
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT
|
||||
SamplingServerURL string `yaml:"samplingServerURL"`
|
||||
|
||||
// MaxOperations is the maximum number of operations that the sampler
|
||||
// will keep track of. If an operation is not tracked, a default probabilistic
|
||||
// sampler will be used rather than the per operation specific sampler.
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS
|
||||
MaxOperations int `yaml:"maxOperations"`
|
||||
|
||||
// SamplingRefreshInterval controls how often the remotely controlled sampler will poll
|
||||
// jaeger-agent for the appropriate sampling strategy.
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
|
||||
SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
|
||||
|
||||
// MaxOperations is the maximum number of operations that the PerOperationSampler
|
||||
// will keep track of. If an operation is not tracked, a default probabilistic
|
||||
// sampler will be used rather than the per operation specific sampler.
|
||||
// Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
|
||||
MaxOperations int `yaml:"maxOperations"`
|
||||
|
||||
// Opt-in feature for applications that require late binding of span name via explicit
|
||||
// call to SetOperationName when using PerOperationSampler. When this feature is enabled,
|
||||
// the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling
|
||||
// decision as non-final (and the span as writeable). This may lead to degraded performance
|
||||
// in applications that always provide the correct span name on trace creation.
|
||||
//
|
||||
// For backwards compatibility this option is off by default.
|
||||
OperationNameLateBinding bool `yaml:"operationNameLateBinding"`
|
||||
|
||||
// Options can be used to programmatically pass additional options to the Remote sampler.
|
||||
Options []jaeger.SamplerOption
|
||||
}
|
||||
|
||||
// ReporterConfig configures the reporter. All fields are optional.
|
||||
@ -181,13 +193,14 @@ func (c Configuration) New(
|
||||
// NewTracer returns a new tracer based on the current configuration, using the given options,
|
||||
// and a closer func that can be used to flush buffers before shutdown.
|
||||
func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
|
||||
if c.Disabled {
|
||||
return &opentracing.NoopTracer{}, &nullCloser{}, nil
|
||||
}
|
||||
|
||||
if c.ServiceName == "" {
|
||||
return nil, nil, errors.New("no service name provided")
|
||||
}
|
||||
|
||||
if c.Disabled {
|
||||
return &opentracing.NoopTracer{}, &nullCloser{}, nil
|
||||
}
|
||||
opts := applyOptions(options...)
|
||||
tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
|
||||
if c.RPCMetrics {
|
||||
@ -234,6 +247,7 @@ func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Clos
|
||||
jaeger.TracerOptions.PoolSpans(opts.poolSpans),
|
||||
jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
|
||||
jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
|
||||
jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling),
|
||||
}
|
||||
|
||||
for _, tag := range opts.tags {
|
||||
@ -330,7 +344,7 @@ func (sc *SamplerConfig) NewSampler(
|
||||
return jaeger.NewProbabilisticSampler(sc.Param)
|
||||
}
|
||||
return nil, fmt.Errorf(
|
||||
"Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1",
|
||||
"invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v",
|
||||
sc.Param,
|
||||
)
|
||||
}
|
||||
@ -348,16 +362,14 @@ func (sc *SamplerConfig) NewSampler(
|
||||
jaeger.SamplerOptions.Metrics(metrics),
|
||||
jaeger.SamplerOptions.InitialSampler(initSampler),
|
||||
jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
|
||||
jaeger.SamplerOptions.MaxOperations(sc.MaxOperations),
|
||||
jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding),
|
||||
jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval),
|
||||
}
|
||||
if sc.MaxOperations != 0 {
|
||||
options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations))
|
||||
}
|
||||
if sc.SamplingRefreshInterval != 0 {
|
||||
options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval))
|
||||
}
|
||||
options = append(options, sc.Options...)
|
||||
return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
|
||||
}
|
||||
return nil, fmt.Errorf("Unknown sampler type %v", sc.Type)
|
||||
return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type)
|
||||
}
|
||||
|
||||
// NewReporter instantiates a new reporter that submits spans to the collector
|
||||
|
27
vendor/github.com/uber/jaeger-client-go/config/config_env.go
generated
vendored
27
vendor/github.com/uber/jaeger-client-go/config/config_env.go
generated
vendored
@ -52,7 +52,11 @@ const (
|
||||
// FromEnv uses environment variables to set the tracer's Configuration
|
||||
func FromEnv() (*Configuration, error) {
|
||||
c := &Configuration{}
|
||||
return c.FromEnv()
|
||||
}
|
||||
|
||||
// FromEnv uses environment variables and overrides existing tracer's Configuration
|
||||
func (c *Configuration) FromEnv() (*Configuration, error) {
|
||||
if e := os.Getenv(envServiceName); e != "" {
|
||||
c.ServiceName = e
|
||||
}
|
||||
@ -77,13 +81,21 @@ func FromEnv() (*Configuration, error) {
|
||||
c.Tags = parseTags(e)
|
||||
}
|
||||
|
||||
if s, err := samplerConfigFromEnv(); err == nil {
|
||||
if c.Sampler == nil {
|
||||
c.Sampler = &SamplerConfig{}
|
||||
}
|
||||
|
||||
if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
|
||||
c.Sampler = s
|
||||
} else {
|
||||
return nil, errors.Wrap(err, "cannot obtain sampler config from env")
|
||||
}
|
||||
|
||||
if r, err := reporterConfigFromEnv(); err == nil {
|
||||
if c.Reporter == nil {
|
||||
c.Reporter = &ReporterConfig{}
|
||||
}
|
||||
|
||||
if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
|
||||
c.Reporter = r
|
||||
} else {
|
||||
return nil, errors.Wrap(err, "cannot obtain reporter config from env")
|
||||
@ -93,9 +105,7 @@ func FromEnv() (*Configuration, error) {
|
||||
}
|
||||
|
||||
// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
|
||||
func samplerConfigFromEnv() (*SamplerConfig, error) {
|
||||
sc := &SamplerConfig{}
|
||||
|
||||
func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
|
||||
if e := os.Getenv(envSamplerType); e != "" {
|
||||
sc.Type = e
|
||||
}
|
||||
@ -110,6 +120,9 @@ func samplerConfigFromEnv() (*SamplerConfig, error) {
|
||||
|
||||
if e := os.Getenv(envSamplerManagerHostPort); e != "" {
|
||||
sc.SamplingServerURL = e
|
||||
} else if e := os.Getenv(envAgentHost); e != "" {
|
||||
// Fallback if we know the agent host - try the sampling endpoint there
|
||||
sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort)
|
||||
}
|
||||
|
||||
if e := os.Getenv(envSamplerMaxOperations); e != "" {
|
||||
@ -132,9 +145,7 @@ func samplerConfigFromEnv() (*SamplerConfig, error) {
|
||||
}
|
||||
|
||||
// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
|
||||
func reporterConfigFromEnv() (*ReporterConfig, error) {
|
||||
rc := &ReporterConfig{}
|
||||
|
||||
func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
|
||||
if e := os.Getenv(envReporterMaxQueueSize); e != "" {
|
||||
if value, err := strconv.ParseInt(e, 10, 0); err == nil {
|
||||
rc.QueueSize = int(value)
|
||||
|
35
vendor/github.com/uber/jaeger-client-go/config/options.go
generated
vendored
35
vendor/github.com/uber/jaeger-client-go/config/options.go
generated
vendored
@ -26,19 +26,20 @@ type Option func(c *Options)
|
||||
|
||||
// Options control behavior of the client.
|
||||
type Options struct {
|
||||
metrics metrics.Factory
|
||||
logger jaeger.Logger
|
||||
reporter jaeger.Reporter
|
||||
sampler jaeger.Sampler
|
||||
contribObservers []jaeger.ContribObserver
|
||||
observers []jaeger.Observer
|
||||
gen128Bit bool
|
||||
poolSpans bool
|
||||
zipkinSharedRPCSpan bool
|
||||
maxTagValueLength int
|
||||
tags []opentracing.Tag
|
||||
injectors map[interface{}]jaeger.Injector
|
||||
extractors map[interface{}]jaeger.Extractor
|
||||
metrics metrics.Factory
|
||||
logger jaeger.Logger
|
||||
reporter jaeger.Reporter
|
||||
sampler jaeger.Sampler
|
||||
contribObservers []jaeger.ContribObserver
|
||||
observers []jaeger.Observer
|
||||
gen128Bit bool
|
||||
poolSpans bool
|
||||
zipkinSharedRPCSpan bool
|
||||
maxTagValueLength int
|
||||
noDebugFlagOnForcedSampling bool
|
||||
tags []opentracing.Tag
|
||||
injectors map[interface{}]jaeger.Injector
|
||||
extractors map[interface{}]jaeger.Extractor
|
||||
}
|
||||
|
||||
// Metrics creates an Option that initializes Metrics in the tracer,
|
||||
@ -117,6 +118,14 @@ func MaxTagValueLength(maxTagValueLength int) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// NoDebugFlagOnForcedSampling can be used to decide whether debug flag will be set or not
|
||||
// when calling span.setSamplingPriority to force sample a span.
|
||||
func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {
|
||||
return func(c *Options) {
|
||||
c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
|
||||
}
|
||||
}
|
||||
|
||||
// Tag creates an option that adds a tracer-level tag.
|
||||
func Tag(key string, value interface{}) Option {
|
||||
return func(c *Options) {
|
||||
|
20
vendor/github.com/uber/jaeger-client-go/constants.go
generated
vendored
20
vendor/github.com/uber/jaeger-client-go/constants.go
generated
vendored
@ -14,9 +14,15 @@
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
const (
|
||||
// JaegerClientVersion is the version of the client library reported as Span tag.
|
||||
JaegerClientVersion = "Go-2.16.0"
|
||||
JaegerClientVersion = "Go-2.20.1"
|
||||
|
||||
// JaegerClientVersionTagKey is the name of the tag used to report client version.
|
||||
JaegerClientVersionTagKey = "jaeger.version"
|
||||
@ -83,6 +89,18 @@ const (
|
||||
// DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
|
||||
DefaultUDPSpanServerPort = 6831
|
||||
|
||||
// DefaultSamplingServerPort is the default port to fetch sampling config from, via http
|
||||
DefaultSamplingServerPort = 5778
|
||||
|
||||
// DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
|
||||
DefaultMaxTagValueLength = 256
|
||||
|
||||
// SelfRefType is a jaeger specific reference type that supports creating a span
|
||||
// with an already defined context.
|
||||
selfRefType opentracing.SpanReferenceType = 99
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultSamplingServerURL is the default url to fetch sampling config from, via http
|
||||
DefaultSamplingServerURL = fmt.Sprintf("http://localhost:%d/sampling", DefaultSamplingServerPort)
|
||||
)
|
||||
|
46
vendor/github.com/uber/jaeger-client-go/glide.lock
generated
vendored
46
vendor/github.com/uber/jaeger-client-go/glide.lock
generated
vendored
@ -1,8 +1,8 @@
|
||||
hash: 92cc8f956428fc65bee07d809a752f34376aece141c934eff02aefa08d450b72
|
||||
updated: 2019-03-23T18:26:09.960887-04:00
|
||||
hash: a4a449cfc060c2d7be850a69b171e4382a3bd00d1a0a72cfc944facc3fe263bf
|
||||
updated: 2019-09-23T17:10:15.213856-04:00
|
||||
imports:
|
||||
- name: github.com/beorn7/perks
|
||||
version: 3a771d992973f24aa725d07868b467d1ddfceafb
|
||||
version: 37c8de3658fcb183f997c4e13e8337516ab753e6
|
||||
subpackages:
|
||||
- quantile
|
||||
- name: github.com/codahale/hdrhistogram
|
||||
@ -17,11 +17,11 @@ imports:
|
||||
subpackages:
|
||||
- spew
|
||||
- name: github.com/golang/protobuf
|
||||
version: bbd03ef6da3a115852eaf24c8a1c46aeb39aa175
|
||||
version: 1680a479a2cfb3fa22b972af7e36d0a0fde47bf8
|
||||
subpackages:
|
||||
- proto
|
||||
- name: github.com/matttproud/golang_protobuf_extensions
|
||||
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
|
||||
version: c182affec369e30f25d3eb8cd8a478dee585ae7d
|
||||
subpackages:
|
||||
- pbutil
|
||||
- name: github.com/opentracing/opentracing-go
|
||||
@ -33,47 +33,49 @@ imports:
|
||||
- name: github.com/pkg/errors
|
||||
version: ba968bfe8b2f7e042a574c888954fccecfa385b4
|
||||
- name: github.com/pmezard/go-difflib
|
||||
version: 792786c7400a136282c1664665ae0a8db921c6c2
|
||||
version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
|
||||
subpackages:
|
||||
- difflib
|
||||
- name: github.com/prometheus/client_golang
|
||||
version: c5b7fccd204277076155f10851dad72b76a49317
|
||||
version: 170205fb58decfd011f1550d4cfb737230d7ae4f
|
||||
subpackages:
|
||||
- prometheus
|
||||
- prometheus/internal
|
||||
- name: github.com/prometheus/client_model
|
||||
version: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
|
||||
version: 14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016
|
||||
subpackages:
|
||||
- go
|
||||
- name: github.com/prometheus/common
|
||||
version: 38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a
|
||||
version: 287d3e634a1e550c9e463dd7e5a75a422c614505
|
||||
subpackages:
|
||||
- expfmt
|
||||
- internal/bitbucket.org/ww/goautoneg
|
||||
- model
|
||||
- name: github.com/prometheus/procfs
|
||||
version: 780932d4fbbe0e69b84c34c20f5c8d0981e109ea
|
||||
version: de25ac347ef9305868b04dc42425c973b863b18c
|
||||
subpackages:
|
||||
- internal/fs
|
||||
- internal/util
|
||||
- nfs
|
||||
- xfs
|
||||
- name: github.com/stretchr/testify
|
||||
version: f35b8ab0b5a2cef36673838d662e249dd9c94686
|
||||
version: 85f2b59c4459e5bf57488796be8c3667cb8246d6
|
||||
subpackages:
|
||||
- assert
|
||||
- require
|
||||
- suite
|
||||
- name: github.com/uber-go/atomic
|
||||
version: df976f2515e274675050de7b3f42545de80594fd
|
||||
- name: github.com/uber/jaeger-lib
|
||||
version: 0e30338a695636fe5bcf7301e8030ce8dd2a8530
|
||||
version: a87ae9d84fb038a8d79266298970720be7c80fcd
|
||||
subpackages:
|
||||
- metrics
|
||||
- metrics/metricstest
|
||||
- metrics/prometheus
|
||||
- name: go.uber.org/atomic
|
||||
version: 1ea20fb1cbb1cc08cbd0d913a96dead89aa18289
|
||||
version: df976f2515e274675050de7b3f42545de80594fd
|
||||
- name: go.uber.org/multierr
|
||||
version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
|
||||
- name: go.uber.org/zap
|
||||
version: ff33455a0e382e8a81d14dd7c922020b6b5e7982
|
||||
version: 27376062155ad36be76b0f12cf1572a221d3a48c
|
||||
subpackages:
|
||||
- buffer
|
||||
- internal/bufferpool
|
||||
@ -81,10 +83,14 @@ imports:
|
||||
- internal/exit
|
||||
- zapcore
|
||||
- name: golang.org/x/net
|
||||
version: 49bb7cea24b1df9410e1712aa6433dae904ff66a
|
||||
version: aa69164e4478b84860dc6769c710c699c67058a3
|
||||
subpackages:
|
||||
- context
|
||||
- context/ctxhttp
|
||||
testImports:
|
||||
- name: github.com/uber-go/atomic
|
||||
version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8
|
||||
- name: golang.org/x/sys
|
||||
version: 0a153f010e6963173baba2306531d173aa843137
|
||||
subpackages:
|
||||
- windows
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: 51d6538a90f86fe93ac480b35f37b2be17fef232
|
||||
testImports: []
|
||||
|
9
vendor/github.com/uber/jaeger-client-go/glide.yaml
generated
vendored
9
vendor/github.com/uber/jaeger-client-go/glide.yaml
generated
vendored
@ -12,11 +12,16 @@ import:
|
||||
- metrics
|
||||
- package: github.com/pkg/errors
|
||||
version: ~0.8.0
|
||||
- package: go.uber.org/zap
|
||||
source: https://github.com/uber-go/zap.git
|
||||
version: ^1
|
||||
- package: github.com/uber-go/atomic
|
||||
version: ^1
|
||||
- package: github.com/prometheus/client_golang
|
||||
version: ^1
|
||||
testImport:
|
||||
- package: github.com/stretchr/testify
|
||||
subpackages:
|
||||
- assert
|
||||
- require
|
||||
- suite
|
||||
- package: github.com/prometheus/client_golang
|
||||
version: v0.8.0
|
||||
|
2
vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
generated
vendored
2
vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
generated
vendored
@ -35,7 +35,7 @@ func BuildJaegerThrift(span *Span) *j.Span {
|
||||
SpanId: int64(span.context.spanID),
|
||||
ParentSpanId: int64(span.context.parentID),
|
||||
OperationName: span.operationName,
|
||||
Flags: int32(span.context.flags),
|
||||
Flags: int32(span.context.samplingState.flags()),
|
||||
StartTime: startTime,
|
||||
Duration: duration,
|
||||
Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
|
||||
|
20
vendor/github.com/uber/jaeger-client-go/metrics.go
generated
vendored
20
vendor/github.com/uber/jaeger-client-go/metrics.go
generated
vendored
@ -26,6 +26,9 @@ type Metrics struct {
|
||||
// Number of traces started by this tracer as not sampled
|
||||
TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
|
||||
|
||||
// Number of traces started by this tracer with delayed sampling
|
||||
TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"`
|
||||
|
||||
// Number of externally started sampled traces this tracer joined
|
||||
TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
|
||||
|
||||
@ -33,13 +36,22 @@ type Metrics struct {
|
||||
TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
|
||||
|
||||
// Number of sampled spans started by this tracer
|
||||
SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of sampled spans started by this tracer"`
|
||||
SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
|
||||
|
||||
// Number of unsampled spans started by this tracer
|
||||
SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of unsampled spans started by this tracer"`
|
||||
// Number of not sampled spans started by this tracer
|
||||
SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
|
||||
|
||||
// Number of spans with delayed sampling started by this tracer
|
||||
SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
|
||||
|
||||
// Number of spans finished by this tracer
|
||||
SpansFinished metrics.Counter `metric:"finished_spans" help:"Number of spans finished by this tracer"`
|
||||
SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
|
||||
|
||||
// Number of spans finished by this tracer
|
||||
SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
|
||||
|
||||
// Number of spans finished by this tracer
|
||||
SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
|
||||
|
||||
// Number of errors decoding tracing context
|
||||
DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
|
||||
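The delayed-sampling counters above reuse the existing metric names ("traces", "started_spans", "finished_spans") and only differ in the sampled tag, so they are emitted as soon as a Metrics bundle is built from a metrics factory. A hedged sketch of that wiring — NewMetrics and NullFactory come from jaeger-client-go/jaeger-lib as I understand their APIs and are not part of this diff:

package main

import (
    "github.com/uber/jaeger-client-go"
    "github.com/uber/jaeger-lib/metrics"
)

func main() {
    // NullFactory discards all metrics; substitute a real factory (Prometheus, expvar, ...) in production.
    m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"lib": "jaeger"})

    // Normally the tracer increments these; done here only to show the metric/tag mapping.
    m.TracesStartedDelayedSampling.Inc(1) // metric "traces", tags state=started,sampled=n
    m.SpansStartedDelayedSampling.Inc(1)  // metric "started_spans", tag sampled=delayed
}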
|
8
vendor/github.com/uber/jaeger-client-go/propagation.go
generated
vendored
@ -193,7 +193,7 @@ func (p *BinaryPropagator) Inject(
|
||||
if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Write(carrier, binary.BigEndian, sc.flags); err != nil {
|
||||
if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -222,6 +222,7 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
|
||||
return emptyContext, opentracing.ErrInvalidCarrier
|
||||
}
|
||||
var ctx SpanContext
|
||||
ctx.samplingState = &samplingState{}
|
||||
|
||||
if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
@ -232,9 +233,12 @@ func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
|
||||
if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
if err := binary.Read(carrier, binary.BigEndian, &ctx.flags); err != nil {
|
||||
|
||||
var flags byte
|
||||
if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
|
||||
return emptyContext, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
ctx.samplingState.setFlags(flags)
|
||||
|
||||
// Handle the baggage items
|
||||
var numBaggage int32
|
||||
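The binary propagator now round-trips the sampling flags through samplingState instead of a flags field on the context, but the wire format is unchanged, so existing Inject/Extract callers keep working. A hedged sketch of exercising the Binary carrier — tracer construction is illustrative only, and NewTracer plus the opentracing constants are outside this diff:

package main

import (
    "bytes"
    "log"

    opentracing "github.com/opentracing/opentracing-go"
    "github.com/uber/jaeger-client-go"
)

func main() {
    // Illustrative tracer: const sampler plus in-memory reporter.
    tracer, closer := jaeger.NewTracer("demo", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
    defer closer.Close()

    span := tracer.StartSpan("op")
    defer span.Finish()

    // Inject into the binary carrier (an io.Writer), then extract from an io.Reader.
    buf := new(bytes.Buffer)
    if err := tracer.Inject(span.Context(), opentracing.Binary, buf); err != nil {
        log.Fatal(err)
    }
    if _, err := tracer.Extract(opentracing.Binary, bytes.NewReader(buf.Bytes())); err != nil {
        log.Fatal(err)
    }
}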
|
20
vendor/github.com/uber/jaeger-client-go/reporter.go
generated
vendored
@ -28,6 +28,8 @@ import (
|
||||
// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
|
||||
type Reporter interface {
|
||||
// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
|
||||
// If the reporter is processing Span asynchronously then it needs to Retain() the span,
|
||||
// and then Release() it when no longer needed, to avoid span data corruption.
|
||||
Report(span *Span)
|
||||
|
||||
// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
|
||||
@ -93,13 +95,14 @@ func NewInMemoryReporter() *InMemoryReporter {
|
||||
// Report implements Report() method of Reporter by storing the span in the buffer.
|
||||
func (r *InMemoryReporter) Report(span *Span) {
|
||||
r.lock.Lock()
|
||||
r.spans = append(r.spans, span)
|
||||
// Need to retain the span otherwise it will be released
|
||||
r.spans = append(r.spans, span.Retain())
|
||||
r.lock.Unlock()
|
||||
}
|
||||
|
||||
// Close implements Close() method of Reporter by doing nothing.
|
||||
// Close implements Close() method of Reporter
|
||||
func (r *InMemoryReporter) Close() {
|
||||
// no-op
|
||||
r.Reset()
|
||||
}
|
||||
|
||||
// SpansSubmitted returns the number of spans accumulated in the buffer.
|
||||
@ -122,7 +125,12 @@ func (r *InMemoryReporter) GetSpans() []opentracing.Span {
|
||||
func (r *InMemoryReporter) Reset() {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
r.spans = nil
|
||||
|
||||
// Before reset the collection need to release Span memory
|
||||
for _, span := range r.spans {
|
||||
span.(*Span).Release()
|
||||
}
|
||||
r.spans = r.spans[:0]
|
||||
}
|
||||
|
||||
// ------------------------------
|
||||
@ -218,7 +226,8 @@ func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
|
||||
// because some of them may still be successfully added to the queue.
|
||||
func (r *remoteReporter) Report(span *Span) {
|
||||
select {
|
||||
case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span}:
|
||||
// Need to retain the span otherwise it will be released
|
||||
case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}:
|
||||
atomic.AddInt64(&r.queueLength, 1)
|
||||
default:
|
||||
r.metrics.ReporterDropped.Inc(1)
|
||||
@ -278,6 +287,7 @@ func (r *remoteReporter) processQueue() {
|
||||
// to reduce the number of gauge stats, we only emit queue length on flush
|
||||
r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
|
||||
}
|
||||
span.Release()
|
||||
case reporterQueueItemClose:
|
||||
timer.Stop()
|
||||
flush()
|
||||
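With Report() retaining spans and the queue/Reset paths releasing them, any custom Reporter that holds spans after Report returns has to follow the same Retain/Release discipline. A hedged sketch of such a reporter — the buffering and Flush logic are made up for illustration; only Retain/Release and the Reporter interface come from the library:

package demo

import (
    "sync"

    "github.com/uber/jaeger-client-go"
)

// bufferingReporter keeps finished spans until Flush is called. It retains each
// span on Report and releases it once it is done with the span data.
type bufferingReporter struct {
    mu    sync.Mutex
    spans []*jaeger.Span
}

func (r *bufferingReporter) Report(span *jaeger.Span) {
    r.mu.Lock()
    defer r.mu.Unlock()
    r.spans = append(r.spans, span.Retain()) // keep the span data alive while buffered
}

func (r *bufferingReporter) Flush() {
    r.mu.Lock()
    defer r.mu.Unlock()
    for _, s := range r.spans {
        // ... ship s to a backend here ...
        s.Release() // hand the span back once we no longer need it
    }
    r.spans = nil
}

func (r *bufferingReporter) Close() {
    r.Flush()
}

var _ jaeger.Reporter = (*bufferingReporter)(nil)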
|
455
vendor/github.com/uber/jaeger-client-go/sampler.go
generated
vendored
@ -17,20 +17,14 @@ package jaeger
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/uber/jaeger-client-go/log"
|
||||
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
|
||||
"github.com/uber/jaeger-client-go/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSamplingServerURL = "http://localhost:5778/sampling"
|
||||
defaultSamplingRefreshInterval = time.Minute
|
||||
defaultMaxOperations = 2000
|
||||
defaultMaxOperations = 2000
|
||||
)
|
||||
|
||||
// Sampler decides whether a new trace should be sampled or not.
|
||||
@ -48,9 +42,7 @@ type Sampler interface {
|
||||
|
||||
// Equal checks if the `other` sampler is functionally equivalent
|
||||
// to this sampler.
|
||||
// TODO remove this function. This function is used to determine if 2 samplers are equivalent
|
||||
// which does not bode well with the adaptive sampler which has to create all the composite samplers
|
||||
// for the comparison to occur. This is expensive to do if only one sampler has changed.
|
||||
// TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
|
||||
Equal(other Sampler) bool
|
||||
}
|
||||
|
||||
@ -58,17 +50,23 @@ type Sampler interface {
|
||||
|
||||
// ConstSampler is a sampler that always makes the same decision.
|
||||
type ConstSampler struct {
|
||||
legacySamplerV1Base
|
||||
Decision bool
|
||||
tags []Tag
|
||||
}
|
||||
|
||||
// NewConstSampler creates a ConstSampler.
|
||||
func NewConstSampler(sample bool) Sampler {
|
||||
func NewConstSampler(sample bool) *ConstSampler {
|
||||
tags := []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeConst},
|
||||
{key: SamplerParamTagKey, value: sample},
|
||||
}
|
||||
return &ConstSampler{Decision: sample, tags: tags}
|
||||
s := &ConstSampler{
|
||||
Decision: sample,
|
||||
tags: tags,
|
||||
}
|
||||
s.delegate = s.IsSampled
|
||||
return s
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
@ -89,11 +87,17 @@ func (s *ConstSampler) Equal(other Sampler) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// String is used to log sampler details.
|
||||
func (s *ConstSampler) String() string {
|
||||
return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// ProbabilisticSampler is a sampler that randomly samples a certain percentage
|
||||
// of traces.
|
||||
type ProbabilisticSampler struct {
|
||||
legacySamplerV1Base
|
||||
samplingRate float64
|
||||
samplingBoundary uint64
|
||||
tags []Tag
|
||||
@ -115,16 +119,19 @@ func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error
|
||||
}
|
||||
|
||||
func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
|
||||
samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
|
||||
tags := []Tag{
|
||||
s := new(ProbabilisticSampler)
|
||||
s.delegate = s.IsSampled
|
||||
return s.init(samplingRate)
|
||||
}
|
||||
|
||||
func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
|
||||
s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
|
||||
s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
|
||||
s.tags = []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
|
||||
{key: SamplerParamTagKey, value: samplingRate},
|
||||
}
|
||||
return &ProbabilisticSampler{
|
||||
samplingRate: samplingRate,
|
||||
samplingBoundary: uint64(float64(maxRandomNumber) * samplingRate),
|
||||
tags: tags,
|
||||
{key: SamplerParamTagKey, value: s.samplingRate},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SamplingRate returns the sampling probability this sampled was constructed with.
|
||||
@ -150,65 +157,104 @@ func (s *ProbabilisticSampler) Equal(other Sampler) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Update modifies in-place the sampling rate. Locking must be done externally.
|
||||
func (s *ProbabilisticSampler) Update(samplingRate float64) error {
|
||||
if samplingRate < 0.0 || samplingRate > 1.0 {
|
||||
return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
|
||||
}
|
||||
s.init(samplingRate)
|
||||
return nil
|
||||
}
|
||||
|
||||
// String is used to log sampler details.
|
||||
func (s *ProbabilisticSampler) String() string {
|
||||
return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
type rateLimitingSampler struct {
|
||||
// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
|
||||
// burstiness of the service, i.e. a service with uniformly distributed requests will have those
|
||||
// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
|
||||
// number of sequential requests can be sampled each second.
|
||||
type RateLimitingSampler struct {
|
||||
legacySamplerV1Base
|
||||
maxTracesPerSecond float64
|
||||
rateLimiter utils.RateLimiter
|
||||
rateLimiter *utils.ReconfigurableRateLimiter
|
||||
tags []Tag
|
||||
}
|
||||
|
||||
// NewRateLimitingSampler creates a sampler that samples at most maxTracesPerSecond. The distribution of sampled
|
||||
// traces follows burstiness of the service, i.e. a service with uniformly distributed requests will have those
|
||||
// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a number of
|
||||
// sequential requests can be sampled each second.
|
||||
func NewRateLimitingSampler(maxTracesPerSecond float64) Sampler {
|
||||
tags := []Tag{
|
||||
// NewRateLimitingSampler creates new RateLimitingSampler.
|
||||
func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
|
||||
s := new(RateLimitingSampler)
|
||||
s.delegate = s.IsSampled
|
||||
return s.init(maxTracesPerSecond)
|
||||
}
|
||||
|
||||
func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
|
||||
if s.rateLimiter == nil {
|
||||
s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
|
||||
} else {
|
||||
s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
|
||||
}
|
||||
s.maxTracesPerSecond = maxTracesPerSecond
|
||||
s.tags = []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
|
||||
{key: SamplerParamTagKey, value: maxTracesPerSecond},
|
||||
}
|
||||
return &rateLimitingSampler{
|
||||
maxTracesPerSecond: maxTracesPerSecond,
|
||||
rateLimiter: utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0)),
|
||||
tags: tags,
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
func (s *rateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
return s.rateLimiter.CheckCredit(1.0), s.tags
|
||||
}
|
||||
|
||||
func (s *rateLimitingSampler) Close() {
|
||||
// Update reconfigures the rate limiter, while preserving its accumulated balance.
|
||||
// Locking must be done externally.
|
||||
func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
|
||||
if s.maxTracesPerSecond != maxTracesPerSecond {
|
||||
s.init(maxTracesPerSecond)
|
||||
}
|
||||
}
|
||||
|
||||
// Close does nothing.
|
||||
func (s *RateLimitingSampler) Close() {
|
||||
// nothing to do
|
||||
}
|
||||
|
||||
func (s *rateLimitingSampler) Equal(other Sampler) bool {
|
||||
if o, ok := other.(*rateLimitingSampler); ok {
|
||||
// Equal compares with another sampler.
|
||||
func (s *RateLimitingSampler) Equal(other Sampler) bool {
|
||||
if o, ok := other.(*RateLimitingSampler); ok {
|
||||
return s.maxTracesPerSecond == o.maxTracesPerSecond
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// String is used to log sampler details.
|
||||
func (s *RateLimitingSampler) String() string {
|
||||
return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
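Since RateLimitingSampler is now exported and reconfigurable via Update, it can be constructed and retuned directly. A short hedged sketch (the trace ID and operation name are placeholders):

package demo

import "github.com/uber/jaeger-client-go"

func rateLimitingExample() {
    // At most 2 traces per second; Update later preserves the limiter's accumulated credit.
    sampler := jaeger.NewRateLimitingSampler(2.0)
    defer sampler.Close()

    sampled, _ := sampler.IsSampled(jaeger.TraceID{Low: 1}, "GET /api")
    _ = sampled // true for roughly the first two calls in any given second

    sampler.Update(10.0) // raise the cap to 10 traces per second in place
}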
|
||||
// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both probabilisticSampler and
|
||||
// rateLimitingSampler. The rateLimitingSampler is used as a guaranteed lower bound sampler such that
|
||||
// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
|
||||
// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
|
||||
// every operation is sampled at least once in a time interval defined by the lowerBound. ie a lowerBound
|
||||
// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
|
||||
//
|
||||
// The probabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both
|
||||
// samplers return true, the tags for probabilisticSampler will be used.
|
||||
// The ProbabilisticSampler is given higher priority when tags are emitted, ie. if IsSampled() for both
|
||||
// samplers return true, the tags for ProbabilisticSampler will be used.
|
||||
type GuaranteedThroughputProbabilisticSampler struct {
|
||||
probabilisticSampler *ProbabilisticSampler
|
||||
lowerBoundSampler Sampler
|
||||
lowerBoundSampler *RateLimitingSampler
|
||||
tags []Tag
|
||||
samplingRate float64
|
||||
lowerBound float64
|
||||
}
|
||||
|
||||
// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
|
||||
// probabilisticSampler and rateLimitingSampler.
|
||||
// ProbabilisticSampler and RateLimitingSampler.
|
||||
func NewGuaranteedThroughputProbabilisticSampler(
|
||||
lowerBound, samplingRate float64,
|
||||
) (*GuaranteedThroughputProbabilisticSampler, error) {
|
||||
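The lowerBound arithmetic in the comment above maps directly onto the constructor arguments: 1.0/(60*10) ≈ 0.0017 traces per second guarantees each operation is sampled at least once every 10 minutes, while samplingRate drives the probabilistic part. A hedged usage sketch:

package demo

import "github.com/uber/jaeger-client-go"

func guaranteedThroughputExample() error {
    // Sample each operation at least once every 10 minutes (lowerBound = 1/600
    // traces/sec) and otherwise sample 0.1% of traces probabilistically.
    sampler, err := jaeger.NewGuaranteedThroughputProbabilisticSampler(1.0/600.0, 0.001)
    if err != nil {
        return err
    }
    defer sampler.Close()

    sampled, tags := sampler.IsSampled(jaeger.TraceID{Low: 42}, "GET /health")
    _, _ = sampled, tags // tags reflect whichever sampler (probabilistic or lower-bound) made the decision
    return nil
}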
@ -225,8 +271,14 @@ func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float6
|
||||
}
|
||||
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
|
||||
if s.probabilisticSampler == nil || s.samplingRate != samplingRate {
|
||||
if s.probabilisticSampler == nil {
|
||||
s.probabilisticSampler = newProbabilisticSampler(samplingRate)
|
||||
} else if s.samplingRate != samplingRate {
|
||||
s.probabilisticSampler.init(samplingRate)
|
||||
}
|
||||
// since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
|
||||
samplingRate = s.probabilisticSampler.SamplingRate()
|
||||
if s.samplingRate != samplingRate || s.tags == nil {
|
||||
s.samplingRate = s.probabilisticSampler.SamplingRate()
|
||||
s.tags = []Tag{
|
||||
{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
|
||||
@ -253,7 +305,7 @@ func (s *GuaranteedThroughputProbabilisticSampler) Close() {
|
||||
|
||||
// Equal implements Equal() of Sampler.
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
|
||||
// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
|
||||
// more information.
|
||||
return false
|
||||
}
|
||||
@ -262,52 +314,119 @@ func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
|
||||
func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
|
||||
s.setProbabilisticSampler(samplingRate)
|
||||
if s.lowerBound != lowerBound {
|
||||
s.lowerBoundSampler = NewRateLimitingSampler(lowerBound)
|
||||
s.lowerBoundSampler.Update(lowerBound)
|
||||
s.lowerBound = lowerBound
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
type adaptiveSampler struct {
|
||||
// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
|
||||
// on a per-operation basis.
|
||||
type PerOperationSampler struct {
|
||||
sync.RWMutex
|
||||
|
||||
samplers map[string]*GuaranteedThroughputProbabilisticSampler
|
||||
defaultSampler *ProbabilisticSampler
|
||||
lowerBound float64
|
||||
maxOperations int
|
||||
|
||||
// see description in PerOperationSamplerParams
|
||||
operationNameLateBinding bool
|
||||
}
|
||||
|
||||
// NewAdaptiveSampler returns a delegating sampler that applies both probabilisticSampler and
|
||||
// rateLimitingSampler via the guaranteedThroughputProbabilisticSampler. This sampler keeps track of all
|
||||
// operations and delegates calls to the respective guaranteedThroughputProbabilisticSampler.
|
||||
func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (Sampler, error) {
|
||||
return newAdaptiveSampler(strategies, maxOperations), nil
|
||||
// NewAdaptiveSampler returns a new PerOperationSampler.
|
||||
// Deprecated: please use NewPerOperationSampler.
|
||||
func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
|
||||
return NewPerOperationSampler(PerOperationSamplerParams{
|
||||
MaxOperations: maxOperations,
|
||||
Strategies: strategies,
|
||||
}), nil
|
||||
}
|
||||
|
||||
func newAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) Sampler {
|
||||
// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
|
||||
type PerOperationSamplerParams struct {
|
||||
// Max number of operations that will be tracked. Other operations will be given default strategy.
|
||||
MaxOperations int
|
||||
|
||||
// Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
|
||||
// When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
|
||||
// the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
|
||||
// in applications that always provide the correct span name on trace creation.
|
||||
//
|
||||
// For backwards compatibility this option is off by default.
|
||||
OperationNameLateBinding bool
|
||||
|
||||
// Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
|
||||
Strategies *sampling.PerOperationSamplingStrategies
|
||||
}
|
||||
|
||||
// NewPerOperationSampler returns a new PerOperationSampler.
|
||||
func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
|
||||
if params.MaxOperations <= 0 {
|
||||
params.MaxOperations = defaultMaxOperations
|
||||
}
|
||||
samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
|
||||
for _, strategy := range strategies.PerOperationStrategies {
|
||||
for _, strategy := range params.Strategies.PerOperationStrategies {
|
||||
sampler := newGuaranteedThroughputProbabilisticSampler(
|
||||
strategies.DefaultLowerBoundTracesPerSecond,
|
||||
params.Strategies.DefaultLowerBoundTracesPerSecond,
|
||||
strategy.ProbabilisticSampling.SamplingRate,
|
||||
)
|
||||
samplers[strategy.Operation] = sampler
|
||||
}
|
||||
return &adaptiveSampler{
|
||||
samplers: samplers,
|
||||
defaultSampler: newProbabilisticSampler(strategies.DefaultSamplingProbability),
|
||||
lowerBound: strategies.DefaultLowerBoundTracesPerSecond,
|
||||
maxOperations: maxOperations,
|
||||
return &PerOperationSampler{
|
||||
samplers: samplers,
|
||||
defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
|
||||
lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond,
|
||||
maxOperations: params.MaxOperations,
|
||||
operationNameLateBinding: params.OperationNameLateBinding,
|
||||
}
|
||||
}
|
||||
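PerOperationSamplerParams, including the opt-in OperationNameLateBinding flag described above, is now the front door to what used to be the adaptive sampler. A hedged sketch — the strategies value is stubbed here, since in practice it is fetched from the sampling backend:

package demo

import (
    "github.com/uber/jaeger-client-go"
    "github.com/uber/jaeger-client-go/thrift-gen/sampling"
)

func perOperationExample() {
    // Stubbed strategies; the remote sampler normally retrieves these from the agent.
    strategies := &sampling.PerOperationSamplingStrategies{
        DefaultSamplingProbability:       0.001,
        DefaultLowerBoundTracesPerSecond: 1.0 / 600.0,
    }

    sampler := jaeger.NewPerOperationSampler(jaeger.PerOperationSamplerParams{
        MaxOperations:            500,  // operations beyond this fall back to the default sampler
        OperationNameLateBinding: true, // keep decisions retryable until SetOperationName is called
        Strategies:               strategies,
    })
    defer sampler.Close()
}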
|
||||
func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
// IsSampled is not used and only exists to match Sampler V1 API.
|
||||
// TODO (breaking change) remove when upgrading everything to SamplerV2
|
||||
func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
|
||||
samplerV1 := s.getSamplerForOperation(operationName)
|
||||
var sampled bool
|
||||
var tags []Tag
|
||||
if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
|
||||
sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
|
||||
}
|
||||
return sampled, tags
|
||||
}
|
||||
|
||||
// OnCreateSpan implements OnCreateSpan of SamplerV2.
|
||||
func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
|
||||
sampled, tags := s.trySampling(span, span.OperationName())
|
||||
return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
|
||||
}
|
||||
|
||||
// OnSetOperationName implements OnSetOperationName of SamplerV2.
|
||||
func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
|
||||
sampled, tags := s.trySampling(span, operationName)
|
||||
return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
|
||||
}
|
||||
|
||||
// OnSetTag implements OnSetTag of SamplerV2.
|
||||
func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
|
||||
return SamplingDecision{Sample: false, Retryable: true}
|
||||
}
|
||||
|
||||
// OnFinishSpan implements OnFinishSpan of SamplerV2.
|
||||
func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
|
||||
return SamplingDecision{Sample: false, Retryable: true}
|
||||
}
|
||||
|
||||
func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
|
||||
s.RLock()
|
||||
sampler, ok := s.samplers[operation]
|
||||
if ok {
|
||||
defer s.RUnlock()
|
||||
return sampler.IsSampled(id, operation)
|
||||
return sampler
|
||||
}
|
||||
s.RUnlock()
|
||||
s.Lock()
|
||||
@ -316,18 +435,19 @@ func (s *adaptiveSampler) IsSampled(id TraceID, operation string) (bool, []Tag)
|
||||
// Check if sampler has already been created
|
||||
sampler, ok = s.samplers[operation]
|
||||
if ok {
|
||||
return sampler.IsSampled(id, operation)
|
||||
return sampler
|
||||
}
|
||||
// Store only up to maxOperations of unique ops.
|
||||
if len(s.samplers) >= s.maxOperations {
|
||||
return s.defaultSampler.IsSampled(id, operation)
|
||||
return s.defaultSampler
|
||||
}
|
||||
newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
|
||||
s.samplers[operation] = newSampler
|
||||
return newSampler.IsSampled(id, operation)
|
||||
return newSampler
|
||||
}
|
||||
|
||||
func (s *adaptiveSampler) Close() {
|
||||
// Close invokes Close on all underlying samplers.
|
||||
func (s *PerOperationSampler) Close() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
for _, sampler := range s.samplers {
|
||||
@ -336,222 +456,39 @@ func (s *adaptiveSampler) Close() {
|
||||
s.defaultSampler.Close()
|
||||
}
|
||||
|
||||
func (s *adaptiveSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is overly expensive for adaptiveSampler since it's composed of multiple
|
||||
// Equal is not used.
|
||||
// TODO (breaking change) remove this in the future
|
||||
func (s *PerOperationSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
|
||||
// samplers which all need to be initialized before this function can be called for a comparison.
|
||||
// Therefore, adaptiveSampler uses the update() function to only alter the samplers that need
|
||||
// Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
|
||||
// changing. Hence this function always returns false so that the update function can be called.
|
||||
// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *adaptiveSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
|
||||
func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
|
||||
for _, strategy := range strategies.PerOperationStrategies {
|
||||
operation := strategy.Operation
|
||||
samplingRate := strategy.ProbabilisticSampling.SamplingRate
|
||||
lowerBound := strategies.DefaultLowerBoundTracesPerSecond
|
||||
if sampler, ok := s.samplers[operation]; ok {
|
||||
sampler.update(lowerBound, samplingRate)
|
||||
newSamplers[operation] = sampler
|
||||
} else {
|
||||
sampler := newGuaranteedThroughputProbabilisticSampler(
|
||||
lowerBound,
|
||||
samplingRate,
|
||||
)
|
||||
s.samplers[operation] = sampler
|
||||
newSamplers[operation] = sampler
|
||||
}
|
||||
}
|
||||
s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
|
||||
if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
|
||||
s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// RemotelyControlledSampler is a delegating sampler that polls a remote server
|
||||
// for the appropriate sampling strategy, constructs a corresponding sampler and
|
||||
// delegates to it for sampling decisions.
|
||||
type RemotelyControlledSampler struct {
|
||||
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
|
||||
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
|
||||
closed int64 // 0 - not closed, 1 - closed
|
||||
|
||||
sync.RWMutex
|
||||
samplerOptions
|
||||
|
||||
serviceName string
|
||||
manager sampling.SamplingManager
|
||||
doneChan chan *sync.WaitGroup
|
||||
}
|
||||
|
||||
type httpSamplingManager struct {
|
||||
serverURL string
|
||||
}
|
||||
|
||||
func (s *httpSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) {
|
||||
var out sampling.SamplingStrategyResponse
|
||||
v := url.Values{}
|
||||
v.Set("service", serviceName)
|
||||
if err := utils.GetJSON(s.serverURL+"?"+v.Encode(), &out); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
// NewRemotelyControlledSampler creates a sampler that periodically pulls
|
||||
// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
|
||||
func NewRemotelyControlledSampler(
|
||||
serviceName string,
|
||||
opts ...SamplerOption,
|
||||
) *RemotelyControlledSampler {
|
||||
options := applySamplerOptions(opts...)
|
||||
sampler := &RemotelyControlledSampler{
|
||||
samplerOptions: options,
|
||||
serviceName: serviceName,
|
||||
manager: &httpSamplingManager{serverURL: options.samplingServerURL},
|
||||
doneChan: make(chan *sync.WaitGroup),
|
||||
}
|
||||
go sampler.pollController()
|
||||
return sampler
|
||||
}
|
||||
|
||||
func applySamplerOptions(opts ...SamplerOption) samplerOptions {
|
||||
options := samplerOptions{}
|
||||
for _, option := range opts {
|
||||
option(&options)
|
||||
}
|
||||
if options.sampler == nil {
|
||||
options.sampler = newProbabilisticSampler(0.001)
|
||||
}
|
||||
if options.logger == nil {
|
||||
options.logger = log.NullLogger
|
||||
}
|
||||
if options.maxOperations <= 0 {
|
||||
options.maxOperations = defaultMaxOperations
|
||||
}
|
||||
if options.samplingServerURL == "" {
|
||||
options.samplingServerURL = defaultSamplingServerURL
|
||||
}
|
||||
if options.metrics == nil {
|
||||
options.metrics = NewNullMetrics()
|
||||
}
|
||||
if options.samplingRefreshInterval <= 0 {
|
||||
options.samplingRefreshInterval = defaultSamplingRefreshInterval
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.sampler.IsSampled(id, operation)
|
||||
}
|
||||
|
||||
// Close implements Close() of Sampler.
|
||||
func (s *RemotelyControlledSampler) Close() {
|
||||
if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
|
||||
s.logger.Error("Repeated attempt to close the sampler is ignored")
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
s.doneChan <- &wg
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Equal implements Equal() of Sampler.
|
||||
func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is expensive and will be removed. See adaptiveSampler.Equal() for
|
||||
// more information.
|
||||
if o, ok := other.(*RemotelyControlledSampler); ok {
|
||||
s.RLock()
|
||||
o.RLock()
|
||||
defer s.RUnlock()
|
||||
defer o.RUnlock()
|
||||
return s.sampler.Equal(o.sampler)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) pollController() {
|
||||
ticker := time.NewTicker(s.samplingRefreshInterval)
|
||||
defer ticker.Stop()
|
||||
s.pollControllerWithTicker(ticker)
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
s.updateSampler()
|
||||
case wg := <-s.doneChan:
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) getSampler() Sampler {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.sampler
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) setSampler(sampler Sampler) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.sampler = sampler
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) updateSampler() {
|
||||
res, err := s.manager.GetSamplingStrategy(s.serviceName)
|
||||
if err != nil {
|
||||
s.metrics.SamplerQueryFailure.Inc(1)
|
||||
s.logger.Infof("Unable to query sampling strategy: %v", err)
|
||||
return
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.metrics.SamplerRetrieved.Inc(1)
|
||||
if strategies := res.GetOperationSampling(); strategies != nil {
|
||||
s.updateAdaptiveSampler(strategies)
|
||||
} else {
|
||||
err = s.updateRateLimitingOrProbabilisticSampler(res)
|
||||
}
|
||||
if err != nil {
|
||||
s.metrics.SamplerUpdateFailure.Inc(1)
|
||||
s.logger.Infof("Unable to handle sampling strategy response %+v. Got error: %v", res, err)
|
||||
return
|
||||
}
|
||||
s.metrics.SamplerUpdated.Inc(1)
|
||||
}
|
||||
|
||||
// NB: this function should only be called while holding a Write lock
|
||||
func (s *RemotelyControlledSampler) updateAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies) {
|
||||
if adaptiveSampler, ok := s.sampler.(*adaptiveSampler); ok {
|
||||
adaptiveSampler.update(strategies)
|
||||
} else {
|
||||
s.sampler = newAdaptiveSampler(strategies, s.maxOperations)
|
||||
}
|
||||
}
|
||||
|
||||
// NB: this function should only be called while holding a Write lock
|
||||
func (s *RemotelyControlledSampler) updateRateLimitingOrProbabilisticSampler(res *sampling.SamplingStrategyResponse) error {
|
||||
var newSampler Sampler
|
||||
if probabilistic := res.GetProbabilisticSampling(); probabilistic != nil {
|
||||
newSampler = newProbabilisticSampler(probabilistic.SamplingRate)
|
||||
} else if rateLimiting := res.GetRateLimitingSampling(); rateLimiting != nil {
|
||||
newSampler = NewRateLimitingSampler(float64(rateLimiting.MaxTracesPerSecond))
|
||||
} else {
|
||||
return fmt.Errorf("Unsupported sampling strategy type %v", res.GetStrategyType())
|
||||
}
|
||||
if !s.sampler.Equal(newSampler) {
|
||||
s.sampler = newSampler
|
||||
}
|
||||
return nil
|
||||
s.samplers = newSamplers
|
||||
}
|
||||
|
81
vendor/github.com/uber/jaeger-client-go/sampler_options.go
generated
vendored
@ -1,81 +0,0 @@
|
||||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// SamplerOption is a function that sets some option on the sampler
|
||||
type SamplerOption func(options *samplerOptions)
|
||||
|
||||
// SamplerOptions is a factory for all available SamplerOption's
|
||||
var SamplerOptions samplerOptions
|
||||
|
||||
type samplerOptions struct {
|
||||
metrics *Metrics
|
||||
maxOperations int
|
||||
sampler Sampler
|
||||
logger Logger
|
||||
samplingServerURL string
|
||||
samplingRefreshInterval time.Duration
|
||||
}
|
||||
|
||||
// Metrics creates a SamplerOption that initializes Metrics on the sampler,
|
||||
// which is used to emit statistics.
|
||||
func (samplerOptions) Metrics(m *Metrics) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.metrics = m
|
||||
}
|
||||
}
|
||||
|
||||
// MaxOperations creates a SamplerOption that sets the maximum number of
|
||||
// operations the sampler will keep track of.
|
||||
func (samplerOptions) MaxOperations(maxOperations int) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.maxOperations = maxOperations
|
||||
}
|
||||
}
|
||||
|
||||
// InitialSampler creates a SamplerOption that sets the initial sampler
|
||||
// to use before a remote sampler is created and used.
|
||||
func (samplerOptions) InitialSampler(sampler Sampler) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.sampler = sampler
|
||||
}
|
||||
}
|
||||
|
||||
// Logger creates a SamplerOption that sets the logger used by the sampler.
|
||||
func (samplerOptions) Logger(logger Logger) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingServerURL creates a SamplerOption that sets the sampling server url
|
||||
// of the local agent that contains the sampling strategies.
|
||||
func (samplerOptions) SamplingServerURL(samplingServerURL string) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingServerURL = samplingServerURL
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingRefreshInterval creates a SamplerOption that sets how often the
|
||||
// sampler will poll local agent for the appropriate sampling strategy.
|
||||
func (samplerOptions) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingRefreshInterval = samplingRefreshInterval
|
||||
}
|
||||
}
|
335
vendor/github.com/uber/jaeger-client-go/sampler_remote.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
|
||||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/uber/jaeger-client-go/thrift-gen/sampling"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSamplingRefreshInterval = time.Minute
|
||||
)
|
||||
|
||||
// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server.
|
||||
type SamplingStrategyFetcher interface {
|
||||
Fetch(service string) ([]byte, error)
|
||||
}
|
||||
|
||||
// SamplingStrategyParser is used to parse sampling strategy updates. The output object
|
||||
// should be of the type that is recognized by the SamplerUpdaters.
|
||||
type SamplingStrategyParser interface {
|
||||
Parse(response []byte) (interface{}, error)
|
||||
}
|
||||
|
||||
// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
|
||||
// retrieved from remote config server, to the current sampler. The updater can modify
|
||||
// the sampler in-place if sampler supports it, or create a new one.
|
||||
//
|
||||
// If the strategy does not contain configuration for the sampler in question,
|
||||
// updater must return modifiedSampler=nil to give other updaters a chance to inspect
|
||||
// the sampling strategy response.
|
||||
//
|
||||
// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
|
||||
type SamplerUpdater interface {
|
||||
Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
|
||||
}
|
||||
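A SamplerUpdater only needs to recognize the strategies it understands and return nil for everything else, so the remaining updaters get their turn. A hedged sketch of a hypothetical custom updater — the "always" strategy string is invented; only the interface and NewConstSampler come from the library:

package demo

import "github.com/uber/jaeger-client-go"

// constOverrideUpdater is a hypothetical updater: if the strategy is the string
// "always" it swaps in a const sampler; otherwise it returns nil so the built-in
// updaters can inspect the strategy instead.
type constOverrideUpdater struct{}

func (u *constOverrideUpdater) Update(sampler jaeger.SamplerV2, strategy interface{}) (jaeger.SamplerV2, error) {
    if s, ok := strategy.(string); ok && s == "always" {
        return jaeger.NewConstSampler(true), nil
    }
    return nil, nil // not ours; let the other updaters try
}

var _ jaeger.SamplerUpdater = (*constOverrideUpdater)(nil)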
|
||||
// RemotelyControlledSampler is a delegating sampler that polls a remote server
|
||||
// for the appropriate sampling strategy, constructs a corresponding sampler and
|
||||
// delegates to it for sampling decisions.
|
||||
type RemotelyControlledSampler struct {
|
||||
// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
|
||||
// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
|
||||
closed int64 // 0 - not closed, 1 - closed
|
||||
|
||||
sync.RWMutex
|
||||
samplerOptions
|
||||
|
||||
serviceName string
|
||||
doneChan chan *sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewRemotelyControlledSampler creates a sampler that periodically pulls
|
||||
// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
|
||||
func NewRemotelyControlledSampler(
|
||||
serviceName string,
|
||||
opts ...SamplerOption,
|
||||
) *RemotelyControlledSampler {
|
||||
options := new(samplerOptions).applyOptionsAndDefaults(opts...)
|
||||
sampler := &RemotelyControlledSampler{
|
||||
samplerOptions: *options,
|
||||
serviceName: serviceName,
|
||||
doneChan: make(chan *sync.WaitGroup),
|
||||
}
|
||||
go sampler.pollController()
|
||||
return sampler
|
||||
}
|
||||
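Construction of the remote sampler is unchanged from the caller's perspective; only the internals moved to the fetcher/parser/updater pipeline. A hedged configuration sketch using the option factory defined in sampler_remote_options.go below (service name and URL are placeholders):

package demo

import (
    "time"

    "github.com/uber/jaeger-client-go"
)

func remoteSamplerExample() {
    sampler := jaeger.NewRemotelyControlledSampler(
        "my-service",
        jaeger.SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
        jaeger.SamplerOptions.SamplingRefreshInterval(time.Minute),
        jaeger.SamplerOptions.InitialSampler(jaeger.NewConstSampler(false)),
        jaeger.SamplerOptions.MaxOperations(500),
    )
    defer sampler.Close()
}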
|
||||
// IsSampled implements IsSampled() of Sampler.
|
||||
// TODO (breaking change) remove when Sampler V1 is removed
|
||||
func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// OnCreateSpan implements OnCreateSpan of SamplerV2.
|
||||
func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
|
||||
return s.sampler.OnCreateSpan(span)
|
||||
}
|
||||
|
||||
// OnSetOperationName implements OnSetOperationName of SamplerV2.
|
||||
func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
|
||||
return s.sampler.OnSetOperationName(span, operationName)
|
||||
}
|
||||
|
||||
// OnSetTag implements OnSetTag of SamplerV2.
|
||||
func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
|
||||
return s.sampler.OnSetTag(span, key, value)
|
||||
}
|
||||
|
||||
// OnFinishSpan implements OnFinishSpan of SamplerV2.
|
||||
func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
|
||||
return s.sampler.OnFinishSpan(span)
|
||||
}
|
||||
|
||||
// Close implements Close() of Sampler.
|
||||
func (s *RemotelyControlledSampler) Close() {
|
||||
if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
|
||||
s.logger.Error("Repeated attempt to close the sampler is ignored")
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
s.doneChan <- &wg
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Equal implements Equal() of Sampler.
|
||||
func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
|
||||
// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
|
||||
// more information.
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) pollController() {
|
||||
ticker := time.NewTicker(s.samplingRefreshInterval)
|
||||
defer ticker.Stop()
|
||||
s.pollControllerWithTicker(ticker)
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
s.UpdateSampler()
|
||||
case wg := <-s.doneChan:
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sampler returns the currently active sampler.
|
||||
func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.sampler
|
||||
}
|
||||
|
||||
func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.sampler = sampler
|
||||
}
|
||||
|
||||
// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
|
||||
// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
|
||||
func (s *RemotelyControlledSampler) UpdateSampler() {
|
||||
res, err := s.samplingFetcher.Fetch(s.serviceName)
|
||||
if err != nil {
|
||||
s.metrics.SamplerQueryFailure.Inc(1)
|
||||
s.logger.Infof("failed to fetch sampling strategy: %v", err)
|
||||
return
|
||||
}
|
||||
strategy, err := s.samplingParser.Parse(res)
|
||||
if err != nil {
|
||||
s.metrics.SamplerUpdateFailure.Inc(1)
|
||||
s.logger.Infof("failed to parse sampling strategy response: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
s.metrics.SamplerRetrieved.Inc(1)
|
||||
if err := s.updateSamplerViaUpdaters(strategy); err != nil {
|
||||
s.metrics.SamplerUpdateFailure.Inc(1)
|
||||
s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
|
||||
return
|
||||
}
|
||||
s.metrics.SamplerUpdated.Inc(1)
|
||||
}
|
||||
|
||||
// NB: this function should only be called while holding a Write lock
|
||||
func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
|
||||
for _, updater := range s.updaters {
|
||||
sampler, err := updater.Update(s.sampler, strategy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sampler != nil {
|
||||
s.sampler = sampler
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("unsupported sampling strategy %+v", strategy)
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
|
||||
type ProbabilisticSamplerUpdater struct{}
|
||||
|
||||
// Update implements Update of SamplerUpdater.
|
||||
func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
|
||||
type response interface {
|
||||
GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
|
||||
}
|
||||
var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
|
||||
if resp, ok := strategy.(response); ok {
|
||||
if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
|
||||
if ps, ok := sampler.(*ProbabilisticSampler); ok {
|
||||
if err := ps.Update(probabilistic.SamplingRate); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sampler, nil
|
||||
}
|
||||
return newProbabilisticSampler(probabilistic.SamplingRate), nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
|
||||
type RateLimitingSamplerUpdater struct{}
|
||||
|
||||
// Update implements Update of SamplerUpdater.
|
||||
func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
|
||||
type response interface {
|
||||
GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
|
||||
}
|
||||
var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
|
||||
if resp, ok := strategy.(response); ok {
|
||||
if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
|
||||
rateLimit := float64(rateLimiting.MaxTracesPerSecond)
|
||||
if rl, ok := sampler.(*RateLimitingSampler); ok {
|
||||
rl.Update(rateLimit)
|
||||
return rl, nil
|
||||
}
|
||||
return NewRateLimitingSampler(rateLimit), nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
|
||||
// Fields have the same meaning as in PerOperationSamplerParams.
|
||||
type AdaptiveSamplerUpdater struct {
|
||||
MaxOperations int
|
||||
OperationNameLateBinding bool
|
||||
}
|
||||
|
||||
// Update implements Update of SamplerUpdater.
|
||||
func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
|
||||
type response interface {
|
||||
GetOperationSampling() *sampling.PerOperationSamplingStrategies
|
||||
}
|
||||
var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
|
||||
if p, ok := strategy.(response); ok {
|
||||
if operations := p.GetOperationSampling(); operations != nil {
|
||||
if as, ok := sampler.(*PerOperationSampler); ok {
|
||||
as.update(operations)
|
||||
return as, nil
|
||||
}
|
||||
return NewPerOperationSampler(PerOperationSamplerParams{
|
||||
MaxOperations: u.MaxOperations,
|
||||
OperationNameLateBinding: u.OperationNameLateBinding,
|
||||
Strategies: operations,
|
||||
}), nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
type httpSamplingStrategyFetcher struct {
|
||||
serverURL string
|
||||
logger Logger
|
||||
}
|
||||
|
||||
func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
|
||||
v := url.Values{}
|
||||
v.Set("service", serviceName)
|
||||
uri := f.serverURL + "?" + v.Encode()
|
||||
|
||||
// TODO create and reuse http.Client with proper timeout settings, etc.
|
||||
resp, err := http.Get(uri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
|
||||
}
|
||||
}()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
|
||||
}
|
||||
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// -----------------------
|
||||
|
||||
type samplingStrategyParser struct{}
|
||||
|
||||
func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
|
||||
strategy := new(sampling.SamplingStrategyResponse)
|
||||
if err := json.Unmarshal(response, strategy); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return strategy, nil
|
||||
}
|
162
vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
// Copyright (c) 2017 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/uber/jaeger-client-go/log"
|
||||
)
|
||||
|
||||
// SamplerOption is a function that sets some option on the sampler
|
||||
type SamplerOption func(options *samplerOptions)
|
||||
|
||||
// SamplerOptions is a factory for all available SamplerOption's.
|
||||
var SamplerOptions SamplerOptionsFactory
|
||||
|
||||
// SamplerOptionsFactory is a factory for all available SamplerOption's.
|
||||
// The type acts as a namespace for factory functions. It is public to
|
||||
// make the functions discoverable via godoc. Recommended to be used
|
||||
// via global SamplerOptions variable.
|
||||
type SamplerOptionsFactory struct{}
|
||||
|
||||
type samplerOptions struct {
|
||||
metrics *Metrics
|
||||
sampler SamplerV2
|
||||
logger Logger
|
||||
samplingServerURL string
|
||||
samplingRefreshInterval time.Duration
|
||||
samplingFetcher SamplingStrategyFetcher
|
||||
samplingParser SamplingStrategyParser
|
||||
updaters []SamplerUpdater
|
||||
posParams PerOperationSamplerParams
|
||||
}
|
||||
|
||||
// Metrics creates a SamplerOption that initializes Metrics on the sampler,
|
||||
// which is used to emit statistics.
|
||||
func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.metrics = m
|
||||
}
|
||||
}
|
||||
|
||||
// MaxOperations creates a SamplerOption that sets the maximum number of
|
||||
// operations the sampler will keep track of.
|
||||
func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.posParams.MaxOperations = maxOperations
|
||||
}
|
||||
}
|
||||
|
||||
// OperationNameLateBinding creates a SamplerOption that sets the respective
|
||||
// field in the PerOperationSamplerParams.
|
||||
func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.posParams.OperationNameLateBinding = enable
|
||||
}
|
||||
}
|
||||
|
||||
// InitialSampler creates a SamplerOption that sets the initial sampler
|
||||
// to use before a remote sampler is created and used.
|
||||
func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.sampler = samplerV1toV2(sampler)
|
||||
}
|
||||
}
|
||||
|
||||
// Logger creates a SamplerOption that sets the logger used by the sampler.
|
||||
func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingServerURL creates a SamplerOption that sets the sampling server url
|
||||
// of the local agent that contains the sampling strategies.
|
||||
func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingServerURL = samplingServerURL
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingRefreshInterval creates a SamplerOption that sets how often the
|
||||
// sampler will poll local agent for the appropriate sampling strategy.
|
||||
func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingRefreshInterval = samplingRefreshInterval
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
|
||||
func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingFetcher = fetcher
|
||||
}
|
||||
}
|
||||
|
||||
// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
|
||||
func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.samplingParser = parser
|
||||
}
|
||||
}
|
||||
|
||||
// Updaters creates a SamplerOption that initializes sampler updaters.
|
||||
func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption {
|
||||
return func(o *samplerOptions) {
|
||||
o.updaters = updaters
|
||||
}
|
||||
}
|
||||
|
||||
func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
|
||||
for _, option := range opts {
|
||||
option(o)
|
||||
}
|
||||
if o.sampler == nil {
|
||||
o.sampler = newProbabilisticSampler(0.001)
|
||||
}
|
||||
if o.logger == nil {
|
||||
o.logger = log.NullLogger
|
||||
}
|
||||
if o.samplingServerURL == "" {
|
||||
o.samplingServerURL = DefaultSamplingServerURL
|
||||
}
|
||||
if o.metrics == nil {
|
||||
o.metrics = NewNullMetrics()
|
||||
}
|
||||
if o.samplingRefreshInterval <= 0 {
|
||||
o.samplingRefreshInterval = defaultSamplingRefreshInterval
|
||||
}
|
||||
if o.samplingFetcher == nil {
|
||||
o.samplingFetcher = &httpSamplingStrategyFetcher{
|
||||
serverURL: o.samplingServerURL,
|
||||
logger: o.logger,
|
||||
}
|
||||
}
|
||||
if o.samplingParser == nil {
|
||||
o.samplingParser = new(samplingStrategyParser)
|
||||
}
|
||||
if o.updaters == nil {
|
||||
o.updaters = []SamplerUpdater{
|
||||
&AdaptiveSamplerUpdater{
|
||||
MaxOperations: o.posParams.MaxOperations,
|
||||
OperationNameLateBinding: o.posParams.OperationNameLateBinding,
|
||||
},
|
||||
new(ProbabilisticSamplerUpdater),
|
||||
new(RateLimitingSamplerUpdater),
|
||||
}
|
||||
}
|
||||
return o
|
||||
}
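Because the fetcher and parser are injectable through these options, strategies can come from somewhere other than the agent's HTTP endpoint. A hedged sketch of a file-based fetcher — the type, file path, and wrapper function are made up for illustration; only SamplingStrategyFetcher and the option itself come from the library:

package demo

import (
    "io/ioutil"

    "github.com/uber/jaeger-client-go"
)

// fileStrategyFetcher is a hypothetical fetcher that reads the sampling strategy
// JSON from a local file instead of querying the agent over HTTP.
type fileStrategyFetcher struct {
    path string
}

func (f *fileStrategyFetcher) Fetch(service string) ([]byte, error) {
    return ioutil.ReadFile(f.path) // same JSON shape the agent would return
}

func fileBasedRemoteSampler() *jaeger.RemotelyControlledSampler {
    return jaeger.NewRemotelyControlledSampler(
        "my-service",
        jaeger.SamplerOptions.SamplingStrategyFetcher(&fileStrategyFetcher{path: "/etc/jaeger/strategies.json"}),
    )
}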
|
93
vendor/github.com/uber/jaeger-client-go/sampler_v2.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
// Copyright (c) 2019 Uber Technologies, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jaeger
|
||||
|
||||
// SamplingDecision is returned by the V2 samplers.
|
||||
type SamplingDecision struct {
|
||||
Sample bool
|
||||
Retryable bool
|
||||
Tags []Tag
|
||||
}
|
||||
|
||||
// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
|
||||
// be made at different points of the span lifecycle.
|
||||
type SamplerV2 interface {
|
||||
OnCreateSpan(span *Span) SamplingDecision
|
||||
OnSetOperationName(span *Span, operationName string) SamplingDecision
|
||||
OnSetTag(span *Span, key string, value interface{}) SamplingDecision
|
||||
OnFinishSpan(span *Span) SamplingDecision
|
||||
|
||||
// Close does a clean shutdown of the sampler, stopping any background
|
||||
// go-routines it may have started.
|
||||
Close()
|
||||
}
|
||||
|
||||
// samplerV1toV2 wraps legacy V1 sampler into an adapter that make it look like V2.
|
||||
func samplerV1toV2(s Sampler) SamplerV2 {
|
||||
if s2, ok := s.(SamplerV2); ok {
|
||||
return s2
|
||||
}
|
||||
type legacySamplerV1toV2Adapter struct {
|
||||
legacySamplerV1Base
|
||||
}
|
||||
return &legacySamplerV1toV2Adapter{
|
||||
legacySamplerV1Base: legacySamplerV1Base{
|
||||
delegate: s.IsSampled,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
|
||||
// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
|
||||
// for backwards compatibility reasons.
|
||||
// TODO (breaking change) remove this in the next major release
|
||||
type SamplerV2Base struct{}
|
||||
|
||||
// IsSampled implements IsSampled of Sampler.
|
||||
func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Close implements Close of Sampler.
|
||||
func (SamplerV2Base) Close() {}
|
||||
|
||||
// Equal implements Equal of Sampler.
|
||||
func (SamplerV2Base) Equal(other Sampler) bool { return false }
|
||||
|
||||
// legacySamplerV1Base is used as a base for simple samplers that only implement
|
||||
// the legacy isSampled() function that is not sensitive to its arguments.
|
||||
type legacySamplerV1Base struct {
|
||||
delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
|
||||
}
|
||||
|
||||
func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
|
||||
isSampled, tags := s.delegate(span.context.traceID, span.operationName)
|
||||
return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
|
||||
}
|
||||
|
||||
func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
|
||||
isSampled, tags := s.delegate(span.context.traceID, span.operationName)
|
||||
return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
|
||||
}
|
||||
|
||||
func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
|
||||
return SamplingDecision{Sample: false, Retryable: true}
|
||||
}
|
||||
|
||||
func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
|
||||
return SamplingDecision{Sample: false, Retryable: true}
|
||||
}
|
||||
|
||||
func (s *legacySamplerV1Base) Close() {}
|
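For illustration only (not part of the vendored code): a minimal custom SamplerV2 that keeps the decision retryable until the span finishes. SamplerV2Base supplies stub V1 methods, so the same value also satisfies the V1 Sampler parameter that NewTracer still accepts.

```go
package sampling // hypothetical example package

import jaeger "github.com/uber/jaeger-client-go"

// finishTimeSampler defers its decision: spans stay writable (Retryable: true)
// until Finish, at which point everything is sampled.
type finishTimeSampler struct {
	jaeger.SamplerV2Base // provides IsSampled/Close/Equal stubs for the V1 API
}

func (s *finishTimeSampler) OnCreateSpan(span *jaeger.Span) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *finishTimeSampler) OnSetOperationName(span *jaeger.Span, operationName string) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *finishTimeSampler) OnSetTag(span *jaeger.Span, key string, value interface{}) jaeger.SamplingDecision {
	return jaeger.SamplingDecision{Sample: false, Retryable: true}
}

func (s *finishTimeSampler) OnFinishSpan(span *jaeger.Span) jaeger.SamplingDecision {
	// Final, non-retryable decision made at finish time.
	return jaeger.SamplingDecision{Sample: true, Retryable: false}
}
```

Because the type implements SamplerV2, samplerV1toV2 returns it unchanged instead of wrapping it in the legacy adapter.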
221 vendor/github.com/uber/jaeger-client-go/span.go generated vendored
@@ -16,6 +16,7 @@ package jaeger
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
@ -25,10 +26,15 @@ import (
|
||||
|
||||
// Span implements opentracing.Span
|
||||
type Span struct {
|
||||
// referenceCounter used to increase the lifetime of
|
||||
// the object before return it into the pool.
|
||||
referenceCounter int32
|
||||
|
||||
sync.RWMutex
|
||||
|
||||
tracer *Tracer
|
||||
|
||||
// TODO: (breaking change) change to use a pointer
|
||||
context SpanContext
|
||||
|
||||
// The name of the "operation" this span is an instance of.
|
||||
@ -60,18 +66,26 @@ type Span struct {
|
||||
}
|
||||
|
||||
// Tag is a simple key value wrapper.
|
||||
// TODO deprecate in the next major release, use opentracing.Tag instead.
|
||||
// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
|
||||
type Tag struct {
|
||||
key string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// NewTag creates a new Tag.
|
||||
// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
|
||||
func NewTag(key string, value interface{}) Tag {
|
||||
return Tag{key: key, value: value}
|
||||
}
|
||||
|
||||
// SetOperationName sets or changes the operation name.
|
||||
func (s *Span) SetOperationName(operationName string) opentracing.Span {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.context.IsSampled() {
|
||||
s.operationName = operationName
|
||||
s.operationName = operationName
|
||||
s.Unlock()
|
||||
if !s.isSamplingFinalized() {
|
||||
decision := s.tracer.sampler.OnSetOperationName(s, operationName)
|
||||
s.applySamplingDecision(decision, true)
|
||||
}
|
||||
s.observer.OnSetOperationName(operationName)
|
||||
return s
|
||||
@ -79,19 +93,85 @@ func (s *Span) SetOperationName(operationName string) opentracing.Span {
|
||||
|
||||
// SetTag implements SetTag() of opentracing.Span
|
||||
func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
|
||||
return s.setTagInternal(key, value, true)
|
||||
}
|
||||
|
||||
func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
|
||||
s.observer.OnSetTag(key, value)
|
||||
if key == string(ext.SamplingPriority) && !setSamplingPriority(s, value) {
|
||||
return s
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.context.IsSampled() {
|
||||
s.setTagNoLocking(key, value)
|
||||
if !s.isSamplingFinalized() {
|
||||
decision := s.tracer.sampler.OnSetTag(s, key, value)
|
||||
s.applySamplingDecision(decision, lock)
|
||||
}
|
||||
if s.isWriteable() {
|
||||
if lock {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
}
|
||||
s.appendTagNoLocking(key, value)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Span) setTagNoLocking(key string, value interface{}) {
|
||||
// SpanContext returns span context
|
||||
func (s *Span) SpanContext() SpanContext {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.context
|
||||
}
|
||||
|
||||
// StartTime returns span start time
|
||||
func (s *Span) StartTime() time.Time {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.startTime
|
||||
}
|
||||
|
||||
// Duration returns span duration
|
||||
func (s *Span) Duration() time.Duration {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.duration
|
||||
}
|
||||
|
||||
// Tags returns tags for span
|
||||
func (s *Span) Tags() opentracing.Tags {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
var result = make(opentracing.Tags, len(s.tags))
|
||||
for _, tag := range s.tags {
|
||||
result[tag.key] = tag.value
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Logs returns micro logs for span
|
||||
func (s *Span) Logs() []opentracing.LogRecord {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
return append([]opentracing.LogRecord(nil), s.logs...)
|
||||
}
|
||||
|
||||
// References returns references for this span
|
||||
func (s *Span) References() []opentracing.SpanReference {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.references == nil || len(s.references) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]opentracing.SpanReference, len(s.references))
|
||||
for i, r := range s.references {
|
||||
result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *Span) appendTagNoLocking(key string, value interface{}) {
|
||||
s.tags = append(s.tags, Tag{key: key, value: value})
|
||||
}
|
||||
|
||||
@ -111,7 +191,7 @@ func (s *Span) logFieldsNoLocking(fields ...log.Field) {
|
||||
Fields: fields,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
s.appendLog(lr)
|
||||
s.appendLogNoLocking(lr)
|
||||
}
|
||||
|
||||
// LogKV implements opentracing.Span API
|
||||
@ -148,12 +228,12 @@ func (s *Span) Log(ld opentracing.LogData) {
|
||||
if ld.Timestamp.IsZero() {
|
||||
ld.Timestamp = s.tracer.timeNow()
|
||||
}
|
||||
s.appendLog(ld.ToLogRecord())
|
||||
s.appendLogNoLocking(ld.ToLogRecord())
|
||||
}
|
||||
}
|
||||
|
||||
// this function should only be called while holding a Write lock
|
||||
func (s *Span) appendLog(lr opentracing.LogRecord) {
|
||||
func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
|
||||
// TODO add logic to limit number of logs per span (issue #46)
|
||||
s.logs = append(s.logs, lr)
|
||||
}
|
||||
@ -174,6 +254,8 @@ func (s *Span) BaggageItem(key string) string {
|
||||
}
|
||||
|
||||
// Finish implements opentracing.Span API
|
||||
// After finishing the Span object it returns back to the allocator unless the reporter retains it again,
|
||||
// so after that, the Span object should no longer be used because it won't be valid anymore.
|
||||
func (s *Span) Finish() {
|
||||
s.FinishWithOptions(opentracing.FinishOptions{})
|
||||
}
|
||||
@ -185,18 +267,27 @@ func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
|
||||
}
|
||||
s.observer.OnFinish(options)
|
||||
s.Lock()
|
||||
s.duration = options.FinishTime.Sub(s.startTime)
|
||||
s.Unlock()
|
||||
if !s.isSamplingFinalized() {
|
||||
decision := s.tracer.sampler.OnFinishSpan(s)
|
||||
s.applySamplingDecision(decision, true)
|
||||
}
|
||||
if s.context.IsSampled() {
|
||||
s.duration = options.FinishTime.Sub(s.startTime)
|
||||
// Note: bulk logs are not subject to maxLogsPerSpan limit
|
||||
if options.LogRecords != nil {
|
||||
s.logs = append(s.logs, options.LogRecords...)
|
||||
}
|
||||
for _, ld := range options.BulkLogData {
|
||||
s.logs = append(s.logs, ld.ToLogRecord())
|
||||
if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
|
||||
s.Lock()
|
||||
// Note: bulk logs are not subject to maxLogsPerSpan limit
|
||||
if options.LogRecords != nil {
|
||||
s.logs = append(s.logs, options.LogRecords...)
|
||||
}
|
||||
for _, ld := range options.BulkLogData {
|
||||
s.logs = append(s.logs, ld.ToLogRecord())
|
||||
}
|
||||
s.Unlock()
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
// call reportSpan even for non-sampled traces, to return span to the pool
|
||||
// and update metrics counter
|
||||
s.tracer.reportSpan(s)
|
||||
}
|
||||
|
||||
@ -225,25 +316,105 @@ func (s *Span) OperationName() string {
|
||||
return s.operationName
|
||||
}
|
||||
|
||||
// Retain increases object counter to increase the lifetime of the object
|
||||
func (s *Span) Retain() *Span {
|
||||
atomic.AddInt32(&s.referenceCounter, 1)
|
||||
return s
|
||||
}
|
||||
|
||||
// Release decrements object counter and return to the
|
||||
// allocator manager when counter will below zero
|
||||
func (s *Span) Release() {
|
||||
if atomic.AddInt32(&s.referenceCounter, -1) == -1 {
|
||||
s.tracer.spanAllocator.Put(s)
|
||||
}
|
||||
}
|
||||
|
||||
// reset span state and release unused data
|
||||
func (s *Span) reset() {
|
||||
s.firstInProcess = false
|
||||
s.context = emptyContext
|
||||
s.operationName = ""
|
||||
s.tracer = nil
|
||||
s.startTime = time.Time{}
|
||||
s.duration = 0
|
||||
s.observer = nil
|
||||
atomic.StoreInt32(&s.referenceCounter, 0)
|
||||
|
||||
// Note: To reuse memory we can save the pointers on the heap
|
||||
s.tags = s.tags[:0]
|
||||
s.logs = s.logs[:0]
|
||||
s.references = s.references[:0]
|
||||
}
|
||||
|
||||
func (s *Span) serviceName() string {
|
||||
return s.tracer.serviceName
|
||||
}
|
||||
|
||||
func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
|
||||
if !decision.Retryable {
|
||||
s.context.samplingState.setFinal()
|
||||
}
|
||||
if decision.Sample {
|
||||
s.context.samplingState.setSampled()
|
||||
if len(decision.Tags) > 0 {
|
||||
if lock {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
}
|
||||
for _, tag := range decision.Tags {
|
||||
s.appendTagNoLocking(tag.key, tag.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Span can be written to if it is sampled or the sampling decision has not been finalized.
|
||||
func (s *Span) isWriteable() bool {
|
||||
state := s.context.samplingState
|
||||
return !state.isFinal() || state.isSampled()
|
||||
}
|
||||
|
||||
func (s *Span) isSamplingFinalized() bool {
|
||||
return s.context.samplingState.isFinal()
|
||||
}
|
||||
|
||||
// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
|
||||
// The behavior of setSamplingPriority is surprising
|
||||
// If noDebugFlagOnForcedSampling is set
|
||||
// setSamplingPriority(span, 1) always sets only flagSampled
|
||||
// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes
|
||||
// setSamplingPriority(span, 1) sets both flagSampled and flagDebug
|
||||
// However,
|
||||
// setSamplingPriority(span, 0) always only resets flagSampled
|
||||
//
|
||||
// This means that doing a setSamplingPriority(span, 1) followed by setSamplingPriority(span, 0) can
|
||||
// leave flagDebug set
|
||||
func setSamplingPriority(s *Span, value interface{}) bool {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
val, ok := value.(uint16)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if val == 0 {
|
||||
s.context.flags = s.context.flags & (^flagSampled)
|
||||
s.context.samplingState.unsetSampled()
|
||||
s.context.samplingState.setFinal()
|
||||
return true
|
||||
}
|
||||
if s.tracer.isDebugAllowed(s.operationName) {
|
||||
s.context.flags = s.context.flags | flagDebug | flagSampled
|
||||
if s.tracer.options.noDebugFlagOnForcedSampling {
|
||||
s.context.samplingState.setSampled()
|
||||
s.context.samplingState.setFinal()
|
||||
return true
|
||||
} else if s.tracer.isDebugAllowed(s.operationName) {
|
||||
s.context.samplingState.setDebugAndSampled()
|
||||
s.context.samplingState.setFinal()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// EnableFirehose enables firehose flag on the span context
|
||||
func EnableFirehose(s *Span) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.context.samplingState.setFirehose()
|
||||
}
|
||||
|
56 vendor/github.com/uber/jaeger-client-go/span_allocator.go generated vendored Normal file
@@ -0,0 +1,56 @@
// Copyright (c) 2019 The Jaeger Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import "sync"

// SpanAllocator is an abstraction for managing span allocations.
type SpanAllocator interface {
	Get() *Span
	Put(*Span)
}

type syncPollSpanAllocator struct {
	spanPool sync.Pool
}

func newSyncPollSpanAllocator() SpanAllocator {
	return &syncPollSpanAllocator{
		spanPool: sync.Pool{New: func() interface{} {
			return &Span{}
		}},
	}
}

func (pool *syncPollSpanAllocator) Get() *Span {
	return pool.spanPool.Get().(*Span)
}

func (pool *syncPollSpanAllocator) Put(span *Span) {
	span.reset()
	pool.spanPool.Put(span)
}

type simpleSpanAllocator struct{}

func (pool simpleSpanAllocator) Get() *Span {
	return &Span{}
}

func (pool simpleSpanAllocator) Put(span *Span) {
	// @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351
	// since finished spans are not reused, no need to reset them
	// span.reset()
}
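Pooled spans are recycled through Span.Release once reporting is done, so a reporter that processes spans asynchronously must extend their lifetime, as the note in tracer.go's reportSpan explains. A hedged sketch (not part of the diff) of that Retain/Release contract, assuming the package's Reporter interface:

```go
package reporting // hypothetical example package

import jaeger "github.com/uber/jaeger-client-go"

// queueReporter hands sampled spans to a background worker. It Retains each
// span so the allocator cannot recycle it while it is still queued.
type queueReporter struct {
	spans chan *jaeger.Span
}

func (r *queueReporter) Report(span *jaeger.Span) {
	r.spans <- span.Retain() // bump the reference counter before queueing
}

func (r *queueReporter) Close() {
	close(r.spans)
}

func (r *queueReporter) worker() {
	for span := range r.spans {
		_ = span.OperationName() // e.g. serialize and ship the span here
		span.Release()           // hand it back to the span allocator
	}
}
```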
@ -19,11 +19,15 @@ import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"go.uber.org/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
flagSampled = byte(1)
|
||||
flagDebug = byte(2)
|
||||
flagSampled = 1
|
||||
flagDebug = 2
|
||||
flagFirehose = 8
|
||||
)
|
||||
|
||||
var (
|
||||
@ -55,9 +59,6 @@ type SpanContext struct {
|
||||
// Should be 0 if the current span is a root span.
|
||||
parentID SpanID
|
||||
|
||||
// flags is a bitmap containing such bits as 'sampled' and 'debug'.
|
||||
flags byte
|
||||
|
||||
// Distributed Context baggage. The is a snapshot in time.
|
||||
baggage map[string]string
|
||||
|
||||
@ -66,6 +67,102 @@ type SpanContext struct {
|
||||
//
|
||||
// See JaegerDebugHeader in constants.go
|
||||
debugID string
|
||||
|
||||
// samplingState is shared across all spans
|
||||
samplingState *samplingState
|
||||
|
||||
// remote indicates that span context represents a remote parent
|
||||
remote bool
|
||||
}
|
||||
|
||||
type samplingState struct {
|
||||
// Span context's state flags that are propagated across processes. Only lower 8 bits are used.
|
||||
// We use an int32 instead of byte to be able to use CAS operations.
|
||||
stateFlags atomic.Int32
|
||||
|
||||
// When state is not final, sampling will be retried on other span write operations,
|
||||
// like SetOperationName / SetTag, and the spans will remain writable.
|
||||
final atomic.Bool
|
||||
|
||||
// localRootSpan stores the SpanID of the first span created in this process for a given trace.
|
||||
localRootSpan SpanID
|
||||
|
||||
// extendedState allows samplers to keep intermediate state.
|
||||
// The keys and values in this map are completely opaque: interface{} -> interface{}.
|
||||
extendedState sync.Map
|
||||
}
|
||||
|
||||
func (s *samplingState) isLocalRootSpan(id SpanID) bool {
|
||||
return id == s.localRootSpan
|
||||
}
|
||||
|
||||
func (s *samplingState) setFlag(newFlag int32) {
|
||||
swapped := false
|
||||
for !swapped {
|
||||
old := s.stateFlags.Load()
|
||||
swapped = s.stateFlags.CAS(old, old|newFlag)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *samplingState) unsetFlag(newFlag int32) {
|
||||
swapped := false
|
||||
for !swapped {
|
||||
old := s.stateFlags.Load()
|
||||
swapped = s.stateFlags.CAS(old, old&^newFlag)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *samplingState) setSampled() {
|
||||
s.setFlag(flagSampled)
|
||||
}
|
||||
|
||||
func (s *samplingState) unsetSampled() {
|
||||
s.unsetFlag(flagSampled)
|
||||
}
|
||||
|
||||
func (s *samplingState) setDebugAndSampled() {
|
||||
s.setFlag(flagDebug | flagSampled)
|
||||
}
|
||||
|
||||
func (s *samplingState) setFirehose() {
|
||||
s.setFlag(flagFirehose)
|
||||
}
|
||||
|
||||
func (s *samplingState) setFlags(flags byte) {
|
||||
s.stateFlags.Store(int32(flags))
|
||||
}
|
||||
|
||||
func (s *samplingState) setFinal() {
|
||||
s.final.Store(true)
|
||||
}
|
||||
|
||||
func (s *samplingState) flags() byte {
|
||||
return byte(s.stateFlags.Load())
|
||||
}
|
||||
|
||||
func (s *samplingState) isSampled() bool {
|
||||
return s.stateFlags.Load()&flagSampled == flagSampled
|
||||
}
|
||||
|
||||
func (s *samplingState) isDebug() bool {
|
||||
return s.stateFlags.Load()&flagDebug == flagDebug
|
||||
}
|
||||
|
||||
func (s *samplingState) isFirehose() bool {
|
||||
return s.stateFlags.Load()&flagFirehose == flagFirehose
|
||||
}
|
||||
|
||||
func (s *samplingState) isFinal() bool {
|
||||
return s.final.Load()
|
||||
}
|
||||
|
||||
func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
|
||||
if value, ok := s.extendedState.Load(key); ok {
|
||||
return value
|
||||
}
|
||||
value := initValue()
|
||||
value, _ = s.extendedState.LoadOrStore(key, value)
|
||||
return value
|
||||
}
|
||||
|
||||
// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
|
||||
@ -80,12 +177,28 @@ func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
|
||||
// IsSampled returns whether this trace was chosen for permanent storage
|
||||
// by the sampling mechanism of the tracer.
|
||||
func (c SpanContext) IsSampled() bool {
|
||||
return (c.flags & flagSampled) == flagSampled
|
||||
return c.samplingState.isSampled()
|
||||
}
|
||||
|
||||
// IsDebug indicates whether sampling was explicitly requested by the service.
|
||||
func (c SpanContext) IsDebug() bool {
|
||||
return (c.flags & flagDebug) == flagDebug
|
||||
return c.samplingState.isDebug()
|
||||
}
|
||||
|
||||
// IsSamplingFinalized indicates whether the sampling decision has been finalized.
|
||||
func (c SpanContext) IsSamplingFinalized() bool {
|
||||
return c.samplingState.isFinal()
|
||||
}
|
||||
|
||||
// IsFirehose indicates whether the firehose flag was set
|
||||
func (c SpanContext) IsFirehose() bool {
|
||||
return c.samplingState.isFirehose()
|
||||
}
|
||||
|
||||
// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
|
||||
// it is initialized via initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
|
||||
func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
|
||||
return c.samplingState.extendedStateForKey(key, initValue)
|
||||
}
|
||||
|
||||
// IsValid indicates whether this context actually represents a valid trace.
|
||||
@ -93,11 +206,16 @@ func (c SpanContext) IsValid() bool {
|
||||
return c.traceID.IsValid() && c.spanID != 0
|
||||
}
|
||||
|
||||
// SetFirehose enables firehose mode for this trace.
|
||||
func (c SpanContext) SetFirehose() {
|
||||
c.samplingState.setFirehose()
|
||||
}
|
||||
|
||||
func (c SpanContext) String() string {
|
||||
if c.traceID.High == 0 {
|
||||
return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
|
||||
return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
|
||||
}
|
||||
return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags)
|
||||
return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.samplingState.stateFlags.Load())
|
||||
}
|
||||
|
||||
// ContextFromString reconstructs the Context encoded in a string
|
||||
@ -124,7 +242,8 @@ func ContextFromString(value string) (SpanContext, error) {
|
||||
if err != nil {
|
||||
return emptyContext, err
|
||||
}
|
||||
context.flags = byte(flags)
|
||||
context.samplingState = &samplingState{}
|
||||
context.samplingState.setFlags(byte(flags))
|
||||
return context, nil
|
||||
}
|
||||
|
||||
@ -143,18 +262,24 @@ func (c SpanContext) ParentID() SpanID {
|
||||
return c.parentID
|
||||
}
|
||||
|
||||
// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
|
||||
func (c SpanContext) Flags() byte {
|
||||
return c.samplingState.flags()
|
||||
}
|
||||
|
||||
// NewSpanContext creates a new instance of SpanContext
|
||||
func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
|
||||
flags := byte(0)
|
||||
samplingState := &samplingState{}
|
||||
if sampled {
|
||||
flags = flagSampled
|
||||
samplingState.setSampled()
|
||||
}
|
||||
|
||||
return SpanContext{
|
||||
traceID: traceID,
|
||||
spanID: spanID,
|
||||
parentID: parentID,
|
||||
flags: flags,
|
||||
baggage: baggage}
|
||||
traceID: traceID,
|
||||
spanID: spanID,
|
||||
parentID: parentID,
|
||||
samplingState: samplingState,
|
||||
baggage: baggage}
|
||||
}
|
||||
|
||||
// CopyFrom copies data from ctx into this context, including span identity and baggage.
|
||||
@ -163,7 +288,7 @@ func (c *SpanContext) CopyFrom(ctx *SpanContext) {
|
||||
c.traceID = ctx.traceID
|
||||
c.spanID = ctx.spanID
|
||||
c.parentID = ctx.parentID
|
||||
c.flags = ctx.flags
|
||||
c.samplingState = ctx.samplingState
|
||||
if l := len(ctx.baggage); l > 0 {
|
||||
c.baggage = make(map[string]string, l)
|
||||
for k, v := range ctx.baggage {
|
||||
@ -187,7 +312,7 @@ func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
|
||||
newBaggage[key] = value
|
||||
}
|
||||
// Use positional parameters so the compiler will help catch new fields.
|
||||
return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""}
|
||||
return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
|
||||
}
|
||||
|
||||
// isDebugIDContainerOnly returns true when the instance of the context is only
|
269 vendor/github.com/uber/jaeger-client-go/tracer.go generated vendored
@@ -38,7 +38,7 @@ type Tracer struct {
|
||||
serviceName string
|
||||
hostIPv4 uint32 // this is for zipkin endpoint conversion
|
||||
|
||||
sampler Sampler
|
||||
sampler SamplerV2
|
||||
reporter Reporter
|
||||
metrics Metrics
|
||||
logger log.Logger
|
||||
@ -47,15 +47,15 @@ type Tracer struct {
|
||||
randomNumber func() uint64
|
||||
|
||||
options struct {
|
||||
poolSpans bool
|
||||
gen128Bit bool // whether to generate 128bit trace IDs
|
||||
zipkinSharedRPCSpan bool
|
||||
highTraceIDGenerator func() uint64 // custom high trace ID generator
|
||||
maxTagValueLength int
|
||||
gen128Bit bool // whether to generate 128bit trace IDs
|
||||
zipkinSharedRPCSpan bool
|
||||
highTraceIDGenerator func() uint64 // custom high trace ID generator
|
||||
maxTagValueLength int
|
||||
noDebugFlagOnForcedSampling bool
|
||||
// more options to come
|
||||
}
|
||||
// pool for Span objects
|
||||
spanPool sync.Pool
|
||||
// allocator of Span objects
|
||||
spanAllocator SpanAllocator
|
||||
|
||||
injectors map[interface{}]Injector
|
||||
extractors map[interface{}]Extractor
|
||||
@ -74,6 +74,7 @@ type Tracer struct {
|
||||
// NewTracer creates Tracer implementation that reports tracing to Jaeger.
|
||||
// The returned io.Closer can be used in shutdown hooks to ensure that the internal
|
||||
// queue of the Reporter is drained and all buffered spans are submitted to collectors.
|
||||
// TODO (breaking change) return *Tracer only, without closer.
|
||||
func NewTracer(
|
||||
serviceName string,
|
||||
sampler Sampler,
|
||||
@ -81,15 +82,13 @@ func NewTracer(
|
||||
options ...TracerOption,
|
||||
) (opentracing.Tracer, io.Closer) {
|
||||
t := &Tracer{
|
||||
serviceName: serviceName,
|
||||
sampler: sampler,
|
||||
reporter: reporter,
|
||||
injectors: make(map[interface{}]Injector),
|
||||
extractors: make(map[interface{}]Extractor),
|
||||
metrics: *NewNullMetrics(),
|
||||
spanPool: sync.Pool{New: func() interface{} {
|
||||
return &Span{}
|
||||
}},
|
||||
serviceName: serviceName,
|
||||
sampler: samplerV1toV2(sampler),
|
||||
reporter: reporter,
|
||||
injectors: make(map[interface{}]Injector),
|
||||
extractors: make(map[interface{}]Extractor),
|
||||
metrics: *NewNullMetrics(),
|
||||
spanAllocator: simpleSpanAllocator{},
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
@ -148,7 +147,15 @@ func NewTracer(
|
||||
if hostname, err := os.Hostname(); err == nil {
|
||||
t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
|
||||
}
|
||||
if ip, err := utils.HostIP(); err == nil {
|
||||
if ipval, ok := t.getTag(TracerIPTagKey); ok {
|
||||
ipv4, err := utils.ParseIPToUint32(ipval.(string))
|
||||
if err != nil {
|
||||
t.hostIPv4 = 0
|
||||
t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error())
|
||||
} else {
|
||||
t.hostIPv4 = ipv4
|
||||
}
|
||||
} else if ip, err := utils.HostIP(); err == nil {
|
||||
t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
|
||||
t.hostIPv4 = utils.PackIPAsUint32(ip)
|
||||
} else {
|
||||
@ -217,20 +224,30 @@ func (t *Tracer) startSpanWithOptions(
|
||||
var references []Reference
|
||||
var parent SpanContext
|
||||
var hasParent bool // need this because `parent` is a value, not reference
|
||||
var ctx SpanContext
|
||||
var isSelfRef bool
|
||||
for _, ref := range options.References {
|
||||
ctx, ok := ref.ReferencedContext.(SpanContext)
|
||||
ctxRef, ok := ref.ReferencedContext.(SpanContext)
|
||||
if !ok {
|
||||
t.logger.Error(fmt.Sprintf(
|
||||
"Reference contains invalid type of SpanReference: %s",
|
||||
reflect.ValueOf(ref.ReferencedContext)))
|
||||
continue
|
||||
}
|
||||
if !isValidReference(ctx) {
|
||||
if !isValidReference(ctxRef) {
|
||||
continue
|
||||
}
|
||||
references = append(references, Reference{Type: ref.Type, Context: ctx})
|
||||
|
||||
if ref.Type == selfRefType {
|
||||
isSelfRef = true
|
||||
ctx = ctxRef
|
||||
continue
|
||||
}
|
||||
|
||||
references = append(references, Reference{Type: ref.Type, Context: ctxRef})
|
||||
|
||||
if !hasParent {
|
||||
parent = ctx
|
||||
parent = ctxRef
|
||||
hasParent = ref.Type == opentracing.ChildOfRef
|
||||
}
|
||||
}
|
||||
@ -245,60 +262,77 @@ func (t *Tracer) startSpanWithOptions(
|
||||
rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
|
||||
}
|
||||
|
||||
var samplerTags []Tag
|
||||
var ctx SpanContext
|
||||
var internalTags []Tag
|
||||
newTrace := false
|
||||
if !hasParent || !parent.IsValid() {
|
||||
newTrace = true
|
||||
ctx.traceID.Low = t.randomID()
|
||||
if t.options.gen128Bit {
|
||||
ctx.traceID.High = t.options.highTraceIDGenerator()
|
||||
}
|
||||
ctx.spanID = SpanID(ctx.traceID.Low)
|
||||
ctx.parentID = 0
|
||||
ctx.flags = byte(0)
|
||||
if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
|
||||
ctx.flags |= (flagSampled | flagDebug)
|
||||
samplerTags = []Tag{{key: JaegerDebugHeader, value: parent.debugID}}
|
||||
} else if sampled, tags := t.sampler.IsSampled(ctx.traceID, operationName); sampled {
|
||||
ctx.flags |= flagSampled
|
||||
samplerTags = tags
|
||||
}
|
||||
} else {
|
||||
ctx.traceID = parent.traceID
|
||||
if rpcServer && t.options.zipkinSharedRPCSpan {
|
||||
// Support Zipkin's one-span-per-RPC model
|
||||
ctx.spanID = parent.spanID
|
||||
ctx.parentID = parent.parentID
|
||||
if !isSelfRef {
|
||||
if !hasParent || !parent.IsValid() {
|
||||
newTrace = true
|
||||
ctx.traceID.Low = t.randomID()
|
||||
if t.options.gen128Bit {
|
||||
ctx.traceID.High = t.options.highTraceIDGenerator()
|
||||
}
|
||||
ctx.spanID = SpanID(ctx.traceID.Low)
|
||||
ctx.parentID = 0
|
||||
ctx.samplingState = &samplingState{
|
||||
localRootSpan: ctx.spanID,
|
||||
}
|
||||
if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
|
||||
ctx.samplingState.setDebugAndSampled()
|
||||
internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
|
||||
}
|
||||
} else {
|
||||
ctx.spanID = SpanID(t.randomID())
|
||||
ctx.parentID = parent.spanID
|
||||
ctx.traceID = parent.traceID
|
||||
if rpcServer && t.options.zipkinSharedRPCSpan {
|
||||
// Support Zipkin's one-span-per-RPC model
|
||||
ctx.spanID = parent.spanID
|
||||
ctx.parentID = parent.parentID
|
||||
} else {
|
||||
ctx.spanID = SpanID(t.randomID())
|
||||
ctx.parentID = parent.spanID
|
||||
}
|
||||
ctx.samplingState = parent.samplingState
|
||||
if parent.remote {
|
||||
ctx.samplingState.setFinal()
|
||||
ctx.samplingState.localRootSpan = ctx.spanID
|
||||
}
|
||||
}
|
||||
ctx.flags = parent.flags
|
||||
}
|
||||
if hasParent {
|
||||
// copy baggage items
|
||||
if l := len(parent.baggage); l > 0 {
|
||||
ctx.baggage = make(map[string]string, len(parent.baggage))
|
||||
for k, v := range parent.baggage {
|
||||
ctx.baggage[k] = v
|
||||
if hasParent {
|
||||
// copy baggage items
|
||||
if l := len(parent.baggage); l > 0 {
|
||||
ctx.baggage = make(map[string]string, len(parent.baggage))
|
||||
for k, v := range parent.baggage {
|
||||
ctx.baggage[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sp := t.newSpan()
|
||||
sp.context = ctx
|
||||
sp.tracer = t
|
||||
sp.operationName = operationName
|
||||
sp.startTime = options.StartTime
|
||||
sp.duration = 0
|
||||
sp.references = references
|
||||
sp.firstInProcess = rpcServer || sp.context.parentID == 0
|
||||
|
||||
if !sp.isSamplingFinalized() {
|
||||
decision := t.sampler.OnCreateSpan(sp)
|
||||
sp.applySamplingDecision(decision, false)
|
||||
}
|
||||
sp.observer = t.observer.OnStartSpan(sp, operationName, options)
|
||||
return t.startSpanInternal(
|
||||
sp,
|
||||
operationName,
|
||||
options.StartTime,
|
||||
samplerTags,
|
||||
options.Tags,
|
||||
newTrace,
|
||||
rpcServer,
|
||||
references,
|
||||
)
|
||||
|
||||
if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
|
||||
if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
|
||||
sp.tags = make([]Tag, 0, tagsTotalLength)
|
||||
}
|
||||
sp.tags = append(sp.tags, internalTags...)
|
||||
for k, v := range options.Tags {
|
||||
sp.setTagInternal(k, v, false)
|
||||
}
|
||||
}
|
||||
t.emitNewSpanMetrics(sp, newTrace)
|
||||
return sp
|
||||
}
|
||||
|
||||
// Inject implements Inject() method of opentracing.Tracer
|
||||
@ -323,6 +357,7 @@ func (t *Tracer) Extract(
|
||||
if err != nil {
|
||||
return nil, err // ensure returned spanCtx is nil
|
||||
}
|
||||
spanCtx.remote = true
|
||||
return spanCtx, nil
|
||||
}
|
||||
return nil, opentracing.ErrUnsupportedFormat
|
||||
@ -333,10 +368,10 @@ func (t *Tracer) Close() error {
|
||||
t.reporter.Close()
|
||||
t.sampler.Close()
|
||||
if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
|
||||
mgr.Close()
|
||||
_ = mgr.Close()
|
||||
}
|
||||
if throttler, ok := t.debugThrottler.(io.Closer); ok {
|
||||
throttler.Close()
|
||||
_ = throttler.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -350,55 +385,38 @@ func (t *Tracer) Tags() []opentracing.Tag {
|
||||
return tags
|
||||
}
|
||||
|
||||
// getTag returns the value of specific tag, if not exists, return nil.
|
||||
// TODO only used by tests, move there.
|
||||
func (t *Tracer) getTag(key string) (interface{}, bool) {
|
||||
for _, tag := range t.tags {
|
||||
if tag.key == key {
|
||||
return tag.value, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// newSpan returns an instance of a clean Span object.
|
||||
// If options.PoolSpans is true, the spans are retrieved from an object pool.
|
||||
func (t *Tracer) newSpan() *Span {
|
||||
if !t.options.poolSpans {
|
||||
return &Span{}
|
||||
}
|
||||
sp := t.spanPool.Get().(*Span)
|
||||
sp.context = emptyContext
|
||||
sp.tracer = nil
|
||||
sp.tags = nil
|
||||
sp.logs = nil
|
||||
return sp
|
||||
return t.spanAllocator.Get()
|
||||
}
|
||||
|
||||
func (t *Tracer) startSpanInternal(
|
||||
sp *Span,
|
||||
operationName string,
|
||||
startTime time.Time,
|
||||
internalTags []Tag,
|
||||
tags opentracing.Tags,
|
||||
newTrace bool,
|
||||
rpcServer bool,
|
||||
references []Reference,
|
||||
) *Span {
|
||||
sp.tracer = t
|
||||
sp.operationName = operationName
|
||||
sp.startTime = startTime
|
||||
sp.duration = 0
|
||||
sp.references = references
|
||||
sp.firstInProcess = rpcServer || sp.context.parentID == 0
|
||||
if len(tags) > 0 || len(internalTags) > 0 {
|
||||
sp.tags = make([]Tag, len(internalTags), len(tags)+len(internalTags))
|
||||
copy(sp.tags, internalTags)
|
||||
for k, v := range tags {
|
||||
sp.observer.OnSetTag(k, v)
|
||||
if k == string(ext.SamplingPriority) && !setSamplingPriority(sp, v) {
|
||||
continue
|
||||
}
|
||||
sp.setTagNoLocking(k, v)
|
||||
// emitNewSpanMetrics generates metrics on the number of started spans and traces.
|
||||
// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the
|
||||
// server-side RPC span has the exact same trace/span/parent IDs as the
|
||||
// calling client-side span, but obviously the server side span is
|
||||
// no longer a root span of the trace.
|
||||
func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
|
||||
if !sp.isSamplingFinalized() {
|
||||
t.metrics.SpansStartedDelayedSampling.Inc(1)
|
||||
if newTrace {
|
||||
t.metrics.TracesStartedDelayedSampling.Inc(1)
|
||||
}
|
||||
}
|
||||
// emit metrics
|
||||
if sp.context.IsSampled() {
|
||||
// joining a trace is not possible, because sampling decision inherited from upstream is final
|
||||
} else if sp.context.IsSampled() {
|
||||
t.metrics.SpansStartedSampled.Inc(1)
|
||||
if newTrace {
|
||||
// We cannot simply check for parentID==0 because in Zipkin model the
|
||||
// server-side RPC span has the exact same trace/span/parent IDs as the
|
||||
// calling client-side span, but obviously the server side span is
|
||||
// no longer a root span of the trace.
|
||||
t.metrics.TracesStartedSampled.Inc(1)
|
||||
} else if sp.firstInProcess {
|
||||
t.metrics.TracesJoinedSampled.Inc(1)
|
||||
@ -411,17 +429,25 @@ func (t *Tracer) startSpanInternal(
|
||||
t.metrics.TracesJoinedNotSampled.Inc(1)
|
||||
}
|
||||
}
|
||||
return sp
|
||||
}
|
||||
|
||||
func (t *Tracer) reportSpan(sp *Span) {
|
||||
t.metrics.SpansFinished.Inc(1)
|
||||
if !sp.isSamplingFinalized() {
|
||||
t.metrics.SpansFinishedDelayedSampling.Inc(1)
|
||||
} else if sp.context.IsSampled() {
|
||||
t.metrics.SpansFinishedSampled.Inc(1)
|
||||
} else {
|
||||
t.metrics.SpansFinishedNotSampled.Inc(1)
|
||||
}
|
||||
|
||||
// Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
|
||||
// and then Release() it when no longer needed.
|
||||
// Otherwise, the span may be reused for another trace and its data may be overwritten.
|
||||
if sp.context.IsSampled() {
|
||||
t.reporter.Report(sp)
|
||||
}
|
||||
if t.options.poolSpans {
|
||||
t.spanPool.Put(sp)
|
||||
}
|
||||
|
||||
sp.Release()
|
||||
}
|
||||
|
||||
// randomID generates a random trace/span ID, using tracer.random() generator.
|
||||
@ -443,3 +469,18 @@ func (t *Tracer) setBaggage(sp *Span, key, value string) {
|
||||
func (t *Tracer) isDebugAllowed(operation string) bool {
|
||||
return t.debugThrottler.IsAllowed(operation)
|
||||
}
|
||||
|
||||
// Sampler returns the sampler given to the tracer at creation.
|
||||
func (t *Tracer) Sampler() SamplerV2 {
|
||||
return t.sampler
|
||||
}
|
||||
|
||||
// SelfRef creates an opentracing compliant SpanReference from a jaeger
|
||||
// SpanContext. This is a factory function in order to encapsulate jaeger specific
|
||||
// types.
|
||||
func SelfRef(ctx SpanContext) opentracing.SpanReference {
|
||||
return opentracing.SpanReference{
|
||||
Type: selfRefType,
|
||||
ReferencedContext: ctx,
|
||||
}
|
||||
}
|
||||
|
12 vendor/github.com/uber/jaeger-client-go/tracer_options.go generated vendored
@@ -81,7 +81,11 @@ func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
// that can access parent spans after those spans have been finished.
func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
	return func(tracer *Tracer) {
		tracer.options.poolSpans = poolSpans
		if poolSpans {
			tracer.spanAllocator = newSyncPollSpanAllocator()
		} else {
			tracer.spanAllocator = simpleSpanAllocator{}
		}
	}
}

@@ -122,6 +126,12 @@ func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
	}
}

func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
	return func(tracer *Tracer) {
		tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
	}
}

func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
	return func(tracer *Tracer) {
		tracer.options.highTraceIDGenerator = highTraceIDGenerator
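PoolSpans now swaps the tracer's SpanAllocator instead of only recording a flag. A hedged sketch (not part of the diff), assuming the package's exported TracerOptions factory variable, of enabling both new options at construction time:

```go
package main

import (
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tracer, closer := jaeger.NewTracer(
		"my-service", // hypothetical service name
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.PoolSpans(true),                   // sync.Pool-backed span allocator
		jaeger.TracerOptions.NoDebugFlagOnForcedSampling(true), // sampling.priority=1 sets only the sampled flag
	)
	defer closer.Close()
	_ = tracer
}
```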
93 vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go generated vendored
@@ -20,22 +20,15 @@ import (
)

// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
//
// TODO (breaking change) remove this interface in favor of public struct below
//
// Deprecated, use ReconfigurableRateLimiter.
type RateLimiter interface {
	CheckCredit(itemCost float64) bool
}

type rateLimiter struct {
	sync.Mutex

	creditsPerSecond float64
	balance          float64
	maxBalance       float64
	lastTick         time.Time

	timeNow func() time.Time
}

// NewRateLimiter creates a new rate limiter based on leaky bucket algorithm, formulated in terms of a
// ReconfigurableRateLimiter is a rate limiter based on leaky bucket algorithm, formulated in terms of a
// credits balance that is replenished every time CheckCredit() method is called (tick) by the amount proportional
// to the time elapsed since the last tick, up to max of creditsPerSecond. A call to CheckCredit() takes a cost
// of an item we want to pay with the balance. If the balance exceeds the cost of the item, the item is "purchased"
@@ -47,31 +40,73 @@ type rateLimiter struct {
//
// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to desired throughput
// as bytes/second, and calling CheckCredit() with the actual message size.
func NewRateLimiter(creditsPerSecond, maxBalance float64) RateLimiter {
	return &rateLimiter{
//
// TODO (breaking change) rename to RateLimiter once the interface is removed
type ReconfigurableRateLimiter struct {
	lock sync.Mutex

	creditsPerSecond float64
	balance          float64
	maxBalance       float64
	lastTick         time.Time

	timeNow func() time.Time
}

// NewRateLimiter creates a new ReconfigurableRateLimiter.
func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
	return &ReconfigurableRateLimiter{
		creditsPerSecond: creditsPerSecond,
		balance:          maxBalance,
		maxBalance:       maxBalance,
		lastTick:         time.Now(),
		timeNow:          time.Now}
		timeNow:          time.Now,
	}
}

func (b *rateLimiter) CheckCredit(itemCost float64) bool {
	b.Lock()
	defer b.Unlock()
	// calculate how much time passed since the last tick, and update current tick
	currentTime := b.timeNow()
	elapsedTime := currentTime.Sub(b.lastTick)
	b.lastTick = currentTime
	// calculate how much credit have we accumulated since the last tick
	b.balance += elapsedTime.Seconds() * b.creditsPerSecond
	if b.balance > b.maxBalance {
		b.balance = b.maxBalance
	}
// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
// is not less than itemCost.
func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
	rl.lock.Lock()
	defer rl.lock.Unlock()

	// if we have enough credits to pay for current item, then reduce balance and allow
	if b.balance >= itemCost {
		b.balance -= itemCost
	if rl.balance >= itemCost {
		rl.balance -= itemCost
		return true
	}
	// otherwise check if balance can be increased due to time elapsed, and try again
	rl.updateBalance()
	if rl.balance >= itemCost {
		rl.balance -= itemCost
		return true
	}
	return false
}

// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
func (rl *ReconfigurableRateLimiter) updateBalance() {
	// calculate how much time passed since the last tick, and update current tick
	currentTime := rl.timeNow()
	elapsedTime := currentTime.Sub(rl.lastTick)
	rl.lastTick = currentTime
	// calculate how much credit have we accumulated since the last tick
	rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
	if rl.balance > rl.maxBalance {
		rl.balance = rl.maxBalance
	}
}

// Update changes the main parameters of the rate limiter in-place, while retaining
// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
// instead of creating a new rate limiter helps to avoid thundering herd when sampling
// strategies are updated.
func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
	rl.lock.Lock()
	defer rl.lock.Unlock()

	rl.updateBalance() // get up to date balance
	rl.balance = rl.balance * maxBalance / rl.maxBalance
	rl.creditsPerSecond = creditsPerSecond
	rl.maxBalance = maxBalance
}
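The reworked limiter only replenishes the balance when the fast path fails, and it can now be reconfigured in place. A short usage sketch (not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-client-go/utils"
)

func main() {
	// Allow roughly 2 items per second with a burst capacity of 2 credits.
	limiter := utils.NewRateLimiter(2.0, 2.0)

	if limiter.CheckCredit(1.0) {
		fmt.Println("item allowed")
	}

	// A new sampling strategy arrives: reconfigure in place so the accumulated
	// balance is pro-rated rather than reset (avoids a thundering herd).
	limiter.Update(10.0, 10.0)
}
```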
5 vendor/github.com/uber/jaeger-client-go/zipkin.go generated vendored
@@ -55,7 +55,7 @@ func (p *zipkinPropagator) Inject(
	carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
	carrier.SetSpanID(uint64(ctx.SpanID()))
	carrier.SetParentID(uint64(ctx.ParentID()))
	carrier.SetFlags(ctx.flags)
	carrier.SetFlags(ctx.samplingState.flags())
	return nil
}

@@ -71,6 +71,7 @@ func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, er
	ctx.traceID.Low = carrier.TraceID()
	ctx.spanID = SpanID(carrier.SpanID())
	ctx.parentID = SpanID(carrier.ParentID())
	ctx.flags = carrier.Flags()
	ctx.samplingState = &samplingState{}
	ctx.samplingState.setFlags(carrier.Flags())
	return ctx, nil
}
2 vendor/github.com/uber/jaeger-lib/metrics/metrics.go generated vendored
@@ -37,7 +37,7 @@ func MustInit(metrics interface{}, factory Factory, globalTags map[string]string
	}
}

// Init does the same as Init, but returns an error instead of
// Init does the same as MustInit, but returns an error instead of
// panicking.
func Init(m interface{}, factory Factory, globalTags map[string]string) error {
	// Allow user to opt out of reporting metrics by passing in nil.
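Init mirrors MustInit but returns the error instead of panicking. A hedged sketch (not part of the diff) of initializing a metrics struct, assuming jaeger-lib's `metric` struct tags and its NullFactory:

```go
package main

import (
	"log"

	"github.com/uber/jaeger-lib/metrics"
)

// appMetrics is a hypothetical metrics bundle; Init fills the tagged fields.
type appMetrics struct {
	RequestsStarted metrics.Counter `metric:"requests_started"`
}

func main() {
	var m appMetrics
	if err := metrics.Init(&m, metrics.NullFactory, nil); err != nil {
		log.Fatal(err) // MustInit would panic here instead
	}
	m.RequestsStarted.Inc(1)
}
```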
12 vendor/go.uber.org/atomic/.gitignore generated vendored Normal file
@@ -0,0 +1,12 @@
/bin
.DS_Store
/vendor
cover.html
cover.out
lint.log

# Binaries
*.test

# Profiling output
*.prof
59 vendor/go.uber.org/atomic/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,59 @@
|
||||
# Changelog
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [1.5.1] - 2019-11-19
|
||||
- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together
|
||||
causing `CAS` to fail even though the old value matches.
|
||||
|
||||
## [1.5.0] - 2019-10-29
|
||||
### Changed
|
||||
- With Go modules, only the `go.uber.org/atomic` import path is supported now.
|
||||
If you need to use the old import path, please add a `replace` directive to
|
||||
your `go.mod`.
|
||||
|
||||
## [1.4.0] - 2019-05-01
|
||||
### Added
|
||||
- Add `atomic.Error` type for atomic operations on `error` values.
|
||||
|
||||
## [1.3.2] - 2018-05-02
|
||||
### Added
|
||||
- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
|
||||
|
||||
## [1.3.1] - 2017-11-14
|
||||
### Fixed
|
||||
- Revert optimization for `atomic.String.Store("")` which caused data races.
|
||||
|
||||
## [1.3.0] - 2017-11-13
|
||||
### Added
|
||||
- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
|
||||
|
||||
### Changed
|
||||
- Optimize `atomic.String.Store("")` by avoiding an allocation.
|
||||
|
||||
## [1.2.0] - 2017-04-12
|
||||
### Added
|
||||
- Shadow `atomic.Value` from `sync/atomic`.
|
||||
|
||||
## [1.1.0] - 2017-03-10
|
||||
### Added
|
||||
- Add atomic `Float64` type.
|
||||
|
||||
### Changed
|
||||
- Support new `go.uber.org/atomic` import path.
|
||||
|
||||
## [1.0.0] - 2016-07-18
|
||||
|
||||
- Initial release.
|
||||
|
||||
[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
|
||||
[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
|
||||
[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
|
||||
[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
|
||||
[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
|
||||
[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
|
||||
[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
|
||||
[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
|
||||
[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
|
19 vendor/go.uber.org/atomic/LICENSE.txt generated vendored Normal file
@@ -0,0 +1,19 @@
|
||||
Copyright (c) 2016 Uber Technologies, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
35 vendor/go.uber.org/atomic/Makefile generated vendored Normal file
@@ -0,0 +1,35 @@
|
||||
# Directory to place `go install`ed binaries into.
|
||||
export GOBIN ?= $(shell pwd)/bin
|
||||
|
||||
GOLINT = $(GOBIN)/golint
|
||||
|
||||
GO_FILES ?= *.go
|
||||
|
||||
.PHONY: build
|
||||
build:
|
||||
go build ./...
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
go test -race ./...
|
||||
|
||||
.PHONY: gofmt
|
||||
gofmt:
|
||||
$(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
|
||||
gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
|
||||
@[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
|
||||
|
||||
$(GOLINT):
|
||||
go install golang.org/x/lint/golint
|
||||
|
||||
.PHONY: golint
|
||||
golint: $(GOLINT)
|
||||
$(GOLINT) ./...
|
||||
|
||||
.PHONY: lint
|
||||
lint: gofmt golint
|
||||
|
||||
.PHONY: cover
|
||||
cover:
|
||||
go test -coverprofile=cover.out -coverpkg ./... -v ./...
|
||||
go tool cover -html=cover.out -o cover.html
|
63 vendor/go.uber.org/atomic/README.md generated vendored Normal file
@@ -0,0 +1,63 @@
|
||||
# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
|
||||
|
||||
Simple wrappers for primitive types to enforce atomic access.
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
$ go get -u go.uber.org/atomic@v1
|
||||
```
|
||||
|
||||
### Legacy Import Path
|
||||
|
||||
As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
|
||||
of using this package. If you are using Go modules, this package will fail to
|
||||
compile with the legacy import path path `github.com/uber-go/atomic`.
|
||||
|
||||
We recommend migrating your code to the new import path but if you're unable
|
||||
to do so, or if your dependencies are still using the old import path, you
|
||||
will have to add a `replace` directive to your `go.mod` file downgrading the
|
||||
legacy import path to an older version.
|
||||
|
||||
```
|
||||
replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
|
||||
```
|
||||
|
||||
You can do so automatically by running the following command.
|
||||
|
||||
```shell
|
||||
$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The standard library's `sync/atomic` is powerful, but it's easy to forget which
|
||||
variables must be accessed atomically. `go.uber.org/atomic` preserves all the
|
||||
functionality of the standard library, but wraps the primitive types to
|
||||
provide a safer, more convenient API.
|
||||
|
||||
```go
|
||||
var atom atomic.Uint32
|
||||
atom.Store(42)
|
||||
atom.Sub(2)
|
||||
atom.CAS(40, 11)
|
||||
```
|
||||
|
||||
See the [documentation][doc] for a complete API specification.
|
||||
|
||||
## Development Status
|
||||
|
||||
Stable.
|
||||
|
||||
---
|
||||
|
||||
Released under the [MIT License](LICENSE.txt).
|
||||
|
||||
[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
|
||||
[doc]: https://godoc.org/go.uber.org/atomic
|
||||
[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
|
||||
[ci]: https://travis-ci.com/uber-go/atomic
|
||||
[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
|
||||
[cov]: https://codecov.io/gh/uber-go/atomic
|
||||
[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
|
||||
[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
|
356 vendor/go.uber.org/atomic/atomic.go generated vendored Normal file
@@ -0,0 +1,356 @@
|
||||
// Copyright (c) 2016 Uber Technologies, Inc.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
// Package atomic provides simple wrappers around numerics to enforce atomic
|
||||
// access.
|
||||
package atomic
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Int32 is an atomic wrapper around an int32.
|
||||
type Int32 struct{ v int32 }
|
||||
|
||||
// NewInt32 creates an Int32.
|
||||
func NewInt32(i int32) *Int32 {
|
||||
return &Int32{i}
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped value.
|
||||
func (i *Int32) Load() int32 {
|
||||
return atomic.LoadInt32(&i.v)
|
||||
}
|
||||
|
||||
// Add atomically adds to the wrapped int32 and returns the new value.
|
||||
func (i *Int32) Add(n int32) int32 {
|
||||
return atomic.AddInt32(&i.v, n)
|
||||
}
|
||||
|
||||
// Sub atomically subtracts from the wrapped int32 and returns the new value.
|
||||
func (i *Int32) Sub(n int32) int32 {
|
||||
return atomic.AddInt32(&i.v, -n)
|
||||
}
|
||||
|
||||
// Inc atomically increments the wrapped int32 and returns the new value.
|
||||
func (i *Int32) Inc() int32 {
|
||||
return i.Add(1)
|
||||
}
|
||||
|
||||
// Dec atomically decrements the wrapped int32 and returns the new value.
|
||||
func (i *Int32) Dec() int32 {
|
||||
return i.Sub(1)
|
||||
}
|
||||
|
||||
// CAS is an atomic compare-and-swap.
|
||||
func (i *Int32) CAS(old, new int32) bool {
|
||||
return atomic.CompareAndSwapInt32(&i.v, old, new)
|
||||
}
|
||||
|
||||
// Store atomically stores the passed value.
|
||||
func (i *Int32) Store(n int32) {
|
||||
atomic.StoreInt32(&i.v, n)
|
||||
}
|
||||
|
||||
// Swap atomically swaps the wrapped int32 and returns the old value.
|
||||
func (i *Int32) Swap(n int32) int32 {
|
||||
return atomic.SwapInt32(&i.v, n)
|
||||
}
|
||||
|
||||
// Int64 is an atomic wrapper around an int64.
|
||||
type Int64 struct{ v int64 }
|
||||
|
||||
// NewInt64 creates an Int64.
|
||||
func NewInt64(i int64) *Int64 {
|
||||
return &Int64{i}
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped value.
|
||||
func (i *Int64) Load() int64 {
|
||||
return atomic.LoadInt64(&i.v)
|
||||
}
|
||||
|
||||
// Add atomically adds to the wrapped int64 and returns the new value.
|
||||
func (i *Int64) Add(n int64) int64 {
|
||||
return atomic.AddInt64(&i.v, n)
|
||||
}
|
||||
|
||||
// Sub atomically subtracts from the wrapped int64 and returns the new value.
|
||||
func (i *Int64) Sub(n int64) int64 {
|
||||
return atomic.AddInt64(&i.v, -n)
|
||||
}
|
||||
|
||||
// Inc atomically increments the wrapped int64 and returns the new value.
|
||||
func (i *Int64) Inc() int64 {
|
||||
return i.Add(1)
|
||||
}
|
||||
|
||||
// Dec atomically decrements the wrapped int64 and returns the new value.
|
||||
func (i *Int64) Dec() int64 {
|
||||
return i.Sub(1)
|
||||
}
|
||||
|
||||
// CAS is an atomic compare-and-swap.
|
||||
func (i *Int64) CAS(old, new int64) bool {
|
||||
return atomic.CompareAndSwapInt64(&i.v, old, new)
|
||||
}
|
||||
|
||||
// Store atomically stores the passed value.
|
||||
func (i *Int64) Store(n int64) {
|
||||
atomic.StoreInt64(&i.v, n)
|
||||
}
|
||||
|
||||
// Swap atomically swaps the wrapped int64 and returns the old value.
|
||||
func (i *Int64) Swap(n int64) int64 {
|
||||
return atomic.SwapInt64(&i.v, n)
|
||||
}
|
||||
|
||||
// Uint32 is an atomic wrapper around an uint32.
|
||||
type Uint32 struct{ v uint32 }
|
||||
|
||||
// NewUint32 creates a Uint32.
|
||||
func NewUint32(i uint32) *Uint32 {
|
||||
return &Uint32{i}
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped value.
|
||||
func (i *Uint32) Load() uint32 {
|
||||
return atomic.LoadUint32(&i.v)
|
||||
}
|
||||
|
||||
// Add atomically adds to the wrapped uint32 and returns the new value.
|
||||
func (i *Uint32) Add(n uint32) uint32 {
|
||||
return atomic.AddUint32(&i.v, n)
|
||||
}
|
||||
|
||||
// Sub atomically subtracts from the wrapped uint32 and returns the new value.
|
||||
func (i *Uint32) Sub(n uint32) uint32 {
|
||||
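// Adding ^(n - 1), the two's-complement negation of n, wraps around and
// subtracts n; this is the unsigned-subtraction idiom documented by sync/atomic.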
return atomic.AddUint32(&i.v, ^(n - 1))
|
||||
}
|
||||
|
||||
// Inc atomically increments the wrapped uint32 and returns the new value.
|
||||
func (i *Uint32) Inc() uint32 {
|
||||
return i.Add(1)
|
||||
}
|
||||
|
||||
// Dec atomically decrements the wrapped uint32 and returns the new value.
|
||||
func (i *Uint32) Dec() uint32 {
|
||||
return i.Sub(1)
|
||||
}
|
||||
|
||||
// CAS is an atomic compare-and-swap.
|
||||
func (i *Uint32) CAS(old, new uint32) bool {
|
||||
return atomic.CompareAndSwapUint32(&i.v, old, new)
|
||||
}
|
||||
|
||||
// Store atomically stores the passed value.
|
||||
func (i *Uint32) Store(n uint32) {
|
||||
atomic.StoreUint32(&i.v, n)
|
||||
}
|
||||
|
||||
// Swap atomically swaps the wrapped uint32 and returns the old value.
|
||||
func (i *Uint32) Swap(n uint32) uint32 {
|
||||
return atomic.SwapUint32(&i.v, n)
|
||||
}
|
||||
|
||||
// Uint64 is an atomic wrapper around a uint64.
|
||||
type Uint64 struct{ v uint64 }
|
||||
|
||||
// NewUint64 creates a Uint64.
|
||||
func NewUint64(i uint64) *Uint64 {
|
||||
return &Uint64{i}
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped value.
|
||||
func (i *Uint64) Load() uint64 {
|
||||
return atomic.LoadUint64(&i.v)
|
||||
}
|
||||
|
||||
// Add atomically adds to the wrapped uint64 and returns the new value.
|
||||
func (i *Uint64) Add(n uint64) uint64 {
|
||||
return atomic.AddUint64(&i.v, n)
|
||||
}
|
||||
|
||||
// Sub atomically subtracts from the wrapped uint64 and returns the new value.
|
||||
func (i *Uint64) Sub(n uint64) uint64 {
|
||||
return atomic.AddUint64(&i.v, ^(n - 1))
|
||||
}
|
||||
|
||||
// Inc atomically increments the wrapped uint64 and returns the new value.
|
||||
func (i *Uint64) Inc() uint64 {
|
||||
return i.Add(1)
|
||||
}
|
||||
|
||||
// Dec atomically decrements the wrapped uint64 and returns the new value.
|
||||
func (i *Uint64) Dec() uint64 {
|
||||
return i.Sub(1)
|
||||
}
|
||||
|
||||
// CAS is an atomic compare-and-swap.
|
||||
func (i *Uint64) CAS(old, new uint64) bool {
|
||||
return atomic.CompareAndSwapUint64(&i.v, old, new)
|
||||
}
|
||||
|
||||
// Store atomically stores the passed value.
|
||||
func (i *Uint64) Store(n uint64) {
|
||||
atomic.StoreUint64(&i.v, n)
|
||||
}
|
||||
|
||||
// Swap atomically swaps the wrapped uint64 and returns the old value.
|
||||
func (i *Uint64) Swap(n uint64) uint64 {
|
||||
return atomic.SwapUint64(&i.v, n)
|
||||
}
|
||||
|
||||
// Bool is an atomic Boolean.
|
||||
type Bool struct{ v uint32 }
|
||||
|
||||
// NewBool creates a Bool.
|
||||
func NewBool(initial bool) *Bool {
|
||||
return &Bool{boolToInt(initial)}
|
||||
}
|
||||
|
||||
// Load atomically loads the Boolean.
|
||||
func (b *Bool) Load() bool {
|
||||
return truthy(atomic.LoadUint32(&b.v))
|
||||
}
|
||||
|
||||
// CAS is an atomic compare-and-swap.
|
||||
func (b *Bool) CAS(old, new bool) bool {
|
||||
return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new))
|
||||
}
|
||||
|
||||
// Store atomically stores the passed value.
|
||||
func (b *Bool) Store(new bool) {
|
||||
atomic.StoreUint32(&b.v, boolToInt(new))
|
||||
}
|
||||
|
||||
// Swap sets the given value and returns the previous value.
|
||||
func (b *Bool) Swap(new bool) bool {
|
||||
return truthy(atomic.SwapUint32(&b.v, boolToInt(new)))
|
||||
}
|
||||
|
||||
// Toggle atomically negates the Boolean and returns the previous value.
|
||||
func (b *Bool) Toggle() bool {
|
||||
for {
|
||||
old := b.Load()
|
||||
if b.CAS(old, !old) {
|
||||
return old
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func truthy(n uint32) bool {
|
||||
return n == 1
|
||||
}
|
||||
|
||||
func boolToInt(b bool) uint32 {
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float64 is an atomic wrapper around float64.
|
||||
type Float64 struct {
|
||||
v uint64
|
||||
}
|
||||
|
||||
// NewFloat64 creates a Float64.
|
||||
func NewFloat64(f float64) *Float64 {
|
||||
return &Float64{math.Float64bits(f)}
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped value.
|
||||
func (f *Float64) Load() float64 {
|
||||
return math.Float64frombits(atomic.LoadUint64(&f.v))
|
||||
}
|
||||
|
||||
// Store atomically stores the passed value.
|
||||
func (f *Float64) Store(s float64) {
|
||||
atomic.StoreUint64(&f.v, math.Float64bits(s))
|
||||
}
|
||||
|
||||
// Add atomically adds to the wrapped float64 and returns the new value.
|
||||
func (f *Float64) Add(s float64) float64 {
|
||||
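// sync/atomic has no atomic float64 addition, so retry a compare-and-swap on
// the value's bit pattern until no concurrent writer gets in between.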
for {
|
||||
old := f.Load()
|
||||
new := old + s
|
||||
if f.CAS(old, new) {
|
||||
return new
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sub atomically subtracts from the wrapped float64 and returns the new value.
|
||||
func (f *Float64) Sub(s float64) float64 {
|
||||
return f.Add(-s)
|
||||
}
|
||||
|
||||
// CAS is an atomic compare-and-swap.
|
||||
func (f *Float64) CAS(old, new float64) bool {
|
||||
return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new))
|
||||
}
|
||||
|
||||
// Duration is an atomic wrapper around time.Duration
|
||||
// https://godoc.org/time#Duration
|
||||
type Duration struct {
|
||||
v Int64
|
||||
}
|
||||
|
||||
// NewDuration creates a Duration.
|
||||
func NewDuration(d time.Duration) *Duration {
|
||||
return &Duration{v: *NewInt64(int64(d))}
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped value.
|
||||
func (d *Duration) Load() time.Duration {
|
||||
return time.Duration(d.v.Load())
|
||||
}
|
||||
|
||||
// Store atomically stores the passed value.
|
||||
func (d *Duration) Store(n time.Duration) {
|
||||
d.v.Store(int64(n))
|
||||
}
|
||||
|
||||
// Add atomically adds to the wrapped time.Duration and returns the new value.
|
||||
func (d *Duration) Add(n time.Duration) time.Duration {
|
||||
return time.Duration(d.v.Add(int64(n)))
|
||||
}
|
||||
|
||||
// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
|
||||
func (d *Duration) Sub(n time.Duration) time.Duration {
|
||||
return time.Duration(d.v.Sub(int64(n)))
|
||||
}
|
||||
|
||||
// Swap atomically swaps the wrapped time.Duration and returns the old value.
|
||||
func (d *Duration) Swap(n time.Duration) time.Duration {
|
||||
return time.Duration(d.v.Swap(int64(n)))
|
||||
}
|
||||
|
||||
// CAS is an atomic compare-and-swap.
|
||||
func (d *Duration) CAS(old, new time.Duration) bool {
|
||||
return d.v.CAS(int64(old), int64(new))
|
||||
}
|
||||
|
||||
// Value shadows the type of the same name from sync/atomic
|
||||
// https://godoc.org/sync/atomic#Value
|
||||
type Value struct{ atomic.Value }
|
55
vendor/go.uber.org/atomic/error.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
// Copyright (c) 2016 Uber Technologies, Inc.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
package atomic
|
||||
|
||||
// Error is an atomic type-safe wrapper around Value for errors
|
||||
type Error struct{ v Value }
|
||||
|
||||
// errorHolder is non-nil holder for error object.
|
||||
// atomic.Value panics on saving nil object, so err object needs to be
|
||||
// wrapped with valid object first.
|
||||
type errorHolder struct{ err error }
|
||||
|
||||
// NewError creates new atomic error object
|
||||
func NewError(err error) *Error {
|
||||
e := &Error{}
|
||||
if err != nil {
|
||||
e.Store(err)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped error
|
||||
func (e *Error) Load() error {
|
||||
v := e.v.Load()
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
eh := v.(errorHolder)
|
||||
return eh.err
|
||||
}
|
||||
|
||||
// Store atomically stores error.
|
||||
// NOTE: a holder object is allocated on each Store call.
|
||||
func (e *Error) Store(err error) {
|
||||
e.v.Store(errorHolder{err: err})
|
||||
}
|
10
vendor/go.uber.org/atomic/go.mod
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
module go.uber.org/atomic
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/stretchr/testify v1.3.0
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c // indirect
|
||||
)
|
||||
|
||||
go 1.13
|
22
vendor/go.uber.org/atomic/go.sum
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
49
vendor/go.uber.org/atomic/string.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
// Copyright (c) 2016 Uber Technologies, Inc.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
package atomic
|
||||
|
||||
// String is an atomic type-safe wrapper around Value for strings.
|
||||
type String struct{ v Value }
|
||||
|
||||
// NewString creates a String.
|
||||
func NewString(str string) *String {
|
||||
s := &String{}
|
||||
if str != "" {
|
||||
s.Store(str)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Load atomically loads the wrapped string.
|
||||
func (s *String) Load() string {
|
||||
v := s.v.Load()
|
||||
if v == nil {
|
||||
return ""
|
||||
}
|
||||
return v.(string)
|
||||
}
|
||||
|
||||
// Store atomically stores the passed string.
|
||||
// Note: Converting the string to an interface{} to store in the Value
|
||||
// requires an allocation.
|
||||
func (s *String) Store(str string) {
|
||||
s.v.Store(str)
|
||||
}
|
28
vendor/go.uber.org/atomic/tools.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright (c) 2019 Uber Technologies, Inc.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
// +build tools
|
||||
|
||||
package atomic
|
||||
|
||||
import (
|
||||
// Tools used during development.
|
||||
_ "golang.org/x/lint/golint"
|
||||
)
|
15
vendor/golang.org/x/lint/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,15 @@
# Contributing to Golint

## Before filing an issue:

### Are you having trouble building golint?

Check you have the latest version of its dependencies. Run
```
go get -u golang.org/x/lint/golint
```
If you still have problems, consider searching for existing issues before filing a new issue.

## Before sending a pull request:

Have you understood the purpose of golint? Make sure to carefully read `README`.
27
vendor/golang.org/x/lint/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
88
vendor/golang.org/x/lint/README.md
generated
vendored
Normal file
@ -0,0 +1,88 @@
Golint is a linter for Go source code.

[](https://travis-ci.org/golang/lint)

## Installation

Golint requires a
[supported release of Go](https://golang.org/doc/devel/release.html#policy).

    go get -u golang.org/x/lint/golint

To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting.

## Usage

Invoke `golint` with one or more filenames, directories, or packages named
by their import paths. Golint uses the same
[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
the `go` command and therefore
also supports relative import paths like `./...`. Additionally, the `...`
wildcard can be used as suffix on relative and absolute file paths to recurse
into them.

The output of this tool is a list of suggestions in Vim quickfix format,
which is accepted by lots of different editors.
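
For example (an illustration added here, not part of the upstream README), to lint every package under the current directory and exit non-zero if anything is flagged, using the `-set_exit_status` flag defined in the vendored `golint` sources:

    golint -set_exit_status ./...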

## Purpose

Golint differs from gofmt. Gofmt reformats Go source code, whereas
golint prints out style mistakes.

Golint differs from govet. Govet is concerned with correctness, whereas
golint is concerned with coding style. Golint is in use at Google, and it
seeks to match the accepted style of the open source Go project.

The suggestions made by golint are exactly that: suggestions.
Golint is not perfect, and has both false positives and false negatives.
Do not treat its output as a gold standard. We will not be adding pragmas
or other knobs to suppress specific warnings, so do not expect or require
code to be completely "lint-free".
In short, this tool is not, and will never be, trustworthy enough for its
suggestions to be enforced automatically, for example as part of a build process.
Golint makes suggestions for many of the mechanically checkable items listed in
[Effective Go](https://golang.org/doc/effective_go.html) and the
[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).

## Scope

Golint is meant to carry out the stylistic conventions put forth in
[Effective Go](https://golang.org/doc/effective_go.html) and
[CodeReviewComments](https://golang.org/wiki/CodeReviewComments).
Changes that are not aligned with those documents will not be considered.

## Contributions

Contributions to this project are welcome provided they are [in scope](#scope),
though please send mail before starting work on anything major.
Contributors retain their copyright, so we need you to fill out
[a short form](https://developers.google.com/open-source/cla/individual)
before we can accept your contribution.

## Vim

Add this to your ~/.vimrc:

    set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim

If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.

Running `:Lint` will run golint on the current file and populate the quickfix list.

Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`:

    autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow


## Emacs

Add this to your `.emacs` file:

    (add-to-list 'load-path (concat (getenv "GOPATH") "/src/golang.org/x/lint/misc/emacs/"))
    (require 'golint)

If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.

Running M-x golint will run golint on the current file.

For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).
5
vendor/golang.org/x/lint/go.mod
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
module golang.org/x/lint
|
||||
|
||||
go 1.11
|
||||
|
||||
require golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f
|
8
vendor/golang.org/x/lint/go.sum
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
159
vendor/golang.org/x/lint/golint/golint.go
generated
vendored
Normal file
@ -0,0 +1,159 @@
|
||||
// Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
// golint lints the Go source files named on its command line.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/lint"
|
||||
)
|
||||
|
||||
var (
|
||||
minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it")
|
||||
setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found")
|
||||
suggestions int
|
||||
)
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
|
||||
fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n")
|
||||
fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n")
|
||||
fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n")
|
||||
fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n")
|
||||
fmt.Fprintf(os.Stderr, "Flags:\n")
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
if flag.NArg() == 0 {
|
||||
lintDir(".")
|
||||
} else {
|
||||
// dirsRun, filesRun, and pkgsRun indicate whether golint is applied to
|
||||
// directory, file or package targets. The distinction affects which
|
||||
// checks are run. It is not valid to mix target types.
|
||||
var dirsRun, filesRun, pkgsRun int
|
||||
var args []string
|
||||
for _, arg := range flag.Args() {
|
||||
if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) {
|
||||
dirsRun = 1
|
||||
for _, dirname := range allPackagesInFS(arg) {
|
||||
args = append(args, dirname)
|
||||
}
|
||||
} else if isDir(arg) {
|
||||
dirsRun = 1
|
||||
args = append(args, arg)
|
||||
} else if exists(arg) {
|
||||
filesRun = 1
|
||||
args = append(args, arg)
|
||||
} else {
|
||||
pkgsRun = 1
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
|
||||
if dirsRun+filesRun+pkgsRun != 1 {
|
||||
usage()
|
||||
os.Exit(2)
|
||||
}
|
||||
switch {
|
||||
case dirsRun == 1:
|
||||
for _, dir := range args {
|
||||
lintDir(dir)
|
||||
}
|
||||
case filesRun == 1:
|
||||
lintFiles(args...)
|
||||
case pkgsRun == 1:
|
||||
for _, pkg := range importPaths(args) {
|
||||
lintPackage(pkg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if *setExitStatus && suggestions > 0 {
|
||||
fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func isDir(filename string) bool {
|
||||
fi, err := os.Stat(filename)
|
||||
return err == nil && fi.IsDir()
|
||||
}
|
||||
|
||||
func exists(filename string) bool {
|
||||
_, err := os.Stat(filename)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func lintFiles(filenames ...string) {
|
||||
files := make(map[string][]byte)
|
||||
for _, filename := range filenames {
|
||||
src, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
continue
|
||||
}
|
||||
files[filename] = src
|
||||
}
|
||||
|
||||
l := new(lint.Linter)
|
||||
ps, err := l.LintFiles(files)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
return
|
||||
}
|
||||
for _, p := range ps {
|
||||
if p.Confidence >= *minConfidence {
|
||||
fmt.Printf("%v: %s\n", p.Position, p.Text)
|
||||
suggestions++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func lintDir(dirname string) {
|
||||
pkg, err := build.ImportDir(dirname, 0)
|
||||
lintImportedPackage(pkg, err)
|
||||
}
|
||||
|
||||
func lintPackage(pkgname string) {
|
||||
pkg, err := build.Import(pkgname, ".", 0)
|
||||
lintImportedPackage(pkg, err)
|
||||
}
|
||||
|
||||
func lintImportedPackage(pkg *build.Package, err error) {
|
||||
if err != nil {
|
||||
if _, nogo := err.(*build.NoGoError); nogo {
|
||||
// Don't complain if the failure is due to no Go source files.
|
||||
return
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return
|
||||
}
|
||||
|
||||
var files []string
|
||||
files = append(files, pkg.GoFiles...)
|
||||
files = append(files, pkg.CgoFiles...)
|
||||
files = append(files, pkg.TestGoFiles...)
|
||||
if pkg.Dir != "." {
|
||||
for i, f := range files {
|
||||
files[i] = filepath.Join(pkg.Dir, f)
|
||||
}
|
||||
}
|
||||
// TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles)
|
||||
|
||||
lintFiles(files...)
|
||||
}
|
309
vendor/golang.org/x/lint/golint/import.go
generated
vendored
Normal file
@ -0,0 +1,309 @@
|
||||
package main
|
||||
|
||||
/*
|
||||
|
||||
This file holds a direct copy of the import path matching code of
|
||||
https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be
|
||||
replaced when https://golang.org/issue/8768 is resolved.
|
||||
|
||||
It has been updated to follow upstream changes in a few ways.
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/build"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
buildContext = build.Default
|
||||
goroot = filepath.Clean(runtime.GOROOT())
|
||||
gorootSrc = filepath.Join(goroot, "src")
|
||||
)
|
||||
|
||||
// importPathsNoDotExpansion returns the import paths to use for the given
|
||||
// command line, but it does no ... expansion.
|
||||
func importPathsNoDotExpansion(args []string) []string {
|
||||
if len(args) == 0 {
|
||||
return []string{"."}
|
||||
}
|
||||
var out []string
|
||||
for _, a := range args {
|
||||
// Arguments are supposed to be import paths, but
|
||||
// as a courtesy to Windows developers, rewrite \ to /
|
||||
// in command-line arguments. Handles .\... and so on.
|
||||
if filepath.Separator == '\\' {
|
||||
a = strings.Replace(a, `\`, `/`, -1)
|
||||
}
|
||||
|
||||
// Put argument in canonical form, but preserve leading ./.
|
||||
if strings.HasPrefix(a, "./") {
|
||||
a = "./" + path.Clean(a)
|
||||
if a == "./." {
|
||||
a = "."
|
||||
}
|
||||
} else {
|
||||
a = path.Clean(a)
|
||||
}
|
||||
if a == "all" || a == "std" {
|
||||
out = append(out, allPackages(a)...)
|
||||
continue
|
||||
}
|
||||
out = append(out, a)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// importPaths returns the import paths to use for the given command line.
|
||||
func importPaths(args []string) []string {
|
||||
args = importPathsNoDotExpansion(args)
|
||||
var out []string
|
||||
for _, a := range args {
|
||||
if strings.Contains(a, "...") {
|
||||
if build.IsLocalImport(a) {
|
||||
out = append(out, allPackagesInFS(a)...)
|
||||
} else {
|
||||
out = append(out, allPackages(a)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
out = append(out, a)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// matchPattern(pattern)(name) reports whether
|
||||
// name matches pattern. Pattern is a limited glob
|
||||
// pattern in which '...' means 'any string' and there
|
||||
// is no other special syntax.
|
||||
func matchPattern(pattern string) func(name string) bool {
|
||||
re := regexp.QuoteMeta(pattern)
|
||||
re = strings.Replace(re, `\.\.\.`, `.*`, -1)
|
||||
// Special case: foo/... matches foo too.
|
||||
if strings.HasSuffix(re, `/.*`) {
|
||||
re = re[:len(re)-len(`/.*`)] + `(/.*)?`
|
||||
}
|
||||
reg := regexp.MustCompile(`^` + re + `$`)
|
||||
return func(name string) bool {
|
||||
return reg.MatchString(name)
|
||||
}
|
||||
}
|
||||
|
||||
// hasPathPrefix reports whether the path s begins with the
|
||||
// elements in prefix.
|
||||
func hasPathPrefix(s, prefix string) bool {
|
||||
switch {
|
||||
default:
|
||||
return false
|
||||
case len(s) == len(prefix):
|
||||
return s == prefix
|
||||
case len(s) > len(prefix):
|
||||
if prefix != "" && prefix[len(prefix)-1] == '/' {
|
||||
return strings.HasPrefix(s, prefix)
|
||||
}
|
||||
return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
|
||||
}
|
||||
}
|
||||
|
||||
// treeCanMatchPattern(pattern)(name) reports whether
|
||||
// name or children of name can possibly match pattern.
|
||||
// Pattern is the same limited glob accepted by matchPattern.
|
||||
func treeCanMatchPattern(pattern string) func(name string) bool {
|
||||
wildCard := false
|
||||
if i := strings.Index(pattern, "..."); i >= 0 {
|
||||
wildCard = true
|
||||
pattern = pattern[:i]
|
||||
}
|
||||
return func(name string) bool {
|
||||
return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
|
||||
wildCard && strings.HasPrefix(name, pattern)
|
||||
}
|
||||
}
|
||||
|
||||
// allPackages returns all the packages that can be found
|
||||
// under the $GOPATH directories and $GOROOT matching pattern.
|
||||
// The pattern is either "all" (all packages), "std" (standard packages)
|
||||
// or a path including "...".
|
||||
func allPackages(pattern string) []string {
|
||||
pkgs := matchPackages(pattern)
|
||||
if len(pkgs) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
|
||||
}
|
||||
return pkgs
|
||||
}
|
||||
|
||||
func matchPackages(pattern string) []string {
|
||||
match := func(string) bool { return true }
|
||||
treeCanMatch := func(string) bool { return true }
|
||||
if pattern != "all" && pattern != "std" {
|
||||
match = matchPattern(pattern)
|
||||
treeCanMatch = treeCanMatchPattern(pattern)
|
||||
}
|
||||
|
||||
have := map[string]bool{
|
||||
"builtin": true, // ignore pseudo-package that exists only for documentation
|
||||
}
|
||||
if !buildContext.CgoEnabled {
|
||||
have["runtime/cgo"] = true // ignore during walk
|
||||
}
|
||||
var pkgs []string
|
||||
|
||||
// Commands
|
||||
cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator)
|
||||
filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil || !fi.IsDir() || path == cmd {
|
||||
return nil
|
||||
}
|
||||
name := path[len(cmd):]
|
||||
if !treeCanMatch(name) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
// Commands are all in cmd/, not in subdirectories.
|
||||
if strings.Contains(name, string(filepath.Separator)) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
// We use, e.g., cmd/gofmt as the pseudo import path for gofmt.
|
||||
name = "cmd/" + name
|
||||
if have[name] {
|
||||
return nil
|
||||
}
|
||||
have[name] = true
|
||||
if !match(name) {
|
||||
return nil
|
||||
}
|
||||
_, err = buildContext.ImportDir(path, 0)
|
||||
if err != nil {
|
||||
if _, noGo := err.(*build.NoGoError); !noGo {
|
||||
log.Print(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
pkgs = append(pkgs, name)
|
||||
return nil
|
||||
})
|
||||
|
||||
for _, src := range buildContext.SrcDirs() {
|
||||
if (pattern == "std" || pattern == "cmd") && src != gorootSrc {
|
||||
continue
|
||||
}
|
||||
src = filepath.Clean(src) + string(filepath.Separator)
|
||||
root := src
|
||||
if pattern == "cmd" {
|
||||
root += "cmd" + string(filepath.Separator)
|
||||
}
|
||||
filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil || !fi.IsDir() || path == src {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Avoid .foo, _foo, and testdata directory trees.
|
||||
_, elem := filepath.Split(path)
|
||||
if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
name := filepath.ToSlash(path[len(src):])
|
||||
if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") {
|
||||
// The name "std" is only the standard library.
|
||||
// If the name is cmd, it's the root of the command tree.
|
||||
return filepath.SkipDir
|
||||
}
|
||||
if !treeCanMatch(name) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
if have[name] {
|
||||
return nil
|
||||
}
|
||||
have[name] = true
|
||||
if !match(name) {
|
||||
return nil
|
||||
}
|
||||
_, err = buildContext.ImportDir(path, 0)
|
||||
if err != nil {
|
||||
if _, noGo := err.(*build.NoGoError); noGo {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
pkgs = append(pkgs, name)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return pkgs
|
||||
}
|
||||
|
||||
// allPackagesInFS is like allPackages but is passed a pattern
|
||||
// beginning ./ or ../, meaning it should scan the tree rooted
|
||||
// at the given directory. There are ... in the pattern too.
|
||||
func allPackagesInFS(pattern string) []string {
|
||||
pkgs := matchPackagesInFS(pattern)
|
||||
if len(pkgs) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
|
||||
}
|
||||
return pkgs
|
||||
}
|
||||
|
||||
func matchPackagesInFS(pattern string) []string {
|
||||
// Find directory to begin the scan.
|
||||
// Could be smarter but this one optimization
|
||||
// is enough for now, since ... is usually at the
|
||||
// end of a path.
|
||||
i := strings.Index(pattern, "...")
|
||||
dir, _ := path.Split(pattern[:i])
|
||||
|
||||
// pattern begins with ./ or ../.
|
||||
// path.Clean will discard the ./ but not the ../.
|
||||
// We need to preserve the ./ for pattern matching
|
||||
// and in the returned import paths.
|
||||
prefix := ""
|
||||
if strings.HasPrefix(pattern, "./") {
|
||||
prefix = "./"
|
||||
}
|
||||
match := matchPattern(pattern)
|
||||
|
||||
var pkgs []string
|
||||
filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil || !fi.IsDir() {
|
||||
return nil
|
||||
}
|
||||
if path == dir {
|
||||
// filepath.Walk starts at dir and recurses. For the recursive case,
|
||||
// the path is the result of filepath.Join, which calls filepath.Clean.
|
||||
// The initial case is not Cleaned, though, so we do this explicitly.
|
||||
//
|
||||
// This converts a path like "./io/" to "io". Without this step, running
|
||||
// "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io
|
||||
// package, because prepending the prefix "./" to the unclean path would
|
||||
// result in "././io", and match("././io") returns false.
|
||||
path = filepath.Clean(path)
|
||||
}
|
||||
|
||||
// Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..".
|
||||
_, elem := filepath.Split(path)
|
||||
dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".."
|
||||
if dot || strings.HasPrefix(elem, "_") || elem == "testdata" {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
name := prefix + filepath.ToSlash(path)
|
||||
if !match(name) {
|
||||
return nil
|
||||
}
|
||||
if _, err = build.ImportDir(path, 0); err != nil {
|
||||
if _, noGo := err.(*build.NoGoError); !noGo {
|
||||
log.Print(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
pkgs = append(pkgs, name)
|
||||
return nil
|
||||
})
|
||||
return pkgs
|
||||
}
|
13
vendor/golang.org/x/lint/golint/importcomment.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
// Copyright (c) 2018 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
// +build go1.12
|
||||
|
||||
// Require use of the correct import path only for Go 1.12+ users, so
|
||||
// any breakages coincide with people updating their CI configs or
|
||||
// whatnot.
|
||||
|
||||
package main // import "golang.org/x/lint/golint"
|
1614
vendor/golang.org/x/lint/lint.go
generated
vendored
Normal file
File diff suppressed because it is too large
3
vendor/golang.org/x/tools/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
3
vendor/golang.org/x/tools/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
27
vendor/golang.org/x/tools/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/tools/PATENTS
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
627
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
generated
vendored
Normal file
@ -0,0 +1,627 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package astutil
|
||||
|
||||
// This file defines utilities for working with source positions.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// PathEnclosingInterval returns the node that encloses the source
|
||||
// interval [start, end), and all its ancestors up to the AST root.
|
||||
//
|
||||
// The definition of "enclosing" used by this function considers
|
||||
// additional whitespace abutting a node to be enclosed by it.
|
||||
// In this example:
|
||||
//
|
||||
// z := x + y // add them
|
||||
// <-A->
|
||||
// <----B----->
|
||||
//
|
||||
// the ast.BinaryExpr(+) node is considered to enclose interval B
|
||||
// even though its [Pos()..End()) is actually only interval A.
|
||||
// This behaviour makes user interfaces more tolerant of imperfect
|
||||
// input.
|
||||
//
|
||||
// This function treats tokens as nodes, though they are not included
|
||||
// in the result. e.g. PathEnclosingInterval("+") returns the
|
||||
// enclosing ast.BinaryExpr("x + y").
|
||||
//
|
||||
// If start==end, the 1-char interval following start is used instead.
|
||||
//
|
||||
// The 'exact' result is true if the interval contains only path[0]
|
||||
// and perhaps some adjacent whitespace. It is false if the interval
|
||||
// overlaps multiple children of path[0], or if it contains only
|
||||
// interior whitespace of path[0].
|
||||
// In this example:
|
||||
//
|
||||
// z := x + y // add them
|
||||
// <--C--> <---E-->
|
||||
// ^
|
||||
// D
|
||||
//
|
||||
// intervals C, D and E are inexact. C is contained by the
|
||||
// z-assignment statement, because it spans three of its children (:=,
|
||||
// x, +). So too is the 1-char interval D, because it contains only
|
||||
// interior whitespace of the assignment. E is considered interior
|
||||
// whitespace of the BlockStmt containing the assignment.
|
||||
//
|
||||
// Precondition: [start, end) both lie within the same file as root.
|
||||
// TODO(adonovan): return (nil, false) in this case and remove precond.
|
||||
// Requires FileSet; see loader.tokenFileContainsPos.
|
||||
//
|
||||
// Postcondition: path is never nil; it always contains at least 'root'.
|
||||
//
|
||||
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
|
||||
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
|
||||
|
||||
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
|
||||
var visit func(node ast.Node) bool
|
||||
visit = func(node ast.Node) bool {
|
||||
path = append(path, node)
|
||||
|
||||
nodePos := node.Pos()
|
||||
nodeEnd := node.End()
|
||||
|
||||
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
|
||||
|
||||
// Intersect [start, end) with interval of node.
|
||||
if start < nodePos {
|
||||
start = nodePos
|
||||
}
|
||||
if end > nodeEnd {
|
||||
end = nodeEnd
|
||||
}
|
||||
|
||||
// Find sole child that contains [start, end).
|
||||
children := childrenOf(node)
|
||||
l := len(children)
|
||||
for i, child := range children {
|
||||
// [childPos, childEnd) is unaugmented interval of child.
|
||||
childPos := child.Pos()
|
||||
childEnd := child.End()
|
||||
|
||||
// [augPos, augEnd) is whitespace-augmented interval of child.
|
||||
augPos := childPos
|
||||
augEnd := childEnd
|
||||
if i > 0 {
|
||||
augPos = children[i-1].End() // start of preceding whitespace
|
||||
}
|
||||
if i < l-1 {
|
||||
nextChildPos := children[i+1].Pos()
|
||||
// Does [start, end) lie between child and next child?
|
||||
if start >= augEnd && end <= nextChildPos {
|
||||
return false // inexact match
|
||||
}
|
||||
augEnd = nextChildPos // end of following whitespace
|
||||
}
|
||||
|
||||
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
|
||||
// i, augPos, augEnd, start, end) // debugging
|
||||
|
||||
// Does augmented child strictly contain [start, end)?
|
||||
if augPos <= start && end <= augEnd {
|
||||
_, isToken := child.(tokenNode)
|
||||
return isToken || visit(child)
|
||||
}
|
||||
|
||||
// Does [start, end) overlap multiple children?
|
||||
// i.e. left-augmented child contains start
|
||||
// but LR-augmented child does not contain end.
|
||||
if start < childEnd && end > augEnd {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// No single child contained [start, end),
|
||||
// so node is the result. Is it exact?
|
||||
|
||||
// (It's tempting to put this condition before the
|
||||
// child loop, but it gives the wrong result in the
|
||||
// case where a node (e.g. ExprStmt) and its sole
|
||||
// child have equal intervals.)
|
||||
if start == nodePos && end == nodeEnd {
|
||||
return true // exact match
|
||||
}
|
||||
|
||||
return false // inexact: overlaps multiple children
|
||||
}
|
||||
|
||||
if start > end {
|
||||
start, end = end, start
|
||||
}
|
||||
|
||||
if start < root.End() && end > root.Pos() {
|
||||
if start == end {
|
||||
end = start + 1 // empty interval => interval of size 1
|
||||
}
|
||||
exact = visit(root)
|
||||
|
||||
// Reverse the path:
|
||||
for i, l := 0, len(path); i < l/2; i++ {
|
||||
path[i], path[l-1-i] = path[l-1-i], path[i]
|
||||
}
|
||||
} else {
|
||||
// Selection lies within whitespace preceding the
|
||||
// first (or following the last) declaration in the file.
|
||||
// The result nonetheless always includes the ast.File.
|
||||
path = append(path, root)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// tokenNode is a dummy implementation of ast.Node for a single token.
|
||||
// They are used transiently by PathEnclosingInterval but never escape
|
||||
// this package.
|
||||
//
|
||||
type tokenNode struct {
|
||||
pos token.Pos
|
||||
end token.Pos
|
||||
}
|
||||
|
||||
func (n tokenNode) Pos() token.Pos {
|
||||
return n.pos
|
||||
}
|
||||
|
||||
func (n tokenNode) End() token.Pos {
|
||||
return n.end
|
||||
}
|
||||
|
||||
func tok(pos token.Pos, len int) ast.Node {
|
||||
return tokenNode{pos, pos + token.Pos(len)}
|
||||
}
|
||||
|
||||
// childrenOf returns the direct non-nil children of ast.Node n.
|
||||
// It may include fake ast.Node implementations for bare tokens.
|
||||
// it is not safe to call (e.g.) ast.Walk on such nodes.
|
||||
//
|
||||
func childrenOf(n ast.Node) []ast.Node {
|
||||
var children []ast.Node
|
||||
|
||||
// First add nodes for all true subtrees.
|
||||
ast.Inspect(n, func(node ast.Node) bool {
|
||||
if node == n { // push n
|
||||
return true // recur
|
||||
}
|
||||
if node != nil { // push child
|
||||
children = append(children, node)
|
||||
}
|
||||
return false // no recursion
|
||||
})
|
||||
|
||||
// Then add fake Nodes for bare tokens.
|
||||
switch n := n.(type) {
|
||||
case *ast.ArrayType:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("[")),
|
||||
tok(n.Elt.End(), len("]")))
|
||||
|
||||
case *ast.AssignStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.BasicLit:
|
||||
children = append(children,
|
||||
tok(n.ValuePos, len(n.Value)))
|
||||
|
||||
case *ast.BinaryExpr:
|
||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
||||
|
||||
case *ast.BlockStmt:
|
||||
children = append(children,
|
||||
tok(n.Lbrace, len("{")),
|
||||
tok(n.Rbrace, len("}")))
|
||||
|
||||
case *ast.BranchStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.CallExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
if n.Ellipsis != 0 {
|
||||
children = append(children, tok(n.Ellipsis, len("...")))
|
||||
}
|
||||
|
||||
case *ast.CaseClause:
|
||||
if n.List == nil {
|
||||
children = append(children,
|
||||
tok(n.Case, len("default")))
|
||||
} else {
|
||||
children = append(children,
|
||||
tok(n.Case, len("case")))
|
||||
}
|
||||
children = append(children, tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.ChanType:
|
||||
switch n.Dir {
|
||||
case ast.RECV:
|
||||
children = append(children, tok(n.Begin, len("<-chan")))
|
||||
case ast.SEND:
|
||||
children = append(children, tok(n.Begin, len("chan<-")))
|
||||
case ast.RECV | ast.SEND:
|
||||
children = append(children, tok(n.Begin, len("chan")))
|
||||
}
|
||||
|
||||
case *ast.CommClause:
|
||||
if n.Comm == nil {
|
||||
children = append(children,
|
||||
tok(n.Case, len("default")))
|
||||
} else {
|
||||
children = append(children,
|
||||
tok(n.Case, len("case")))
|
||||
}
|
||||
children = append(children, tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.Comment:
|
||||
// nop
|
||||
|
||||
case *ast.CommentGroup:
|
||||
// nop
|
||||
|
||||
case *ast.CompositeLit:
|
||||
children = append(children,
|
||||
tok(n.Lbrace, len("{")),
|
||||
tok(n.Rbrace, len("{")))
|
||||
|
||||
case *ast.DeclStmt:
|
||||
// nop
|
||||
|
||||
case *ast.DeferStmt:
|
||||
children = append(children,
|
||||
tok(n.Defer, len("defer")))
|
||||
|
||||
case *ast.Ellipsis:
|
||||
children = append(children,
|
||||
tok(n.Ellipsis, len("...")))
|
||||
|
||||
case *ast.EmptyStmt:
|
||||
// nop
|
||||
|
||||
case *ast.ExprStmt:
|
||||
// nop
|
||||
|
||||
case *ast.Field:
|
||||
// TODO(adonovan): Field.{Doc,Comment,Tag}?
|
||||
|
||||
case *ast.FieldList:
|
||||
children = append(children,
|
||||
tok(n.Opening, len("(")),
|
||||
tok(n.Closing, len(")")))
|
||||
|
||||
case *ast.File:
|
||||
// TODO test: Doc
|
||||
children = append(children,
|
||||
tok(n.Package, len("package")))
|
||||
|
||||
case *ast.ForStmt:
|
||||
children = append(children,
|
||||
tok(n.For, len("for")))
|
||||
|
||||
case *ast.FuncDecl:
|
||||
// TODO(adonovan): FuncDecl.Comment?
|
||||
|
||||
// Uniquely, FuncDecl breaks the invariant that
|
||||
// preorder traversal yields tokens in lexical order:
|
||||
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
|
||||
//
|
||||
// As a workaround, we inline the case for FuncType
|
||||
// here and order things correctly.
|
||||
//
|
||||
children = nil // discard ast.Walk(FuncDecl) info subtrees
|
||||
children = append(children, tok(n.Type.Func, len("func")))
|
||||
if n.Recv != nil {
|
||||
children = append(children, n.Recv)
|
||||
}
|
||||
children = append(children, n.Name)
|
||||
if n.Type.Params != nil {
|
||||
children = append(children, n.Type.Params)
|
||||
}
|
||||
if n.Type.Results != nil {
|
||||
children = append(children, n.Type.Results)
|
||||
}
|
||||
if n.Body != nil {
|
||||
children = append(children, n.Body)
|
||||
}
|
||||
|
||||
case *ast.FuncLit:
|
||||
// nop
|
||||
|
||||
case *ast.FuncType:
|
||||
if n.Func != 0 {
|
||||
children = append(children,
|
||||
tok(n.Func, len("func")))
|
||||
}
|
||||
|
||||
case *ast.GenDecl:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
if n.Lparen != 0 {
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
}
|
||||
|
||||
case *ast.GoStmt:
|
||||
children = append(children,
|
||||
tok(n.Go, len("go")))
|
||||
|
||||
case *ast.Ident:
|
||||
children = append(children,
|
||||
tok(n.NamePos, len(n.Name)))
|
||||
|
||||
case *ast.IfStmt:
|
||||
children = append(children,
|
||||
tok(n.If, len("if")))
|
||||
|
||||
case *ast.ImportSpec:
|
||||
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
|
||||
|
||||
case *ast.IncDecStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.IndexExpr:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("[")),
|
||||
tok(n.Rbrack, len("]")))
|
||||
|
||||
case *ast.InterfaceType:
|
||||
children = append(children,
|
||||
tok(n.Interface, len("interface")))
|
||||
|
||||
case *ast.KeyValueExpr:
|
||||
children = append(children,
|
||||
tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.LabeledStmt:
|
||||
children = append(children,
|
||||
tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.MapType:
|
||||
children = append(children,
|
||||
tok(n.Map, len("map")))
|
||||
|
||||
case *ast.ParenExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
|
||||
case *ast.RangeStmt:
|
||||
children = append(children,
|
||||
tok(n.For, len("for")),
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.ReturnStmt:
|
||||
children = append(children,
|
||||
tok(n.Return, len("return")))
|
||||
|
||||
case *ast.SelectStmt:
|
||||
children = append(children,
|
||||
tok(n.Select, len("select")))
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
// nop
|
||||
|
||||
case *ast.SendStmt:
|
||||
children = append(children,
|
||||
tok(n.Arrow, len("<-")))
|
||||
|
||||
case *ast.SliceExpr:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("[")),
|
||||
tok(n.Rbrack, len("]")))
|
||||
|
||||
case *ast.StarExpr:
|
||||
children = append(children, tok(n.Star, len("*")))
|
||||
|
||||
case *ast.StructType:
|
||||
children = append(children, tok(n.Struct, len("struct")))
|
||||
|
||||
case *ast.SwitchStmt:
|
||||
children = append(children, tok(n.Switch, len("switch")))
|
||||
|
||||
case *ast.TypeAssertExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen-1, len(".")),
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
|
||||
case *ast.TypeSpec:
|
||||
// TODO(adonovan): TypeSpec.{Doc,Comment}?
|
||||
|
||||
case *ast.TypeSwitchStmt:
|
||||
children = append(children, tok(n.Switch, len("switch")))
|
||||
|
||||
case *ast.UnaryExpr:
|
||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
||||
|
||||
case *ast.ValueSpec:
|
||||
// TODO(adonovan): ValueSpec.{Doc,Comment}?
|
||||
|
||||
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
|
||||
// nop
|
||||
}
|
||||
|
||||
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
|
||||
// the switch above so we can make interleaved callbacks for
|
||||
// both Nodes and Tokens in the right order and avoid the need
|
||||
// to sort.
|
||||
sort.Sort(byPos(children))
|
||||
|
||||
return children
|
||||
}
|
||||
|
||||
type byPos []ast.Node
|
||||
|
||||
func (sl byPos) Len() int {
|
||||
return len(sl)
|
||||
}
|
||||
func (sl byPos) Less(i, j int) bool {
|
||||
return sl[i].Pos() < sl[j].Pos()
|
||||
}
|
||||
func (sl byPos) Swap(i, j int) {
|
||||
sl[i], sl[j] = sl[j], sl[i]
|
||||
}
|
||||
|
||||
// NodeDescription returns a description of the concrete type of n suitable
|
||||
// for a user interface.
|
||||
//
|
||||
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
|
||||
// StarExpr) we could be much more specific given the path to the AST
|
||||
// root. Perhaps we should do that.
|
||||
//
|
||||
func NodeDescription(n ast.Node) string {
|
||||
switch n := n.(type) {
|
||||
case *ast.ArrayType:
|
||||
return "array type"
|
||||
case *ast.AssignStmt:
|
||||
return "assignment"
|
||||
case *ast.BadDecl:
|
||||
return "bad declaration"
|
||||
case *ast.BadExpr:
|
||||
return "bad expression"
|
||||
case *ast.BadStmt:
|
||||
return "bad statement"
|
||||
case *ast.BasicLit:
|
||||
return "basic literal"
|
||||
case *ast.BinaryExpr:
|
||||
return fmt.Sprintf("binary %s operation", n.Op)
|
||||
case *ast.BlockStmt:
|
||||
return "block"
|
||||
case *ast.BranchStmt:
|
||||
switch n.Tok {
|
||||
case token.BREAK:
|
||||
return "break statement"
|
||||
case token.CONTINUE:
|
||||
return "continue statement"
|
||||
case token.GOTO:
|
||||
return "goto statement"
|
||||
case token.FALLTHROUGH:
|
||||
return "fall-through statement"
|
||||
}
|
||||
case *ast.CallExpr:
|
||||
if len(n.Args) == 1 && !n.Ellipsis.IsValid() {
|
||||
return "function call (or conversion)"
|
||||
}
|
||||
return "function call"
|
||||
case *ast.CaseClause:
|
||||
return "case clause"
|
||||
case *ast.ChanType:
|
||||
return "channel type"
|
||||
case *ast.CommClause:
|
||||
return "communication clause"
|
||||
case *ast.Comment:
|
||||
return "comment"
|
||||
case *ast.CommentGroup:
|
||||
return "comment group"
|
||||
case *ast.CompositeLit:
|
||||
return "composite literal"
|
||||
case *ast.DeclStmt:
|
||||
return NodeDescription(n.Decl) + " statement"
|
||||
case *ast.DeferStmt:
|
||||
return "defer statement"
|
||||
case *ast.Ellipsis:
|
||||
return "ellipsis"
|
||||
case *ast.EmptyStmt:
|
||||
return "empty statement"
|
||||
case *ast.ExprStmt:
|
||||
return "expression statement"
|
||||
case *ast.Field:
|
||||
// Can be any of these:
|
||||
// struct {x, y int} -- struct field(s)
|
||||
// struct {T} -- anon struct field
|
||||
// interface {I} -- interface embedding
|
||||
// interface {f()} -- interface method
|
||||
// func (A) func(B) C -- receiver, param(s), result(s)
|
||||
return "field/method/parameter"
|
||||
case *ast.FieldList:
|
||||
return "field/method/parameter list"
|
||||
case *ast.File:
|
||||
return "source file"
|
||||
case *ast.ForStmt:
|
||||
return "for loop"
|
||||
case *ast.FuncDecl:
|
||||
return "function declaration"
|
||||
case *ast.FuncLit:
|
||||
return "function literal"
|
||||
case *ast.FuncType:
|
||||
return "function type"
|
||||
case *ast.GenDecl:
|
||||
switch n.Tok {
|
||||
case token.IMPORT:
|
||||
return "import declaration"
|
||||
case token.CONST:
|
||||
return "constant declaration"
|
||||
case token.TYPE:
|
||||
return "type declaration"
|
||||
case token.VAR:
|
||||
return "variable declaration"
|
||||
}
|
||||
case *ast.GoStmt:
|
||||
return "go statement"
|
||||
case *ast.Ident:
|
||||
return "identifier"
|
||||
case *ast.IfStmt:
|
||||
return "if statement"
|
||||
case *ast.ImportSpec:
|
||||
return "import specification"
|
||||
case *ast.IncDecStmt:
|
||||
if n.Tok == token.INC {
|
||||
return "increment statement"
|
||||
}
|
||||
return "decrement statement"
|
||||
case *ast.IndexExpr:
|
||||
return "index expression"
|
||||
case *ast.InterfaceType:
|
||||
return "interface type"
|
||||
case *ast.KeyValueExpr:
|
||||
return "key/value association"
|
||||
case *ast.LabeledStmt:
|
||||
return "statement label"
|
||||
case *ast.MapType:
|
||||
return "map type"
|
||||
case *ast.Package:
|
||||
return "package"
|
||||
case *ast.ParenExpr:
|
||||
return "parenthesized " + NodeDescription(n.X)
|
||||
case *ast.RangeStmt:
|
||||
return "range loop"
|
||||
case *ast.ReturnStmt:
|
||||
return "return statement"
|
||||
case *ast.SelectStmt:
|
||||
return "select statement"
|
||||
case *ast.SelectorExpr:
|
||||
return "selector"
|
||||
case *ast.SendStmt:
|
||||
return "channel send"
|
||||
case *ast.SliceExpr:
|
||||
return "slice expression"
|
||||
case *ast.StarExpr:
|
||||
return "*-operation" // load/store expr or pointer type
|
||||
case *ast.StructType:
|
||||
return "struct type"
|
||||
case *ast.SwitchStmt:
|
||||
return "switch statement"
|
||||
case *ast.TypeAssertExpr:
|
||||
return "type assertion"
|
||||
case *ast.TypeSpec:
|
||||
return "type specification"
|
||||
case *ast.TypeSwitchStmt:
|
||||
return "type switch"
|
||||
case *ast.UnaryExpr:
|
||||
return fmt.Sprintf("unary %s operation", n.Op)
|
||||
case *ast.ValueSpec:
|
||||
return "value specification"
|
||||
|
||||
}
|
||||
panic(fmt.Sprintf("unexpected node type: %T", n))
|
||||
}
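For orientation, here is a minimal sketch of how NodeDescription is typically driven from client code. The file name "example.go" and the source snippet are invented for illustration and are not part of the vendored file.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	// Hypothetical input; any parsable Go source works here.
	const src = `package p

func add(a, b int) int { return a + b }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Print a human-readable description for every node in the file.
	ast.Inspect(f, func(n ast.Node) bool {
		if n != nil {
			fmt.Printf("%s: %s\n", fset.Position(n.Pos()), astutil.NodeDescription(n))
		}
		return true
	})
}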
|
481 vendor/golang.org/x/tools/go/ast/astutil/imports.go generated vendored Normal file
@@ -0,0 +1,481 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package astutil contains common utilities for working with the Go AST.
|
||||
package astutil // import "golang.org/x/tools/go/ast/astutil"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// AddImport adds the import path to the file f, if absent.
|
||||
func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
|
||||
return AddNamedImport(fset, f, "", path)
|
||||
}
|
||||
|
||||
// AddNamedImport adds the import with the given name and path to the file f, if absent.
|
||||
// If name is not empty, it is used to rename the import.
|
||||
//
|
||||
// For example, calling
|
||||
// AddNamedImport(fset, f, "pathpkg", "path")
|
||||
// adds
|
||||
// import pathpkg "path"
|
||||
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
|
||||
if imports(f, name, path) {
|
||||
return false
|
||||
}
|
||||
|
||||
newImport := &ast.ImportSpec{
|
||||
Path: &ast.BasicLit{
|
||||
Kind: token.STRING,
|
||||
Value: strconv.Quote(path),
|
||||
},
|
||||
}
|
||||
if name != "" {
|
||||
newImport.Name = &ast.Ident{Name: name}
|
||||
}
|
||||
|
||||
// Find an import decl to add to.
|
||||
// The goal is to find an existing import
|
||||
// whose import path has the longest shared
|
||||
// prefix with path.
|
||||
var (
|
||||
bestMatch = -1 // length of longest shared prefix
|
||||
lastImport = -1 // index in f.Decls of the file's final import decl
|
||||
impDecl *ast.GenDecl // import decl containing the best match
|
||||
impIndex = -1 // spec index in impDecl containing the best match
|
||||
|
||||
isThirdPartyPath = isThirdParty(path)
|
||||
)
|
||||
for i, decl := range f.Decls {
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if ok && gen.Tok == token.IMPORT {
|
||||
lastImport = i
|
||||
// Do not add to import "C", to avoid disrupting the
|
||||
// association with its doc comment, breaking cgo.
|
||||
if declImports(gen, "C") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Match an empty import decl if that's all that is available.
|
||||
if len(gen.Specs) == 0 && bestMatch == -1 {
|
||||
impDecl = gen
|
||||
}
|
||||
|
||||
// Compute longest shared prefix with imports in this group and find best
|
||||
// matched import spec.
|
||||
// 1. Always prefer import spec with longest shared prefix.
|
||||
// 2. While match length is 0,
|
||||
// - for stdlib package: prefer first import spec.
|
||||
// - for third party package: prefer first third party import spec.
|
||||
// We cannot use last import spec as best match for third party package
|
||||
// because grouped imports are usually placed last by goimports -local
|
||||
// flag.
|
||||
// See issue #19190.
|
||||
seenAnyThirdParty := false
|
||||
for j, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
p := importPath(impspec)
|
||||
n := matchLen(p, path)
|
||||
if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
|
||||
bestMatch = n
|
||||
impDecl = gen
|
||||
impIndex = j
|
||||
}
|
||||
seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no import decl found, add one after the last import.
|
||||
if impDecl == nil {
|
||||
impDecl = &ast.GenDecl{
|
||||
Tok: token.IMPORT,
|
||||
}
|
||||
if lastImport >= 0 {
|
||||
impDecl.TokPos = f.Decls[lastImport].End()
|
||||
} else {
|
||||
// There are no existing imports.
|
||||
// Our new import, preceded by a blank line, goes after the package declaration
|
||||
// and after the comment, if any, that starts on the same line as the
|
||||
// package declaration.
|
||||
impDecl.TokPos = f.Package
|
||||
|
||||
file := fset.File(f.Package)
|
||||
pkgLine := file.Line(f.Package)
|
||||
for _, c := range f.Comments {
|
||||
if file.Line(c.Pos()) > pkgLine {
|
||||
break
|
||||
}
|
||||
// +2 for a blank line
|
||||
impDecl.TokPos = c.End() + 2
|
||||
}
|
||||
}
|
||||
f.Decls = append(f.Decls, nil)
|
||||
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
|
||||
f.Decls[lastImport+1] = impDecl
|
||||
}
|
||||
|
||||
// Insert new import at insertAt.
|
||||
insertAt := 0
|
||||
if impIndex >= 0 {
|
||||
// insert after the found import
|
||||
insertAt = impIndex + 1
|
||||
}
|
||||
impDecl.Specs = append(impDecl.Specs, nil)
|
||||
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
|
||||
impDecl.Specs[insertAt] = newImport
|
||||
pos := impDecl.Pos()
|
||||
if insertAt > 0 {
|
||||
// If there is a comment after an existing import, preserve the comment
|
||||
// position by adding the new import after the comment.
|
||||
if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
|
||||
pos = spec.Comment.End()
|
||||
} else {
|
||||
// Assign same position as the previous import,
|
||||
// so that the sorter sees it as being in the same block.
|
||||
pos = impDecl.Specs[insertAt-1].Pos()
|
||||
}
|
||||
}
|
||||
if newImport.Name != nil {
|
||||
newImport.Name.NamePos = pos
|
||||
}
|
||||
newImport.Path.ValuePos = pos
|
||||
newImport.EndPos = pos
|
||||
|
||||
// Clean up parens. impDecl contains at least one spec.
|
||||
if len(impDecl.Specs) == 1 {
|
||||
// Remove unneeded parens.
|
||||
impDecl.Lparen = token.NoPos
|
||||
} else if !impDecl.Lparen.IsValid() {
|
||||
// impDecl needs parens added.
|
||||
impDecl.Lparen = impDecl.Specs[0].Pos()
|
||||
}
|
||||
|
||||
f.Imports = append(f.Imports, newImport)
|
||||
|
||||
if len(f.Decls) <= 1 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Merge all the import declarations into the first one.
|
||||
var first *ast.GenDecl
|
||||
for i := 0; i < len(f.Decls); i++ {
|
||||
decl := f.Decls[i]
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
|
||||
continue
|
||||
}
|
||||
if first == nil {
|
||||
first = gen
|
||||
continue // Don't touch the first one.
|
||||
}
|
||||
// We now know there is more than one package in this import
|
||||
// declaration. Ensure that it ends up parenthesized.
|
||||
first.Lparen = first.Pos()
|
||||
// Move the imports of the other import declaration to the first one.
|
||||
for _, spec := range gen.Specs {
|
||||
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
|
||||
first.Specs = append(first.Specs, spec)
|
||||
}
|
||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
||||
i--
|
||||
}
|
||||
|
||||
return true
|
||||
}
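A small usage sketch for AddNamedImport, mirroring the pathpkg example from the doc comment above; the input source and file name are hypothetical.

package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	// Hypothetical input source for the sketch.
	const src = `package p

import "fmt"

func hello() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Add `import pathpkg "path"`, as in the doc comment.
	if astutil.AddNamedImport(fset, f, "pathpkg", "path") {
		// The file changed; re-print it.
		if err := format.Node(os.Stdout, fset, f); err != nil {
			panic(err)
		}
	}
}

Since AddNamedImport reports whether the file was modified, callers typically only re-print or re-write the file when it returns true.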
|
||||
|
||||
func isThirdParty(importPath string) bool {
|
||||
// Third party package import path usually contains "." (".com", ".org", ...)
|
||||
// This logic is taken from golang.org/x/tools/imports package.
|
||||
return strings.Contains(importPath, ".")
|
||||
}
|
||||
|
||||
// DeleteImport deletes the import path from the file f, if present.
|
||||
// If there are duplicate import declarations, all matching ones are deleted.
|
||||
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
|
||||
return DeleteNamedImport(fset, f, "", path)
|
||||
}
|
||||
|
||||
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
|
||||
// If there are duplicate import declarations, all matching ones are deleted.
|
||||
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
|
||||
var delspecs []*ast.ImportSpec
|
||||
var delcomments []*ast.CommentGroup
|
||||
|
||||
// Find the import nodes that import path, if any.
|
||||
for i := 0; i < len(f.Decls); i++ {
|
||||
decl := f.Decls[i]
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if !ok || gen.Tok != token.IMPORT {
|
||||
continue
|
||||
}
|
||||
for j := 0; j < len(gen.Specs); j++ {
|
||||
spec := gen.Specs[j]
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if importName(impspec) != name || importPath(impspec) != path {
|
||||
continue
|
||||
}
|
||||
|
||||
// We found an import spec that imports path.
|
||||
// Delete it.
|
||||
delspecs = append(delspecs, impspec)
|
||||
deleted = true
|
||||
copy(gen.Specs[j:], gen.Specs[j+1:])
|
||||
gen.Specs = gen.Specs[:len(gen.Specs)-1]
|
||||
|
||||
// If this was the last import spec in this decl,
|
||||
// delete the decl, too.
|
||||
if len(gen.Specs) == 0 {
|
||||
copy(f.Decls[i:], f.Decls[i+1:])
|
||||
f.Decls = f.Decls[:len(f.Decls)-1]
|
||||
i--
|
||||
break
|
||||
} else if len(gen.Specs) == 1 {
|
||||
if impspec.Doc != nil {
|
||||
delcomments = append(delcomments, impspec.Doc)
|
||||
}
|
||||
if impspec.Comment != nil {
|
||||
delcomments = append(delcomments, impspec.Comment)
|
||||
}
|
||||
for _, cg := range f.Comments {
|
||||
// Found comment on the same line as the import spec.
|
||||
if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
|
||||
delcomments = append(delcomments, cg)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
spec := gen.Specs[0].(*ast.ImportSpec)
|
||||
|
||||
// Move the documentation right after the import decl.
|
||||
if spec.Doc != nil {
|
||||
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line {
|
||||
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
|
||||
}
|
||||
}
|
||||
for _, cg := range f.Comments {
|
||||
if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
|
||||
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line {
|
||||
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if j > 0 {
|
||||
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
|
||||
lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
|
||||
line := fset.Position(impspec.Path.ValuePos).Line
|
||||
|
||||
// We deleted an entry but now there may be
|
||||
// a blank line-sized hole where the import was.
|
||||
if line-lastLine > 1 {
|
||||
// There was a blank line immediately preceding the deleted import,
|
||||
// so there's no need to close the hole.
|
||||
// Do nothing.
|
||||
} else if line != fset.File(gen.Rparen).LineCount() {
|
||||
// There was no blank line. Close the hole.
|
||||
fset.File(gen.Rparen).MergeLine(line)
|
||||
}
|
||||
}
|
||||
j--
|
||||
}
|
||||
}
|
||||
|
||||
// Delete imports from f.Imports.
|
||||
for i := 0; i < len(f.Imports); i++ {
|
||||
imp := f.Imports[i]
|
||||
for j, del := range delspecs {
|
||||
if imp == del {
|
||||
copy(f.Imports[i:], f.Imports[i+1:])
|
||||
f.Imports = f.Imports[:len(f.Imports)-1]
|
||||
copy(delspecs[j:], delspecs[j+1:])
|
||||
delspecs = delspecs[:len(delspecs)-1]
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete comments from f.Comments.
|
||||
for i := 0; i < len(f.Comments); i++ {
|
||||
cg := f.Comments[i]
|
||||
for j, del := range delcomments {
|
||||
if cg == del {
|
||||
copy(f.Comments[i:], f.Comments[i+1:])
|
||||
f.Comments = f.Comments[:len(f.Comments)-1]
|
||||
copy(delcomments[j:], delcomments[j+1:])
|
||||
delcomments = delcomments[:len(delcomments)-1]
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(delspecs) > 0 {
|
||||
panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// RewriteImport rewrites any import of path oldPath to path newPath.
|
||||
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
|
||||
for _, imp := range f.Imports {
|
||||
if importPath(imp) == oldPath {
|
||||
rewrote = true
|
||||
// record old End, because the default is to compute
|
||||
// it using the length of imp.Path.Value.
|
||||
imp.EndPos = imp.End()
|
||||
imp.Path.Value = strconv.Quote(newPath)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UsesImport reports whether a given import is used.
|
||||
func UsesImport(f *ast.File, path string) (used bool) {
|
||||
spec := importSpec(f, path)
|
||||
if spec == nil {
|
||||
return
|
||||
}
|
||||
|
||||
name := spec.Name.String()
|
||||
switch name {
|
||||
case "<nil>":
|
||||
// If the package name is not explicitly specified,
|
||||
// make an educated guess. This is not guaranteed to be correct.
|
||||
lastSlash := strings.LastIndex(path, "/")
|
||||
if lastSlash == -1 {
|
||||
name = path
|
||||
} else {
|
||||
name = path[lastSlash+1:]
|
||||
}
|
||||
case "_", ".":
|
||||
// Not sure if this import is used - err on the side of caution.
|
||||
return true
|
||||
}
|
||||
|
||||
ast.Walk(visitFn(func(n ast.Node) {
|
||||
sel, ok := n.(*ast.SelectorExpr)
|
||||
if ok && isTopName(sel.X, name) {
|
||||
used = true
|
||||
}
|
||||
}), f)
|
||||
|
||||
return
|
||||
}
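UsesImport and DeleteImport combine naturally into a conservative "prune unused imports" pass. The sketch below is an illustration under stated assumptions (it only touches plain, unnamed imports, leaving named, blank and dot imports alone) and is not something this package itself provides.

package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"os"
	"strconv"

	"golang.org/x/tools/go/ast/astutil"
)

// pruneUnusedImports deletes every plain (unnamed) import of f whose package
// name is never referenced. Named, blank and dot imports are skipped, matching
// the cases UsesImport treats conservatively.
func pruneUnusedImports(fset *token.FileSet, f *ast.File) {
	specs := append([]*ast.ImportSpec(nil), f.Imports...) // copy: DeleteImport mutates f.Imports
	for _, spec := range specs {
		if spec.Name != nil {
			continue
		}
		path, err := strconv.Unquote(spec.Path.Value)
		if err != nil {
			continue
		}
		if !astutil.UsesImport(f, path) {
			astutil.DeleteImport(fset, f, path)
		}
	}
}

func main() {
	// Hypothetical input: "os" is imported but never used.
	const src = `package p

import (
	"fmt"
	"os"
)

func hello() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	pruneUnusedImports(fset, f)
	if err := format.Node(os.Stdout, fset, f); err != nil {
		panic(err)
	}
}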
|
||||
|
||||
type visitFn func(node ast.Node)
|
||||
|
||||
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
|
||||
fn(node)
|
||||
return fn
|
||||
}
|
||||
|
||||
// imports reports whether f has an import with the specified name and path.
|
||||
func imports(f *ast.File, name, path string) bool {
|
||||
for _, s := range f.Imports {
|
||||
if importName(s) == name && importPath(s) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// importSpec returns the import spec if f imports path,
|
||||
// or nil otherwise.
|
||||
func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
||||
for _, s := range f.Imports {
|
||||
if importPath(s) == path {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// importName returns the name of s,
|
||||
// or "" if the import is not named.
|
||||
func importName(s *ast.ImportSpec) string {
|
||||
if s.Name == nil {
|
||||
return ""
|
||||
}
|
||||
return s.Name.Name
|
||||
}
|
||||
|
||||
// importPath returns the unquoted import path of s,
|
||||
// or "" if the path is not properly quoted.
|
||||
func importPath(s *ast.ImportSpec) string {
|
||||
t, err := strconv.Unquote(s.Path.Value)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// declImports reports whether gen contains an import of path.
|
||||
func declImports(gen *ast.GenDecl, path string) bool {
|
||||
if gen.Tok != token.IMPORT {
|
||||
return false
|
||||
}
|
||||
for _, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if importPath(impspec) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// matchLen returns the length of the longest path segment prefix shared by x and y.
|
||||
func matchLen(x, y string) int {
|
||||
n := 0
|
||||
for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
|
||||
if x[i] == '/' {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// isTopName returns true if n is a top-level unresolved identifier with the given name.
|
||||
func isTopName(n ast.Expr, name string) bool {
|
||||
id, ok := n.(*ast.Ident)
|
||||
return ok && id.Name == name && id.Obj == nil
|
||||
}
|
||||
|
||||
// Imports returns the file imports grouped by paragraph.
|
||||
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
|
||||
var groups [][]*ast.ImportSpec
|
||||
|
||||
for _, decl := range f.Decls {
|
||||
genDecl, ok := decl.(*ast.GenDecl)
|
||||
if !ok || genDecl.Tok != token.IMPORT {
|
||||
break
|
||||
}
|
||||
|
||||
group := []*ast.ImportSpec{}
|
||||
|
||||
var lastLine int
|
||||
for _, spec := range genDecl.Specs {
|
||||
importSpec := spec.(*ast.ImportSpec)
|
||||
pos := importSpec.Path.ValuePos
|
||||
line := fset.Position(pos).Line
|
||||
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
|
||||
groups = append(groups, group)
|
||||
group = []*ast.ImportSpec{}
|
||||
}
|
||||
group = append(group, importSpec)
|
||||
lastLine = line
|
||||
}
|
||||
groups = append(groups, group)
|
||||
}
|
||||
|
||||
return groups
|
||||
}
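A short sketch showing how Imports exposes the blank-line grouping of an import block; the embedded source is invented for the example.

package main

import (
	"fmt"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	// Hypothetical file with two import paragraphs.
	const src = `package p

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	// Each group corresponds to one blank-line-separated paragraph.
	for i, group := range astutil.Imports(fset, f) {
		fmt.Printf("group %d:\n", i)
		for _, spec := range group {
			fmt.Printf("\t%s\n", spec.Path.Value)
		}
	}
}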
|
477 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go generated vendored Normal file
@@ -0,0 +1,477 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package astutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
|
||||
// before and/or after the node's children, using a Cursor describing
|
||||
// the current node and providing operations on it.
|
||||
//
|
||||
// The return value of ApplyFunc controls the syntax tree traversal.
|
||||
// See Apply for details.
|
||||
type ApplyFunc func(*Cursor) bool
|
||||
|
||||
// Apply traverses a syntax tree recursively, starting with root,
|
||||
// and calling pre and post for each node as described below.
|
||||
// Apply returns the syntax tree, possibly modified.
|
||||
//
|
||||
// If pre is not nil, it is called for each node before the node's
|
||||
// children are traversed (pre-order). If pre returns false, no
|
||||
// children are traversed, and post is not called for that node.
|
||||
//
|
||||
// If post is not nil, and a prior call of pre didn't return false,
|
||||
// post is called for each node after its children are traversed
|
||||
// (post-order). If post returns false, traversal is terminated and
|
||||
// Apply returns immediately.
|
||||
//
|
||||
// Only fields that refer to AST nodes are considered children;
|
||||
// i.e., token.Pos, Scopes, Objects, and fields of basic types
|
||||
// (strings, etc.) are ignored.
|
||||
//
|
||||
// Children are traversed in the order in which they appear in the
|
||||
// respective node's struct definition. A package's files are
|
||||
// traversed in the filenames' alphabetical order.
|
||||
//
|
||||
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
|
||||
parent := &struct{ ast.Node }{root}
|
||||
defer func() {
|
||||
if r := recover(); r != nil && r != abort {
|
||||
panic(r)
|
||||
}
|
||||
result = parent.Node
|
||||
}()
|
||||
a := &application{pre: pre, post: post}
|
||||
a.apply(parent, "Node", nil, root)
|
||||
return
|
||||
}
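A hedged sketch of the typical Apply pattern: a pre-order callback inspects c.Node() and uses Cursor.Replace to rewrite matching nodes. The identifier names and source text are invented for the example, and the sketch only rewrites syntax; it does not update any resolved object or type information.

package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	// Hypothetical input for the sketch.
	const src = `package p

func oldName() int { return oldName2() }

func oldName2() int { return 1 }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Rename every identifier "oldName" to "newName" using a pre-order callback.
	result := astutil.Apply(f, func(c *astutil.Cursor) bool {
		if ident, ok := c.Node().(*ast.Ident); ok && ident.Name == "oldName" {
			c.Replace(&ast.Ident{Name: "newName", NamePos: ident.NamePos})
		}
		return true // keep traversing
	}, nil)

	if err := format.Node(os.Stdout, fset, result); err != nil {
		panic(err)
	}
}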
|
||||
|
||||
var abort = new(int) // singleton, to signal termination of Apply
|
||||
|
||||
// A Cursor describes a node encountered during Apply.
|
||||
// Information about the node and its parent is available
|
||||
// from the Node, Parent, Name, and Index methods.
|
||||
//
|
||||
// If p is a variable of type and value of the current parent node
|
||||
// c.Parent(), and f is the field identifier with name c.Name(),
|
||||
// the following invariants hold:
|
||||
//
|
||||
// p.f == c.Node() if c.Index() < 0
|
||||
// p.f[c.Index()] == c.Node() if c.Index() >= 0
|
||||
//
|
||||
// The methods Replace, Delete, InsertBefore, and InsertAfter
|
||||
// can be used to change the AST without disrupting Apply.
|
||||
type Cursor struct {
|
||||
parent ast.Node
|
||||
name string
|
||||
iter *iterator // valid if non-nil
|
||||
node ast.Node
|
||||
}
|
||||
|
||||
// Node returns the current Node.
|
||||
func (c *Cursor) Node() ast.Node { return c.node }
|
||||
|
||||
// Parent returns the parent of the current Node.
|
||||
func (c *Cursor) Parent() ast.Node { return c.parent }
|
||||
|
||||
// Name returns the name of the parent Node field that contains the current Node.
|
||||
// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
|
||||
// the filename for the current Node.
|
||||
func (c *Cursor) Name() string { return c.name }
|
||||
|
||||
// Index reports the index >= 0 of the current Node in the slice of Nodes that
|
||||
// contains it, or a value < 0 if the current Node is not part of a slice.
|
||||
// The index of the current node changes if InsertBefore is called while
|
||||
// processing the current node.
|
||||
func (c *Cursor) Index() int {
|
||||
if c.iter != nil {
|
||||
return c.iter.index
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// field returns the current node's parent field value.
|
||||
func (c *Cursor) field() reflect.Value {
|
||||
return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
|
||||
}
|
||||
|
||||
// Replace replaces the current Node with n.
|
||||
// The replacement node is not walked by Apply.
|
||||
func (c *Cursor) Replace(n ast.Node) {
|
||||
if _, ok := c.node.(*ast.File); ok {
|
||||
file, ok := n.(*ast.File)
|
||||
if !ok {
|
||||
panic("attempt to replace *ast.File with non-*ast.File")
|
||||
}
|
||||
c.parent.(*ast.Package).Files[c.name] = file
|
||||
return
|
||||
}
|
||||
|
||||
v := c.field()
|
||||
if i := c.Index(); i >= 0 {
|
||||
v = v.Index(i)
|
||||
}
|
||||
v.Set(reflect.ValueOf(n))
|
||||
}
|
||||
|
||||
// Delete deletes the current Node from its containing slice.
|
||||
// If the current Node is not part of a slice, Delete panics.
|
||||
// As a special case, if the current node is a package file,
|
||||
// Delete removes it from the package's Files map.
|
||||
func (c *Cursor) Delete() {
|
||||
if _, ok := c.node.(*ast.File); ok {
|
||||
delete(c.parent.(*ast.Package).Files, c.name)
|
||||
return
|
||||
}
|
||||
|
||||
i := c.Index()
|
||||
if i < 0 {
|
||||
panic("Delete node not contained in slice")
|
||||
}
|
||||
v := c.field()
|
||||
l := v.Len()
|
||||
reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
|
||||
v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
|
||||
v.SetLen(l - 1)
|
||||
c.iter.step--
|
||||
}
|
||||
|
||||
// InsertAfter inserts n after the current Node in its containing slice.
|
||||
// If the current Node is not part of a slice, InsertAfter panics.
|
||||
// Apply does not walk n.
|
||||
func (c *Cursor) InsertAfter(n ast.Node) {
|
||||
i := c.Index()
|
||||
if i < 0 {
|
||||
panic("InsertAfter node not contained in slice")
|
||||
}
|
||||
v := c.field()
|
||||
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
|
||||
l := v.Len()
|
||||
reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
|
||||
v.Index(i + 1).Set(reflect.ValueOf(n))
|
||||
c.iter.step++
|
||||
}
|
||||
|
||||
// InsertBefore inserts n before the current Node in its containing slice.
|
||||
// If the current Node is not part of a slice, InsertBefore panics.
|
||||
// Apply will not walk n.
|
||||
func (c *Cursor) InsertBefore(n ast.Node) {
|
||||
i := c.Index()
|
||||
if i < 0 {
|
||||
panic("InsertBefore node not contained in slice")
|
||||
}
|
||||
v := c.field()
|
||||
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
|
||||
l := v.Len()
|
||||
reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
|
||||
v.Index(i).Set(reflect.ValueOf(n))
|
||||
c.iter.index++
|
||||
}
|
||||
|
||||
// application carries all the shared data so we can pass it around cheaply.
|
||||
type application struct {
|
||||
pre, post ApplyFunc
|
||||
cursor Cursor
|
||||
iter iterator
|
||||
}
|
||||
|
||||
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
|
||||
// convert typed nil into untyped nil
|
||||
if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
n = nil
|
||||
}
|
||||
|
||||
// avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
|
||||
saved := a.cursor
|
||||
a.cursor.parent = parent
|
||||
a.cursor.name = name
|
||||
a.cursor.iter = iter
|
||||
a.cursor.node = n
|
||||
|
||||
if a.pre != nil && !a.pre(&a.cursor) {
|
||||
a.cursor = saved
|
||||
return
|
||||
}
|
||||
|
||||
// walk children
|
||||
// (the order of the cases matches the order of the corresponding node types in go/ast)
|
||||
switch n := n.(type) {
|
||||
case nil:
|
||||
// nothing to do
|
||||
|
||||
// Comments and fields
|
||||
case *ast.Comment:
|
||||
// nothing to do
|
||||
|
||||
case *ast.CommentGroup:
|
||||
if n != nil {
|
||||
a.applyList(n, "List")
|
||||
}
|
||||
|
||||
case *ast.Field:
|
||||
a.apply(n, "Doc", nil, n.Doc)
|
||||
a.applyList(n, "Names")
|
||||
a.apply(n, "Type", nil, n.Type)
|
||||
a.apply(n, "Tag", nil, n.Tag)
|
||||
a.apply(n, "Comment", nil, n.Comment)
|
||||
|
||||
case *ast.FieldList:
|
||||
a.applyList(n, "List")
|
||||
|
||||
// Expressions
|
||||
case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
|
||||
// nothing to do
|
||||
|
||||
case *ast.Ellipsis:
|
||||
a.apply(n, "Elt", nil, n.Elt)
|
||||
|
||||
case *ast.FuncLit:
|
||||
a.apply(n, "Type", nil, n.Type)
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
|
||||
case *ast.CompositeLit:
|
||||
a.apply(n, "Type", nil, n.Type)
|
||||
a.applyList(n, "Elts")
|
||||
|
||||
case *ast.ParenExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
a.apply(n, "Sel", nil, n.Sel)
|
||||
|
||||
case *ast.IndexExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
a.apply(n, "Index", nil, n.Index)
|
||||
|
||||
case *ast.SliceExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
a.apply(n, "Low", nil, n.Low)
|
||||
a.apply(n, "High", nil, n.High)
|
||||
a.apply(n, "Max", nil, n.Max)
|
||||
|
||||
case *ast.TypeAssertExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
a.apply(n, "Type", nil, n.Type)
|
||||
|
||||
case *ast.CallExpr:
|
||||
a.apply(n, "Fun", nil, n.Fun)
|
||||
a.applyList(n, "Args")
|
||||
|
||||
case *ast.StarExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
|
||||
case *ast.UnaryExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
|
||||
case *ast.BinaryExpr:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
a.apply(n, "Y", nil, n.Y)
|
||||
|
||||
case *ast.KeyValueExpr:
|
||||
a.apply(n, "Key", nil, n.Key)
|
||||
a.apply(n, "Value", nil, n.Value)
|
||||
|
||||
// Types
|
||||
case *ast.ArrayType:
|
||||
a.apply(n, "Len", nil, n.Len)
|
||||
a.apply(n, "Elt", nil, n.Elt)
|
||||
|
||||
case *ast.StructType:
|
||||
a.apply(n, "Fields", nil, n.Fields)
|
||||
|
||||
case *ast.FuncType:
|
||||
a.apply(n, "Params", nil, n.Params)
|
||||
a.apply(n, "Results", nil, n.Results)
|
||||
|
||||
case *ast.InterfaceType:
|
||||
a.apply(n, "Methods", nil, n.Methods)
|
||||
|
||||
case *ast.MapType:
|
||||
a.apply(n, "Key", nil, n.Key)
|
||||
a.apply(n, "Value", nil, n.Value)
|
||||
|
||||
case *ast.ChanType:
|
||||
a.apply(n, "Value", nil, n.Value)
|
||||
|
||||
// Statements
|
||||
case *ast.BadStmt:
|
||||
// nothing to do
|
||||
|
||||
case *ast.DeclStmt:
|
||||
a.apply(n, "Decl", nil, n.Decl)
|
||||
|
||||
case *ast.EmptyStmt:
|
||||
// nothing to do
|
||||
|
||||
case *ast.LabeledStmt:
|
||||
a.apply(n, "Label", nil, n.Label)
|
||||
a.apply(n, "Stmt", nil, n.Stmt)
|
||||
|
||||
case *ast.ExprStmt:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
|
||||
case *ast.SendStmt:
|
||||
a.apply(n, "Chan", nil, n.Chan)
|
||||
a.apply(n, "Value", nil, n.Value)
|
||||
|
||||
case *ast.IncDecStmt:
|
||||
a.apply(n, "X", nil, n.X)
|
||||
|
||||
case *ast.AssignStmt:
|
||||
a.applyList(n, "Lhs")
|
||||
a.applyList(n, "Rhs")
|
||||
|
||||
case *ast.GoStmt:
|
||||
a.apply(n, "Call", nil, n.Call)
|
||||
|
||||
case *ast.DeferStmt:
|
||||
a.apply(n, "Call", nil, n.Call)
|
||||
|
||||
case *ast.ReturnStmt:
|
||||
a.applyList(n, "Results")
|
||||
|
||||
case *ast.BranchStmt:
|
||||
a.apply(n, "Label", nil, n.Label)
|
||||
|
||||
case *ast.BlockStmt:
|
||||
a.applyList(n, "List")
|
||||
|
||||
case *ast.IfStmt:
|
||||
a.apply(n, "Init", nil, n.Init)
|
||||
a.apply(n, "Cond", nil, n.Cond)
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
a.apply(n, "Else", nil, n.Else)
|
||||
|
||||
case *ast.CaseClause:
|
||||
a.applyList(n, "List")
|
||||
a.applyList(n, "Body")
|
||||
|
||||
case *ast.SwitchStmt:
|
||||
a.apply(n, "Init", nil, n.Init)
|
||||
a.apply(n, "Tag", nil, n.Tag)
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
|
||||
case *ast.TypeSwitchStmt:
|
||||
a.apply(n, "Init", nil, n.Init)
|
||||
a.apply(n, "Assign", nil, n.Assign)
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
|
||||
case *ast.CommClause:
|
||||
a.apply(n, "Comm", nil, n.Comm)
|
||||
a.applyList(n, "Body")
|
||||
|
||||
case *ast.SelectStmt:
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
|
||||
case *ast.ForStmt:
|
||||
a.apply(n, "Init", nil, n.Init)
|
||||
a.apply(n, "Cond", nil, n.Cond)
|
||||
a.apply(n, "Post", nil, n.Post)
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
|
||||
case *ast.RangeStmt:
|
||||
a.apply(n, "Key", nil, n.Key)
|
||||
a.apply(n, "Value", nil, n.Value)
|
||||
a.apply(n, "X", nil, n.X)
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
|
||||
// Declarations
|
||||
case *ast.ImportSpec:
|
||||
a.apply(n, "Doc", nil, n.Doc)
|
||||
a.apply(n, "Name", nil, n.Name)
|
||||
a.apply(n, "Path", nil, n.Path)
|
||||
a.apply(n, "Comment", nil, n.Comment)
|
||||
|
||||
case *ast.ValueSpec:
|
||||
a.apply(n, "Doc", nil, n.Doc)
|
||||
a.applyList(n, "Names")
|
||||
a.apply(n, "Type", nil, n.Type)
|
||||
a.applyList(n, "Values")
|
||||
a.apply(n, "Comment", nil, n.Comment)
|
||||
|
||||
case *ast.TypeSpec:
|
||||
a.apply(n, "Doc", nil, n.Doc)
|
||||
a.apply(n, "Name", nil, n.Name)
|
||||
a.apply(n, "Type", nil, n.Type)
|
||||
a.apply(n, "Comment", nil, n.Comment)
|
||||
|
||||
case *ast.BadDecl:
|
||||
// nothing to do
|
||||
|
||||
case *ast.GenDecl:
|
||||
a.apply(n, "Doc", nil, n.Doc)
|
||||
a.applyList(n, "Specs")
|
||||
|
||||
case *ast.FuncDecl:
|
||||
a.apply(n, "Doc", nil, n.Doc)
|
||||
a.apply(n, "Recv", nil, n.Recv)
|
||||
a.apply(n, "Name", nil, n.Name)
|
||||
a.apply(n, "Type", nil, n.Type)
|
||||
a.apply(n, "Body", nil, n.Body)
|
||||
|
||||
// Files and packages
|
||||
case *ast.File:
|
||||
a.apply(n, "Doc", nil, n.Doc)
|
||||
a.apply(n, "Name", nil, n.Name)
|
||||
a.applyList(n, "Decls")
|
||||
// Don't walk n.Comments; they have either been walked already if
|
||||
// they are Doc comments, or they can be easily walked explicitly.
|
||||
|
||||
case *ast.Package:
|
||||
// collect and sort names for reproducible behavior
|
||||
var names []string
|
||||
for name := range n.Files {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
a.apply(n, name, nil, n.Files[name])
|
||||
}
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("Apply: unexpected node type %T", n))
|
||||
}
|
||||
|
||||
if a.post != nil && !a.post(&a.cursor) {
|
||||
panic(abort)
|
||||
}
|
||||
|
||||
a.cursor = saved
|
||||
}
|
||||
|
||||
// An iterator controls iteration over a slice of nodes.
|
||||
type iterator struct {
|
||||
index, step int
|
||||
}
|
||||
|
||||
func (a *application) applyList(parent ast.Node, name string) {
|
||||
// avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
|
||||
saved := a.iter
|
||||
a.iter.index = 0
|
||||
for {
|
||||
// must reload parent.name each time, since cursor modifications might change it
|
||||
v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
|
||||
if a.iter.index >= v.Len() {
|
||||
break
|
||||
}
|
||||
|
||||
// element x may be nil in a bad AST - be cautious
|
||||
var x ast.Node
|
||||
if e := v.Index(a.iter.index); e.IsValid() {
|
||||
x = e.Interface().(ast.Node)
|
||||
}
|
||||
|
||||
a.iter.step = 1
|
||||
a.apply(parent, name, &a.iter, x)
|
||||
a.iter.index += a.iter.step
|
||||
}
|
||||
a.iter = saved
|
||||
}
|
14 vendor/golang.org/x/tools/go/ast/astutil/util.go generated vendored Normal file
@@ -0,0 +1,14 @@
package astutil

import "go/ast"

// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
	for {
		p, ok := e.(*ast.ParenExpr)
		if !ok {
			return e
		}
		e = p.X
	}
}
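A tiny usage sketch for Unparen; the expression text is arbitrary.

package main

import (
	"fmt"
	"go/parser"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	// "((x + 1))" parses to nested *ast.ParenExpr nodes around a *ast.BinaryExpr.
	expr, err := parser.ParseExpr("((x + 1))")
	if err != nil {
		panic(err)
	}
	fmt.Printf("before: %T, after: %T\n", expr, astutil.Unparen(expr))
	// Prints: before: *ast.ParenExpr, after: *ast.BinaryExpr
}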
109 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go generated vendored Normal file
@@ -0,0 +1,109 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gcexportdata provides functions for locating, reading, and
|
||||
// writing export data files containing type information produced by the
|
||||
// gc compiler. This package supports go1.7 export data format and all
|
||||
// later versions.
|
||||
//
|
||||
// Although it might seem convenient for this package to live alongside
|
||||
// go/types in the standard library, this would cause version skew
|
||||
// problems for developer tools that use it, since they must be able to
|
||||
// consume the outputs of the gc compiler both before and after a Go
|
||||
// update such as from Go 1.7 to Go 1.8. Because this package lives in
|
||||
// golang.org/x/tools, sites can update their version of this repo some
|
||||
// time before the Go 1.8 release and rebuild and redeploy their
|
||||
// developer tools, which will then be able to consume both Go 1.7 and
|
||||
// Go 1.8 export data files, so they will work before and after the
|
||||
// Go update. (See discussion at https://golang.org/issue/15651.)
|
||||
//
|
||||
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"golang.org/x/tools/go/internal/gcimporter"
|
||||
)
|
||||
|
||||
// Find returns the name of an object (.o) or archive (.a) file
|
||||
// containing type information for the specified import path,
|
||||
// using the workspace layout conventions of go/build.
|
||||
// If no file was found, an empty filename is returned.
|
||||
//
|
||||
// A relative srcDir is interpreted relative to the current working directory.
|
||||
//
|
||||
// Find also returns the package's resolved (canonical) import path,
|
||||
// reflecting the effects of srcDir and vendoring on importPath.
|
||||
func Find(importPath, srcDir string) (filename, path string) {
|
||||
return gcimporter.FindPkg(importPath, srcDir)
|
||||
}
|
||||
|
||||
// NewReader returns a reader for the export data section of an object
|
||||
// (.o) or archive (.a) file read from r. The new reader may provide
|
||||
// additional trailing data beyond the end of the export data.
|
||||
func NewReader(r io.Reader) (io.Reader, error) {
|
||||
buf := bufio.NewReader(r)
|
||||
_, err := gcimporter.FindExportData(buf)
|
||||
// If we ever switch to a zip-like archive format with the ToC
|
||||
// at the end, we can return the correct portion of export data,
|
||||
// but for now we must return the entire rest of the file.
|
||||
return buf, err
|
||||
}
|
||||
|
||||
// Read reads export data from in, decodes it, and returns type
|
||||
// information for the package.
|
||||
// The package name is specified by path.
|
||||
// File position information is added to fset.
|
||||
//
|
||||
// Read may inspect and add to the imports map to ensure that references
|
||||
// within the export data to other packages are consistent. The caller
|
||||
// must ensure that imports[path] does not exist, or exists but is
|
||||
// incomplete (see types.Package.Complete), and Read inserts the
|
||||
// resulting package into this map entry.
|
||||
//
|
||||
// On return, the state of the reader is undefined.
|
||||
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
|
||||
data, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
|
||||
}
|
||||
|
||||
if bytes.HasPrefix(data, []byte("!<arch>")) {
|
||||
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
|
||||
}
|
||||
|
||||
// The App Engine Go runtime v1.6 uses the old export data format.
|
||||
// TODO(adonovan): delete once v1.7 has been around for a while.
|
||||
if bytes.HasPrefix(data, []byte("package ")) {
|
||||
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
|
||||
}
|
||||
|
||||
// The indexed export format starts with an 'i'; the older
|
||||
// binary export format starts with a 'c', 'd', or 'v'
|
||||
// (from "version"). Select appropriate importer.
|
||||
if len(data) > 0 && data[0] == 'i' {
|
||||
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
|
||||
return pkg, err
|
||||
}
|
||||
|
||||
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
|
||||
return pkg, err
|
||||
}
|
||||
|
||||
// Write writes encoded type information for the specified package to out.
|
||||
// The FileSet provides file position information for named objects.
|
||||
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
|
||||
b, err := gcimporter.IExportData(fset, pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = out.Write(b)
|
||||
return err
|
||||
}
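A sketch of the Find / NewReader / Read pipeline described above. The import path "fmt" and the empty source directory are placeholders; depending on the toolchain and workspace layout, Find may not locate any export data, which the sketch simply treats as a fatal error.

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate export data for a placeholder import path.
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatalf("no export data found for %q", path)
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Skip past the archive/object wrapper to the export data section.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "exports", len(pkg.Scope().Names()), "names")
}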
|
73 vendor/golang.org/x/tools/go/gcexportdata/importer.go generated vendored Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gcexportdata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"os"
|
||||
)
|
||||
|
||||
// NewImporter returns a new instance of the types.Importer interface
|
||||
// that reads type information from export data files written by gc.
|
||||
// The Importer also satisfies types.ImporterFrom.
|
||||
//
|
||||
// Export data files are located using "go build" workspace conventions
|
||||
// and the build.Default context.
|
||||
//
|
||||
// Use this importer instead of go/importer.For("gc", ...) to avoid the
|
||||
// version-skew problems described in the documentation of this package,
|
||||
// or to control the FileSet or access the imports map populated during
|
||||
// package loading.
|
||||
//
|
||||
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
|
||||
return importer{fset, imports}
|
||||
}
|
||||
|
||||
type importer struct {
|
||||
fset *token.FileSet
|
||||
imports map[string]*types.Package
|
||||
}
|
||||
|
||||
func (imp importer) Import(importPath string) (*types.Package, error) {
|
||||
return imp.ImportFrom(importPath, "", 0)
|
||||
}
|
||||
|
||||
func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
|
||||
filename, path := Find(importPath, srcDir)
|
||||
if filename == "" {
|
||||
if importPath == "unsafe" {
|
||||
// Even for unsafe, call Find first in case
|
||||
// the package was vendored.
|
||||
return types.Unsafe, nil
|
||||
}
|
||||
return nil, fmt.Errorf("can't find import: %s", importPath)
|
||||
}
|
||||
|
||||
if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
|
||||
return pkg, nil // cache hit
|
||||
}
|
||||
|
||||
// open file
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
f.Close()
|
||||
if err != nil {
|
||||
// add file name to error
|
||||
err = fmt.Errorf("reading export data: %s: %v", filename, err)
|
||||
}
|
||||
}()
|
||||
|
||||
r, err := NewReader(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return Read(r, imp.fset, imp.imports, path)
|
||||
}
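The importer is usually plugged into go/types via types.Config, roughly as in this sketch; the package name and source text are invented for illustration.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Hypothetical source to type-check against compiler export data.
	const src = `package demo

import "fmt"

func Hello() { fmt.Println("hello") }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	// The importer resolves "fmt" by reading gc export data rather than source.
	imports := make(map[string]*types.Package)
	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
	pkg, err := conf.Check("demo", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("type-checked", pkg.Path())
}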
|
852 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go generated vendored Normal file
@@ -0,0 +1,852 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Binary package export.
|
||||
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
|
||||
// see that file for specification of the format.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// If debugFormat is set, each integer and string value is preceded by a marker
|
||||
// and position information in the encoding. This mechanism permits an importer
|
||||
// to recognize immediately when it is out of sync. The importer recognizes this
|
||||
// mode automatically (i.e., it can import export data produced with debugging
|
||||
// support even if debugFormat is not set at the time of import). This mode will
|
||||
// lead to massively larger export data (by a factor of 2 to 3) and should only
|
||||
// be enabled during development and debugging.
|
||||
//
|
||||
// NOTE: This flag is the first flag to enable if importing dies because of
|
||||
// (suspected) format errors, and whenever a change is made to the format.
|
||||
const debugFormat = false // default: false
|
||||
|
||||
// If trace is set, debugging output is printed to std out.
|
||||
const trace = false // default: false
|
||||
|
||||
// Current export format version. Increase with each format change.
|
||||
// Note: The latest binary (non-indexed) export format is at version 6.
|
||||
// This exporter is still at level 4, but it doesn't matter since
|
||||
// the binary importer can handle older versions just fine.
|
||||
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
|
||||
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
|
||||
// 4: type name objects support type aliases, uses aliasTag
|
||||
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
|
||||
// 2: removed unused bool in ODCL export (compiler only)
|
||||
// 1: header format change (more regular), export package for _ struct fields
|
||||
// 0: Go1.7 encoding
|
||||
const exportVersion = 4
|
||||
|
||||
// trackAllTypes enables cycle tracking for all types, not just named
|
||||
// types. The existing compiler invariants assume that unnamed types
|
||||
// that are not completely set up are not used, or else there are spurious
|
||||
// errors.
|
||||
// If disabled, only named types are tracked, possibly leading to slightly
|
||||
// less efficient encoding in rare cases. It also prevents the export of
|
||||
// some corner-case type declarations (but those are not handled correctly
|
||||
// with the textual export format either).
|
||||
// TODO(gri) enable and remove once issues caused by it are fixed
|
||||
const trackAllTypes = false
|
||||
|
||||
type exporter struct {
|
||||
fset *token.FileSet
|
||||
out bytes.Buffer
|
||||
|
||||
// object -> index maps, indexed in order of serialization
|
||||
strIndex map[string]int
|
||||
pkgIndex map[*types.Package]int
|
||||
typIndex map[types.Type]int
|
||||
|
||||
// position encoding
|
||||
posInfoFormat bool
|
||||
prevFile string
|
||||
prevLine int
|
||||
|
||||
// debugging support
|
||||
written int // bytes written
|
||||
indent int // for trace
|
||||
}
|
||||
|
||||
// internalError represents an error generated inside this package.
|
||||
type internalError string
|
||||
|
||||
func (e internalError) Error() string { return "gcimporter: " + string(e) }
|
||||
|
||||
func internalErrorf(format string, args ...interface{}) error {
|
||||
return internalError(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// BExportData returns binary export data for pkg.
|
||||
// If no file set is provided, position info will be missing.
|
||||
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if ierr, ok := e.(internalError); ok {
|
||||
err = ierr
|
||||
return
|
||||
}
|
||||
// Not an internal error; panic again.
|
||||
panic(e)
|
||||
}
|
||||
}()
|
||||
|
||||
p := exporter{
|
||||
fset: fset,
|
||||
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
|
||||
pkgIndex: make(map[*types.Package]int),
|
||||
typIndex: make(map[types.Type]int),
|
||||
posInfoFormat: true, // TODO(gri) might become a flag, eventually
|
||||
}
|
||||
|
||||
// write version info
|
||||
// The version string must start with "version %d" where %d is the version
|
||||
// number. Additional debugging information may follow after a blank; that
|
||||
// text is ignored by the importer.
|
||||
p.rawStringln(fmt.Sprintf("version %d", exportVersion))
|
||||
var debug string
|
||||
if debugFormat {
|
||||
debug = "debug"
|
||||
}
|
||||
p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
|
||||
p.bool(trackAllTypes)
|
||||
p.bool(p.posInfoFormat)
|
||||
|
||||
// --- generic export data ---
|
||||
|
||||
// populate type map with predeclared "known" types
|
||||
for index, typ := range predeclared() {
|
||||
p.typIndex[typ] = index
|
||||
}
|
||||
if len(p.typIndex) != len(predeclared()) {
|
||||
return nil, internalError("duplicate entries in type map?")
|
||||
}
|
||||
|
||||
// write package data
|
||||
p.pkg(pkg, true)
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
|
||||
// write objects
|
||||
objcount := 0
|
||||
scope := pkg.Scope()
|
||||
for _, name := range scope.Names() {
|
||||
if !ast.IsExported(name) {
|
||||
continue
|
||||
}
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
p.obj(scope.Lookup(name))
|
||||
objcount++
|
||||
}
|
||||
|
||||
// indicate end of list
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
p.tag(endTag)
|
||||
|
||||
// for self-verification only (redundant)
|
||||
p.int(objcount)
|
||||
|
||||
if trace {
|
||||
p.tracef("\n")
|
||||
}
|
||||
|
||||
// --- end of export data ---
|
||||
|
||||
return p.out.Bytes(), nil
|
||||
}
|
||||
|
||||
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
|
||||
if pkg == nil {
|
||||
panic(internalError("unexpected nil pkg"))
|
||||
}
|
||||
|
||||
// if we saw the package before, write its index (>= 0)
|
||||
if i, ok := p.pkgIndex[pkg]; ok {
|
||||
p.index('P', i)
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, remember the package, write the package tag (< 0) and package data
|
||||
if trace {
|
||||
p.tracef("P%d = { ", len(p.pkgIndex))
|
||||
defer p.tracef("} ")
|
||||
}
|
||||
p.pkgIndex[pkg] = len(p.pkgIndex)
|
||||
|
||||
p.tag(packageTag)
|
||||
p.string(pkg.Name())
|
||||
if emptypath {
|
||||
p.string("")
|
||||
} else {
|
||||
p.string(pkg.Path())
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) obj(obj types.Object) {
|
||||
switch obj := obj.(type) {
|
||||
case *types.Const:
|
||||
p.tag(constTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
p.typ(obj.Type())
|
||||
p.value(obj.Val())
|
||||
|
||||
case *types.TypeName:
|
||||
if obj.IsAlias() {
|
||||
p.tag(aliasTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
} else {
|
||||
p.tag(typeTag)
|
||||
}
|
||||
p.typ(obj.Type())
|
||||
|
||||
case *types.Var:
|
||||
p.tag(varTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
p.typ(obj.Type())
|
||||
|
||||
case *types.Func:
|
||||
p.tag(funcTag)
|
||||
p.pos(obj)
|
||||
p.qualifiedName(obj)
|
||||
sig := obj.Type().(*types.Signature)
|
||||
p.paramList(sig.Params(), sig.Variadic())
|
||||
p.paramList(sig.Results(), false)
|
||||
|
||||
default:
|
||||
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) pos(obj types.Object) {
|
||||
if !p.posInfoFormat {
|
||||
return
|
||||
}
|
||||
|
||||
file, line := p.fileLine(obj)
|
||||
if file == p.prevFile {
|
||||
// common case: write line delta
|
||||
// delta == 0 means different file or no line change
|
||||
delta := line - p.prevLine
|
||||
p.int(delta)
|
||||
if delta == 0 {
|
||||
p.int(-1) // -1 means no file change
|
||||
}
|
||||
} else {
|
||||
// different file
|
||||
p.int(0)
|
||||
// Encode filename as length of common prefix with previous
|
||||
// filename, followed by (possibly empty) suffix. Filenames
|
||||
// frequently share path prefixes, so this can save a lot
|
||||
// of space and make export data size less dependent on file
|
||||
// path length. The suffix is unlikely to be empty because
|
||||
// file names tend to end in ".go".
|
||||
n := commonPrefixLen(p.prevFile, file)
|
||||
p.int(n) // n >= 0
|
||||
p.string(file[n:]) // write suffix only
|
||||
p.prevFile = file
|
||||
p.int(line)
|
||||
}
|
||||
p.prevLine = line
|
||||
}
|
||||
|
||||
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
|
||||
if p.fset != nil {
|
||||
pos := p.fset.Position(obj.Pos())
|
||||
file = pos.Filename
|
||||
line = pos.Line
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func commonPrefixLen(a, b string) int {
|
||||
if len(a) > len(b) {
|
||||
a, b = b, a
|
||||
}
|
||||
// len(a) <= len(b)
|
||||
i := 0
|
||||
for i < len(a) && a[i] == b[i] {
|
||||
i++
|
||||
}
|
||||
return i
|
||||
}
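
The pos method above compresses a changed file name by writing only the length of the prefix it shares with the previously emitted name, followed by the remaining suffix. A minimal round-trip sketch of that scheme; the two paths are chosen here purely for illustration:

package main

import "fmt"

func main() {
	// For a file change, pos writes the shared-prefix length and the suffix.
	prev := "vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go"
	next := "vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go"

	n := 0
	for n < len(prev) && n < len(next) && prev[n] == next[n] {
		n++ // same loop as commonPrefixLen above
	}
	suffix := next[n:]

	// A reader that remembers prev can rebuild the new name from (n, suffix).
	decoded := prev[:n] + suffix
	fmt.Println(n, suffix, decoded == next) // prefix length, "import.go", true
}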
|
||||
|
||||
func (p *exporter) qualifiedName(obj types.Object) {
|
||||
p.string(obj.Name())
|
||||
p.pkg(obj.Pkg(), false)
|
||||
}
|
||||
|
||||
func (p *exporter) typ(t types.Type) {
|
||||
if t == nil {
|
||||
panic(internalError("nil type"))
|
||||
}
|
||||
|
||||
// Possible optimization: Anonymous pointer types *T where
|
||||
// T is a named type are common. We could canonicalize all
|
||||
// such types *T to a single type PT = *T. This would lead
|
||||
// to at most one *T entry in typIndex, and all future *T's
|
||||
// would be encoded as the respective index directly. Would
|
||||
// save 1 byte (pointerTag) per *T and reduce the typIndex
|
||||
// size (at the cost of a canonicalization map). We can do
|
||||
// this later, without encoding format change.
|
||||
|
||||
// if we saw the type before, write its index (>= 0)
|
||||
if i, ok := p.typIndex[t]; ok {
|
||||
p.index('T', i)
|
||||
return
|
||||
}
|
||||
|
||||
// otherwise, remember the type, write the type tag (< 0) and type data
|
||||
if trackAllTypes {
|
||||
if trace {
|
||||
p.tracef("T%d = {>\n", len(p.typIndex))
|
||||
defer p.tracef("<\n} ")
|
||||
}
|
||||
p.typIndex[t] = len(p.typIndex)
|
||||
}
|
||||
|
||||
switch t := t.(type) {
|
||||
case *types.Named:
|
||||
if !trackAllTypes {
|
||||
// if we don't track all types, track named types now
|
||||
p.typIndex[t] = len(p.typIndex)
|
||||
}
|
||||
|
||||
p.tag(namedTag)
|
||||
p.pos(t.Obj())
|
||||
p.qualifiedName(t.Obj())
|
||||
p.typ(t.Underlying())
|
||||
if !types.IsInterface(t) {
|
||||
p.assocMethods(t)
|
||||
}
|
||||
|
||||
case *types.Array:
|
||||
p.tag(arrayTag)
|
||||
p.int64(t.Len())
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *types.Slice:
|
||||
p.tag(sliceTag)
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *dddSlice:
|
||||
p.tag(dddTag)
|
||||
p.typ(t.elem)
|
||||
|
||||
case *types.Struct:
|
||||
p.tag(structTag)
|
||||
p.fieldList(t)
|
||||
|
||||
case *types.Pointer:
|
||||
p.tag(pointerTag)
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *types.Signature:
|
||||
p.tag(signatureTag)
|
||||
p.paramList(t.Params(), t.Variadic())
|
||||
p.paramList(t.Results(), false)
|
||||
|
||||
case *types.Interface:
|
||||
p.tag(interfaceTag)
|
||||
p.iface(t)
|
||||
|
||||
case *types.Map:
|
||||
p.tag(mapTag)
|
||||
p.typ(t.Key())
|
||||
p.typ(t.Elem())
|
||||
|
||||
case *types.Chan:
|
||||
p.tag(chanTag)
|
||||
p.int(int(3 - t.Dir())) // hack
|
||||
p.typ(t.Elem())
|
||||
|
||||
default:
|
||||
panic(internalErrorf("unexpected type %T: %s", t, t))
|
||||
}
|
||||
}
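
The channel case above encodes the direction as 3 - t.Dir(), which works because go/types declares SendRecv, SendOnly and RecvOnly as 0, 1 and 2; the result is the same 1/2/3 code that exportWriter.doTyp further down writes out explicitly. A small standalone check of that mapping, for illustration only:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// go/types declares SendRecv, SendOnly, RecvOnly as 0, 1, 2.
	names := []string{"SendRecv", "SendOnly", "RecvOnly"}
	for _, dir := range []types.ChanDir{types.SendRecv, types.SendOnly, types.RecvOnly} {
		code := 3 - int(dir) // same expression as the "hack" in exporter.typ above
		fmt.Printf("%s: %d -> %d\n", names[int(dir)], int(dir), code)
	}
	// SendRecv: 0 -> 3, SendOnly: 1 -> 2, RecvOnly: 2 -> 1, which matches
	// the explicit mapping written out by exportWriter.doTyp further down.
}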
|
||||
|
||||
func (p *exporter) assocMethods(named *types.Named) {
|
||||
// Sort methods (for determinism).
|
||||
var methods []*types.Func
|
||||
for i := 0; i < named.NumMethods(); i++ {
|
||||
methods = append(methods, named.Method(i))
|
||||
}
|
||||
sort.Sort(methodsByName(methods))
|
||||
|
||||
p.int(len(methods))
|
||||
|
||||
if trace && methods != nil {
|
||||
p.tracef("associated methods {>\n")
|
||||
}
|
||||
|
||||
for i, m := range methods {
|
||||
if trace && i > 0 {
|
||||
p.tracef("\n")
|
||||
}
|
||||
|
||||
p.pos(m)
|
||||
name := m.Name()
|
||||
p.string(name)
|
||||
if !exported(name) {
|
||||
p.pkg(m.Pkg(), false)
|
||||
}
|
||||
|
||||
sig := m.Type().(*types.Signature)
|
||||
p.paramList(types.NewTuple(sig.Recv()), false)
|
||||
p.paramList(sig.Params(), sig.Variadic())
|
||||
p.paramList(sig.Results(), false)
|
||||
p.int(0) // dummy value for go:nointerface pragma - ignored by importer
|
||||
}
|
||||
|
||||
if trace && methods != nil {
|
||||
p.tracef("<\n} ")
|
||||
}
|
||||
}
|
||||
|
||||
type methodsByName []*types.Func
|
||||
|
||||
func (x methodsByName) Len() int { return len(x) }
|
||||
func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
|
||||
|
||||
func (p *exporter) fieldList(t *types.Struct) {
|
||||
if trace && t.NumFields() > 0 {
|
||||
p.tracef("fields {>\n")
|
||||
defer p.tracef("<\n} ")
|
||||
}
|
||||
|
||||
p.int(t.NumFields())
|
||||
for i := 0; i < t.NumFields(); i++ {
|
||||
if trace && i > 0 {
|
||||
p.tracef("\n")
|
||||
}
|
||||
p.field(t.Field(i))
|
||||
p.string(t.Tag(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) field(f *types.Var) {
|
||||
if !f.IsField() {
|
||||
panic(internalError("field expected"))
|
||||
}
|
||||
|
||||
p.pos(f)
|
||||
p.fieldName(f)
|
||||
p.typ(f.Type())
|
||||
}
|
||||
|
||||
func (p *exporter) iface(t *types.Interface) {
|
||||
// TODO(gri): enable importer to load embedded interfaces,
|
||||
// then emit Embeddeds and ExplicitMethods separately here.
|
||||
p.int(0)
|
||||
|
||||
n := t.NumMethods()
|
||||
if trace && n > 0 {
|
||||
p.tracef("methods {>\n")
|
||||
defer p.tracef("<\n} ")
|
||||
}
|
||||
p.int(n)
|
||||
for i := 0; i < n; i++ {
|
||||
if trace && i > 0 {
|
||||
p.tracef("\n")
|
||||
}
|
||||
p.method(t.Method(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) method(m *types.Func) {
|
||||
sig := m.Type().(*types.Signature)
|
||||
if sig.Recv() == nil {
|
||||
panic(internalError("method expected"))
|
||||
}
|
||||
|
||||
p.pos(m)
|
||||
p.string(m.Name())
|
||||
if m.Name() != "_" && !ast.IsExported(m.Name()) {
|
||||
p.pkg(m.Pkg(), false)
|
||||
}
|
||||
|
||||
// interface method; no need to encode receiver.
|
||||
p.paramList(sig.Params(), sig.Variadic())
|
||||
p.paramList(sig.Results(), false)
|
||||
}
|
||||
|
||||
func (p *exporter) fieldName(f *types.Var) {
|
||||
name := f.Name()
|
||||
|
||||
if f.Anonymous() {
|
||||
// anonymous field - we distinguish between 3 cases:
|
||||
// 1) field name matches base type name and is exported
|
||||
// 2) field name matches base type name and is not exported
|
||||
// 3) field name doesn't match base type name (alias name)
|
||||
bname := basetypeName(f.Type())
|
||||
if name == bname {
|
||||
if ast.IsExported(name) {
|
||||
name = "" // 1) we don't need to know the field name or package
|
||||
} else {
|
||||
name = "?" // 2) use unexported name "?" to force package export
|
||||
}
|
||||
} else {
|
||||
// 3) indicate alias and export name as is
|
||||
// (this requires an extra "@" but this is a rare case)
|
||||
p.string("@")
|
||||
}
|
||||
}
|
||||
|
||||
p.string(name)
|
||||
if name != "" && !ast.IsExported(name) {
|
||||
p.pkg(f.Pkg(), false)
|
||||
}
|
||||
}
|
||||
|
||||
func basetypeName(typ types.Type) string {
|
||||
switch typ := deref(typ).(type) {
|
||||
case *types.Basic:
|
||||
return typ.Name()
|
||||
case *types.Named:
|
||||
return typ.Obj().Name()
|
||||
default:
|
||||
return "" // unnamed type
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) paramList(params *types.Tuple, variadic bool) {
|
||||
// use negative length to indicate unnamed parameters
|
||||
// (look at the first parameter only since either all
|
||||
// names are present or all are absent)
|
||||
n := params.Len()
|
||||
if n > 0 && params.At(0).Name() == "" {
|
||||
n = -n
|
||||
}
|
||||
p.int(n)
|
||||
for i := 0; i < params.Len(); i++ {
|
||||
q := params.At(i)
|
||||
t := q.Type()
|
||||
if variadic && i == params.Len()-1 {
|
||||
t = &dddSlice{t.(*types.Slice).Elem()}
|
||||
}
|
||||
p.typ(t)
|
||||
if n > 0 {
|
||||
name := q.Name()
|
||||
p.string(name)
|
||||
if name != "_" {
|
||||
p.pkg(q.Pkg(), false)
|
||||
}
|
||||
}
|
||||
p.string("") // no compiler-specific info
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) value(x constant.Value) {
|
||||
if trace {
|
||||
p.tracef("= ")
|
||||
}
|
||||
|
||||
switch x.Kind() {
|
||||
case constant.Bool:
|
||||
tag := falseTag
|
||||
if constant.BoolVal(x) {
|
||||
tag = trueTag
|
||||
}
|
||||
p.tag(tag)
|
||||
|
||||
case constant.Int:
|
||||
if v, exact := constant.Int64Val(x); exact {
|
||||
// common case: x fits into an int64 - use compact encoding
|
||||
p.tag(int64Tag)
|
||||
p.int64(v)
|
||||
return
|
||||
}
|
||||
// uncommon case: large x - use float encoding
|
||||
// (powers of 2 will be encoded efficiently with exponent)
|
||||
p.tag(floatTag)
|
||||
p.float(constant.ToFloat(x))
|
||||
|
||||
case constant.Float:
|
||||
p.tag(floatTag)
|
||||
p.float(x)
|
||||
|
||||
case constant.Complex:
|
||||
p.tag(complexTag)
|
||||
p.float(constant.Real(x))
|
||||
p.float(constant.Imag(x))
|
||||
|
||||
case constant.String:
|
||||
p.tag(stringTag)
|
||||
p.string(constant.StringVal(x))
|
||||
|
||||
case constant.Unknown:
|
||||
// package contains type errors
|
||||
p.tag(unknownTag)
|
||||
|
||||
default:
|
||||
panic(internalErrorf("unexpected value %v (%T)", x, x))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *exporter) float(x constant.Value) {
|
||||
if x.Kind() != constant.Float {
|
||||
panic(internalErrorf("unexpected constant %v, want float", x))
|
||||
}
|
||||
// extract sign (there is no -0)
|
||||
sign := constant.Sign(x)
|
||||
if sign == 0 {
|
||||
// x == 0
|
||||
p.int(0)
|
||||
return
|
||||
}
|
||||
// x != 0
|
||||
|
||||
var f big.Float
|
||||
if v, exact := constant.Float64Val(x); exact {
|
||||
// float64
|
||||
f.SetFloat64(v)
|
||||
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
|
||||
// TODO(gri): add big.Rat accessor to constant.Value.
|
||||
r := valueToRat(num)
|
||||
f.SetRat(r.Quo(r, valueToRat(denom)))
|
||||
} else {
|
||||
// Value too large to represent as a fraction => inaccessible.
|
||||
// TODO(gri): add big.Float accessor to constant.Value.
|
||||
f.SetFloat64(math.MaxFloat64) // FIXME
|
||||
}
|
||||
|
||||
// extract exponent such that 0.5 <= m < 1.0
|
||||
var m big.Float
|
||||
exp := f.MantExp(&m)
|
||||
|
||||
// extract mantissa as *big.Int
|
||||
// - set exponent large enough so mant satisfies mant.IsInt()
|
||||
// - get *big.Int from mant
|
||||
m.SetMantExp(&m, int(m.MinPrec()))
|
||||
mant, acc := m.Int(nil)
|
||||
if acc != big.Exact {
|
||||
panic(internalError("internal error"))
|
||||
}
|
||||
|
||||
p.int(sign)
|
||||
p.int(exp)
|
||||
p.string(string(mant.Bytes()))
|
||||
}
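
exporter.float above stores a non-zero constant as a sign, the base-2 exponent returned by MantExp, and the bytes of an integer mantissa, so the value equals mant * 2**(exp - bitlen(mant)). A short standalone sketch of that decomposition; the value 6.25 is only an example:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Decompose 6.25 the way exporter.float above does.
	var f big.Float
	f.SetFloat64(6.25)

	sign := f.Sign() // 1

	var m big.Float
	exp := f.MantExp(&m) // 6.25 = 0.78125 * 2**3, so exp = 3

	// Scale m up until it is an integer mantissa.
	m.SetMantExp(&m, int(m.MinPrec()))
	mant, _ := m.Int(nil) // 25 (binary 11001, 5 bits)

	fmt.Println(sign, exp, mant) // 1 3 25

	// The original value is mant * 2**(exp - bitlen(mant)):
	// 25 * 2**(3-5) = 25/4 = 6.25.
	check := new(big.Float).SetInt(mant)
	check.SetMantExp(check, exp-mant.BitLen())
	v, _ := check.Float64()
	fmt.Println(v) // 6.25
}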
|
||||
|
||||
func valueToRat(x constant.Value) *big.Rat {
|
||||
// Convert little-endian to big-endian.
|
||||
// I can't believe this is necessary.
|
||||
bytes := constant.Bytes(x)
|
||||
for i := 0; i < len(bytes)/2; i++ {
|
||||
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
|
||||
}
|
||||
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
|
||||
}
|
||||
|
||||
func (p *exporter) bool(b bool) bool {
|
||||
if trace {
|
||||
p.tracef("[")
|
||||
defer p.tracef("= %v] ", b)
|
||||
}
|
||||
|
||||
x := 0
|
||||
if b {
|
||||
x = 1
|
||||
}
|
||||
p.int(x)
|
||||
return b
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Low-level encoders
|
||||
|
||||
func (p *exporter) index(marker byte, index int) {
|
||||
if index < 0 {
|
||||
panic(internalError("invalid index < 0"))
|
||||
}
|
||||
if debugFormat {
|
||||
p.marker('t')
|
||||
}
|
||||
if trace {
|
||||
p.tracef("%c%d ", marker, index)
|
||||
}
|
||||
p.rawInt64(int64(index))
|
||||
}
|
||||
|
||||
func (p *exporter) tag(tag int) {
|
||||
if tag >= 0 {
|
||||
panic(internalError("invalid tag >= 0"))
|
||||
}
|
||||
if debugFormat {
|
||||
p.marker('t')
|
||||
}
|
||||
if trace {
|
||||
p.tracef("%s ", tagString[-tag])
|
||||
}
|
||||
p.rawInt64(int64(tag))
|
||||
}
|
||||
|
||||
func (p *exporter) int(x int) {
|
||||
p.int64(int64(x))
|
||||
}
|
||||
|
||||
func (p *exporter) int64(x int64) {
|
||||
if debugFormat {
|
||||
p.marker('i')
|
||||
}
|
||||
if trace {
|
||||
p.tracef("%d ", x)
|
||||
}
|
||||
p.rawInt64(x)
|
||||
}
|
||||
|
||||
func (p *exporter) string(s string) {
|
||||
if debugFormat {
|
||||
p.marker('s')
|
||||
}
|
||||
if trace {
|
||||
p.tracef("%q ", s)
|
||||
}
|
||||
// if we saw the string before, write its index (>= 0)
|
||||
// (the empty string is mapped to 0)
|
||||
if i, ok := p.strIndex[s]; ok {
|
||||
p.rawInt64(int64(i))
|
||||
return
|
||||
}
|
||||
// otherwise, remember string and write its negative length and bytes
|
||||
p.strIndex[s] = len(p.strIndex)
|
||||
p.rawInt64(-int64(len(s)))
|
||||
for i := 0; i < len(s); i++ {
|
||||
p.rawByte(s[i])
|
||||
}
|
||||
}
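
The string encoder above interns strings: the first occurrence is written as a negative length followed by its bytes, and every repeat as the small non-negative index it was assigned, with 0 reserved for the empty string. A minimal sketch of the same idea outside the exporter; the stringTable type and the sample strings are illustrative, not part of this package:

package main

import "fmt"

// stringTable mimics exporter.strIndex above: the first occurrence of a
// string gets the next index, repeats reuse it.
type stringTable struct {
	index map[string]int
	out   []interface{} // stands in for the varint stream
}

func newStringTable() *stringTable {
	return &stringTable{index: map[string]int{"": 0}} // empty string -> 0
}

func (t *stringTable) emit(s string) {
	if i, ok := t.index[s]; ok {
		t.out = append(t.out, i) // seen before: write index >= 0
		return
	}
	t.index[s] = len(t.index)
	t.out = append(t.out, -len(s), s) // new: negative length, then bytes
}

func main() {
	t := newStringTable()
	for _, s := range []string{"fmt", "Println", "fmt", ""} {
		t.emit(s)
	}
	fmt.Println(t.out) // [-3 fmt -7 Println 1 0]
}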
|
||||
|
||||
// marker emits a marker byte and position information which makes
|
||||
// it easy for a reader to detect if it is "out of sync". Used only
// when debugFormat is enabled.
|
||||
func (p *exporter) marker(m byte) {
|
||||
p.rawByte(m)
|
||||
// Enable this for help tracking down the location
|
||||
// of an incorrect marker when running in debugFormat.
|
||||
if false && trace {
|
||||
p.tracef("#%d ", p.written)
|
||||
}
|
||||
p.rawInt64(int64(p.written))
|
||||
}
|
||||
|
||||
// rawInt64 should only be used by low-level encoders.
|
||||
func (p *exporter) rawInt64(x int64) {
|
||||
var tmp [binary.MaxVarintLen64]byte
|
||||
n := binary.PutVarint(tmp[:], x)
|
||||
for i := 0; i < n; i++ {
|
||||
p.rawByte(tmp[i])
|
||||
}
|
||||
}
|
||||
|
||||
// rawStringln should only be used to emit the initial version string.
|
||||
func (p *exporter) rawStringln(s string) {
|
||||
for i := 0; i < len(s); i++ {
|
||||
p.rawByte(s[i])
|
||||
}
|
||||
p.rawByte('\n')
|
||||
}
|
||||
|
||||
// rawByte is the bottleneck interface to write to p.out.
|
||||
// rawByte escapes b as follows (any encoding that
// hides '$' would do):
|
||||
//
|
||||
// '$' => '|' 'S'
|
||||
// '|' => '|' '|'
|
||||
//
|
||||
// Necessary so other tools can find the end of the
|
||||
// export data by searching for "$$".
|
||||
// rawByte should only be used by low-level encoders.
|
||||
func (p *exporter) rawByte(b byte) {
|
||||
switch b {
|
||||
case '$':
|
||||
// write '$' as '|' 'S'
|
||||
b = 'S'
|
||||
fallthrough
|
||||
case '|':
|
||||
// write '|' as '|' '|'
|
||||
p.out.WriteByte('|')
|
||||
p.written++
|
||||
}
|
||||
p.out.WriteByte(b)
|
||||
p.written++
|
||||
}
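
Because rawByte rewrites every '$' and '|' in the payload, the two-byte sequence "$$" can only appear as the explicit terminator, which is what lets other tools scan for the end of export data. A standalone sketch of the same escape rule; the escape helper and the sample payload are illustrative:

package main

import (
	"bytes"
	"fmt"
)

// escape applies the rawByte rule above: '$' -> "|S", '|' -> "||".
func escape(data []byte) []byte {
	var out bytes.Buffer
	for _, b := range data {
		switch b {
		case '$':
			out.WriteString("|S")
		case '|':
			out.WriteString("||")
		default:
			out.WriteByte(b)
		}
	}
	return out.Bytes()
}

func main() {
	payload := []byte("price: $5 | $9")
	fmt.Printf("%s\n", escape(payload)) // price: |S5 || |S9
	// Because every '$' in the payload is rewritten, appending a literal
	// "$$" terminator afterwards is unambiguous.
}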
|
||||
|
||||
// tracef is like fmt.Printf but it rewrites the format string
|
||||
// to take care of indentation.
|
||||
func (p *exporter) tracef(format string, args ...interface{}) {
|
||||
if strings.ContainsAny(format, "<>\n") {
|
||||
var buf bytes.Buffer
|
||||
for i := 0; i < len(format); i++ {
|
||||
// no need to deal with runes
|
||||
ch := format[i]
|
||||
switch ch {
|
||||
case '>':
|
||||
p.indent++
|
||||
continue
|
||||
case '<':
|
||||
p.indent--
|
||||
continue
|
||||
}
|
||||
buf.WriteByte(ch)
|
||||
if ch == '\n' {
|
||||
for j := p.indent; j > 0; j-- {
|
||||
buf.WriteString(". ")
|
||||
}
|
||||
}
|
||||
}
|
||||
format = buf.String()
|
||||
}
|
||||
fmt.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Debugging support.
|
||||
// (tagString is only used when tracing is enabled)
|
||||
var tagString = [...]string{
|
||||
// Packages
|
||||
-packageTag: "package",
|
||||
|
||||
// Types
|
||||
-namedTag: "named type",
|
||||
-arrayTag: "array",
|
||||
-sliceTag: "slice",
|
||||
-dddTag: "ddd",
|
||||
-structTag: "struct",
|
||||
-pointerTag: "pointer",
|
||||
-signatureTag: "signature",
|
||||
-interfaceTag: "interface",
|
||||
-mapTag: "map",
|
||||
-chanTag: "chan",
|
||||
|
||||
// Values
|
||||
-falseTag: "false",
|
||||
-trueTag: "true",
|
||||
-int64Tag: "int64",
|
||||
-floatTag: "float",
|
||||
-fractionTag: "fraction",
|
||||
-complexTag: "complex",
|
||||
-stringTag: "string",
|
||||
-unknownTag: "unknown",
|
||||
|
||||
// Type aliases
|
||||
-aliasTag: "alias",
|
||||
}
|
1039 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go (generated, vendored, new file)
File diff suppressed because it is too large
93 vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go (generated, vendored, new file)
@@ -0,0 +1,93 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
|
||||
|
||||
// This file implements FindExportData.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
|
||||
// See $GOROOT/include/ar.h.
|
||||
hdr := make([]byte, 16+12+6+6+8+10+2)
|
||||
_, err = io.ReadFull(r, hdr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// leave for debugging
|
||||
if false {
|
||||
fmt.Printf("header: %s", hdr)
|
||||
}
|
||||
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
|
||||
size, err = strconv.Atoi(s)
|
||||
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
|
||||
err = fmt.Errorf("invalid archive header")
|
||||
return
|
||||
}
|
||||
name = strings.TrimSpace(string(hdr[:16]))
|
||||
return
|
||||
}
|
||||
|
||||
// FindExportData positions the reader r at the beginning of the
|
||||
// export data section of an underlying GC-created object/archive
|
||||
// file by reading from it. The reader must be positioned at the
|
||||
// start of the file before calling this function. The hdr result
|
||||
// is the string before the export data, either "$$" or "$$B".
|
||||
//
|
||||
func FindExportData(r *bufio.Reader) (hdr string, err error) {
|
||||
// Read first line to make sure this is an object file.
|
||||
line, err := r.ReadSlice('\n')
|
||||
if err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
|
||||
if string(line) == "!<arch>\n" {
|
||||
// Archive file. Scan to __.PKGDEF.
|
||||
var name string
|
||||
if name, _, err = readGopackHeader(r); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// First entry should be __.PKGDEF.
|
||||
if name != "__.PKGDEF" {
|
||||
err = fmt.Errorf("go archive is missing __.PKGDEF")
|
||||
return
|
||||
}
|
||||
|
||||
// Read first line of __.PKGDEF data, so that line
|
||||
// is once again the first line of the input.
|
||||
if line, err = r.ReadSlice('\n'); err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Now at __.PKGDEF in archive or still at beginning of file.
|
||||
// Either way, line should begin with "go object ".
|
||||
if !strings.HasPrefix(string(line), "go object ") {
|
||||
err = fmt.Errorf("not a Go object file")
|
||||
return
|
||||
}
|
||||
|
||||
// Skip over object header to export data.
|
||||
// Begins after first line starting with $$.
|
||||
for line[0] != '$' {
|
||||
if line, err = r.ReadSlice('\n'); err != nil {
|
||||
err = fmt.Errorf("can't find export data (%v)", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
hdr = string(line)
|
||||
|
||||
return
|
||||
}
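
FindExportData expects a buffered reader positioned at the start of a GC-created object or archive file and leaves it positioned at the export data. A sketch of a caller inside this package; openExportData is a hypothetical helper, and it assumes "os" is imported in addition to the imports above:

// openExportData is an illustrative helper, not part of the vendored file.
func openExportData(path string) (hdr string, r *bufio.Reader, err error) {
	f, err := os.Open(path) // assumes "os" is imported alongside "bufio"
	if err != nil {
		return "", nil, err
	}
	// Note: the caller owns f and must close it when done with r.
	r = bufio.NewReader(f)
	hdr, err = FindExportData(r)
	if err != nil {
		f.Close()
		return "", nil, err
	}
	// hdr is the marker line before the export data, "$$" or "$$B";
	// r now reads the export data itself.
	return hdr, r, nil
}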
|
1078 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go (generated, vendored, new file)
File diff suppressed because it is too large
739 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go (generated, vendored, new file)
@@ -0,0 +1,739 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Indexed binary package export.
|
||||
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
|
||||
// see that file for specification of the format.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Current indexed export format version. Increase with each format change.
|
||||
// 0: Go1.11 encoding
|
||||
const iexportVersion = 0
|
||||
|
||||
// IExportData returns the binary export data for pkg.
|
||||
//
|
||||
// If no file set is provided, position info will be missing.
|
||||
// The package path of the top-level package will not be recorded,
|
||||
// so that calls to IImportData can override with a provided package path.
|
||||
func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if ierr, ok := e.(internalError); ok {
|
||||
err = ierr
|
||||
return
|
||||
}
|
||||
// Not an internal error; panic again.
|
||||
panic(e)
|
||||
}
|
||||
}()
|
||||
|
||||
p := iexporter{
|
||||
out: bytes.NewBuffer(nil),
|
||||
fset: fset,
|
||||
allPkgs: map[*types.Package]bool{},
|
||||
stringIndex: map[string]uint64{},
|
||||
declIndex: map[types.Object]uint64{},
|
||||
typIndex: map[types.Type]uint64{},
|
||||
localpkg: pkg,
|
||||
}
|
||||
|
||||
for i, pt := range predeclared() {
|
||||
p.typIndex[pt] = uint64(i)
|
||||
}
|
||||
if len(p.typIndex) > predeclReserved {
|
||||
panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
|
||||
}
|
||||
|
||||
// Initialize work queue with exported declarations.
|
||||
scope := pkg.Scope()
|
||||
for _, name := range scope.Names() {
|
||||
if ast.IsExported(name) {
|
||||
p.pushDecl(scope.Lookup(name))
|
||||
}
|
||||
}
|
||||
|
||||
// Loop until no more work.
|
||||
for !p.declTodo.empty() {
|
||||
p.doDecl(p.declTodo.popHead())
|
||||
}
|
||||
|
||||
// Append indices to data0 section.
|
||||
dataLen := uint64(p.data0.Len())
|
||||
w := p.newWriter()
|
||||
w.writeIndex(p.declIndex)
|
||||
w.flush()
|
||||
|
||||
// Assemble header.
|
||||
var hdr intWriter
|
||||
hdr.WriteByte('i')
|
||||
hdr.uint64(iexportVersion)
|
||||
hdr.uint64(uint64(p.strings.Len()))
|
||||
hdr.uint64(dataLen)
|
||||
|
||||
// Flush output.
|
||||
io.Copy(p.out, &hdr)
|
||||
io.Copy(p.out, &p.strings)
|
||||
io.Copy(p.out, &p.data0)
|
||||
|
||||
return p.out.Bytes(), nil
|
||||
}
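
IExportData assembles its output as a one-byte 'i' tag followed by three uvarints (the format version and the lengths of the string and declaration sections), then the two sections themselves. A minimal sketch of writing and re-reading just that header with encoding/binary, the same primitive intWriter uses; the section lengths are made up:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// putUvarint appends x as a uvarint, like intWriter.uint64 above.
func putUvarint(buf *bytes.Buffer, x uint64) {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], x)
	buf.Write(tmp[:n])
}

func main() {
	// Write a header like the one assembled at the end of IExportData.
	var hdr bytes.Buffer
	hdr.WriteByte('i')
	putUvarint(&hdr, 0)    // iexportVersion
	putUvarint(&hdr, 1234) // length of the string section (made-up number)
	putUvarint(&hdr, 5678) // length of the declaration data (made-up number)

	// Read it back the way an importer would after consuming the 'i' tag.
	r := bytes.NewReader(hdr.Bytes())
	tag, _ := r.ReadByte()
	version, _ := binary.ReadUvarint(r)
	sLen, _ := binary.ReadUvarint(r)
	dLen, _ := binary.ReadUvarint(r)
	fmt.Printf("%c %d %d %d\n", tag, version, sLen, dLen) // i 0 1234 5678
}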
|
||||
|
||||
// writeIndex writes out an object index. mainIndex indicates whether
|
||||
// we're writing out the main index, which is also read by
|
||||
// non-compiler tools and includes a complete package description
|
||||
// (i.e., name and height).
|
||||
func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
|
||||
// Build a map from packages to objects from that package.
|
||||
pkgObjs := map[*types.Package][]types.Object{}
|
||||
|
||||
// For the main index, make sure to include every package that
|
||||
// we reference, even if we're not exporting (or reexporting)
|
||||
// any symbols from it.
|
||||
pkgObjs[w.p.localpkg] = nil
|
||||
for pkg := range w.p.allPkgs {
|
||||
pkgObjs[pkg] = nil
|
||||
}
|
||||
|
||||
for obj := range index {
|
||||
pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
|
||||
}
|
||||
|
||||
var pkgs []*types.Package
|
||||
for pkg, objs := range pkgObjs {
|
||||
pkgs = append(pkgs, pkg)
|
||||
|
||||
sort.Slice(objs, func(i, j int) bool {
|
||||
return objs[i].Name() < objs[j].Name()
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(pkgs, func(i, j int) bool {
|
||||
return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
|
||||
})
|
||||
|
||||
w.uint64(uint64(len(pkgs)))
|
||||
for _, pkg := range pkgs {
|
||||
w.string(w.exportPath(pkg))
|
||||
w.string(pkg.Name())
|
||||
w.uint64(uint64(0)) // package height is not needed for go/types
|
||||
|
||||
objs := pkgObjs[pkg]
|
||||
w.uint64(uint64(len(objs)))
|
||||
for _, obj := range objs {
|
||||
w.string(obj.Name())
|
||||
w.uint64(index[obj])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type iexporter struct {
|
||||
fset *token.FileSet
|
||||
out *bytes.Buffer
|
||||
|
||||
localpkg *types.Package
|
||||
|
||||
// allPkgs tracks all packages that have been referenced by
|
||||
// the export data, so we can ensure to include them in the
|
||||
// main index.
|
||||
allPkgs map[*types.Package]bool
|
||||
|
||||
declTodo objQueue
|
||||
|
||||
strings intWriter
|
||||
stringIndex map[string]uint64
|
||||
|
||||
data0 intWriter
|
||||
declIndex map[types.Object]uint64
|
||||
typIndex map[types.Type]uint64
|
||||
}
|
||||
|
||||
// stringOff returns the offset of s within the string section.
|
||||
// If not already present, it's added to the end.
|
||||
func (p *iexporter) stringOff(s string) uint64 {
|
||||
off, ok := p.stringIndex[s]
|
||||
if !ok {
|
||||
off = uint64(p.strings.Len())
|
||||
p.stringIndex[s] = off
|
||||
|
||||
p.strings.uint64(uint64(len(s)))
|
||||
p.strings.WriteString(s)
|
||||
}
|
||||
return off
|
||||
}
|
||||
|
||||
// pushDecl adds obj to the declaration work queue, if not already present.
|
||||
func (p *iexporter) pushDecl(obj types.Object) {
|
||||
// Package unsafe is known to the compiler and predeclared.
|
||||
assert(obj.Pkg() != types.Unsafe)
|
||||
|
||||
if _, ok := p.declIndex[obj]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
p.declIndex[obj] = ^uint64(0) // mark obj as present in the work queue
|
||||
p.declTodo.pushTail(obj)
|
||||
}
|
||||
|
||||
// exportWriter handles writing out individual data section chunks.
|
||||
type exportWriter struct {
|
||||
p *iexporter
|
||||
|
||||
data intWriter
|
||||
currPkg *types.Package
|
||||
prevFile string
|
||||
prevLine int64
|
||||
}
|
||||
|
||||
func (w *exportWriter) exportPath(pkg *types.Package) string {
|
||||
if pkg == w.p.localpkg {
|
||||
return ""
|
||||
}
|
||||
return pkg.Path()
|
||||
}
|
||||
|
||||
func (p *iexporter) doDecl(obj types.Object) {
|
||||
w := p.newWriter()
|
||||
w.setPkg(obj.Pkg(), false)
|
||||
|
||||
switch obj := obj.(type) {
|
||||
case *types.Var:
|
||||
w.tag('V')
|
||||
w.pos(obj.Pos())
|
||||
w.typ(obj.Type(), obj.Pkg())
|
||||
|
||||
case *types.Func:
|
||||
sig, _ := obj.Type().(*types.Signature)
|
||||
if sig.Recv() != nil {
|
||||
panic(internalErrorf("unexpected method: %v", sig))
|
||||
}
|
||||
w.tag('F')
|
||||
w.pos(obj.Pos())
|
||||
w.signature(sig)
|
||||
|
||||
case *types.Const:
|
||||
w.tag('C')
|
||||
w.pos(obj.Pos())
|
||||
w.value(obj.Type(), obj.Val())
|
||||
|
||||
case *types.TypeName:
|
||||
if obj.IsAlias() {
|
||||
w.tag('A')
|
||||
w.pos(obj.Pos())
|
||||
w.typ(obj.Type(), obj.Pkg())
|
||||
break
|
||||
}
|
||||
|
||||
// Defined type.
|
||||
w.tag('T')
|
||||
w.pos(obj.Pos())
|
||||
|
||||
underlying := obj.Type().Underlying()
|
||||
w.typ(underlying, obj.Pkg())
|
||||
|
||||
t := obj.Type()
|
||||
if types.IsInterface(t) {
|
||||
break
|
||||
}
|
||||
|
||||
named, ok := t.(*types.Named)
|
||||
if !ok {
|
||||
panic(internalErrorf("%s is not a defined type", t))
|
||||
}
|
||||
|
||||
n := named.NumMethods()
|
||||
w.uint64(uint64(n))
|
||||
for i := 0; i < n; i++ {
|
||||
m := named.Method(i)
|
||||
w.pos(m.Pos())
|
||||
w.string(m.Name())
|
||||
sig, _ := m.Type().(*types.Signature)
|
||||
w.param(sig.Recv())
|
||||
w.signature(sig)
|
||||
}
|
||||
|
||||
default:
|
||||
panic(internalErrorf("unexpected object: %v", obj))
|
||||
}
|
||||
|
||||
p.declIndex[obj] = w.flush()
|
||||
}
|
||||
|
||||
func (w *exportWriter) tag(tag byte) {
|
||||
w.data.WriteByte(tag)
|
||||
}
|
||||
|
||||
func (w *exportWriter) pos(pos token.Pos) {
|
||||
if w.p.fset == nil {
|
||||
w.int64(0)
|
||||
return
|
||||
}
|
||||
|
||||
p := w.p.fset.Position(pos)
|
||||
file := p.Filename
|
||||
line := int64(p.Line)
|
||||
|
||||
// When file is the same as the last position (common case),
|
||||
// we can save a few bytes by delta encoding just the line
|
||||
// number.
|
||||
//
|
||||
// Note: Because data objects may be read out of order (or not
|
||||
// at all), we can only apply delta encoding within a single
|
||||
// object. This is handled implicitly by tracking prevFile and
|
||||
// prevLine as fields of exportWriter.
|
||||
|
||||
if file == w.prevFile {
|
||||
delta := line - w.prevLine
|
||||
w.int64(delta)
|
||||
if delta == deltaNewFile {
|
||||
w.int64(-1)
|
||||
}
|
||||
} else {
|
||||
w.int64(deltaNewFile)
|
||||
w.int64(line) // line >= 0
|
||||
w.string(file)
|
||||
w.prevFile = file
|
||||
}
|
||||
w.prevLine = line
|
||||
}
|
||||
|
||||
func (w *exportWriter) pkg(pkg *types.Package) {
|
||||
// Ensure any referenced packages are declared in the main index.
|
||||
w.p.allPkgs[pkg] = true
|
||||
|
||||
w.string(w.exportPath(pkg))
|
||||
}
|
||||
|
||||
func (w *exportWriter) qualifiedIdent(obj types.Object) {
|
||||
// Ensure any referenced declarations are written out too.
|
||||
w.p.pushDecl(obj)
|
||||
|
||||
w.string(obj.Name())
|
||||
w.pkg(obj.Pkg())
|
||||
}
|
||||
|
||||
func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
|
||||
w.data.uint64(w.p.typOff(t, pkg))
|
||||
}
|
||||
|
||||
func (p *iexporter) newWriter() *exportWriter {
|
||||
return &exportWriter{p: p}
|
||||
}
|
||||
|
||||
func (w *exportWriter) flush() uint64 {
|
||||
off := uint64(w.p.data0.Len())
|
||||
io.Copy(&w.p.data0, &w.data)
|
||||
return off
|
||||
}
|
||||
|
||||
func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
|
||||
off, ok := p.typIndex[t]
|
||||
if !ok {
|
||||
w := p.newWriter()
|
||||
w.doTyp(t, pkg)
|
||||
off = predeclReserved + w.flush()
|
||||
p.typIndex[t] = off
|
||||
}
|
||||
return off
|
||||
}
|
||||
|
||||
func (w *exportWriter) startType(k itag) {
|
||||
w.data.uint64(uint64(k))
|
||||
}
|
||||
|
||||
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
|
||||
switch t := t.(type) {
|
||||
case *types.Named:
|
||||
w.startType(definedType)
|
||||
w.qualifiedIdent(t.Obj())
|
||||
|
||||
case *types.Pointer:
|
||||
w.startType(pointerType)
|
||||
w.typ(t.Elem(), pkg)
|
||||
|
||||
case *types.Slice:
|
||||
w.startType(sliceType)
|
||||
w.typ(t.Elem(), pkg)
|
||||
|
||||
case *types.Array:
|
||||
w.startType(arrayType)
|
||||
w.uint64(uint64(t.Len()))
|
||||
w.typ(t.Elem(), pkg)
|
||||
|
||||
case *types.Chan:
|
||||
w.startType(chanType)
|
||||
// 1 RecvOnly; 2 SendOnly; 3 SendRecv
|
||||
var dir uint64
|
||||
switch t.Dir() {
|
||||
case types.RecvOnly:
|
||||
dir = 1
|
||||
case types.SendOnly:
|
||||
dir = 2
|
||||
case types.SendRecv:
|
||||
dir = 3
|
||||
}
|
||||
w.uint64(dir)
|
||||
w.typ(t.Elem(), pkg)
|
||||
|
||||
case *types.Map:
|
||||
w.startType(mapType)
|
||||
w.typ(t.Key(), pkg)
|
||||
w.typ(t.Elem(), pkg)
|
||||
|
||||
case *types.Signature:
|
||||
w.startType(signatureType)
|
||||
w.setPkg(pkg, true)
|
||||
w.signature(t)
|
||||
|
||||
case *types.Struct:
|
||||
w.startType(structType)
|
||||
w.setPkg(pkg, true)
|
||||
|
||||
n := t.NumFields()
|
||||
w.uint64(uint64(n))
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
w.pos(f.Pos())
|
||||
w.string(f.Name())
|
||||
w.typ(f.Type(), pkg)
|
||||
w.bool(f.Anonymous())
|
||||
w.string(t.Tag(i)) // note (or tag)
|
||||
}
|
||||
|
||||
case *types.Interface:
|
||||
w.startType(interfaceType)
|
||||
w.setPkg(pkg, true)
|
||||
|
||||
n := t.NumEmbeddeds()
|
||||
w.uint64(uint64(n))
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Embedded(i)
|
||||
w.pos(f.Obj().Pos())
|
||||
w.typ(f.Obj().Type(), f.Obj().Pkg())
|
||||
}
|
||||
|
||||
n = t.NumExplicitMethods()
|
||||
w.uint64(uint64(n))
|
||||
for i := 0; i < n; i++ {
|
||||
m := t.ExplicitMethod(i)
|
||||
w.pos(m.Pos())
|
||||
w.string(m.Name())
|
||||
sig, _ := m.Type().(*types.Signature)
|
||||
w.signature(sig)
|
||||
}
|
||||
|
||||
default:
|
||||
panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
|
||||
}
|
||||
}
|
||||
|
||||
func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
|
||||
if write {
|
||||
w.pkg(pkg)
|
||||
}
|
||||
|
||||
w.currPkg = pkg
|
||||
}
|
||||
|
||||
func (w *exportWriter) signature(sig *types.Signature) {
|
||||
w.paramList(sig.Params())
|
||||
w.paramList(sig.Results())
|
||||
if sig.Params().Len() > 0 {
|
||||
w.bool(sig.Variadic())
|
||||
}
|
||||
}
|
||||
|
||||
func (w *exportWriter) paramList(tup *types.Tuple) {
|
||||
n := tup.Len()
|
||||
w.uint64(uint64(n))
|
||||
for i := 0; i < n; i++ {
|
||||
w.param(tup.At(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (w *exportWriter) param(obj types.Object) {
|
||||
w.pos(obj.Pos())
|
||||
w.localIdent(obj)
|
||||
w.typ(obj.Type(), obj.Pkg())
|
||||
}
|
||||
|
||||
func (w *exportWriter) value(typ types.Type, v constant.Value) {
|
||||
w.typ(typ, nil)
|
||||
|
||||
switch v.Kind() {
|
||||
case constant.Bool:
|
||||
w.bool(constant.BoolVal(v))
|
||||
case constant.Int:
|
||||
var i big.Int
|
||||
if i64, exact := constant.Int64Val(v); exact {
|
||||
i.SetInt64(i64)
|
||||
} else if ui64, exact := constant.Uint64Val(v); exact {
|
||||
i.SetUint64(ui64)
|
||||
} else {
|
||||
i.SetString(v.ExactString(), 10)
|
||||
}
|
||||
w.mpint(&i, typ)
|
||||
case constant.Float:
|
||||
f := constantToFloat(v)
|
||||
w.mpfloat(f, typ)
|
||||
case constant.Complex:
|
||||
w.mpfloat(constantToFloat(constant.Real(v)), typ)
|
||||
w.mpfloat(constantToFloat(constant.Imag(v)), typ)
|
||||
case constant.String:
|
||||
w.string(constant.StringVal(v))
|
||||
case constant.Unknown:
|
||||
// package contains type errors
|
||||
default:
|
||||
panic(internalErrorf("unexpected value %v (%T)", v, v))
|
||||
}
|
||||
}
|
||||
|
||||
// constantToFloat converts a constant.Value with kind constant.Float to a
|
||||
// big.Float.
|
||||
func constantToFloat(x constant.Value) *big.Float {
|
||||
assert(x.Kind() == constant.Float)
|
||||
// Use the same floating-point precision (512) as cmd/compile
|
||||
// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
|
||||
const mpprec = 512
|
||||
var f big.Float
|
||||
f.SetPrec(mpprec)
|
||||
if v, exact := constant.Float64Val(x); exact {
|
||||
// float64
|
||||
f.SetFloat64(v)
|
||||
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
|
||||
// TODO(gri): add big.Rat accessor to constant.Value.
|
||||
n := valueToRat(num)
|
||||
d := valueToRat(denom)
|
||||
f.SetRat(n.Quo(n, d))
|
||||
} else {
|
||||
// Value too large to represent as a fraction => inaccessible.
|
||||
// TODO(gri): add big.Float accessor to constant.Value.
|
||||
_, ok := f.SetString(x.ExactString())
|
||||
assert(ok)
|
||||
}
|
||||
return &f
|
||||
}
|
||||
|
||||
// mpint exports a multi-precision integer.
|
||||
//
|
||||
// For unsigned types, small values are written out as a single
|
||||
// byte. Larger values are written out as a length-prefixed big-endian
|
||||
// byte string, where the length prefix is encoded as its complement.
|
||||
// For example, bytes 0, 1, and 2 directly represent the integer
|
||||
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a
// 1-, 2-, or 3-byte big-endian byte string follows.
|
||||
//
|
||||
// The encoding for signed types uses the same general approach as for
|
||||
// unsigned types, except small values use zig-zag encoding and the
|
||||
// bottom bit of length prefix byte for large values is reserved as a
|
||||
// sign bit.
|
||||
//
|
||||
// The exact boundary between small and large encodings varies
|
||||
// according to the maximum number of bytes needed to encode a value
|
||||
// of type typ. As a special case, 8-bit types are always encoded as a
|
||||
// single byte.
|
||||
//
|
||||
// TODO(mdempsky): Is this level of complexity really worthwhile?
|
||||
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
|
||||
basic, ok := typ.Underlying().(*types.Basic)
|
||||
if !ok {
|
||||
panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
|
||||
}
|
||||
|
||||
signed, maxBytes := intSize(basic)
|
||||
|
||||
negative := x.Sign() < 0
|
||||
if !signed && negative {
|
||||
panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
|
||||
}
|
||||
|
||||
b := x.Bytes()
|
||||
if len(b) > 0 && b[0] == 0 {
|
||||
panic(internalErrorf("leading zeros"))
|
||||
}
|
||||
if uint(len(b)) > maxBytes {
|
||||
panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
|
||||
}
|
||||
|
||||
maxSmall := 256 - maxBytes
|
||||
if signed {
|
||||
maxSmall = 256 - 2*maxBytes
|
||||
}
|
||||
if maxBytes == 1 {
|
||||
maxSmall = 256
|
||||
}
|
||||
|
||||
// Check if x can use small value encoding.
|
||||
if len(b) <= 1 {
|
||||
var ux uint
|
||||
if len(b) == 1 {
|
||||
ux = uint(b[0])
|
||||
}
|
||||
if signed {
|
||||
ux <<= 1
|
||||
if negative {
|
||||
ux--
|
||||
}
|
||||
}
|
||||
if ux < maxSmall {
|
||||
w.data.WriteByte(byte(ux))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
n := 256 - uint(len(b))
|
||||
if signed {
|
||||
n = 256 - 2*uint(len(b))
|
||||
if negative {
|
||||
n |= 1
|
||||
}
|
||||
}
|
||||
if n < maxSmall || n >= 256 {
|
||||
panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
|
||||
}
|
||||
|
||||
w.data.WriteByte(byte(n))
|
||||
w.data.Write(b)
|
||||
}
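
For signed types, the small-value branch above packs the magnitude and the sign into one byte with zig-zag encoding: shift left one bit and subtract one when the value is negative. A standalone sketch of that encode/decode pair; zigzagEncode and zigzagDecode are illustrative helpers mirroring mpint here and importReader.mpint further down:

package main

import "fmt"

// zigzagEncode mirrors the small-value branch of mpint above:
// |x| << 1, minus one when x is negative.
func zigzagEncode(x int64) uint64 {
	if x < 0 {
		return uint64(-x)<<1 - 1
	}
	return uint64(x) << 1
}

// zigzagDecode mirrors the reader in importReader.mpint further down:
// v = n >> 1, bit-complemented when the low bit is set.
func zigzagDecode(n uint64) int64 {
	v := int64(n >> 1)
	if n&1 != 0 {
		v = ^v
	}
	return v
}

func main() {
	for _, x := range []int64{0, 1, -1, 2, -2, 63} {
		n := zigzagEncode(x)
		fmt.Printf("%3d -> %3d -> %3d\n", x, n, zigzagDecode(n))
	}
	// 0->0, 1->2, -1->1, 2->4, -2->3, 63->126; decoding restores x.
}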
|
||||
|
||||
// mpfloat exports a multi-precision floating point number.
|
||||
//
|
||||
// The number's value is decomposed into mantissa × 2**exponent, where
|
||||
// mantissa is an integer. The value is written out as mantissa (as a
|
||||
// multi-precision integer) and then the exponent, except exponent is
|
||||
// omitted if mantissa is zero.
|
||||
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
|
||||
if f.IsInf() {
|
||||
panic("infinite constant")
|
||||
}
|
||||
|
||||
// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
|
||||
var mant big.Float
|
||||
exp := int64(f.MantExp(&mant))
|
||||
|
||||
// Scale so that mant is an integer.
|
||||
prec := mant.MinPrec()
|
||||
mant.SetMantExp(&mant, int(prec))
|
||||
exp -= int64(prec)
|
||||
|
||||
manti, acc := mant.Int(nil)
|
||||
if acc != big.Exact {
|
||||
panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
|
||||
}
|
||||
w.mpint(manti, typ)
|
||||
if manti.Sign() != 0 {
|
||||
w.int64(exp)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *exportWriter) bool(b bool) bool {
|
||||
var x uint64
|
||||
if b {
|
||||
x = 1
|
||||
}
|
||||
w.uint64(x)
|
||||
return b
|
||||
}
|
||||
|
||||
func (w *exportWriter) int64(x int64) { w.data.int64(x) }
|
||||
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
|
||||
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
|
||||
|
||||
func (w *exportWriter) localIdent(obj types.Object) {
|
||||
// Anonymous parameters.
|
||||
if obj == nil {
|
||||
w.string("")
|
||||
return
|
||||
}
|
||||
|
||||
name := obj.Name()
|
||||
if name == "_" {
|
||||
w.string("_")
|
||||
return
|
||||
}
|
||||
|
||||
w.string(name)
|
||||
}
|
||||
|
||||
type intWriter struct {
|
||||
bytes.Buffer
|
||||
}
|
||||
|
||||
func (w *intWriter) int64(x int64) {
|
||||
var buf [binary.MaxVarintLen64]byte
|
||||
n := binary.PutVarint(buf[:], x)
|
||||
w.Write(buf[:n])
|
||||
}
|
||||
|
||||
func (w *intWriter) uint64(x uint64) {
|
||||
var buf [binary.MaxVarintLen64]byte
|
||||
n := binary.PutUvarint(buf[:], x)
|
||||
w.Write(buf[:n])
|
||||
}
|
||||
|
||||
func assert(cond bool) {
|
||||
if !cond {
|
||||
panic("internal error: assertion failed")
|
||||
}
|
||||
}
|
||||
|
||||
// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
|
||||
|
||||
// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
|
||||
// a ready-to-use empty queue.
|
||||
type objQueue struct {
|
||||
ring []types.Object
|
||||
head, tail int
|
||||
}
|
||||
|
||||
// empty returns true if q contains no objects.
|
||||
func (q *objQueue) empty() bool {
|
||||
return q.head == q.tail
|
||||
}
|
||||
|
||||
// pushTail appends obj to the tail of the queue.
|
||||
func (q *objQueue) pushTail(obj types.Object) {
|
||||
if len(q.ring) == 0 {
|
||||
q.ring = make([]types.Object, 16)
|
||||
} else if q.head+len(q.ring) == q.tail {
|
||||
// Grow the ring.
|
||||
nring := make([]types.Object, len(q.ring)*2)
|
||||
// Copy the old elements.
|
||||
part := q.ring[q.head%len(q.ring):]
|
||||
if q.tail-q.head <= len(part) {
|
||||
part = part[:q.tail-q.head]
|
||||
copy(nring, part)
|
||||
} else {
|
||||
pos := copy(nring, part)
|
||||
copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
|
||||
}
|
||||
q.ring, q.head, q.tail = nring, 0, q.tail-q.head
|
||||
}
|
||||
|
||||
q.ring[q.tail%len(q.ring)] = obj
|
||||
q.tail++
|
||||
}
|
||||
|
||||
// popHead pops an object from the head of the queue. It panics if q is empty.
|
||||
func (q *objQueue) popHead() types.Object {
|
||||
if q.empty() {
|
||||
panic("dequeue empty")
|
||||
}
|
||||
obj := q.ring[q.head%len(q.ring)]
|
||||
q.head++
|
||||
return obj
|
||||
}
|
630 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go (generated, vendored, new file)
@@ -0,0 +1,630 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Indexed package import.
|
||||
// See cmd/compile/internal/gc/iexport.go for the export data format.
|
||||
|
||||
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
|
||||
|
||||
package gcimporter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type intReader struct {
|
||||
*bytes.Reader
|
||||
path string
|
||||
}
|
||||
|
||||
func (r *intReader) int64() int64 {
|
||||
i, err := binary.ReadVarint(r.Reader)
|
||||
if err != nil {
|
||||
errorf("import %q: read varint error: %v", r.path, err)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
func (r *intReader) uint64() uint64 {
|
||||
i, err := binary.ReadUvarint(r.Reader)
|
||||
if err != nil {
|
||||
errorf("import %q: read varint error: %v", r.path, err)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
const predeclReserved = 32
|
||||
|
||||
type itag uint64
|
||||
|
||||
const (
|
||||
// Types
|
||||
definedType itag = iota
|
||||
pointerType
|
||||
sliceType
|
||||
arrayType
|
||||
chanType
|
||||
mapType
|
||||
signatureType
|
||||
structType
|
||||
interfaceType
|
||||
)
|
||||
|
||||
// IImportData imports a package from the serialized package data
|
||||
// and returns the number of bytes consumed and a reference to the package.
|
||||
// If the export data version is not recognized or the format is otherwise
|
||||
// compromised, an error is returned.
|
||||
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
|
||||
const currentVersion = 1
|
||||
version := int64(-1)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
if version > currentVersion {
|
||||
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
|
||||
} else {
|
||||
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
r := &intReader{bytes.NewReader(data), path}
|
||||
|
||||
version = int64(r.uint64())
|
||||
switch version {
|
||||
case currentVersion, 0:
|
||||
default:
|
||||
errorf("unknown iexport format version %d", version)
|
||||
}
|
||||
|
||||
sLen := int64(r.uint64())
|
||||
dLen := int64(r.uint64())
|
||||
|
||||
whence, _ := r.Seek(0, io.SeekCurrent)
|
||||
stringData := data[whence : whence+sLen]
|
||||
declData := data[whence+sLen : whence+sLen+dLen]
|
||||
r.Seek(sLen+dLen, io.SeekCurrent)
|
||||
|
||||
p := iimporter{
|
||||
ipath: path,
|
||||
version: int(version),
|
||||
|
||||
stringData: stringData,
|
||||
stringCache: make(map[uint64]string),
|
||||
pkgCache: make(map[uint64]*types.Package),
|
||||
|
||||
declData: declData,
|
||||
pkgIndex: make(map[*types.Package]map[string]uint64),
|
||||
typCache: make(map[uint64]types.Type),
|
||||
|
||||
fake: fakeFileSet{
|
||||
fset: fset,
|
||||
files: make(map[string]*token.File),
|
||||
},
|
||||
}
|
||||
|
||||
for i, pt := range predeclared() {
|
||||
p.typCache[uint64(i)] = pt
|
||||
}
|
||||
|
||||
pkgList := make([]*types.Package, r.uint64())
|
||||
for i := range pkgList {
|
||||
pkgPathOff := r.uint64()
|
||||
pkgPath := p.stringAt(pkgPathOff)
|
||||
pkgName := p.stringAt(r.uint64())
|
||||
_ = r.uint64() // package height; unused by go/types
|
||||
|
||||
if pkgPath == "" {
|
||||
pkgPath = path
|
||||
}
|
||||
pkg := imports[pkgPath]
|
||||
if pkg == nil {
|
||||
pkg = types.NewPackage(pkgPath, pkgName)
|
||||
imports[pkgPath] = pkg
|
||||
} else if pkg.Name() != pkgName {
|
||||
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
|
||||
}
|
||||
|
||||
p.pkgCache[pkgPathOff] = pkg
|
||||
|
||||
nameIndex := make(map[string]uint64)
|
||||
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
|
||||
name := p.stringAt(r.uint64())
|
||||
nameIndex[name] = r.uint64()
|
||||
}
|
||||
|
||||
p.pkgIndex[pkg] = nameIndex
|
||||
pkgList[i] = pkg
|
||||
}
|
||||
if len(pkgList) == 0 {
|
||||
errorf("no packages found for %s", path)
|
||||
panic("unreachable")
|
||||
}
|
||||
p.ipkg = pkgList[0]
|
||||
names := make([]string, 0, len(p.pkgIndex[p.ipkg]))
|
||||
for name := range p.pkgIndex[p.ipkg] {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
p.doDecl(p.ipkg, name)
|
||||
}
|
||||
|
||||
for _, typ := range p.interfaceList {
|
||||
typ.Complete()
|
||||
}
|
||||
|
||||
// record all referenced packages as imports
|
||||
list := append(([]*types.Package)(nil), pkgList[1:]...)
|
||||
sort.Sort(byPath(list))
|
||||
p.ipkg.SetImports(list)
|
||||
|
||||
// package was imported completely and without errors
|
||||
p.ipkg.MarkComplete()
|
||||
|
||||
consumed, _ := r.Seek(0, io.SeekCurrent)
|
||||
return int(consumed), p.ipkg, nil
|
||||
}
|
||||
|
||||
type iimporter struct {
|
||||
ipath string
|
||||
ipkg *types.Package
|
||||
version int
|
||||
|
||||
stringData []byte
|
||||
stringCache map[uint64]string
|
||||
pkgCache map[uint64]*types.Package
|
||||
|
||||
declData []byte
|
||||
pkgIndex map[*types.Package]map[string]uint64
|
||||
typCache map[uint64]types.Type
|
||||
|
||||
fake fakeFileSet
|
||||
interfaceList []*types.Interface
|
||||
}
|
||||
|
||||
func (p *iimporter) doDecl(pkg *types.Package, name string) {
|
||||
// See if we've already imported this declaration.
|
||||
if obj := pkg.Scope().Lookup(name); obj != nil {
|
||||
return
|
||||
}
|
||||
|
||||
off, ok := p.pkgIndex[pkg][name]
|
||||
if !ok {
|
||||
errorf("%v.%v not in index", pkg, name)
|
||||
}
|
||||
|
||||
r := &importReader{p: p, currPkg: pkg}
|
||||
r.declReader.Reset(p.declData[off:])
|
||||
|
||||
r.obj(name)
|
||||
}
|
||||
|
||||
func (p *iimporter) stringAt(off uint64) string {
|
||||
if s, ok := p.stringCache[off]; ok {
|
||||
return s
|
||||
}
|
||||
|
||||
slen, n := binary.Uvarint(p.stringData[off:])
|
||||
if n <= 0 {
|
||||
errorf("varint failed")
|
||||
}
|
||||
spos := off + uint64(n)
|
||||
s := string(p.stringData[spos : spos+slen])
|
||||
p.stringCache[off] = s
|
||||
return s
|
||||
}
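
stringAt reads a string back from the offset recorded by iexporter.stringOff: a uvarint length followed by that many bytes. A tiny round trip over a standalone byte slice; the sample strings are illustrative:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var section bytes.Buffer
	offsets := map[string]uint64{}

	// Append strings the way iexporter.stringOff does.
	for _, s := range []string{"gcimporter", "Println", "gcimporter"} {
		if _, ok := offsets[s]; ok {
			continue // already in the section; reuse the recorded offset
		}
		offsets[s] = uint64(section.Len())
		var tmp [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(tmp[:], uint64(len(s)))
		section.Write(tmp[:n])
		section.WriteString(s)
	}

	// Read one back the way iimporter.stringAt does.
	data := section.Bytes()
	off := offsets["Println"]
	slen, n := binary.Uvarint(data[off:])
	s := string(data[off+uint64(n) : off+uint64(n)+slen])
	fmt.Println(s) // Println
}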
|
||||
|
||||
func (p *iimporter) pkgAt(off uint64) *types.Package {
|
||||
if pkg, ok := p.pkgCache[off]; ok {
|
||||
return pkg
|
||||
}
|
||||
path := p.stringAt(off)
|
||||
if path == p.ipath {
|
||||
return p.ipkg
|
||||
}
|
||||
errorf("missing package %q in %q", path, p.ipath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
|
||||
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
|
||||
return t
|
||||
}
|
||||
|
||||
if off < predeclReserved {
|
||||
errorf("predeclared type missing from cache: %v", off)
|
||||
}
|
||||
|
||||
r := &importReader{p: p}
|
||||
r.declReader.Reset(p.declData[off-predeclReserved:])
|
||||
t := r.doType(base)
|
||||
|
||||
if base == nil || !isInterface(t) {
|
||||
p.typCache[off] = t
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
type importReader struct {
|
||||
p *iimporter
|
||||
declReader bytes.Reader
|
||||
currPkg *types.Package
|
||||
prevFile string
|
||||
prevLine int64
|
||||
prevColumn int64
|
||||
}
|
||||
|
||||
func (r *importReader) obj(name string) {
|
||||
tag := r.byte()
|
||||
pos := r.pos()
|
||||
|
||||
switch tag {
|
||||
case 'A':
|
||||
typ := r.typ()
|
||||
|
||||
r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
|
||||
|
||||
case 'C':
|
||||
typ, val := r.value()
|
||||
|
||||
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
|
||||
|
||||
case 'F':
|
||||
sig := r.signature(nil)
|
||||
|
||||
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
|
||||
|
||||
case 'T':
|
||||
// Types can be recursive. We need to setup a stub
|
||||
// declaration before recursing.
|
||||
obj := types.NewTypeName(pos, r.currPkg, name, nil)
|
||||
named := types.NewNamed(obj, nil, nil)
|
||||
r.declare(obj)
|
||||
|
||||
underlying := r.p.typAt(r.uint64(), named).Underlying()
|
||||
named.SetUnderlying(underlying)
|
||||
|
||||
if !isInterface(underlying) {
|
||||
for n := r.uint64(); n > 0; n-- {
|
||||
mpos := r.pos()
|
||||
mname := r.ident()
|
||||
recv := r.param()
|
||||
msig := r.signature(recv)
|
||||
|
||||
named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
|
||||
}
|
||||
}
|
||||
|
||||
case 'V':
|
||||
typ := r.typ()
|
||||
|
||||
r.declare(types.NewVar(pos, r.currPkg, name, typ))
|
||||
|
||||
default:
|
||||
errorf("unexpected tag: %v", tag)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *importReader) declare(obj types.Object) {
|
||||
obj.Pkg().Scope().Insert(obj)
|
||||
}
|
||||
|
||||
func (r *importReader) value() (typ types.Type, val constant.Value) {
|
||||
typ = r.typ()
|
||||
|
||||
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
|
||||
case types.IsBoolean:
|
||||
val = constant.MakeBool(r.bool())
|
||||
|
||||
case types.IsString:
|
||||
val = constant.MakeString(r.string())
|
||||
|
||||
case types.IsInteger:
|
||||
val = r.mpint(b)
|
||||
|
||||
case types.IsFloat:
|
||||
val = r.mpfloat(b)
|
||||
|
||||
case types.IsComplex:
|
||||
re := r.mpfloat(b)
|
||||
im := r.mpfloat(b)
|
||||
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
|
||||
|
||||
default:
|
||||
if b.Kind() == types.Invalid {
|
||||
val = constant.MakeUnknown()
|
||||
return
|
||||
}
|
||||
errorf("unexpected type %v", typ) // panics
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
|
||||
if (b.Info() & types.IsUntyped) != 0 {
|
||||
return true, 64
|
||||
}
|
||||
|
||||
switch b.Kind() {
|
||||
case types.Float32, types.Complex64:
|
||||
return true, 3
|
||||
case types.Float64, types.Complex128:
|
||||
return true, 7
|
||||
}
|
||||
|
||||
signed = (b.Info() & types.IsUnsigned) == 0
|
||||
switch b.Kind() {
|
||||
case types.Int8, types.Uint8:
|
||||
maxBytes = 1
|
||||
case types.Int16, types.Uint16:
|
||||
maxBytes = 2
|
||||
case types.Int32, types.Uint32:
|
||||
maxBytes = 4
|
||||
default:
|
||||
maxBytes = 8
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *importReader) mpint(b *types.Basic) constant.Value {
|
||||
signed, maxBytes := intSize(b)
|
||||
|
||||
maxSmall := 256 - maxBytes
|
||||
if signed {
|
||||
maxSmall = 256 - 2*maxBytes
|
||||
}
|
||||
if maxBytes == 1 {
|
||||
maxSmall = 256
|
||||
}
|
||||
|
||||
n, _ := r.declReader.ReadByte()
|
||||
if uint(n) < maxSmall {
|
||||
v := int64(n)
|
||||
if signed {
|
||||
v >>= 1
|
||||
if n&1 != 0 {
|
||||
v = ^v
|
||||
}
|
||||
}
|
||||
return constant.MakeInt64(v)
|
||||
}
|
||||
|
||||
v := -n
|
||||
if signed {
|
||||
v = -(n &^ 1) >> 1
|
||||
}
|
||||
if v < 1 || uint(v) > maxBytes {
|
||||
errorf("weird decoding: %v, %v => %v", n, signed, v)
|
||||
}
|
||||
|
||||
buf := make([]byte, v)
|
||||
io.ReadFull(&r.declReader, buf)
|
||||
|
||||
// convert to little endian
|
||||
// TODO(gri) go/constant should have a more direct conversion function
|
||||
// (e.g., once it supports a big.Float based implementation)
|
||||
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
|
||||
buf[i], buf[j] = buf[j], buf[i]
|
||||
}
|
||||
|
||||
x := constant.MakeFromBytes(buf)
|
||||
if signed && n&1 != 0 {
|
||||
x = constant.UnaryOp(token.SUB, x, 0)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
	x := r.mpint(b)
	if constant.Sign(x) == 0 {
		return x
	}

	exp := r.int64()
	switch {
	case exp > 0:
		x = constant.Shift(x, token.SHL, uint(exp))
	case exp < 0:
		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
		x = constant.BinaryOp(x, token.QUO, d)
	}
	return x
}

func (r *importReader) ident() string {
	return r.string()
}

func (r *importReader) qualifiedIdent() (*types.Package, string) {
	name := r.string()
	pkg := r.pkg()
	return pkg, name
}

func (r *importReader) pos() token.Pos {
	if r.p.version >= 1 {
		r.posv1()
	} else {
		r.posv0()
	}

	if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
		return token.NoPos
	}
	return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
}

func (r *importReader) posv0() {
	delta := r.int64()
	if delta != deltaNewFile {
		r.prevLine += delta
	} else if l := r.int64(); l == -1 {
		r.prevLine += deltaNewFile
	} else {
		r.prevFile = r.string()
		r.prevLine = l
	}
}

func (r *importReader) posv1() {
	delta := r.int64()
	r.prevColumn += delta >> 1
	if delta&1 != 0 {
		delta = r.int64()
		r.prevLine += delta >> 1
		if delta&1 != 0 {
			r.prevFile = r.string()
		}
	}
}

func (r *importReader) typ() types.Type {
	return r.p.typAt(r.uint64(), nil)
}

func isInterface(t types.Type) bool {
	_, ok := t.(*types.Interface)
	return ok
}

func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string      { return r.p.stringAt(r.uint64()) }

func (r *importReader) doType(base *types.Named) types.Type {
	switch k := r.kind(); k {
	default:
		errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
		return nil

	case definedType:
		pkg, name := r.qualifiedIdent()
		r.p.doDecl(pkg, name)
		return pkg.Scope().Lookup(name).(*types.TypeName).Type()
	case pointerType:
		return types.NewPointer(r.typ())
	case sliceType:
		return types.NewSlice(r.typ())
	case arrayType:
		n := r.uint64()
		return types.NewArray(r.typ(), int64(n))
	case chanType:
		dir := chanDir(int(r.uint64()))
		return types.NewChan(dir, r.typ())
	case mapType:
		return types.NewMap(r.typ(), r.typ())
	case signatureType:
		r.currPkg = r.pkg()
		return r.signature(nil)

	case structType:
		r.currPkg = r.pkg()

		fields := make([]*types.Var, r.uint64())
		tags := make([]string, len(fields))
		for i := range fields {
			fpos := r.pos()
			fname := r.ident()
			ftyp := r.typ()
			emb := r.bool()
			tag := r.string()

			fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
			tags[i] = tag
		}
		return types.NewStruct(fields, tags)

	case interfaceType:
		r.currPkg = r.pkg()

		embeddeds := make([]types.Type, r.uint64())
		for i := range embeddeds {
			_ = r.pos()
			embeddeds[i] = r.typ()
		}

		methods := make([]*types.Func, r.uint64())
		for i := range methods {
			mpos := r.pos()
			mname := r.ident()

			// TODO(mdempsky): Matches bimport.go, but I
			// don't agree with this.
			var recv *types.Var
			if base != nil {
				recv = types.NewVar(token.NoPos, r.currPkg, "", base)
			}

			msig := r.signature(recv)
			methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
		}

		typ := newInterface(methods, embeddeds)
		r.p.interfaceList = append(r.p.interfaceList, typ)
		return typ
	}
}

func (r *importReader) kind() itag {
	return itag(r.uint64())
}

func (r *importReader) signature(recv *types.Var) *types.Signature {
	params := r.paramList()
	results := r.paramList()
	variadic := params.Len() > 0 && r.bool()
	return types.NewSignature(recv, params, results, variadic)
}

func (r *importReader) paramList() *types.Tuple {
	xs := make([]*types.Var, r.uint64())
	for i := range xs {
		xs[i] = r.param()
	}
	return types.NewTuple(xs...)
}

func (r *importReader) param() *types.Var {
	pos := r.pos()
	name := r.ident()
	typ := r.typ()
	return types.NewParam(pos, r.currPkg, name, typ)
}

func (r *importReader) bool() bool {
	return r.uint64() != 0
}

func (r *importReader) int64() int64 {
	n, err := binary.ReadVarint(&r.declReader)
	if err != nil {
		errorf("readVarint: %v", err)
	}
	return n
}

func (r *importReader) uint64() uint64 {
	n, err := binary.ReadUvarint(&r.declReader)
	if err != nil {
		errorf("readUvarint: %v", err)
	}
	return n
}

func (r *importReader) byte() byte {
	x, err := r.declReader.ReadByte()
	if err != nil {
		errorf("declReader.ReadByte: %v", err)
	}
	return x
}
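
The int64 and uint64 readers above bottom out in the standard library's varint encoding (binary.ReadVarint / binary.ReadUvarint over the decl reader). A self-contained round-trip sketch of that encoding, separate from the vendored file and using only encoding/binary and bytes:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode a signed and an unsigned value with the same varint scheme
	// that binary.ReadVarint / binary.ReadUvarint expect.
	buf := make([]byte, binary.MaxVarintLen64)

	n := binary.PutVarint(buf, -300)
	v, err := binary.ReadVarint(bytes.NewReader(buf[:n]))
	fmt.Println(v, err) // -300 <nil>

	n = binary.PutUvarint(buf, 300)
	u, err := binary.ReadUvarint(bytes.NewReader(buf[:n]))
	fmt.Println(u, err) // 300 <nil>
}
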
21
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.11

package gcimporter

import "go/types"

func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
	named := make([]*types.Named, len(embeddeds))
	for i, e := range embeddeds {
		var ok bool
		named[i], ok = e.(*types.Named)
		if !ok {
			panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
		}
	}
	return types.NewInterface(methods, named)
}
13
vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11

package gcimporter

import "go/types"

func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
	return types.NewInterfaceType(methods, embeddeds)
}
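
These two build-tagged stubs pick between types.NewInterface (pre-Go 1.11, which only accepts *types.Named embeddings) and types.NewInterfaceType. A standalone sketch, outside this diff, of constructing an interface the Go 1.11+ way with go/types and go/token; the package path and method are illustrative assumptions:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// A throwaway package to own the method (path and name are illustrative).
	pkg := types.NewPackage("example.com/demo", "demo")

	// Build the method set for interface{ Close() error }.
	errType := types.Universe.Lookup("error").Type()
	results := types.NewTuple(types.NewVar(token.NoPos, pkg, "", errType))
	sig := types.NewSignature(nil, nil, results, false)
	closeFn := types.NewFunc(token.NoPos, pkg, "Close", sig)

	// Go 1.11+: embeddeds may be arbitrary types, not just *types.Named.
	iface := types.NewInterfaceType([]*types.Func{closeFn}, nil)
	iface.Complete() // an interface must be completed before use

	fmt.Println(iface) // prints something like: interface{Close() error}
}

Complete must be called before the interface is used, which is presumably why the importer collects constructed interfaces in interfaceList above rather than completing each one immediately.
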
23
vendor/golang.org/x/xerrors/doc.go
generated
vendored
@ -4,22 +4,19 @@
// Package xerrors implements functions to manipulate errors.
//
// This package supports transitioning to the Go 2 proposal for error values:
// This package is based on the Go 2 proposal for error values:
//   https://golang.org/design/29934-error-values
//
// Most of the functions and types in this package will be incorporated into the
// standard library's errors package in Go 1.13; the behavior of this package's
// Errorf function will be incorporated into the standard library's fmt.Errorf.
// Use this package to get equivalent behavior in all supported Go versions. For
// example, create errors using
// These functions were incorporated into the standard library's errors package
// in Go 1.13:
// - Is
// - As
// - Unwrap
//
//    xerrors.New("write failed")
// Also, Errorf's %w verb was incorporated into fmt.Errorf.
//
// or
// Use this package to get equivalent behavior in all supported Go versions.
//
//    xerrors.Errorf("while reading: %v", err)
//
// If you want your error type to participate in the new formatting
// implementation for %v and %+v, provide it with a Format method that calls
// xerrors.FormatError, as shown in the example for FormatError.
// No other features of this package were included in Go 1.13, and at present
// there are no plans to include any of them.
package xerrors // import "golang.org/x/xerrors"
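
The revised doc comment above spells out what moved into the standard library in Go 1.13: Is, As, Unwrap, and Errorf's %w verb. A minimal sketch of that behavior through this vendored xerrors module; the wrapping message is an illustrative assumption:

package main

import (
	"fmt"
	"os"

	"golang.org/x/xerrors"
)

func main() {
	base := xerrors.New("write failed")
	wrapped := xerrors.Errorf("saving dashboard: %w", base) // %w records the cause

	fmt.Println(xerrors.Is(wrapped, base))       // true
	fmt.Println(xerrors.Unwrap(wrapped) == base) // true

	var pathErr *os.PathError
	fmt.Println(xerrors.As(wrapped, &pathErr)) // false: no *os.PathError in the chain
}
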
17
vendor/modules.txt
vendored
@ -240,7 +240,7 @@ github.com/stretchr/testify/require
github.com/teris-io/shortid
# github.com/ua-parser/uap-go v0.0.0-20190826212731-daf92ba38329
github.com/ua-parser/uap-go/uaparser
# github.com/uber/jaeger-client-go v2.16.0+incompatible
# github.com/uber/jaeger-client-go v2.20.1+incompatible
github.com/uber/jaeger-client-go
github.com/uber/jaeger-client-go/config
github.com/uber/jaeger-client-go/internal/baggage
@ -259,7 +259,7 @@ github.com/uber/jaeger-client-go/thrift-gen/zipkincore
github.com/uber/jaeger-client-go/transport
github.com/uber/jaeger-client-go/utils
github.com/uber/jaeger-client-go/zipkin
# github.com/uber/jaeger-lib v2.0.0+incompatible
# github.com/uber/jaeger-lib v2.2.0+incompatible
github.com/uber/jaeger-lib/metrics
# github.com/unknwon/com v1.0.1
github.com/unknwon/com
@ -268,12 +268,17 @@ github.com/yudai/gojsondiff
github.com/yudai/gojsondiff/formatter
# github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82
github.com/yudai/golcs
# golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392
# go.uber.org/atomic v1.5.1
go.uber.org/atomic
# golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/md4
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/ripemd160
# golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f
golang.org/x/lint
golang.org/x/lint/golint
# golang.org/x/net v0.0.0-20190923162816-aa69164e4478
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@ -298,7 +303,11 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
# golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/gcimporter
# golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/appengine v1.6.1
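
The vendored module list above pins github.com/uber/jaeger-client-go at v2.20.1 and includes its config subpackage. A sketch, not taken from Grafana's code, of how that package's environment-driven setup is typically used (config.FromEnv reads the JAEGER_* variables); the service name is an illustrative assumption:

package main

import (
	"log"

	"github.com/opentracing/opentracing-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	// FromEnv reads the JAEGER_* environment variables
	// (JAEGER_AGENT_HOST, JAEGER_SAMPLER_TYPE, JAEGER_SAMPLER_PARAM, ...).
	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		log.Fatalf("reading Jaeger config from environment: %v", err)
	}
	if cfg.ServiceName == "" {
		cfg.ServiceName = "example-service" // fallback if JAEGER_SERVICE_NAME is unset
	}

	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("initializing Jaeger tracer: %v", err)
	}
	defer closer.Close()

	opentracing.SetGlobalTracer(tracer)
	log.Println("tracer installed")
}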