Mirror of https://github.com/grafana/grafana.git (synced 2025-02-25 18:55:37 -06:00)

Commit a29056966c: Use v0.39.0 of SDK (#23388)
Parent commit: 242db64158
go.mod (7 lines changed)
@@ -30,9 +30,9 @@ require (
 github.com/gorilla/websocket v1.4.1
 github.com/gosimple/slug v1.4.2
 github.com/grafana/grafana-plugin-model v0.0.0-20190930120109-1fc953a61fb4
-github.com/grafana/grafana-plugin-sdk-go v0.35.0
+github.com/grafana/grafana-plugin-sdk-go v0.39.0
 github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
-github.com/hashicorp/go-plugin v1.0.1
+github.com/hashicorp/go-plugin v1.2.2
 github.com/hashicorp/go-version v1.1.0
 github.com/inconshreveable/log15 v0.0.0-20180818164646-67afb5ed74ec
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
@@ -76,8 +76,7 @@ require (
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
 golang.org/x/tools v0.0.0-20191213221258-04c2e8eff935 // indirect
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect
-google.golang.org/grpc v1.23.1
+google.golang.org/grpc v1.27.1
 gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
 gopkg.in/ini.v1 v1.46.0
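The exact commands used are not recorded in this commit, but a dependency bump like the one above is normally produced with the Go toolchain rather than by hand-editing go.mod; a plausible sequence (an assumption, not part of the diff) is:

go get github.com/grafana/grafana-plugin-sdk-go@v0.39.0
go get github.com/hashicorp/go-plugin@v1.2.2
go mod tidy
go mod vendor    # regenerates the vendor/ tree, which is why the vendored protobuf files below also change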
go.sum (27 lines changed)
@@ -32,6 +32,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
@@ -59,6 +60,8 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64=
github.com/facebookgo/inject v0.0.0-20180706035515-f23751cae28b h1:V6c4/dSTNhSaNn4c5ulbakfv277qCvs7byFYv7P83iQ=
@@ -103,6 +106,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -128,12 +133,13 @@ github.com/gosimple/slug v1.4.2 h1:jDmprx3q/9Lfk4FkGZtvzDQ9Cj9eAmsjzeQGp24PeiQ=
github.com/gosimple/slug v1.4.2/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0=
github.com/grafana/grafana-plugin-model v0.0.0-20190930120109-1fc953a61fb4 h1:SPdxCL9BChFTlyi0Khv64vdCW4TMna8+sxL7+Chx+Ag=
github.com/grafana/grafana-plugin-model v0.0.0-20190930120109-1fc953a61fb4/go.mod h1:nc0XxBzjeGcrMltCDw269LoWF9S8ibhgxolCdA1R8To=
github.com/grafana/grafana-plugin-sdk-go v0.35.0 h1:IxNaNq8hN3ShQ804FURFOd1ehbKOmFROztY+8vohhW8=
github.com/grafana/grafana-plugin-sdk-go v0.35.0/go.mod h1:zX/Zz/HYDAkL1NxffOZeixqPqIVVoCTWI2AuFy4J+V4=
github.com/grafana/grafana-plugin-sdk-go v0.39.0 h1:tPP83HeY9gN4q8O3tYka1vd82OQ/3CFdwx4QeEhJ0Qc=
github.com/grafana/grafana-plugin-sdk-go v0.39.0/go.mod h1:xRhfTHl+Dkqf2Py6Lr4pcHBC5pm8/N+IwPJ0R/iAHMM=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-plugin v1.2.2 h1:mgDpq0PkoK5gck2w4ivaMpWRHv/matdOR4xmeScmf/w=
github.com/hashicorp/go-plugin v1.2.2/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0=
github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -144,6 +150,8 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/log15 v0.0.0-20180818164646-67afb5ed74ec h1:CGkYB1Q7DSsH/ku+to+foV4agt2F2miquaLUgF6L178=
github.com/inconshreveable/log15 v0.0.0-20180818164646-67afb5ed74ec/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
@@ -235,6 +243,7 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0 h1:ElTg5tNp4DqfV7UQjDqv2+RJlNzsDtvNAWccbItceIE=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -331,6 +340,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -403,17 +413,20 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored; 2 lines changed)
@@ -393,7 +393,7 @@ func (p *Buffer) Bytes() []byte { return p.buf }
 // than relying on this API.
 //
 // If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
+// by keys in lexicographical order. This is an implementation detail and
 // subject to change.
 func (p *Buffer) SetDeterministic(deterministic bool) {
 	p.deterministic = deterministic
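The SetDeterministic behaviour documented in the comment above can be exercised as in the following sketch. It is illustrative only and not part of this commit; it just combines the proto.Buffer API shown in the hunk with the generated Duration message that is also vendored in this change.

// Illustrative sketch, not part of the diff: deterministic marshaling with proto.Buffer.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/duration"
)

func main() {
	msg := &duration.Duration{Seconds: 3, Nanos: 500}

	buf := proto.NewBuffer(nil)
	// With deterministic serialization enabled, map fields (none in this
	// small example message) are emitted in lexicographical key order
	// instead of random map-iteration order.
	buf.SetDeterministic(true)
	if err := buf.Marshal(msg); err != nil {
		panic(err)
	}
	fmt.Printf("marshaled %d bytes\n", len(buf.Bytes()))
}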
vendor/github.com/golang/protobuf/proto/text.go (generated, vendored; 6 lines changed)
@@ -456,6 +456,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 	return nil
 }
 
+var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+
 // writeAny writes an arbitrary field.
 func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
 	v = reflect.Indirect(v)
@@ -519,8 +521,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
 		// mutating this value.
 		v = v.Addr()
 	}
-	if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
-		text, err := etm.MarshalText()
+	if v.Type().Implements(textMarshalerType) {
+		text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
 		if err != nil {
 			return err
 		}
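The hunk above swaps a direct type assertion for a check against the package-level textMarshalerType. The following standalone sketch is illustrative only (not taken from the commit) and shows the two styles side by side, using a standard-library type that implements encoding.TextMarshaler.

// Illustrative sketch: asserting on the value versus asking the reflect.Type.
package main

import (
	"encoding"
	"fmt"
	"net"
	"reflect"
)

var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()

func main() {
	v := reflect.ValueOf(net.ParseIP("127.0.0.1")) // net.IP implements encoding.TextMarshaler

	// Old style: type assertion on the concrete value.
	if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
		text, _ := etm.MarshalText()
		fmt.Println("assertion:", string(text))
	}

	// New style: ask the reflect.Type whether it implements the interface,
	// then assert unconditionally.
	if v.Type().Implements(textMarshalerType) {
		text, _ := v.Interface().(encoding.TextMarshaler).MarshalText()
		fmt.Println("Implements:", string(text))
	}
}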
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go (generated, vendored; new file, 2889 lines)
File diff suppressed because it is too large.
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto (generated, vendored; new file, 885 lines)
@@ -0,0 +1,885 @@
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Author: kenton@google.com (Kenton Varda)
|
||||
// Based on original Protocol Buffers design by
|
||||
// Sanjay Ghemawat, Jeff Dean, and others.
|
||||
//
|
||||
// The messages in this file describe the definitions found in .proto files.
|
||||
// A valid .proto file can be translated directly to a FileDescriptorProto
|
||||
// without any other information (e.g. without reading its imports).
|
||||
|
||||
|
||||
syntax = "proto2";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "DescriptorProtos";
|
||||
option csharp_namespace = "Google.Protobuf.Reflection";
|
||||
option objc_class_prefix = "GPB";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// descriptor.proto must be optimized for speed because reflection-based
|
||||
// algorithms don't work during bootstrapping.
|
||||
option optimize_for = SPEED;
|
||||
|
||||
// The protocol compiler can output a FileDescriptorSet containing the .proto
|
||||
// files it parses.
|
||||
message FileDescriptorSet {
|
||||
repeated FileDescriptorProto file = 1;
|
||||
}
|
||||
|
||||
// Describes a complete .proto file.
|
||||
message FileDescriptorProto {
|
||||
optional string name = 1; // file name, relative to root of source tree
|
||||
optional string package = 2; // e.g. "foo", "foo.bar", etc.
|
||||
|
||||
// Names of files imported by this file.
|
||||
repeated string dependency = 3;
|
||||
// Indexes of the public imported files in the dependency list above.
|
||||
repeated int32 public_dependency = 10;
|
||||
// Indexes of the weak imported files in the dependency list.
|
||||
// For Google-internal migration only. Do not use.
|
||||
repeated int32 weak_dependency = 11;
|
||||
|
||||
// All top-level definitions in this file.
|
||||
repeated DescriptorProto message_type = 4;
|
||||
repeated EnumDescriptorProto enum_type = 5;
|
||||
repeated ServiceDescriptorProto service = 6;
|
||||
repeated FieldDescriptorProto extension = 7;
|
||||
|
||||
optional FileOptions options = 8;
|
||||
|
||||
// This field contains optional information about the original source code.
|
||||
// You may safely remove this entire field without harming runtime
|
||||
// functionality of the descriptors -- the information is needed only by
|
||||
// development tools.
|
||||
optional SourceCodeInfo source_code_info = 9;
|
||||
|
||||
// The syntax of the proto file.
|
||||
// The supported values are "proto2" and "proto3".
|
||||
optional string syntax = 12;
|
||||
}
|
||||
|
||||
// Describes a message type.
|
||||
message DescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated FieldDescriptorProto field = 2;
|
||||
repeated FieldDescriptorProto extension = 6;
|
||||
|
||||
repeated DescriptorProto nested_type = 3;
|
||||
repeated EnumDescriptorProto enum_type = 4;
|
||||
|
||||
message ExtensionRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Exclusive.
|
||||
|
||||
optional ExtensionRangeOptions options = 3;
|
||||
}
|
||||
repeated ExtensionRange extension_range = 5;
|
||||
|
||||
repeated OneofDescriptorProto oneof_decl = 8;
|
||||
|
||||
optional MessageOptions options = 7;
|
||||
|
||||
// Range of reserved tag numbers. Reserved tag numbers may not be used by
|
||||
// fields or extension ranges in the same message. Reserved ranges may
|
||||
// not overlap.
|
||||
message ReservedRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Exclusive.
|
||||
}
|
||||
repeated ReservedRange reserved_range = 9;
|
||||
// Reserved field names, which may not be used by fields in the same message.
|
||||
// A given name may only be reserved once.
|
||||
repeated string reserved_name = 10;
|
||||
}
|
||||
|
||||
message ExtensionRangeOptions {
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
// Describes a field within a message.
|
||||
message FieldDescriptorProto {
|
||||
enum Type {
|
||||
// 0 is reserved for errors.
|
||||
// Order is weird for historical reasons.
|
||||
TYPE_DOUBLE = 1;
|
||||
TYPE_FLOAT = 2;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
|
||||
// negative values are likely.
|
||||
TYPE_INT64 = 3;
|
||||
TYPE_UINT64 = 4;
|
||||
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
|
||||
// negative values are likely.
|
||||
TYPE_INT32 = 5;
|
||||
TYPE_FIXED64 = 6;
|
||||
TYPE_FIXED32 = 7;
|
||||
TYPE_BOOL = 8;
|
||||
TYPE_STRING = 9;
|
||||
// Tag-delimited aggregate.
|
||||
// Group type is deprecated and not supported in proto3. However, Proto3
|
||||
// implementations should still be able to parse the group wire format and
|
||||
// treat group fields as unknown fields.
|
||||
TYPE_GROUP = 10;
|
||||
TYPE_MESSAGE = 11; // Length-delimited aggregate.
|
||||
|
||||
// New in version 2.
|
||||
TYPE_BYTES = 12;
|
||||
TYPE_UINT32 = 13;
|
||||
TYPE_ENUM = 14;
|
||||
TYPE_SFIXED32 = 15;
|
||||
TYPE_SFIXED64 = 16;
|
||||
TYPE_SINT32 = 17; // Uses ZigZag encoding.
|
||||
TYPE_SINT64 = 18; // Uses ZigZag encoding.
|
||||
}
|
||||
|
||||
enum Label {
|
||||
// 0 is reserved for errors
|
||||
LABEL_OPTIONAL = 1;
|
||||
LABEL_REQUIRED = 2;
|
||||
LABEL_REPEATED = 3;
|
||||
}
|
||||
|
||||
optional string name = 1;
|
||||
optional int32 number = 3;
|
||||
optional Label label = 4;
|
||||
|
||||
// If type_name is set, this need not be set. If both this and type_name
|
||||
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
|
||||
optional Type type = 5;
|
||||
|
||||
// For message and enum types, this is the name of the type. If the name
|
||||
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
|
||||
// rules are used to find the type (i.e. first the nested types within this
|
||||
// message are searched, then within the parent, on up to the root
|
||||
// namespace).
|
||||
optional string type_name = 6;
|
||||
|
||||
// For extensions, this is the name of the type being extended. It is
|
||||
// resolved in the same manner as type_name.
|
||||
optional string extendee = 2;
|
||||
|
||||
// For numeric types, contains the original text representation of the value.
|
||||
// For booleans, "true" or "false".
|
||||
// For strings, contains the default text contents (not escaped in any way).
|
||||
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
|
||||
// TODO(kenton): Base-64 encode?
|
||||
optional string default_value = 7;
|
||||
|
||||
// If set, gives the index of a oneof in the containing type's oneof_decl
|
||||
// list. This field is a member of that oneof.
|
||||
optional int32 oneof_index = 9;
|
||||
|
||||
// JSON name of this field. The value is set by protocol compiler. If the
|
||||
// user has set a "json_name" option on this field, that option's value
|
||||
// will be used. Otherwise, it's deduced from the field's name by converting
|
||||
// it to camelCase.
|
||||
optional string json_name = 10;
|
||||
|
||||
optional FieldOptions options = 8;
|
||||
}
|
||||
|
||||
// Describes a oneof.
|
||||
message OneofDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional OneofOptions options = 2;
|
||||
}
|
||||
|
||||
// Describes an enum type.
|
||||
message EnumDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
repeated EnumValueDescriptorProto value = 2;
|
||||
|
||||
optional EnumOptions options = 3;
|
||||
|
||||
// Range of reserved numeric values. Reserved values may not be used by
|
||||
// entries in the same enum. Reserved ranges may not overlap.
|
||||
//
|
||||
// Note that this is distinct from DescriptorProto.ReservedRange in that it
|
||||
// is inclusive such that it can appropriately represent the entire int32
|
||||
// domain.
|
||||
message EnumReservedRange {
|
||||
optional int32 start = 1; // Inclusive.
|
||||
optional int32 end = 2; // Inclusive.
|
||||
}
|
||||
|
||||
// Range of reserved numeric values. Reserved numeric values may not be used
|
||||
// by enum values in the same enum declaration. Reserved ranges may not
|
||||
// overlap.
|
||||
repeated EnumReservedRange reserved_range = 4;
|
||||
|
||||
// Reserved enum value names, which may not be reused. A given name may only
|
||||
// be reserved once.
|
||||
repeated string reserved_name = 5;
|
||||
}
|
||||
|
||||
// Describes a value within an enum.
|
||||
message EnumValueDescriptorProto {
|
||||
optional string name = 1;
|
||||
optional int32 number = 2;
|
||||
|
||||
optional EnumValueOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a service.
|
||||
message ServiceDescriptorProto {
|
||||
optional string name = 1;
|
||||
repeated MethodDescriptorProto method = 2;
|
||||
|
||||
optional ServiceOptions options = 3;
|
||||
}
|
||||
|
||||
// Describes a method of a service.
|
||||
message MethodDescriptorProto {
|
||||
optional string name = 1;
|
||||
|
||||
// Input and output type names. These are resolved in the same way as
|
||||
// FieldDescriptorProto.type_name, but must refer to a message type.
|
||||
optional string input_type = 2;
|
||||
optional string output_type = 3;
|
||||
|
||||
optional MethodOptions options = 4;
|
||||
|
||||
// Identifies if client streams multiple client messages
|
||||
optional bool client_streaming = 5 [default = false];
|
||||
// Identifies if server streams multiple server messages
|
||||
optional bool server_streaming = 6 [default = false];
|
||||
}
|
||||
|
||||
|
||||
// ===================================================================
|
||||
// Options
|
||||
|
||||
// Each of the definitions above may have "options" attached. These are
|
||||
// just annotations which may cause code to be generated slightly differently
|
||||
// or may contain hints for code that manipulates protocol messages.
|
||||
//
|
||||
// Clients may define custom options as extensions of the *Options messages.
|
||||
// These extensions may not yet be known at parsing time, so the parser cannot
|
||||
// store the values in them. Instead it stores them in a field in the *Options
|
||||
// message called uninterpreted_option. This field must have the same name
|
||||
// across all *Options messages. We then use this field to populate the
|
||||
// extensions when we build a descriptor, at which point all protos have been
|
||||
// parsed and so all extensions are known.
|
||||
//
|
||||
// Extension numbers for custom options may be chosen as follows:
|
||||
// * For options which will only be used within a single application or
|
||||
// organization, or for experimental options, use field numbers 50000
|
||||
// through 99999. It is up to you to ensure that you do not use the
|
||||
// same number for multiple options.
|
||||
// * For options which will be published and used publicly by multiple
|
||||
// independent entities, e-mail protobuf-global-extension-registry@google.com
|
||||
// to reserve extension numbers. Simply provide your project name (e.g.
|
||||
// Objective-C plugin) and your project website (if available) -- there's no
|
||||
// need to explain how you intend to use them. Usually you only need one
|
||||
// extension number. You can declare multiple options with only one extension
|
||||
// number by putting them in a sub-message. See the Custom Options section of
|
||||
// the docs for examples:
|
||||
// https://developers.google.com/protocol-buffers/docs/proto#options
|
||||
// If this turns out to be popular, a web service will be set up
|
||||
// to automatically assign option numbers.
|
||||
|
||||
message FileOptions {
|
||||
|
||||
// Sets the Java package where classes generated from this .proto will be
|
||||
// placed. By default, the proto package is used, but this is often
|
||||
// inappropriate because proto packages do not normally start with backwards
|
||||
// domain names.
|
||||
optional string java_package = 1;
|
||||
|
||||
|
||||
// If set, all the classes from the .proto file are wrapped in a single
|
||||
// outer class with the given name. This applies to both Proto1
|
||||
// (equivalent to the old "--one_java_file" option) and Proto2 (where
|
||||
// a .proto always translates to a single class, but you may want to
|
||||
// explicitly choose the class name).
|
||||
optional string java_outer_classname = 8;
|
||||
|
||||
// If set true, then the Java code generator will generate a separate .java
|
||||
// file for each top-level message, enum, and service defined in the .proto
|
||||
// file. Thus, these types will *not* be nested inside the outer class
|
||||
// named by java_outer_classname. However, the outer class will still be
|
||||
// generated to contain the file's getDescriptor() method as well as any
|
||||
// top-level extensions defined in the file.
|
||||
optional bool java_multiple_files = 10 [default = false];
|
||||
|
||||
// This option does nothing.
|
||||
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
|
||||
|
||||
// If set true, then the Java2 code generator will generate code that
|
||||
// throws an exception whenever an attempt is made to assign a non-UTF-8
|
||||
// byte sequence to a string field.
|
||||
// Message reflection will do the same.
|
||||
// However, an extension field still accepts non-UTF-8 byte sequences.
|
||||
// This option has no effect on when used with the lite runtime.
|
||||
optional bool java_string_check_utf8 = 27 [default = false];
|
||||
|
||||
|
||||
// Generated classes can be optimized for speed or code size.
|
||||
enum OptimizeMode {
|
||||
SPEED = 1; // Generate complete code for parsing, serialization,
|
||||
// etc.
|
||||
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
|
||||
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
|
||||
}
|
||||
optional OptimizeMode optimize_for = 9 [default = SPEED];
|
||||
|
||||
// Sets the Go package where structs generated from this .proto will be
|
||||
// placed. If omitted, the Go package will be derived from the following:
|
||||
// - The basename of the package import path, if provided.
|
||||
// - Otherwise, the package statement in the .proto file, if present.
|
||||
// - Otherwise, the basename of the .proto file, without extension.
|
||||
optional string go_package = 11;
|
||||
|
||||
|
||||
|
||||
|
||||
// Should generic services be generated in each language? "Generic" services
|
||||
// are not specific to any particular RPC system. They are generated by the
|
||||
// main code generators in each language (without additional plugins).
|
||||
// Generic services were the only kind of service generation supported by
|
||||
// early versions of google.protobuf.
|
||||
//
|
||||
// Generic services are now considered deprecated in favor of using plugins
|
||||
// that generate code specific to your particular RPC system. Therefore,
|
||||
// these default to false. Old code which depends on generic services should
|
||||
// explicitly set them to true.
|
||||
optional bool cc_generic_services = 16 [default = false];
|
||||
optional bool java_generic_services = 17 [default = false];
|
||||
optional bool py_generic_services = 18 [default = false];
|
||||
optional bool php_generic_services = 42 [default = false];
|
||||
|
||||
// Is this file deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for everything in the file, or it will be completely ignored; in the very
|
||||
// least, this is a formalization for deprecating files.
|
||||
optional bool deprecated = 23 [default = false];
|
||||
|
||||
// Enables the use of arenas for the proto messages in this file. This applies
|
||||
// only to generated classes for C++.
|
||||
optional bool cc_enable_arenas = 31 [default = false];
|
||||
|
||||
|
||||
// Sets the objective c class prefix which is prepended to all objective c
|
||||
// generated classes from this .proto. There is no default.
|
||||
optional string objc_class_prefix = 36;
|
||||
|
||||
// Namespace for generated classes; defaults to the package.
|
||||
optional string csharp_namespace = 37;
|
||||
|
||||
// By default Swift generators will take the proto package and CamelCase it
|
||||
// replacing '.' with underscore and use that to prefix the types/symbols
|
||||
// defined. When this options is provided, they will use this value instead
|
||||
// to prefix the types/symbols defined.
|
||||
optional string swift_prefix = 39;
|
||||
|
||||
// Sets the php class prefix which is prepended to all php generated classes
|
||||
// from this .proto. Default is empty.
|
||||
optional string php_class_prefix = 40;
|
||||
|
||||
// Use this option to change the namespace of php generated classes. Default
|
||||
// is empty. When this option is empty, the package name will be used for
|
||||
// determining the namespace.
|
||||
optional string php_namespace = 41;
|
||||
|
||||
// Use this option to change the namespace of php generated metadata classes.
|
||||
// Default is empty. When this option is empty, the proto file name will be
|
||||
// used for determining the namespace.
|
||||
optional string php_metadata_namespace = 44;
|
||||
|
||||
// Use this option to change the package of ruby generated classes. Default
|
||||
// is empty. When this option is not set, the package name will be used for
|
||||
// determining the ruby package.
|
||||
optional string ruby_package = 45;
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here.
|
||||
// See the documentation for the "Options" section above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message.
|
||||
// See the documentation for the "Options" section above.
|
||||
extensions 1000 to max;
|
||||
|
||||
reserved 38;
|
||||
}
|
||||
|
||||
message MessageOptions {
|
||||
// Set true to use the old proto1 MessageSet wire format for extensions.
|
||||
// This is provided for backwards-compatibility with the MessageSet wire
|
||||
// format. You should not use this for any other reason: It's less
|
||||
// efficient, has fewer features, and is more complicated.
|
||||
//
|
||||
// The message must be defined exactly as follows:
|
||||
// message Foo {
|
||||
// option message_set_wire_format = true;
|
||||
// extensions 4 to max;
|
||||
// }
|
||||
// Note that the message cannot have any defined fields; MessageSets only
|
||||
// have extensions.
|
||||
//
|
||||
// All extensions of your type must be singular messages; e.g. they cannot
|
||||
// be int32s, enums, or repeated messages.
|
||||
//
|
||||
// Because this is an option, the above two restrictions are not enforced by
|
||||
// the protocol compiler.
|
||||
optional bool message_set_wire_format = 1 [default = false];
|
||||
|
||||
// Disables the generation of the standard "descriptor()" accessor, which can
|
||||
// conflict with a field of the same name. This is meant to make migration
|
||||
// from proto1 easier; new code should avoid fields named "descriptor".
|
||||
optional bool no_standard_descriptor_accessor = 2 [default = false];
|
||||
|
||||
// Is this message deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the message, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating messages.
|
||||
optional bool deprecated = 3 [default = false];
|
||||
|
||||
// Whether the message is an automatically generated map entry type for the
|
||||
// maps field.
|
||||
//
|
||||
// For maps fields:
|
||||
// map<KeyType, ValueType> map_field = 1;
|
||||
// The parsed descriptor looks like:
|
||||
// message MapFieldEntry {
|
||||
// option map_entry = true;
|
||||
// optional KeyType key = 1;
|
||||
// optional ValueType value = 2;
|
||||
// }
|
||||
// repeated MapFieldEntry map_field = 1;
|
||||
//
|
||||
// Implementations may choose not to generate the map_entry=true message, but
|
||||
// use a native map in the target language to hold the keys and values.
|
||||
// The reflection APIs in such implementations still need to work as
|
||||
// if the field is a repeated message field.
|
||||
//
|
||||
// NOTE: Do not set the option in .proto files. Always use the maps syntax
|
||||
// instead. The option should only be implicitly set by the proto compiler
|
||||
// parser.
|
||||
optional bool map_entry = 7;
|
||||
|
||||
reserved 8; // javalite_serializable
|
||||
reserved 9; // javanano_as_lite
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message FieldOptions {
|
||||
// The ctype option instructs the C++ code generator to use a different
|
||||
// representation of the field than it normally would. See the specific
|
||||
// options below. This option is not yet implemented in the open source
|
||||
// release -- sorry, we'll try to include it in a future version!
|
||||
optional CType ctype = 1 [default = STRING];
|
||||
enum CType {
|
||||
// Default mode.
|
||||
STRING = 0;
|
||||
|
||||
CORD = 1;
|
||||
|
||||
STRING_PIECE = 2;
|
||||
}
|
||||
// The packed option can be enabled for repeated primitive fields to enable
|
||||
// a more efficient representation on the wire. Rather than repeatedly
|
||||
// writing the tag and type for each element, the entire array is encoded as
|
||||
// a single length-delimited blob. In proto3, only explicit setting it to
|
||||
// false will avoid using packed encoding.
|
||||
optional bool packed = 2;
|
||||
|
||||
// The jstype option determines the JavaScript type used for values of the
|
||||
// field. The option is permitted only for 64 bit integral and fixed types
|
||||
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
|
||||
// is represented as JavaScript string, which avoids loss of precision that
|
||||
// can happen when a large value is converted to a floating point JavaScript.
|
||||
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
|
||||
// use the JavaScript "number" type. The behavior of the default option
|
||||
// JS_NORMAL is implementation dependent.
|
||||
//
|
||||
// This option is an enum to permit additional types to be added, e.g.
|
||||
// goog.math.Integer.
|
||||
optional JSType jstype = 6 [default = JS_NORMAL];
|
||||
enum JSType {
|
||||
// Use the default type.
|
||||
JS_NORMAL = 0;
|
||||
|
||||
// Use JavaScript strings.
|
||||
JS_STRING = 1;
|
||||
|
||||
// Use JavaScript numbers.
|
||||
JS_NUMBER = 2;
|
||||
}
|
||||
|
||||
// Should this field be parsed lazily? Lazy applies only to message-type
|
||||
// fields. It means that when the outer message is initially parsed, the
|
||||
// inner message's contents will not be parsed but instead stored in encoded
|
||||
// form. The inner message will actually be parsed when it is first accessed.
|
||||
//
|
||||
// This is only a hint. Implementations are free to choose whether to use
|
||||
// eager or lazy parsing regardless of the value of this option. However,
|
||||
// setting this option true suggests that the protocol author believes that
|
||||
// using lazy parsing on this field is worth the additional bookkeeping
|
||||
// overhead typically needed to implement it.
|
||||
//
|
||||
// This option does not affect the public interface of any generated code;
|
||||
// all method signatures remain the same. Furthermore, thread-safety of the
|
||||
// interface is not affected by this option; const methods remain safe to
|
||||
// call from multiple threads concurrently, while non-const methods continue
|
||||
// to require exclusive access.
|
||||
//
|
||||
//
|
||||
// Note that implementations may choose not to check required fields within
|
||||
// a lazy sub-message. That is, calling IsInitialized() on the outer message
|
||||
// may return true even if the inner message has missing required fields.
|
||||
// This is necessary because otherwise the inner message would have to be
|
||||
// parsed in order to perform the check, defeating the purpose of lazy
|
||||
// parsing. An implementation which chooses not to check required fields
|
||||
// must be consistent about it. That is, for any particular sub-message, the
|
||||
// implementation must either *always* check its required fields, or *never*
|
||||
// check its required fields, regardless of whether or not the message has
|
||||
// been parsed.
|
||||
optional bool lazy = 5 [default = false];
|
||||
|
||||
// Is this field deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for accessors, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating fields.
|
||||
optional bool deprecated = 3 [default = false];
|
||||
|
||||
// For Google-internal migration only. Do not use.
|
||||
optional bool weak = 10 [default = false];
|
||||
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
|
||||
reserved 4; // removed jtype
|
||||
}
|
||||
|
||||
message OneofOptions {
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumOptions {
|
||||
|
||||
// Set this option to true to allow mapping different tag names to the same
|
||||
// value.
|
||||
optional bool allow_alias = 2;
|
||||
|
||||
// Is this enum deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum, or it will be completely ignored; in the very least, this
|
||||
// is a formalization for deprecating enums.
|
||||
optional bool deprecated = 3 [default = false];
|
||||
|
||||
reserved 5; // javanano_as_lite
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message EnumValueOptions {
|
||||
// Is this enum value deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the enum value, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating enum values.
|
||||
optional bool deprecated = 1 [default = false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message ServiceOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this service deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the service, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating services.
|
||||
optional bool deprecated = 33 [default = false];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
message MethodOptions {
|
||||
|
||||
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
|
||||
// framework. We apologize for hoarding these numbers to ourselves, but
|
||||
// we were already using them long before we decided to release Protocol
|
||||
// Buffers.
|
||||
|
||||
// Is this method deprecated?
|
||||
// Depending on the target platform, this can emit Deprecated annotations
|
||||
// for the method, or it will be completely ignored; in the very least,
|
||||
// this is a formalization for deprecating methods.
|
||||
optional bool deprecated = 33 [default = false];
|
||||
|
||||
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
|
||||
// or neither? HTTP based RPC implementation may choose GET verb for safe
|
||||
// methods, and PUT verb for idempotent methods instead of the default POST.
|
||||
enum IdempotencyLevel {
|
||||
IDEMPOTENCY_UNKNOWN = 0;
|
||||
NO_SIDE_EFFECTS = 1; // implies idempotent
|
||||
IDEMPOTENT = 2; // idempotent, but may have side effects
|
||||
}
|
||||
optional IdempotencyLevel idempotency_level = 34
|
||||
[default = IDEMPOTENCY_UNKNOWN];
|
||||
|
||||
// The parser stores options it doesn't recognize here. See above.
|
||||
repeated UninterpretedOption uninterpreted_option = 999;
|
||||
|
||||
// Clients can define custom options in extensions of this message. See above.
|
||||
extensions 1000 to max;
|
||||
}
|
||||
|
||||
|
||||
// A message representing a option the parser does not recognize. This only
|
||||
// appears in options protos created by the compiler::Parser class.
|
||||
// DescriptorPool resolves these when building Descriptor objects. Therefore,
|
||||
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
|
||||
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
|
||||
// in them.
|
||||
message UninterpretedOption {
|
||||
// The name of the uninterpreted option. Each string represents a segment in
|
||||
// a dot-separated name. is_extension is true iff a segment represents an
|
||||
// extension (denoted with parentheses in options specs in .proto files).
|
||||
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
|
||||
// "foo.(bar.baz).qux".
|
||||
message NamePart {
|
||||
required string name_part = 1;
|
||||
required bool is_extension = 2;
|
||||
}
|
||||
repeated NamePart name = 2;
|
||||
|
||||
// The value of the uninterpreted option, in whatever type the tokenizer
|
||||
// identified it as during parsing. Exactly one of these should be set.
|
||||
optional string identifier_value = 3;
|
||||
optional uint64 positive_int_value = 4;
|
||||
optional int64 negative_int_value = 5;
|
||||
optional double double_value = 6;
|
||||
optional bytes string_value = 7;
|
||||
optional string aggregate_value = 8;
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
// Optional source code info
|
||||
|
||||
// Encapsulates information about the original source file from which a
|
||||
// FileDescriptorProto was generated.
|
||||
message SourceCodeInfo {
|
||||
// A Location identifies a piece of source code in a .proto file which
|
||||
// corresponds to a particular definition. This information is intended
|
||||
// to be useful to IDEs, code indexers, documentation generators, and similar
|
||||
// tools.
|
||||
//
|
||||
// For example, say we have a file like:
|
||||
// message Foo {
|
||||
// optional string foo = 1;
|
||||
// }
|
||||
// Let's look at just the field definition:
|
||||
// optional string foo = 1;
|
||||
// ^ ^^ ^^ ^ ^^^
|
||||
// a bc de f ghi
|
||||
// We have the following locations:
|
||||
// span path represents
|
||||
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
|
||||
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
|
||||
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
|
||||
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
|
||||
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
|
||||
//
|
||||
// Notes:
|
||||
// - A location may refer to a repeated field itself (i.e. not to any
|
||||
// particular index within it). This is used whenever a set of elements are
|
||||
// logically enclosed in a single code segment. For example, an entire
|
||||
// extend block (possibly containing multiple extension definitions) will
|
||||
// have an outer location whose path refers to the "extensions" repeated
|
||||
// field without an index.
|
||||
// - Multiple locations may have the same path. This happens when a single
|
||||
// logical declaration is spread out across multiple places. The most
|
||||
// obvious example is the "extend" block again -- there may be multiple
|
||||
// extend blocks in the same scope, each of which will have the same path.
|
||||
// - A location's span is not always a subset of its parent's span. For
|
||||
// example, the "extendee" of an extension declaration appears at the
|
||||
// beginning of the "extend" block and is shared by all extensions within
|
||||
// the block.
|
||||
// - Just because a location's span is a subset of some other location's span
|
||||
// does not mean that it is a descendant. For example, a "group" defines
|
||||
// both a type and a field in a single declaration. Thus, the locations
|
||||
// corresponding to the type and field and their components will overlap.
|
||||
// - Code which tries to interpret locations should probably be designed to
|
||||
// ignore those that it doesn't understand, as more types of locations could
|
||||
// be recorded in the future.
|
||||
repeated Location location = 1;
|
||||
message Location {
|
||||
// Identifies which part of the FileDescriptorProto was defined at this
|
||||
// location.
|
||||
//
|
||||
// Each element is a field number or an index. They form a path from
|
||||
// the root FileDescriptorProto to the place where the definition. For
|
||||
// example, this path:
|
||||
// [ 4, 3, 2, 7, 1 ]
|
||||
// refers to:
|
||||
// file.message_type(3) // 4, 3
|
||||
// .field(7) // 2, 7
|
||||
// .name() // 1
|
||||
// This is because FileDescriptorProto.message_type has field number 4:
|
||||
// repeated DescriptorProto message_type = 4;
|
||||
// and DescriptorProto.field has field number 2:
|
||||
// repeated FieldDescriptorProto field = 2;
|
||||
// and FieldDescriptorProto.name has field number 1:
|
||||
// optional string name = 1;
|
||||
//
|
||||
// Thus, the above path gives the location of a field name. If we removed
|
||||
// the last element:
|
||||
// [ 4, 3, 2, 7 ]
|
||||
// this path refers to the whole field declaration (from the beginning
|
||||
// of the label to the terminating semicolon).
|
||||
repeated int32 path = 1 [packed = true];
|
||||
|
||||
// Always has exactly three or four elements: start line, start column,
|
||||
// end line (optional, otherwise assumed same as start line), end column.
|
||||
// These are packed into a single field for efficiency. Note that line
|
||||
// and column numbers are zero-based -- typically you will want to add
|
||||
// 1 to each before displaying to a user.
|
||||
repeated int32 span = 2 [packed = true];
|
||||
|
||||
// If this SourceCodeInfo represents a complete declaration, these are any
|
||||
// comments appearing before and after the declaration which appear to be
|
||||
// attached to the declaration.
|
||||
//
|
||||
// A series of line comments appearing on consecutive lines, with no other
|
||||
// tokens appearing on those lines, will be treated as a single comment.
|
||||
//
|
||||
// leading_detached_comments will keep paragraphs of comments that appear
|
||||
// before (but not connected to) the current element. Each paragraph,
|
||||
// separated by empty lines, will be one comment element in the repeated
|
||||
// field.
|
||||
//
|
||||
// Only the comment content is provided; comment markers (e.g. //) are
|
||||
// stripped out. For block comments, leading whitespace and an asterisk
|
||||
// will be stripped from the beginning of each line other than the first.
|
||||
// Newlines are included in the output.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// optional int32 foo = 1; // Comment attached to foo.
|
||||
// // Comment attached to bar.
|
||||
// optional int32 bar = 2;
|
||||
//
|
||||
// optional string baz = 3;
|
||||
// // Comment attached to baz.
|
||||
// // Another line attached to baz.
|
||||
//
|
||||
// // Comment attached to qux.
|
||||
// //
|
||||
// // Another line attached to qux.
|
||||
// optional double qux = 4;
|
||||
//
|
||||
// // Detached comment for corge. This is not leading or trailing comments
|
||||
// // to qux or corge because there are blank lines separating it from
|
||||
// // both.
|
||||
//
|
||||
// // Detached comment for corge paragraph 2.
|
||||
//
|
||||
// optional string corge = 5;
|
||||
// /* Block comment attached
|
||||
// * to corge. Leading asterisks
|
||||
// * will be removed. */
|
||||
// /* Block comment attached to
|
||||
// * grault. */
|
||||
// optional int32 grault = 6;
|
||||
//
|
||||
// // ignored detached comments.
|
||||
optional string leading_comments = 3;
|
||||
optional string trailing_comments = 4;
|
||||
repeated string leading_detached_comments = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// Describes the relationship between generated code and its original source
|
||||
// file. A GeneratedCodeInfo message is associated with only one generated
|
||||
// source file, but may contain references to different source .proto files.
|
||||
message GeneratedCodeInfo {
|
||||
// An Annotation connects some span of text in generated code to an element
|
||||
// of its generating .proto file.
|
||||
repeated Annotation annotation = 1;
|
||||
message Annotation {
|
||||
// Identifies the element in the original source .proto file. This field
|
||||
// is formatted the same as SourceCodeInfo.Location.path.
|
||||
repeated int32 path = 1 [packed = true];
|
||||
|
||||
// Identifies the filesystem path to the original source .proto.
|
||||
optional string source_file = 2;
|
||||
|
||||
// Identifies the starting offset in bytes in the generated code
|
||||
// that relates to the identified object.
|
||||
optional int32 begin = 3;
|
||||
|
||||
// Identifies the ending offset in bytes in the generated code that
|
||||
// relates to the identified offset. The end offset should be one past
|
||||
// the last relevant byte (so the length of the text = end - begin).
|
||||
optional int32 end = 4;
|
||||
}
|
||||
}
|
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go (generated, vendored; 7 lines changed)
@@ -102,7 +102,8 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 //
 type Any struct {
 	// A URL/resource name that uniquely identifies the type of the serialized
-	// protocol buffer message. The last segment of the URL's path must represent
+	// protocol buffer message. This string must contain at least
+	// one "/" character. The last segment of the URL's path must represent
 	// the fully qualified name of the type (as in
 	// `path/google.protobuf.Duration`). The name should be in a canonical form
 	// (e.g., leading "." is not accepted).
@@ -181,7 +182,9 @@ func init() {
 	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
 }
 
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+func init() {
+	proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4)
+}
 
 var fileDescriptor_b53526c13ae22eb4 = []byte{
 	// 185 bytes of a gzipped FileDescriptorProto
vendor/github.com/golang/protobuf/ptypes/any/any.proto (generated, vendored; 3 lines changed)
@@ -121,7 +121,8 @@ option objc_class_prefix = "GPB";
 //
 message Any {
   // A URL/resource name that uniquely identifies the type of the serialized
-  // protocol buffer message. The last segment of the URL's path must represent
+  // protocol buffer message. This string must contain at least
+  // one "/" character. The last segment of the URL's path must represent
   // the fully qualified name of the type (as in
   // `path/google.protobuf.Duration`). The name should be in a canonical form
   // (e.g., leading "." is not accepted).
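A hedged usage sketch (not part of this diff) of the Any message whose comment is updated above, using the ptypes helpers that ship in the same vendored module; the Duration message is used only as a convenient payload.

// Illustrative sketch: packing and unpacking google.protobuf.Any.
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &duration.Duration{Seconds: 42}

	// MarshalAny fills in the type URL described above
	// (e.g. "type.googleapis.com/google.protobuf.Duration") plus the
	// serialized payload bytes.
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl)

	// Unpack it back into a concrete message.
	dst := &duration.Duration{}
	if err := ptypes.UnmarshalAny(a, dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Seconds)
}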
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go (generated, vendored; 6 lines changed)
@@ -41,7 +41,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 // if (duration.seconds < 0 && duration.nanos > 0) {
 // duration.seconds += 1;
 // duration.nanos -= 1000000000;
-// } else if (durations.seconds > 0 && duration.nanos < 0) {
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
 // duration.seconds -= 1;
 // duration.nanos += 1000000000;
 // }
@@ -142,7 +142,9 @@ func init() {
 	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
 }
 
-func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+func init() {
+	proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5)
+}
 
 var fileDescriptor_23597b2ebd7ac6c5 = []byte{
 	// 190 bytes of a gzipped FileDescriptorProto
3
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
generated
vendored
@ -61,7 +61,7 @@ option objc_class_prefix = "GPB";
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// } else if (duration.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
@ -101,7 +101,6 @@ option objc_class_prefix = "GPB";
|
||||
//
|
||||
//
|
||||
message Duration {
|
||||
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
|
85
vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/empty.proto
|
||||
|
||||
package empty
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||
type Empty struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Empty) Reset() { *m = Empty{} }
|
||||
func (m *Empty) String() string { return proto.CompactTextString(m) }
|
||||
func (*Empty) ProtoMessage() {}
|
||||
func (*Empty) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_900544acb223d5b8, []int{0}
|
||||
}
|
||||
|
||||
func (*Empty) XXX_WellKnownType() string { return "Empty" }
|
||||
|
||||
func (m *Empty) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Empty.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Empty) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Empty.Merge(m, src)
|
||||
}
|
||||
func (m *Empty) XXX_Size() int {
|
||||
return xxx_messageInfo_Empty.Size(m)
|
||||
}
|
||||
func (m *Empty) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Empty.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Empty proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8)
|
||||
}
|
||||
|
||||
var fileDescriptor_900544acb223d5b8 = []byte{
|
||||
// 148 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
|
||||
0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
|
||||
0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
|
||||
0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
|
||||
0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
|
||||
0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
|
||||
0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
|
||||
0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
|
||||
0xb7, 0x00, 0x00, 0x00,
|
||||
}
|
52
vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option go_package = "github.com/golang/protobuf/ptypes/empty";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "EmptyProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
option cc_enable_arenas = true;
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||
message Empty {}
|
40
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
@ -20,17 +20,19 @@ var _ = math.Inf
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
@ -91,12 +93,14 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
@ -160,7 +164,9 @@ func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e)
|
||||
}
|
||||
|
||||
var fileDescriptor_292007bbfe81227e = []byte{
|
||||
// 191 bytes of a gzipped FileDescriptorProto
|
||||
|
37
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
generated
vendored
@ -40,17 +40,19 @@ option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone
|
||||
// or calendar, represented as seconds and fractions of seconds at
|
||||
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||
// backwards to year one. It is encoded assuming all minutes are 60
|
||||
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||
// table is needed for interpretation. Range is from
|
||||
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||
// By restricting to that range, we ensure that we can convert to
|
||||
// and from RFC 3339 date strings.
|
||||
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
@ -111,17 +113,18 @@ option objc_class_prefix = "GPB";
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString]
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
|
||||
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
|
||||
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
message Timestamp {
|
||||
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
|
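To make the RFC 3339 round-trip described in the comment above concrete, here is a small Go sketch using the ptypes helpers that ship next to these vendored files; the specific date (01:30 UTC on January 15, 2017) is the one the upstream comment uses and is purely illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp and back; the string form is
	// the RFC 3339 encoding the comment above describes.
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		panic(err)
	}
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(time.RFC3339Nano)) // 2017-01-15T01:30:15.01Z
}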
71
vendor/github.com/grafana/grafana-plugin-sdk-go/data/conversion_input.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
package data

import "fmt"

// FrameInputConverter is a type to support building a Frame while also
// doing conversion as data is added to the Frame.
type FrameInputConverter struct {
	Frame *Frame
	fieldConverters []FieldConverter
}

// A FieldConverter is a type to support building Frame fields of a different
// type than one's input data.
type FieldConverter struct {
	// OutputFieldType is the type of Field that will be created.
	OutputFieldType FieldType

	// Converter is a conversion function that is called when setting Field values with a FrameInputConverter.
	// Care must be taken that the type returned by the conversion function matches the member type of the FieldType,
	// and that the input type matches the expected input type for the Converter function, or panics can occur.
	// If the Converter is nil, no conversion is performed when calling methods to set values.
	Converter Converter
}

// Converter is a function type for converting values in a Frame. It is the consumers responsibility
// to the check the underlying interface types of the input and return types to avoid panics.
type Converter func(v interface{}) (interface{}, error)

// NewFrameInputConverter returns a FrameInputConverter which is used to create a Frame from data
// that needs value conversions. The FrameInputConverter will create a new Frame with fields
// based on the FieldConverters' OutputFieldTypes of length rowLen.
func NewFrameInputConverter(fieldConvs []FieldConverter, rowLen int) (*FrameInputConverter, error) {
	fTypes := make([]FieldType, len(fieldConvs))
	for i, fc := range fieldConvs {
		fTypes[i] = fc.OutputFieldType
	}

	f := NewFrameOfFieldTypes("", rowLen, fTypes...)
	return &FrameInputConverter{
		Frame: f,
		fieldConverters: fieldConvs,
	}, nil
}

// Set sets val a FieldIdx and rowIdx of the frame. If the corresponding FieldConverter's
// Converter is not nil, then the Converter function is called before setting the value (otherwise Frame.Set is called directly).
// If an error is returned from the Converter function this function returns that error.
// Like Frame.Set and Field.Set, it will panic if fieldIdx or rowIdx are out of range.
func (fcb *FrameInputConverter) Set(fieldIdx, rowIdx int, val interface{}) error {
	if fcb.fieldConverters[fieldIdx].Converter == nil {
		fcb.Frame.Set(fieldIdx, rowIdx, val)
		return nil
	}
	convertedVal, err := fcb.fieldConverters[fieldIdx].Converter(val)
	if err != nil {
		return err
	}
	fcb.Frame.Set(fieldIdx, rowIdx, convertedVal)
	return nil
}

var asStringConverter Converter = func(v interface{}) (interface{}, error) {
	return fmt.Sprintf("%v", v), nil
}

// AsStringFieldConverter will always return a string a regardless of the input.
// This is done with fmt.Sprintf which uses reflection.
var AsStringFieldConverter = FieldConverter{
	OutputFieldType: FieldTypeString,
	Converter: asStringConverter,
}
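A minimal sketch of how a data source could drive this new converter API; the column names, sample rows and the string-to-float converter below are illustrative, not part of this commit.

package main

import (
	"fmt"
	"strconv"

	"github.com/grafana/grafana-plugin-sdk-go/data"
)

func main() {
	// One FieldConverter per output field: keep the first column as a string,
	// parse the second from string into a nullable float64 (*float64).
	converters := []data.FieldConverter{
		data.AsStringFieldConverter,
		{
			OutputFieldType: data.FieldTypeNullableFloat64,
			Converter: func(v interface{}) (interface{}, error) {
				f, err := strconv.ParseFloat(v.(string), 64)
				if err != nil {
					return nil, err
				}
				return &f, nil
			},
		},
	}

	rows := [][]string{{"2020-04-01", "1.5"}, {"2020-04-02", "2.5"}}
	fic, err := data.NewFrameInputConverter(converters, len(rows))
	if err != nil {
		panic(err)
	}
	for rowIdx, row := range rows {
		for fieldIdx, cell := range row {
			if err := fic.Set(fieldIdx, rowIdx, cell); err != nil {
				panic(err)
			}
		}
	}
	if err := fic.Frame.SetFieldNames("day", "value"); err != nil {
		panic(err)
	}
	fmt.Println(len(fic.Frame.Fields)) // 2
}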
29
vendor/github.com/grafana/grafana-plugin-sdk-go/data/frame.go
generated
vendored
@ -121,6 +121,19 @@ func (f *Frame) EmptyCopy() *Frame {
|
||||
return newFrame
|
||||
}
|
||||
|
||||
// NewFrameOfFieldTypes returns a Frame where the Fields are initalized to the
|
||||
// corresponding field type in fTypes. Each Field will be of length FieldLen.
|
||||
func NewFrameOfFieldTypes(name string, fieldLen int, fTypes ...FieldType) *Frame {
|
||||
f := &Frame{
|
||||
Name: name,
|
||||
Fields: make(Fields, len(fTypes)),
|
||||
}
|
||||
for i, fT := range fTypes {
|
||||
f.Fields[i] = NewFieldFromFieldType(fT, fieldLen)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// TypeIndices returns a slice of Field index positions for the given fTypes.
|
||||
func (f *Frame) TypeIndices(fTypes ...FieldType) []int {
|
||||
indices := []int{}
|
||||
@ -226,6 +239,22 @@ func (f *Frame) FloatAt(fieldIdx int, rowIdx int) (float64, error) {
|
||||
return f.Fields[fieldIdx].FloatAt(rowIdx)
|
||||
}
|
||||
|
||||
// SetFieldNames sets each Field Name in the frame to the corresponding frame.
|
||||
// If the number of provided names does not match the number of Fields in the frame an error is returned.
|
||||
func (f *Frame) SetFieldNames(names ...string) error {
|
||||
fieldLen := 0
|
||||
if f.Fields != nil {
|
||||
fieldLen = len(f.Fields)
|
||||
}
|
||||
if fieldLen != len(names) {
|
||||
return fmt.Errorf("can not set field names, number of names %v does not match frame field length %v", len(names), fieldLen)
|
||||
}
|
||||
for i, name := range names {
|
||||
f.Fields[i].Name = name
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FrameTestCompareOptions returns go-cmp testing options to allow testing of Frame equivelnce.
|
||||
// Since the data within a Frame's Fields is not exported, this function allows the unexported
|
||||
// values to be tested.
|
||||
|
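For reference, a short sketch of the helpers added to frame.go above (NewFrameOfFieldTypes, SetFieldNames, TypeIndices); the frame name and values are made up for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/data"
)

func main() {
	// Pre-size a frame with one time field and one float64 field, two rows each.
	f := data.NewFrameOfFieldTypes("example", 2, data.FieldTypeTime, data.FieldTypeFloat64)
	if err := f.SetFieldNames("time", "value"); err != nil {
		panic(err)
	}
	f.Set(0, 0, time.Now())
	f.Set(1, 0, 3.14)

	// TypeIndices reports which field positions hold a given type.
	fmt.Println(f.TypeIndices(data.FieldTypeFloat64)) // [1]
}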
185
vendor/github.com/grafana/grafana-plugin-sdk-go/data/sql.go
generated
vendored
@ -1,185 +0,0 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// NewFromSQLRows returns a new Frame populated with the data from rows. The Field Vector types
|
||||
// will be Vectors of pointer types, []*T, if the SQL column is nullable or if the nullable property is unknown.
|
||||
// Otherwise, they will be []T types.
|
||||
//
|
||||
// Fields will be named to match name of the SQL columns and the SQL column names must be unique (https://github.com/grafana/grafana-plugin-sdk-go/issues/59).
|
||||
//
|
||||
// All the types must be supported by the Frame or a SQLStringConverter will be created and
|
||||
// the resulting Field Vector type will be of type []*string.
|
||||
//
|
||||
// The SQLStringConverter's ConversionFunc will be applied to matching rows if it is not nil.
|
||||
// Additionally, if the SQLStringConverter's Replacer is not nil, the replacement will be performed.
|
||||
// A map of Field/Column index to the corresponding SQLStringConverter is returned so what conversions were
|
||||
// done can be inspected.
|
||||
func NewFromSQLRows(rows *sql.Rows, converters ...SQLStringConverter) (*Frame, map[int]SQLStringConverter, error) {
|
||||
frame, mappers, err := newForSQLRows(rows, converters...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for rows.Next() {
|
||||
sRow := frame.newScannableRow()
|
||||
err := rows.Scan(sRow...)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for fieldIdx, mapper := range mappers {
|
||||
if mapper.ConversionFunc == nil {
|
||||
continue
|
||||
}
|
||||
field := frame.Fields[fieldIdx]
|
||||
for i := 0; i < field.Len(); i++ {
|
||||
v, err := mapper.ConversionFunc(field.At(i).(*string))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
field.Set(i, v)
|
||||
}
|
||||
if mapper.Replacer == nil {
|
||||
continue
|
||||
}
|
||||
if err := Replace(frame, fieldIdx, mapper.Replacer); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return frame, mappers, nil
|
||||
}
|
||||
|
||||
// newForSQLRows creates a new Frame appropriate for scanning SQL rows with
|
||||
// the the new Frame's ScannableRow() method.
|
||||
func newForSQLRows(rows *sql.Rows, converters ...SQLStringConverter) (*Frame, map[int]SQLStringConverter, error) {
|
||||
mapping := make(map[int]SQLStringConverter)
|
||||
colTypes, err := rows.ColumnTypes()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
colNames, err := rows.Columns()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// In the future we can probably remove this restriction. But right now we map names to Arrow Field Names.
|
||||
// Arrow Field names must be unique: https://github.com/grafana/grafana-plugin-sdk-go/issues/59
|
||||
seen := map[string]int{}
|
||||
for i, name := range colNames {
|
||||
if j, ok := seen[name]; ok {
|
||||
return nil, nil, fmt.Errorf(`duplicate column names are not allowed, found identical name "%v" at column indices %v and %v`, name, j, i)
|
||||
}
|
||||
seen[name] = i
|
||||
}
|
||||
frame := &Frame{}
|
||||
for i, colType := range colTypes {
|
||||
colName := colNames[i]
|
||||
nullable, ok := colType.Nullable()
|
||||
if !ok {
|
||||
nullable = true // If we don't know if it is nullable, assume it is
|
||||
}
|
||||
scanType := colType.ScanType()
|
||||
for _, converter := range converters {
|
||||
if converter.InputScanKind == scanType.Kind() && converter.InputTypeName == colType.DatabaseTypeName() {
|
||||
nullable = true // String converters are always nullable
|
||||
scanType = reflect.TypeOf("")
|
||||
mapping[i] = converter
|
||||
}
|
||||
}
|
||||
var vec interface{}
|
||||
if !nullable {
|
||||
vec = reflect.MakeSlice(reflect.SliceOf(scanType), 0, 0).Interface()
|
||||
} else {
|
||||
ptrType := reflect.TypeOf(reflect.New(scanType).Interface())
|
||||
// Nullabe types get passed to scan as a pointer to a pointer
|
||||
vec = reflect.MakeSlice(reflect.SliceOf(ptrType), 0, 0).Interface()
|
||||
}
|
||||
if !ValidFieldType(vec) {
|
||||
// Automatically create string mapper if we end up with an unsupported type
|
||||
mapping[i] = SQLStringConverter{
|
||||
Name: fmt.Sprintf("Autogenerated for column %v", i),
|
||||
InputTypeName: colType.DatabaseTypeName(),
|
||||
InputScanKind: colType.ScanType().Kind(),
|
||||
}
|
||||
ptrType := reflect.TypeOf(reflect.New(reflect.TypeOf("")).Interface())
|
||||
vec = reflect.MakeSlice(reflect.SliceOf(ptrType), 0, 0).Interface()
|
||||
}
|
||||
frame.Fields = append(frame.Fields, NewField(colName, nil, vec))
|
||||
}
|
||||
return frame, mapping, nil
|
||||
}
|
||||
|
||||
// newScannableRow adds a row to the Frame by extending each Field's Vector. It returns
|
||||
// a slice of references that can be passed to the database/sql rows.Scan() to scan directly into
|
||||
// the extended Vectors of the data.
|
||||
func (f *Frame) newScannableRow() []interface{} {
|
||||
row := make([]interface{}, len(f.Fields))
|
||||
for i, field := range f.Fields {
|
||||
field.Extend(1)
|
||||
// non-nullable fields will be *T, and nullable fields will be **T
|
||||
ptr := field.PointerAt(field.Len() - 1)
|
||||
row[i] = ptr
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
// SQLStringConverter can be used to store types not supported by
|
||||
// a Frame into a *string. When scanning, if a SQL's row's InputScanType's Kind
|
||||
// and InputScanKind match that returned by the sql response, then the
|
||||
// conversion func will be run on the row.
|
||||
type SQLStringConverter struct {
|
||||
// Name is an optional property that can be used to identify a converter
|
||||
Name string
|
||||
InputScanKind reflect.Kind // reflect.Type might better or worse option?
|
||||
InputTypeName string
|
||||
|
||||
// Conversion func may be nil to do no additional operations on the string conversion.
|
||||
ConversionFunc func(in *string) (*string, error)
|
||||
|
||||
// If the Replacer is not nil, the replacement will be performed.
|
||||
Replacer *StringFieldReplacer
|
||||
}
|
||||
|
||||
// Note: SQLStringConverter is perhaps better understood as []byte. However, currently
|
||||
// the Vector type ([][]byte) is not supported. https://github.com/grafana/grafana-plugin-sdk-go/issues/57
|
||||
|
||||
// StringFieldReplacer is used to replace a *string Field in a Frame. The type
|
||||
// returned by the ReplaceFunc must match the type of elements of VectorType.
|
||||
// Both properties must be non-nil.
|
||||
type StringFieldReplacer struct {
|
||||
VectorType interface{}
|
||||
ReplaceFunc func(in *string) (interface{}, error)
|
||||
}
|
||||
|
||||
// Replace will replace a *string Vector of the specified Field's index
|
||||
// using the StringFieldReplacer.
|
||||
func Replace(frame *Frame, fieldIdx int, replacer *StringFieldReplacer) error {
|
||||
if fieldIdx > len(frame.Fields) {
|
||||
return fmt.Errorf("fieldIdx is out of bounds, field len: %v", len(frame.Fields))
|
||||
}
|
||||
field := frame.Fields[fieldIdx]
|
||||
if field.Type() != FieldTypeNullableString {
|
||||
return fmt.Errorf("can only replace []*string vectors, vector is of type %s", field.Type())
|
||||
}
|
||||
|
||||
if !ValidFieldType(replacer.VectorType) {
|
||||
return fmt.Errorf("can not replace column with unsupported type %T", replacer.VectorType)
|
||||
}
|
||||
newVector := newVector(replacer.VectorType, field.vector.Len())
|
||||
for i := 0; i < newVector.Len(); i++ {
|
||||
oldVal := field.vector.At(i).(*string) // Vector type is checked earlier above
|
||||
newVal, err := replacer.ReplaceFunc(oldVal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newVector.Set(i, newVal)
|
||||
}
|
||||
field.vector = newVector
|
||||
return nil
|
||||
}
|
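For context, this is roughly how the SQL helper being deleted here was used. It only exists in grafana-plugin-sdk-go versions prior to this bump, so the sketch below targets an older SDK, and the surrounding package and function names are placeholders.

package sqlframe

import (
	"database/sql"

	"github.com/grafana/grafana-plugin-sdk-go/data"
)

// frameFromRows scans a *sql.Rows result set straight into a Frame, letting
// the helper fall back to *string fields for unsupported column types.
func frameFromRows(rows *sql.Rows) (*data.Frame, error) {
	frame, _, err := data.NewFromSQLRows(rows)
	if err != nil {
		return nil, err
	}
	return frame, nil
}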
28
vendor/github.com/grafana/grafana-plugin-sdk-go/genproto/pluginv2/backend.pb.go
generated
vendored
@ -1002,7 +1002,9 @@ func init() {
|
||||
proto.RegisterType((*CheckHealthResponse)(nil), "pluginv2.CheckHealthResponse")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("backend.proto", fileDescriptor_5ab9ba5b8d8b2ba5) }
|
||||
func init() {
|
||||
proto.RegisterFile("backend.proto", fileDescriptor_5ab9ba5b8d8b2ba5)
|
||||
}
|
||||
|
||||
var fileDescriptor_5ab9ba5b8d8b2ba5 = []byte{
|
||||
// 1144 bytes of a gzipped FileDescriptorProto
|
||||
@ -1082,11 +1084,11 @@ var fileDescriptor_5ab9ba5b8d8b2ba5 = []byte{
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
var _ grpc.ClientConnInterface
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
const _ = grpc.SupportPackageIsVersion6
|
||||
|
||||
// ResourceClient is the client API for Resource service.
|
||||
//
|
||||
@ -1096,10 +1098,10 @@ type ResourceClient interface {
|
||||
}
|
||||
|
||||
type resourceClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewResourceClient(cc *grpc.ClientConn) ResourceClient {
|
||||
func NewResourceClient(cc grpc.ClientConnInterface) ResourceClient {
|
||||
return &resourceClient{cc}
|
||||
}
|
||||
|
||||
@ -1195,10 +1197,10 @@ type DataClient interface {
|
||||
}
|
||||
|
||||
type dataClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewDataClient(cc *grpc.ClientConn) DataClient {
|
||||
func NewDataClient(cc grpc.ClientConnInterface) DataClient {
|
||||
return &dataClient{cc}
|
||||
}
|
||||
|
||||
@ -1268,10 +1270,10 @@ type DiagnosticsClient interface {
|
||||
}
|
||||
|
||||
type diagnosticsClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewDiagnosticsClient(cc *grpc.ClientConn) DiagnosticsClient {
|
||||
func NewDiagnosticsClient(cc grpc.ClientConnInterface) DiagnosticsClient {
|
||||
return &diagnosticsClient{cc}
|
||||
}
|
||||
|
||||
@ -1375,10 +1377,10 @@ type TransformClient interface {
|
||||
}
|
||||
|
||||
type transformClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewTransformClient(cc *grpc.ClientConn) TransformClient {
|
||||
func NewTransformClient(cc grpc.ClientConnInterface) TransformClient {
|
||||
return &transformClient{cc}
|
||||
}
|
||||
|
||||
@ -1447,10 +1449,10 @@ type TransformDataCallBackClient interface {
|
||||
}
|
||||
|
||||
type transformDataCallBackClient struct {
|
||||
cc *grpc.ClientConn
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewTransformDataCallBackClient(cc *grpc.ClientConn) TransformDataCallBackClient {
|
||||
func NewTransformDataCallBackClient(cc grpc.ClientConnInterface) TransformDataCallBackClient {
|
||||
return &transformDataCallBackClient{cc}
|
||||
}
|
||||
|
||||
|
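The regenerated stubs above switch from *grpc.ClientConn to the grpc.ClientConnInterface introduced in grpc-go 1.27, so the client constructors now accept anything implementing that interface. A minimal sketch; the address is a placeholder.

package main

import (
	"log"

	"github.com/grafana/grafana-plugin-sdk-go/genproto/pluginv2"
	"google.golang.org/grpc"
)

func main() {
	// *grpc.ClientConn still satisfies grpc.ClientConnInterface, so existing
	// callers keep compiling; test doubles can now implement the interface too.
	conn, err := grpc.Dial("127.0.0.1:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	_ = pluginv2.NewDataClient(conn)
}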
5
vendor/github.com/hashicorp/go-plugin/README.md
generated
vendored
@ -141,11 +141,6 @@ This plugin system will give host processes a system for constraining
versions. This is in addition to the protocol versioning already present
which is more for larger underlying changes.

**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter)
to support automatic download + install of plugins. Paired with cryptographically
secure plugins (above), we can make this a safe operation for an amazing
user experience.

## What About Shared Libraries?

When we started using plugins (late 2012, early 2013), plugins over RPC
18
vendor/github.com/hashicorp/go-plugin/client.go
generated
vendored
@ -159,11 +159,8 @@ type ClientConfig struct {
|
||||
|
||||
// SyncStdout, SyncStderr can be set to override the
|
||||
// respective os.Std* values in the plugin. Care should be taken to
|
||||
// avoid races here. If these are nil, then this will automatically be
|
||||
// hooked up to os.Stdin, Stdout, and Stderr, respectively.
|
||||
//
|
||||
// If the default values (nil) are used, then this package will not
|
||||
// sync any of these streams.
|
||||
// avoid races here. If these are nil, then this will be set to
|
||||
// ioutil.Discard.
|
||||
SyncStdout io.Writer
|
||||
SyncStderr io.Writer
|
||||
|
||||
@ -690,14 +687,14 @@ func (c *Client) Start() (addr net.Addr, err error) {
|
||||
|
||||
// Check the core protocol. Wrapped in a {} for scoping.
|
||||
{
|
||||
var coreProtocol int64
|
||||
coreProtocol, err = strconv.ParseInt(parts[0], 10, 0)
|
||||
var coreProtocol int
|
||||
coreProtocol, err = strconv.Atoi(parts[0])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Error parsing core protocol version: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
if int(coreProtocol) != CoreProtocolVersion {
|
||||
if coreProtocol != CoreProtocolVersion {
|
||||
err = fmt.Errorf("Incompatible core API version with plugin. "+
|
||||
"Plugin version: %s, Core version: %d\n\n"+
|
||||
"To fix this, the plugin usually only needs to be recompiled.\n"+
|
||||
@ -788,7 +785,10 @@ func (c *Client) reattach() (net.Addr, error) {
|
||||
// Verify the process still exists. If not, then it is an error
|
||||
p, err := os.FindProcess(c.config.Reattach.Pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// On Unix systems, FindProcess never returns an error.
|
||||
// On Windows, for non-existent pids it returns:
|
||||
// os.SyscallError - 'OpenProcess: the paremter is incorrect'
|
||||
return nil, ErrProcessNotFound
|
||||
}
|
||||
|
||||
// Attempt to connect to the addr since on Unix systems FindProcess
|
||||
|
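The SyncStdout/SyncStderr comment change above reflects that these writers now default to discarding plugin output instead of mirroring the host's own streams; a host that wants the old behaviour has to opt in, roughly as below. The handshake values and plugin path are placeholders.

package main

import (
	"os"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_COOKIE",
			MagicCookieValue: "example",
		},
		Plugins:          map[string]plugin.Plugin{},
		Cmd:              exec.Command("./example-plugin"),
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
		// Explicitly forward the plugin's stdout/stderr to this process;
		// with the nil defaults they are now discarded.
		SyncStdout: os.Stdout,
		SyncStderr: os.Stderr,
	})
	defer client.Kill()
	_, _ = client.Client()
}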
14
vendor/github.com/hashicorp/go-plugin/go.mod
generated
vendored
@ -1,17 +1,15 @@
module github.com/hashicorp/go-plugin

go 1.13

require (
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
	github.com/golang/protobuf v1.2.0
	github.com/golang/protobuf v1.3.4
	github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
	github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
	github.com/jhump/protoreflect v1.6.0
	github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77
	github.com/oklog/run v1.0.0
	github.com/stretchr/testify v1.3.0 // indirect
	golang.org/x/net v0.0.0-20180826012351-8a410e7b638d
	golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
	golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect
	golang.org/x/text v0.3.0 // indirect
	google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect
	google.golang.org/grpc v1.14.0
	golang.org/x/net v0.0.0-20190311183353-d8887717615a
	google.golang.org/grpc v1.27.1
)
55
vendor/github.com/hashicorp/go-plugin/go.sum
generated
vendored
@ -1,31 +1,74 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
|
||||
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ=
|
||||
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
8
vendor/github.com/hashicorp/go-plugin/grpc_client.go
generated
vendored
@ -37,7 +37,6 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn,
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32)))
|
||||
|
||||
|
||||
// Connect. Note the first parameter is unused because we use a custom
|
||||
// dialer that has the state to see the address.
|
||||
conn, err := grpc.Dial("unused", opts...)
|
||||
@ -62,6 +61,13 @@ func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) {
|
||||
go broker.Run()
|
||||
go brokerGRPCClient.StartStream()
|
||||
|
||||
// Start the stdio client
|
||||
stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr)
|
||||
|
||||
cl := &GRPCClient{
|
||||
Conn: conn,
|
||||
Plugins: c.config.Plugins,
|
||||
|
19
vendor/github.com/hashicorp/go-plugin/grpc_server.go
generated
vendored
@ -14,6 +14,7 @@ import (
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/health"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
"google.golang.org/grpc/reflection"
|
||||
)
|
||||
|
||||
// GRPCServiceName is the name of the service that the health check should
|
||||
@ -51,9 +52,10 @@ type GRPCServer struct {
|
||||
Stdout io.Reader
|
||||
Stderr io.Reader
|
||||
|
||||
config GRPCServerConfig
|
||||
server *grpc.Server
|
||||
broker *GRPCBroker
|
||||
config GRPCServerConfig
|
||||
server *grpc.Server
|
||||
broker *GRPCBroker
|
||||
stdioServer *grpcStdioServer
|
||||
|
||||
logger hclog.Logger
|
||||
}
|
||||
@ -73,6 +75,9 @@ func (s *GRPCServer) Init() error {
|
||||
GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
|
||||
grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
|
||||
|
||||
// Register the reflection service
|
||||
reflection.Register(s.server)
|
||||
|
||||
// Register the broker service
|
||||
brokerServer := newGRPCBrokerServer()
|
||||
plugin.RegisterGRPCBrokerServer(s.server, brokerServer)
|
||||
@ -80,11 +85,13 @@ func (s *GRPCServer) Init() error {
|
||||
go s.broker.Run()
|
||||
|
||||
// Register the controller
|
||||
controllerServer := &grpcControllerServer{
|
||||
server: s,
|
||||
}
|
||||
controllerServer := &grpcControllerServer{server: s}
|
||||
plugin.RegisterGRPCControllerServer(s.server, controllerServer)
|
||||
|
||||
// Register the stdio service
|
||||
s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr)
|
||||
plugin.RegisterGRPCStdioServer(s.server, s.stdioServer)
|
||||
|
||||
// Register all our plugins onto the gRPC server.
|
||||
for k, raw := range s.Plugins {
|
||||
p, ok := raw.(GRPCPlugin)
|
||||
|
207
vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
|
||||
empty "github.com/golang/protobuf/ptypes/empty"
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-plugin/internal/plugin"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of
|
||||
// stdio data. This is currently 1 KB for no reason other than that seems like
|
||||
// enough (stdio data isn't that common) and is fairly low.
|
||||
const grpcStdioBuffer = 1 * 1024
|
||||
|
||||
// grpcStdioServer implements the Stdio service and streams stdiout/stderr.
|
||||
type grpcStdioServer struct {
|
||||
stdoutCh <-chan []byte
|
||||
stderrCh <-chan []byte
|
||||
}
|
||||
|
||||
// newGRPCStdioServer creates a new grpcStdioServer and starts the stream
|
||||
// copying for the given out and err readers.
|
||||
//
|
||||
// This must only be called ONCE per srcOut, srcErr.
|
||||
func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer {
|
||||
stdoutCh := make(chan []byte)
|
||||
stderrCh := make(chan []byte)
|
||||
|
||||
// Begin copying the streams
|
||||
go copyChan(log, stdoutCh, srcOut)
|
||||
go copyChan(log, stderrCh, srcErr)
|
||||
|
||||
// Construct our server
|
||||
return &grpcStdioServer{
|
||||
stdoutCh: stdoutCh,
|
||||
stderrCh: stderrCh,
|
||||
}
|
||||
}
|
||||
|
||||
// StreamStdio streams our stdout/err as the response.
|
||||
func (s *grpcStdioServer) StreamStdio(
|
||||
_ *empty.Empty,
|
||||
srv plugin.GRPCStdio_StreamStdioServer,
|
||||
) error {
|
||||
// Share the same data value between runs. Sending this over the wire
|
||||
// marshals it so we can reuse this.
|
||||
var data plugin.StdioData
|
||||
|
||||
for {
|
||||
// Read our data
|
||||
select {
|
||||
case data.Data = <-s.stdoutCh:
|
||||
data.Channel = plugin.StdioData_STDOUT
|
||||
|
||||
case data.Data = <-s.stderrCh:
|
||||
data.Channel = plugin.StdioData_STDERR
|
||||
|
||||
case <-srv.Context().Done():
|
||||
return nil
|
||||
}
|
||||
|
||||
// Not sure if this is possible, but if we somehow got here and
|
||||
// we didn't populate any data at all, then just continue.
|
||||
if len(data.Data) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Send our data to the client.
|
||||
if err := srv.Send(&data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// grpcStdioClient wraps the stdio service as a client to copy
|
||||
// the stdio data to output writers.
|
||||
type grpcStdioClient struct {
|
||||
log hclog.Logger
|
||||
stdioClient plugin.GRPCStdio_StreamStdioClient
|
||||
}
|
||||
|
||||
// newGRPCStdioClient creates a grpcStdioClient. This will perform the
|
||||
// initial connection to the stdio service. If the stdio service is unavailable
|
||||
// then this will be a no-op. This allows this to work without error for
|
||||
// plugins that don't support this.
|
||||
func newGRPCStdioClient(
|
||||
ctx context.Context,
|
||||
log hclog.Logger,
|
||||
conn *grpc.ClientConn,
|
||||
) (*grpcStdioClient, error) {
|
||||
client := plugin.NewGRPCStdioClient(conn)
|
||||
|
||||
// Connect immediately to the endpoint
|
||||
stdioClient, err := client.StreamStdio(ctx, &empty.Empty{})
|
||||
|
||||
// If we get an Unavailable or Unimplemented error, this means that the plugin isn't
|
||||
// updated and linking to the latest version of go-plugin that supports
|
||||
// this. We fall back to the previous behavior of just not syncing anything.
|
||||
if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented {
|
||||
log.Warn("stdio service not available, stdout/stderr syncing unavailable")
|
||||
stdioClient = nil
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &grpcStdioClient{
|
||||
log: log,
|
||||
stdioClient: stdioClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run starts the loop that receives stdio data and writes it to the given
|
||||
// writers. This blocks and should be run in a goroutine.
|
||||
func (c *grpcStdioClient) Run(stdout, stderr io.Writer) {
|
||||
// This will be nil if stdio is not supported by the plugin
|
||||
if c.stdioClient == nil {
|
||||
c.log.Warn("stdio service unavailable, run will do nothing")
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
c.log.Trace("waiting for stdio data")
|
||||
data, err := c.stdioClient.Recv()
|
||||
if err != nil {
|
||||
if err == io.EOF ||
|
||||
status.Code(err) == codes.Unavailable ||
|
||||
status.Code(err) == codes.Canceled ||
|
||||
status.Code(err) == codes.Unimplemented ||
|
||||
err == context.Canceled {
|
||||
c.log.Warn("received EOF, stopping recv loop", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.log.Error("error receiving data", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Determine our output writer based on channel
|
||||
var w io.Writer
|
||||
switch data.Channel {
|
||||
case plugin.StdioData_STDOUT:
|
||||
w = stdout
|
||||
|
||||
case plugin.StdioData_STDERR:
|
||||
w = stderr
|
||||
|
||||
default:
|
||||
c.log.Warn("unknown channel, dropping", "channel", data.Channel)
|
||||
continue
|
||||
}
|
||||
|
||||
// Write! In the event of an error we just continue.
|
||||
if c.log.IsTrace() {
|
||||
c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data))
|
||||
}
|
||||
if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil {
|
||||
c.log.Error("failed to copy all bytes", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// copyChan copies an io.Reader into a channel.
|
||||
func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) {
|
||||
bufsrc := bufio.NewReader(src)
|
||||
|
||||
for {
|
||||
// Make our data buffer. We allocate a new one per loop iteration
|
||||
// so that we can send it over the channel.
|
||||
var data [1024]byte
|
||||
|
||||
// Read the data, this will block until data is available
|
||||
n, err := bufsrc.Read(data[:])
|
||||
|
||||
// We have to check if we have data BEFORE err != nil. The bufio
|
||||
// docs guarantee n == 0 on EOF but its better to be safe here.
|
||||
if n > 0 {
|
||||
// We have data! Send it on the channel. This will block if there
|
||||
// is no reader on the other side. We expect that go-plugin will
|
||||
// connect immediately to the stdio server to drain this so we want
|
||||
// this block to happen for backpressure.
|
||||
dst <- data[:n]
|
||||
}
|
||||
|
||||
// If we hit EOF we're done copying
|
||||
if err == io.EOF {
|
||||
log.Debug("stdio EOF, exiting copy loop")
|
||||
return
|
||||
}
|
||||
|
||||
// Any other error we just exit the loop. We don't expect there to
|
||||
// be errors since our use case for this is reading/writing from
|
||||
// a in-process pipe (os.Pipe).
|
||||
if err != nil {
|
||||
log.Warn("error copying stdio data, stopping copy", "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
2
vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
generated
vendored
@ -1,3 +1,3 @@
//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:.
//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:.

package plugin
48
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
generated
vendored
@ -3,12 +3,13 @@
|
||||
|
||||
package plugin
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@ -35,17 +36,16 @@ func (m *ConnInfo) Reset() { *m = ConnInfo{} }
|
||||
func (m *ConnInfo) String() string { return proto.CompactTextString(m) }
|
||||
func (*ConnInfo) ProtoMessage() {}
|
||||
func (*ConnInfo) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_802e9beed3ec3b28, []int{0}
|
||||
return fileDescriptor_grpc_broker_3322b07398605250, []int{0}
|
||||
}
|
||||
|
||||
func (m *ConnInfo) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ConnInfo.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ConnInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ConnInfo.Merge(m, src)
|
||||
func (dst *ConnInfo) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ConnInfo.Merge(dst, src)
|
||||
}
|
||||
func (m *ConnInfo) XXX_Size() int {
|
||||
return xxx_messageInfo_ConnInfo.Size(m)
|
||||
@ -81,23 +81,6 @@ func init() {
|
||||
proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) }
|
||||
|
||||
var fileDescriptor_802e9beed3ec3b28 = []byte{
|
||||
// 175 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48,
|
||||
0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b,
|
||||
0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b,
|
||||
0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91,
|
||||
0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7,
|
||||
0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20,
|
||||
0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc,
|
||||
0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1,
|
||||
0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b,
|
||||
0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b,
|
||||
0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@ -201,3 +184,20 @@ var _GRPCBroker_serviceDesc = grpc.ServiceDesc{
|
||||
},
|
||||
Metadata: "grpc_broker.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) }
|
||||
|
||||
var fileDescriptor_grpc_broker_3322b07398605250 = []byte{
|
||||
// 175 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48,
|
||||
0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b,
|
||||
0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b,
|
||||
0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91,
|
||||
0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7,
|
||||
0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20,
|
||||
0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc,
|
||||
0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1,
|
||||
0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b,
|
||||
0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b,
|
||||
0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
2
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
generated
vendored
2
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
generated
vendored
@ -11,5 +11,3 @@ message ConnInfo {
service GRPCBroker {
  rpc StartStream(stream ConnInfo) returns (stream ConnInfo);
}


42
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
generated
vendored
42
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
generated
vendored
@ -3,12 +3,13 @@
|
||||
|
||||
package plugin
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@ -32,17 +33,16 @@ func (m *Empty) Reset() { *m = Empty{} }
|
||||
func (m *Empty) String() string { return proto.CompactTextString(m) }
|
||||
func (*Empty) ProtoMessage() {}
|
||||
func (*Empty) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_23c2c7e42feab570, []int{0}
|
||||
return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0}
|
||||
}
|
||||
|
||||
func (m *Empty) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Empty.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Empty) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Empty.Merge(m, src)
|
||||
func (dst *Empty) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Empty.Merge(dst, src)
|
||||
}
|
||||
func (m *Empty) XXX_Size() int {
|
||||
return xxx_messageInfo_Empty.Size(m)
|
||||
@ -57,19 +57,6 @@ func init() {
|
||||
proto.RegisterType((*Empty)(nil), "plugin.Empty")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) }
|
||||
|
||||
var fileDescriptor_23c2c7e42feab570 = []byte{
|
||||
// 108 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48,
|
||||
0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f,
|
||||
0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d,
|
||||
0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0,
|
||||
0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03,
|
||||
0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08,
|
||||
0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@ -141,3 +128,18 @@ var _GRPCController_serviceDesc = grpc.ServiceDesc{
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "grpc_controller.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436)
|
||||
}
|
||||
|
||||
var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{
|
||||
// 108 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48,
|
||||
0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f,
|
||||
0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d,
|
||||
0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0,
|
||||
0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03,
|
||||
0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08,
|
||||
0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
233
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go
generated
vendored
Normal file
233
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go
generated
vendored
Normal file
@ -0,0 +1,233 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: grpc_stdio.proto
|
||||
|
||||
package plugin
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import empty "github.com/golang/protobuf/ptypes/empty"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type StdioData_Channel int32
|
||||
|
||||
const (
|
||||
StdioData_INVALID StdioData_Channel = 0
|
||||
StdioData_STDOUT StdioData_Channel = 1
|
||||
StdioData_STDERR StdioData_Channel = 2
|
||||
)
|
||||
|
||||
var StdioData_Channel_name = map[int32]string{
|
||||
0: "INVALID",
|
||||
1: "STDOUT",
|
||||
2: "STDERR",
|
||||
}
|
||||
var StdioData_Channel_value = map[string]int32{
|
||||
"INVALID": 0,
|
||||
"STDOUT": 1,
|
||||
"STDERR": 2,
|
||||
}
|
||||
|
||||
func (x StdioData_Channel) String() string {
|
||||
return proto.EnumName(StdioData_Channel_name, int32(x))
|
||||
}
|
||||
func (StdioData_Channel) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0}
|
||||
}
|
||||
|
||||
// StdioData is a single chunk of stdout or stderr data that is streamed
|
||||
// from GRPCStdio.
|
||||
type StdioData struct {
|
||||
Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"`
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StdioData) Reset() { *m = StdioData{} }
|
||||
func (m *StdioData) String() string { return proto.CompactTextString(m) }
|
||||
func (*StdioData) ProtoMessage() {}
|
||||
func (*StdioData) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0}
|
||||
}
|
||||
func (m *StdioData) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StdioData.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StdioData.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *StdioData) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StdioData.Merge(dst, src)
|
||||
}
|
||||
func (m *StdioData) XXX_Size() int {
|
||||
return xxx_messageInfo_StdioData.Size(m)
|
||||
}
|
||||
func (m *StdioData) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StdioData.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StdioData proto.InternalMessageInfo
|
||||
|
||||
func (m *StdioData) GetChannel() StdioData_Channel {
|
||||
if m != nil {
|
||||
return m.Channel
|
||||
}
|
||||
return StdioData_INVALID
|
||||
}
|
||||
|
||||
func (m *StdioData) GetData() []byte {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*StdioData)(nil), "plugin.StdioData")
|
||||
proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value)
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// GRPCStdioClient is the client API for GRPCStdio service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type GRPCStdioClient interface {
|
||||
// StreamStdio returns a stream that contains all the stdout/stderr.
|
||||
// This RPC endpoint must only be called ONCE. Once stdio data is consumed
|
||||
// it is not sent again.
|
||||
//
|
||||
// Callers should connect early to prevent blocking on the plugin process.
|
||||
StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error)
|
||||
}
|
||||
|
||||
type gRPCStdioClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient {
|
||||
return &gRPCStdioClient{cc}
|
||||
}
|
||||
|
||||
func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &gRPCStdioStreamStdioClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type GRPCStdio_StreamStdioClient interface {
|
||||
Recv() (*StdioData, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type gRPCStdioStreamStdioClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) {
|
||||
m := new(StdioData)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// GRPCStdioServer is the server API for GRPCStdio service.
|
||||
type GRPCStdioServer interface {
|
||||
// StreamStdio returns a stream that contains all the stdout/stderr.
|
||||
// This RPC endpoint must only be called ONCE. Once stdio data is consumed
|
||||
// it is not sent again.
|
||||
//
|
||||
// Callers should connect early to prevent blocking on the plugin process.
|
||||
StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error
|
||||
}
|
||||
|
||||
func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) {
|
||||
s.RegisterService(&_GRPCStdio_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(empty.Empty)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream})
|
||||
}
|
||||
|
||||
type GRPCStdio_StreamStdioServer interface {
|
||||
Send(*StdioData) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type gRPCStdioStreamStdioServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
var _GRPCStdio_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "plugin.GRPCStdio",
|
||||
HandlerType: (*GRPCStdioServer)(nil),
|
||||
Methods: []grpc.MethodDesc{},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "StreamStdio",
|
||||
Handler: _GRPCStdio_StreamStdio_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "grpc_stdio.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc_stdio.proto", fileDescriptor_grpc_stdio_db2934322ca63bd5) }
|
||||
|
||||
var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{
|
||||
// 221 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48,
|
||||
0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8,
|
||||
0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26,
|
||||
0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06,
|
||||
0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5,
|
||||
0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1,
|
||||
0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60,
|
||||
0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9,
|
||||
0xd8, 0x3d, 0xfd, 0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43,
|
||||
0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e,
|
||||
0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89,
|
||||
0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25,
|
||||
0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01,
|
||||
0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 0x01, 0x00, 0x00,
|
||||
}
|
30
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto
generated
vendored
Normal file
30
vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto
generated
vendored
Normal file
@ -0,0 +1,30 @@
syntax = "proto3";
package plugin;
option go_package = "plugin";

import "google/protobuf/empty.proto";

// GRPCStdio is a service that is automatically run by the plugin process
// to stream any stdout/err data so that it can be mirrored on the plugin
// host side.
service GRPCStdio {
  // StreamStdio returns a stream that contains all the stdout/stderr.
  // This RPC endpoint must only be called ONCE. Once stdio data is consumed
  // it is not sent again.
  //
  // Callers should connect early to prevent blocking on the plugin process.
  rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData);
}

// StdioData is a single chunk of stdout or stderr data that is streamed
// from GRPCStdio.
message StdioData {
  enum Channel {
    INVALID = 0;
    STDOUT = 1;
    STDERR = 2;
  }

  Channel channel = 1;
  bytes data = 2;
}
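On the host side, the generated client shown earlier (NewGRPCStdioClient / StreamStdio) consumes this service roughly as sketched below. This assumes an established *grpc.ClientConn to the plugin and imports for context, os, github.com/golang/protobuf/ptypes/empty, and this internal plugin package (which is only importable from inside go-plugin); go-plugin performs this wiring itself, so the sketch only illustrates the contract.

// mirrorStdio drains the plugin's stdio stream onto the host's own
// stdout/stderr. Illustrative sketch, not a go-plugin public API.
func mirrorStdio(ctx context.Context, conn *grpc.ClientConn) error {
	client := plugin.NewGRPCStdioClient(conn)
	stream, err := client.StreamStdio(ctx, &empty.Empty{})
	if err != nil {
		return err
	}
	for {
		data, err := stream.Recv()
		if err != nil {
			return err // io.EOF once the plugin process exits
		}
		switch data.Channel {
		case plugin.StdioData_STDOUT:
			os.Stdout.Write(data.Data)
		case plugin.StdioData_STDERR:
			os.Stderr.Write(data.Data)
		}
	}
}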
77
vendor/github.com/hashicorp/go-plugin/server.go
generated
vendored
77
vendor/github.com/hashicorp/go-plugin/server.go
generated
vendored
@ -15,10 +15,8 @@ import (
	"sort"
	"strconv"
	"strings"
	"sync/atomic"

	"github.com/hashicorp/go-hclog"

	hclog "github.com/hashicorp/go-hclog"
	"google.golang.org/grpc"
)

@ -85,6 +83,21 @@ type ServeConfig struct {
	// Logger is used to pass a logger into the server. If none is provided the
	// server will create a default logger.
	Logger hclog.Logger

	// Listener is the listener that the plugin server will listen for
	// plugin connections. THIS DOES NOT NORMALLY NEED TO BE SET. If this
	// isn't set, the plugin chooses a listener. This is exposed in case you
	// want to carefully control how a plugin is served.
	//
	// If TLSProvider is set, this listener will be wrapped with a TLS
	// listener. If you want to manually control TLS you should set
	// TLSProvider to nil but be aware that the client side will need to be
	// manually made aware of the certificate used.
	//
	// Serve will take ownership of this listener and close it when it is
	// complete. The caller should NOT close this listener once `Serve` is
	// called.
	Listener net.Listener
}

// protocolVersion determines the protocol version and plugin set to be used by
@ -173,13 +186,28 @@ func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) {
//
// This is the method that plugins should call in their main() functions.
func Serve(opts *ServeConfig) {
	// We use this to trigger an `os.Exit` so that we can execute our other
	// deferred functions.
	exitCode := -1
	defer func() {
		if exitCode >= 0 {
			os.Exit(exitCode)
		}
	}()

	// If our listener is not nil, then we want to close that on exit.
	if opts.Listener != nil {
		defer opts.Listener.Close()
	}

	// Validate the handshake config
	if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
		fmt.Fprintf(os.Stderr,
			"Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
				"key or value was set. Please notify the plugin author and report\n"+
				"this as a bug.\n")
		os.Exit(1)
		exitCode = 1
		return
	}

	// First check the cookie
@ -188,7 +216,8 @@ func Serve(opts *ServeConfig) {
			"This binary is a plugin. These are not meant to be executed directly.\n"+
				"Please execute the program that consumes these plugins, which will\n"+
				"load any plugins automatically\n")
		os.Exit(1)
		exitCode = 1
		return
	}

	// negotiate the version and plugins
@ -221,18 +250,21 @@ func Serve(opts *ServeConfig) {
		os.Exit(1)
	}

	// Register a listener so we can accept a connection
	listener, err := serverListener()
	if err != nil {
		logger.Error("plugin init error", "error", err)
		return
	}
	listener := opts.Listener
	if listener == nil {
		// Register a listener so we can accept a connection
		listener, err = serverListener()
		if err != nil {
			logger.Error("plugin init error", "error", err)
			return
		}

	// Close the listener on return. We wrap this in a func() on purpose
	// because the "listener" reference may change to TLS.
	defer func() {
		listener.Close()
	}()
		// Close the listener on return. We wrap this in a func() on purpose
		// because the "listener" reference may change to TLS.
		defer func() {
			listener.Close()
		}()
	}

	var tlsConfig *tls.Config
	if opts.TLSProvider != nil {
@ -337,11 +369,11 @@ func Serve(opts *ServeConfig) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	go func() {
		var count int32 = 0
		count := 0
		for {
			<-ch
			newCount := atomic.AddInt32(&count, 1)
			logger.Debug("plugin received interrupt signal, ignoring", "count", newCount)
			count++
			logger.Trace("plugin received interrupt signal, ignoring", "count", count)
		}
	}()

@ -351,6 +383,11 @@ func Serve(opts *ServeConfig) {

	// Accept connections and wait for completion
	go server.Serve(listener)

	// Note that given the documentation of Serve we should probably be
	// setting exitCode = 0 and using os.Exit here. That's how it used to
	// work before extracting this library. However, for years we've done
	// this so we'll keep this functionality.
	<-doneCh
}

@ -390,7 +427,7 @@ func serverListener_tcp() (net.Listener, error) {
	}

	if minPort > maxPort {
		return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
		return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
	}

	for port := minPort; port <= maxPort; port++ {

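The new ServeConfig.Listener field above lets a plugin author hand Serve a pre-built listener instead of letting go-plugin pick one. A minimal sketch of using it, assuming handshakeConfig and pluginMap are defined elsewhere by the plugin author and plugin refers to the github.com/hashicorp/go-plugin package; the field is optional and normally left nil.

// Sketch only: Serve takes ownership of ln and closes it on exit.
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
	log.Fatal(err)
}

plugin.Serve(&plugin.ServeConfig{
	HandshakeConfig: handshakeConfig, // assumed defined by the plugin author
	Plugins:         pluginMap,       // assumed defined by the plugin author
	GRPCServer:      plugin.DefaultGRPCServer,
	Listener:        ln, // new optional field; leave nil for the default behavior
})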
2
vendor/github.com/hashicorp/go-plugin/testing.go
generated
vendored
2
vendor/github.com/hashicorp/go-plugin/testing.go
generated
vendored
@ -7,9 +7,9 @@ import (
	"net"
	"net/rpc"

	"github.com/mitchellh/go-testing-interface"
	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-plugin/internal/plugin"
	"github.com/mitchellh/go-testing-interface"
	"google.golang.org/grpc"
)

26
vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
generated
vendored
26
vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
generated
vendored
@ -1,12 +1,15 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/rpc/status.proto
|
||||
|
||||
package status // import "google.golang.org/genproto/googleapis/rpc/status"
|
||||
package status
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import any "github.com/golang/protobuf/ptypes/any"
|
||||
import (
|
||||
fmt "fmt"
|
||||
math "math"
|
||||
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
any "github.com/golang/protobuf/ptypes/any"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
@ -17,7 +20,7 @@ var _ = math.Inf
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// The `Status` type defines a logical error model that is suitable for
|
||||
// different programming environments, including REST APIs and RPC APIs. It is
|
||||
@ -93,16 +96,17 @@ func (m *Status) Reset() { *m = Status{} }
|
||||
func (m *Status) String() string { return proto.CompactTextString(m) }
|
||||
func (*Status) ProtoMessage() {}
|
||||
func (*Status) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_status_ced6ddf76350620b, []int{0}
|
||||
return fileDescriptor_24d244abaf643bfe, []int{0}
|
||||
}
|
||||
|
||||
func (m *Status) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Status.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *Status) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Status.Merge(dst, src)
|
||||
func (m *Status) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Status.Merge(m, src)
|
||||
}
|
||||
func (m *Status) XXX_Size() int {
|
||||
return xxx_messageInfo_Status.Size(m)
|
||||
@ -138,9 +142,9 @@ func init() {
|
||||
proto.RegisterType((*Status)(nil), "google.rpc.Status")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_ced6ddf76350620b) }
|
||||
func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) }
|
||||
|
||||
var fileDescriptor_status_ced6ddf76350620b = []byte{
|
||||
var fileDescriptor_24d244abaf643bfe = []byte{
|
||||
// 209 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28,
|
||||
|
3
vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
generated
vendored
Normal file
3
vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
## Community Code of Conduct
|
||||
|
||||
gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
4
vendor/google.golang.org/grpc/CONTRIBUTING.md
generated
vendored
4
vendor/google.golang.org/grpc/CONTRIBUTING.md
generated
vendored
@ -1,6 +1,8 @@
|
||||
# How to contribute
|
||||
|
||||
We definitely welcome your patches and contributions to gRPC!
|
||||
We definitely welcome your patches and contributions to gRPC! Please read the gRPC
|
||||
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
|
||||
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
|
||||
|
||||
If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
|
||||
|
||||
|
1
vendor/google.golang.org/grpc/GOVERNANCE.md
generated
vendored
Normal file
1
vendor/google.golang.org/grpc/GOVERNANCE.md
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
|
27
vendor/google.golang.org/grpc/MAINTAINERS.md
generated
vendored
Normal file
27
vendor/google.golang.org/grpc/MAINTAINERS.md
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
This page lists all active maintainers of this repository. If you were a
|
||||
maintainer and would like to add your name to the Emeritus list, please send us a
|
||||
PR.
|
||||
|
||||
See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
|
||||
for governance guidelines and how to become a maintainer.
|
||||
See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
|
||||
for general contribution guidelines.
|
||||
|
||||
## Maintainers (in alphabetical order)
|
||||
- [canguler](https://github.com/canguler), Google LLC
|
||||
- [cesarghali](https://github.com/cesarghali), Google LLC
|
||||
- [dfawley](https://github.com/dfawley), Google LLC
|
||||
- [easwars](https://github.com/easwars), Google LLC
|
||||
- [jadekler](https://github.com/jadekler), Google LLC
|
||||
- [menghanl](https://github.com/menghanl), Google LLC
|
||||
- [srini100](https://github.com/srini100), Google LLC
|
||||
|
||||
## Emeritus Maintainers (in alphabetical order)
|
||||
- [adelez](https://github.com/adelez), Google LLC
|
||||
- [iamqizhao](https://github.com/iamqizhao), Google LLC
|
||||
- [jtattermusch](https://github.com/jtattermusch), Google LLC
|
||||
- [lyuxuan](https://github.com/lyuxuan), Google LLC
|
||||
- [makmukhi](https://github.com/makmukhi), Google LLC
|
||||
- [matt-kwong](https://github.com/matt-kwong), Google LLC
|
||||
- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
|
||||
- [yongni](https://github.com/yongni), Google LLC
|
3
vendor/google.golang.org/grpc/Makefile
generated
vendored
3
vendor/google.golang.org/grpc/Makefile
generated
vendored
@ -19,6 +19,9 @@ proto:
|
||||
test: testdeps
|
||||
go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
|
||||
|
||||
testsubmodule: testdeps
|
||||
cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
|
||||
|
||||
testappengine: testappenginedeps
|
||||
goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
|
||||
|
||||
|
70
vendor/google.golang.org/grpc/attributes/attributes.go
generated
vendored
Normal file
70
vendor/google.golang.org/grpc/attributes/attributes.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package attributes defines a generic key/value store used in various gRPC
|
||||
// components.
|
||||
//
|
||||
// All APIs in this package are EXPERIMENTAL.
|
||||
package attributes
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Attributes is an immutable struct for storing and retrieving generic
|
||||
// key/value pairs. Keys must be hashable, and users should define their own
|
||||
// types for keys.
|
||||
type Attributes struct {
|
||||
m map[interface{}]interface{}
|
||||
}
|
||||
|
||||
// New returns a new Attributes containing all key/value pairs in kvs. If the
|
||||
// same key appears multiple times, the last value overwrites all previous
|
||||
// values for that key. Panics if len(kvs) is not even.
|
||||
func New(kvs ...interface{}) *Attributes {
|
||||
if len(kvs)%2 != 0 {
|
||||
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
|
||||
}
|
||||
a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
|
||||
for i := 0; i < len(kvs)/2; i++ {
|
||||
a.m[kvs[i*2]] = kvs[i*2+1]
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// WithValues returns a new Attributes containing all key/value pairs in a and
|
||||
// kvs. Panics if len(kvs) is not even. If the same key appears multiple
|
||||
// times, the last value overwrites all previous values for that key. To
|
||||
// remove an existing key, use a nil value.
|
||||
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
|
||||
if len(kvs)%2 != 0 {
|
||||
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
|
||||
}
|
||||
n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
|
||||
for k, v := range a.m {
|
||||
n.m[k] = v
|
||||
}
|
||||
for i := 0; i < len(kvs)/2; i++ {
|
||||
n.m[kvs[i*2]] = kvs[i*2+1]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Value returns the value associated with these attributes for key, or nil if
|
||||
// no value is associated with key.
|
||||
func (a *Attributes) Value(key interface{}) interface{} {
|
||||
return a.m[key]
|
||||
}
|
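The new attributes package above is a tiny immutable key/value store. A short usage sketch follows; the regionKey type and the values are illustrative, and the package itself is marked EXPERIMENTAL in its docs.

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// regionKey is an illustrative key type; callers define their own key types
// so that keys from different packages cannot collide.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "eu-west-1")

	// WithValues returns a new Attributes value; a itself is unchanged.
	b := a.WithValues(regionKey{}, "us-east-1")

	fmt.Println(a.Value(regionKey{})) // eu-west-1
	fmt.Println(b.Value(regionKey{})) // us-east-1
}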
20
vendor/google.golang.org/grpc/backoff.go
generated
vendored
20
vendor/google.golang.org/grpc/backoff.go
generated
vendored
@ -23,16 +23,36 @@ package grpc
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/backoff"
|
||||
)
|
||||
|
||||
// DefaultBackoffConfig uses values specified for backoff in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
|
||||
var DefaultBackoffConfig = BackoffConfig{
|
||||
MaxDelay: 120 * time.Second,
|
||||
}
|
||||
|
||||
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
|
||||
//
|
||||
// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
|
||||
type BackoffConfig struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
}
|
||||
|
||||
// ConnectParams defines the parameters for connecting and retrying. Users are
|
||||
// encouraged to use this instead of the BackoffConfig type defined above. See
|
||||
// here for more details:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type ConnectParams struct {
|
||||
// Backoff specifies the configuration options for connection backoff.
|
||||
Backoff backoff.Config
|
||||
// MinConnectTimeout is the minimum amount of time we are willing to give a
|
||||
// connection to complete.
|
||||
MinConnectTimeout time.Duration
|
||||
}
|
||||
|
52
vendor/google.golang.org/grpc/backoff/backoff.go
generated
vendored
Normal file
52
vendor/google.golang.org/grpc/backoff/backoff.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package backoff provides configuration options for backoff.
|
||||
//
|
||||
// More details can be found at:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
// Config defines the configuration options for backoff.
|
||||
type Config struct {
|
||||
// BaseDelay is the amount of time to backoff after the first failure.
|
||||
BaseDelay time.Duration
|
||||
// Multiplier is the factor with which to multiply backoffs after a
|
||||
// failed retry. Should ideally be greater than 1.
|
||||
Multiplier float64
|
||||
// Jitter is the factor with which backoffs are randomized.
|
||||
Jitter float64
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
}
|
||||
|
||||
// DefaultConfig is a backoff configuration with the default values specfied
|
||||
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
//
|
||||
// This should be useful for callers who want to configure backoff with
|
||||
// non-default values only for a subset of the options.
|
||||
var DefaultConfig = Config{
|
||||
BaseDelay: 1.0 * time.Second,
|
||||
Multiplier: 1.6,
|
||||
Jitter: 0.2,
|
||||
MaxDelay: 120 * time.Second,
|
||||
}
|
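ConnectParams together with the new backoff package above replaces the deprecated BackoffConfig/DefaultBackoffConfig pair. A dialing sketch against grpc v1.27 follows; the target address is illustrative and WithConnectParams is still marked experimental upstream.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the documented defaults and only tighten MaxDelay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051", // illustrative target
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}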
120
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
120
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
@ -117,6 +117,15 @@ type NewSubConnOptions struct {
|
||||
HealthCheckEnabled bool
|
||||
}
|
||||
|
||||
// State contains the balancer's state relevant to the gRPC ClientConn.
|
||||
type State struct {
|
||||
// State contains the connectivity state of the balancer, which is used to
|
||||
// determine the state of the ClientConn.
|
||||
ConnectivityState connectivity.State
|
||||
// Picker is used to choose connections (SubConns) for RPCs.
|
||||
Picker V2Picker
|
||||
}
|
||||
|
||||
// ClientConn represents a gRPC ClientConn.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
@ -137,10 +146,19 @@ type ClientConn interface {
|
||||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
||||
// on the new picker to pick new SubConn.
|
||||
//
|
||||
// Deprecated: use UpdateState instead
|
||||
UpdateBalancerState(s connectivity.State, p Picker)
|
||||
|
||||
// UpdateState notifies gRPC that the balancer's internal state has
|
||||
// changed.
|
||||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
||||
// on the new picker to pick new SubConns.
|
||||
UpdateState(State)
|
||||
|
||||
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
||||
ResolveNow(resolver.ResolveNowOption)
|
||||
ResolveNow(resolver.ResolveNowOptions)
|
||||
|
||||
// Target returns the dial target for this ClientConn.
|
||||
//
|
||||
@ -185,11 +203,14 @@ type ConfigParser interface {
|
||||
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
|
||||
}
|
||||
|
||||
// PickOptions contains addition information for the Pick operation.
|
||||
type PickOptions struct {
|
||||
// PickInfo contains additional information for the Pick operation.
|
||||
type PickInfo struct {
|
||||
// FullMethodName is the method name that NewClientStream() is called
|
||||
// with. The canonical format is /service/Method.
|
||||
FullMethodName string
|
||||
// Ctx is the RPC's context, and may contain relevant RPC-level information
|
||||
// like the outgoing header metadata.
|
||||
Ctx context.Context
|
||||
}
|
||||
|
||||
// DoneInfo contains additional information for done.
|
||||
@ -215,7 +236,7 @@ var (
|
||||
ErrNoSubConnAvailable = errors.New("no SubConn is available")
|
||||
// ErrTransientFailure indicates all SubConns are in TransientFailure.
|
||||
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
|
||||
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
|
||||
ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
|
||||
)
|
||||
|
||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
@ -223,6 +244,8 @@ var (
|
||||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||
//
|
||||
// Deprecated: use V2Picker instead
|
||||
type Picker interface {
|
||||
// Pick returns the SubConn to be used to send the RPC.
|
||||
// The returned SubConn must be one returned by NewSubConn().
|
||||
@ -243,18 +266,76 @@ type Picker interface {
|
||||
//
|
||||
// If the returned error is not nil:
|
||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
|
||||
// - If the error is ErrTransientFailure:
|
||||
// - If the error is ErrTransientFailure or implements IsTransientFailure()
|
||||
// bool, returning true:
|
||||
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
|
||||
// is called to pick again;
|
||||
// - Otherwise, RPC will fail with unavailable error.
|
||||
// - Else (error is other non-nil error):
|
||||
// - The RPC will fail with unavailable error.
|
||||
// - The RPC will fail with the error's status code, or Unknown if it is
|
||||
// not a status error.
|
||||
//
|
||||
// The returned done() function will be called once the rpc has finished,
|
||||
// with the final status of that RPC. If the SubConn returned is not a
|
||||
// valid SubConn type, done may not be called. done may be nil if balancer
|
||||
// doesn't care about the RPC status.
|
||||
Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
|
||||
Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
|
||||
}
|
||||
|
||||
// PickResult contains information related to a connection chosen for an RPC.
|
||||
type PickResult struct {
|
||||
// SubConn is the connection to use for this pick, if its state is Ready.
|
||||
// If the state is not Ready, gRPC will block the RPC until a new Picker is
|
||||
// provided by the balancer (using ClientConn.UpdateState). The SubConn
|
||||
// must be one returned by ClientConn.NewSubConn.
|
||||
SubConn SubConn
|
||||
|
||||
// Done is called when the RPC is completed. If the SubConn is not ready,
|
||||
// this will be called with a nil parameter. If the SubConn is not a valid
|
||||
// type, Done may not be called. May be nil if the balancer does not wish
|
||||
// to be notified when the RPC completes.
|
||||
Done func(DoneInfo)
|
||||
}
|
||||
|
||||
type transientFailureError struct {
|
||||
error
|
||||
}
|
||||
|
||||
func (e *transientFailureError) IsTransientFailure() bool { return true }
|
||||
|
||||
// TransientFailureError wraps err in an error implementing
|
||||
// IsTransientFailure() bool, returning true.
|
||||
func TransientFailureError(err error) error {
|
||||
return &transientFailureError{error: err}
|
||||
}
|
||||
|
||||
// V2Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
||||
type V2Picker interface {
|
||||
// Pick returns the connection to use for this RPC and related information.
|
||||
//
|
||||
// Pick should not block. If the balancer needs to do I/O or any blocking
|
||||
// or time-consuming work to service this call, it should return
|
||||
// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
|
||||
// the Picker is updated (using ClientConn.UpdateState).
|
||||
//
|
||||
// If an error is returned:
|
||||
//
|
||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
|
||||
// Picker is provided by the balancer (using ClientConn.UpdateState).
|
||||
//
|
||||
// - If the error implements IsTransientFailure() bool, returning true,
|
||||
// wait for ready RPCs will wait, but non-wait for ready RPCs will be
|
||||
// terminated with this error's Error() string and status code
|
||||
// Unavailable.
|
||||
//
|
||||
// - Any other errors terminate all RPCs with the code and message
|
||||
// provided. If the error is not a status error, it will be converted by
|
||||
// gRPC to a status error with code Unknown.
|
||||
Pick(info PickInfo) (PickResult, error)
|
||||
}
|
||||
|
||||
// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
|
||||
@ -292,8 +373,11 @@ type Balancer interface {
|
||||
|
||||
// SubConnState describes the state of a SubConn.
|
||||
type SubConnState struct {
|
||||
// ConnectivityState is the connectivity state of the SubConn.
|
||||
ConnectivityState connectivity.State
|
||||
// TODO: add last connection error
|
||||
// ConnectionError is set if the ConnectivityState is TransientFailure,
|
||||
// describing the reason the SubConn failed. Otherwise, it is nil.
|
||||
ConnectionError error
|
||||
}
|
||||
|
||||
// ClientConnState describes the state of a ClientConn relevant to the
|
||||
@ -305,14 +389,23 @@ type ClientConnState struct {
|
||||
BalancerConfig serviceconfig.LoadBalancingConfig
|
||||
}
|
||||
|
||||
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
|
||||
// problem with the provided name resolver data.
|
||||
var ErrBadResolverState = errors.New("bad resolver state")
|
||||
|
||||
// V2Balancer is defined for documentation purposes. If a Balancer also
|
||||
// implements V2Balancer, its UpdateClientConnState method will be called
|
||||
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
|
||||
// instead of HandleSubConnStateChange.
|
||||
type V2Balancer interface {
|
||||
// UpdateClientConnState is called by gRPC when the state of the ClientConn
|
||||
// changes.
|
||||
UpdateClientConnState(ClientConnState)
|
||||
// changes. If the error returned is ErrBadResolverState, the ClientConn
|
||||
// will begin calling ResolveNow on the active name resolver with
|
||||
// exponential backoff until a subsequent call to UpdateClientConnState
|
||||
// returns a nil error. Any other errors are currently ignored.
|
||||
UpdateClientConnState(ClientConnState) error
|
||||
// ResolverError is called by gRPC when the name resolver reports an error.
|
||||
ResolverError(error)
|
||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||
// changes.
|
||||
UpdateSubConnState(SubConn, SubConnState)
|
||||
@ -326,9 +419,8 @@ type V2Balancer interface {
|
||||
//
|
||||
// It's not thread safe.
|
||||
type ConnectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
numTransientFailure uint64 // Number of addrConns in transientFailure.
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
}
|
||||
|
||||
// RecordTransition records state change happening in subConn and based on that
|
||||
@ -348,8 +440,6 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
case connectivity.TransientFailure:
|
||||
cse.numTransientFailure += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
|
142
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
142
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
@ -20,6 +20,8 @@ package base
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
@ -28,34 +30,44 @@ import (
|
||||
)
|
||||
|
||||
type baseBuilder struct {
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
config Config
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
v2PickerBuilder V2PickerBuilder
|
||||
config Config
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
return &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
bal := &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
v2PickerBuilder: bb.v2PickerBuilder,
|
||||
|
||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
// Initialize picker to a picker that always return
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateBalancerState with this picker.
|
||||
picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
|
||||
config: bb.config,
|
||||
config: bb.config,
|
||||
}
|
||||
// Initialize picker to a picker that always returns
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateState with this picker.
|
||||
if bb.pickerBuilder != nil {
|
||||
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||
} else {
|
||||
bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
|
||||
}
|
||||
return bal
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Name() string {
|
||||
return bb.name
|
||||
}
|
||||
|
||||
var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
|
||||
|
||||
type baseBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
v2PickerBuilder V2PickerBuilder
|
||||
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
@ -63,19 +75,50 @@ type baseBalancer struct {
|
||||
subConns map[resolver.Address]balancer.SubConn
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
v2Picker balancer.V2Picker
|
||||
config Config
|
||||
|
||||
resolverErr error // the last error reported by the resolver; cleared on successful resolution
|
||||
connErr error // the last connection error; cleared upon leaving TransientFailure
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
|
||||
func (b *baseBalancer) ResolverError(err error) {
|
||||
b.resolverErr = err
|
||||
if len(b.subConns) == 0 {
|
||||
b.state = connectivity.TransientFailure
|
||||
}
|
||||
if b.state != connectivity.TransientFailure {
|
||||
// The picker will not change since the balancer does not currently
|
||||
// report an error.
|
||||
return
|
||||
}
|
||||
b.regeneratePicker()
|
||||
if b.picker != nil {
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
} else {
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: b.state,
|
||||
Picker: b.v2Picker,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
|
||||
// TODO: handle s.ResolverState.ServiceConfig?
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
|
||||
}
|
||||
if len(s.ResolverState.Addresses) == 0 {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
// Successful resolution; clear resolver error and ensure we return nil.
|
||||
b.resolverErr = nil
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
for _, a := range s.ResolverState.Addresses {
|
||||
@ -101,26 +144,57 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) {
|
||||
// The entry will be deleted in HandleSubConnStateChange.
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mergeErrors builds an error from the last connection error and the last
|
||||
// resolver error. Must only be called if b.state is TransientFailure.
|
||||
func (b *baseBalancer) mergeErrors() error {
|
||||
// connErr must always be non-nil unless there are no SubConns, in which
|
||||
// case resolverErr must be non-nil.
|
||||
if b.connErr == nil {
|
||||
return fmt.Errorf("last resolver error: %v", b.resolverErr)
|
||||
}
|
||||
if b.resolverErr == nil {
|
||||
return fmt.Errorf("last connection error: %v", b.connErr)
|
||||
}
|
||||
return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
|
||||
}
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||||
// from it. The picker is
|
||||
// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
|
||||
// - errPicker if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
||||
if b.pickerBuilder != nil {
|
||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
||||
} else {
|
||||
b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
|
||||
}
|
||||
return
|
||||
}
|
||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
||||
if b.pickerBuilder != nil {
|
||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[addr] = sc
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[addr] = sc
|
||||
}
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(readySCs)
|
||||
} else {
|
||||
readySCs := make(map[balancer.SubConn]SubConnInfo)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[sc] = SubConnInfo{Address: addr}
|
||||
}
|
||||
}
|
||||
b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(readySCs)
|
||||
}
|
||||
|
||||
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
@ -152,6 +226,9 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||
oldAggrState := b.state
|
||||
b.state = b.csEvltr.RecordTransition(oldS, s)
|
||||
|
||||
// Set or clear the last connection error accordingly.
|
||||
b.connErr = state.ConnectionError
|
||||
|
||||
// Regenerate picker when one of the following happens:
|
||||
// - this sc became ready from not-ready
|
||||
// - this sc became not-ready from ready
|
||||
@ -162,7 +239,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||
b.regeneratePicker()
|
||||
}
|
||||
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
if b.picker != nil {
|
||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
||||
} else {
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
|
||||
}
|
||||
}
|
||||
|
||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||
@ -179,6 +260,19 @@ type errPicker struct {
|
||||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
return nil, nil, p.err
|
||||
}
|
||||
|
||||
// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
|
||||
func NewErrPickerV2(err error) balancer.V2Picker {
|
||||
return &errPickerV2{err: err}
|
||||
}
|
||||
|
||||
type errPickerV2 struct {
|
||||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
return balancer.PickResult{}, p.err
|
||||
}
|
||||
|
29 changes: vendor/google.golang.org/grpc/balancer/base/base.go (generated, vendored)
@ -42,6 +42,26 @@ type PickerBuilder interface {
|
||||
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
|
||||
}
|
||||
|
||||
// V2PickerBuilder creates balancer.V2Picker.
|
||||
type V2PickerBuilder interface {
|
||||
// Build returns a picker that will be used by gRPC to pick a SubConn.
|
||||
Build(info PickerBuildInfo) balancer.V2Picker
|
||||
}
|
||||
|
||||
// PickerBuildInfo contains information needed by the picker builder to
|
||||
// construct a picker.
|
||||
type PickerBuildInfo struct {
|
||||
// ReadySCs is a map from all ready SubConns to the Addresses used to
|
||||
// create them.
|
||||
ReadySCs map[balancer.SubConn]SubConnInfo
|
||||
}
|
||||
|
||||
// SubConnInfo contains information about a SubConn created by the base
|
||||
// balancer.
|
||||
type SubConnInfo struct {
|
||||
Address resolver.Address // the address used to create this SubConn
|
||||
}
|
||||
|
||||
// NewBalancerBuilder returns a balancer builder. The balancers
|
||||
// built by this builder will use the picker builder to build pickers.
|
||||
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
|
||||
@ -62,3 +82,12 @@ func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config)
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
|
||||
func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
|
||||
return &baseBuilder{
|
||||
name: name,
|
||||
v2PickerBuilder: pb,
|
||||
config: config,
|
||||
}
|
||||
}
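
For orientation, here is a minimal sketch of how the V2 API above is meant to be used: a custom V2PickerBuilder registered through base.NewBalancerBuilderV2. The package name, the "first_ready_example" policy name, and the firstReady* types are hypothetical; only the base and balancer APIs shown in this file are assumed.

package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// firstReadyPickerBuilder builds a picker from the ready SubConns handed over
// in PickerBuildInfo and always picks the first one it finds.
type firstReadyPickerBuilder struct{}

func (*firstReadyPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
	for sc := range info.ReadySCs {
		return &firstReadyPicker{sc: sc}
	}
	// No ready SubConns yet: fall back to the error picker so RPCs wait or fail.
	return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
}

// firstReadyPicker returns the same SubConn for every RPC.
type firstReadyPicker struct {
	sc balancer.SubConn
}

func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

func init() {
	// Register under a hypothetical policy name so a service config can select it.
	balancer.Register(base.NewBalancerBuilderV2("first_ready_example", &firstReadyPickerBuilder{}, base.Config{HealthCheck: true}))
}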
|
||||
|
18 changes: vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go (generated, vendored)
@ -22,14 +22,12 @@
|
||||
package roundrobin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// Name is the name of round_robin balancer.
|
||||
@ -37,7 +35,7 @@ const Name = "round_robin"
|
||||
|
||||
// newBuilder creates a new roundrobin balancer builder.
|
||||
func newBuilder() balancer.Builder {
|
||||
	return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
||||
}
|
||||
|
||||
func init() {
|
||||
@ -46,13 +44,13 @@ func init() {
|
||||
|
||||
type rrPickerBuilder struct{}
|
||||
|
||||
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
	grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
	if len(info.ReadySCs) == 0 {
		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
	}
	var scs []balancer.SubConn
	for sc := range info.ReadySCs {
|
||||
scs = append(scs, sc)
|
||||
}
|
||||
return &rrPicker{
|
||||
@ -74,10 +72,10 @@ type rrPicker struct {
|
||||
next int
|
||||
}
|
||||
|
||||
func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
|
||||
p.mu.Lock()
|
||||
sc := p.subConns[p.next]
|
||||
p.next = (p.next + 1) % len(p.subConns)
|
||||
p.mu.Unlock()
|
||||
	return balancer.PickResult{SubConn: sc}, nil
|
||||
}
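
For reference, a client typically opts into this balancer through the service config. A minimal dial sketch follows; the target address and the service-config JSON are illustrative, not part of this change.

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // ensures round_robin is registered
)

func main() {
	conn, err := grpc.Dial(
		"dns:///localhost:50051", // illustrative target; DNS may return several addresses
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
}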
|
||||
|
163 changes: vendor/google.golang.org/grpc/balancer_conn_wrappers.go (generated, vendored)
@ -25,6 +25,8 @@ import (
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
@ -32,64 +34,17 @@ import (
|
||||
type scStateUpdate struct {
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
}
|
||||
|
||||
// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple.
|
||||
// TODO make a general purpose buffer that uses interface{}.
|
||||
type scStateUpdateBuffer struct {
|
||||
c chan *scStateUpdate
|
||||
mu sync.Mutex
|
||||
backlog []*scStateUpdate
|
||||
}
|
||||
|
||||
func newSCStateUpdateBuffer() *scStateUpdateBuffer {
|
||||
return &scStateUpdateBuffer{
|
||||
c: make(chan *scStateUpdate, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
}
|
||||
|
||||
func (b *scStateUpdateBuffer) load() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get returns the channel that the scStateUpdate will be sent to.
|
||||
//
|
||||
// Upon receiving, the caller should call load to send another
|
||||
// scStateChangeTuple onto the channel if there is any.
|
||||
func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
|
||||
return b.c
|
||||
err error
|
||||
}
|
||||
|
||||
// ccBalancerWrapper is a wrapper on top of cc for balancers.
|
||||
// It implements balancer.ClientConn interface.
|
||||
type ccBalancerWrapper struct {
|
||||
cc *ClientConn
|
||||
balancer balancer.Balancer
|
||||
stateChangeQueue *scStateUpdateBuffer
|
||||
ccUpdateCh chan *balancer.ClientConnState
|
||||
done chan struct{}
|
||||
cc *ClientConn
|
||||
balancerMu sync.Mutex // synchronizes calls to the balancer
|
||||
balancer balancer.Balancer
|
||||
scBuffer *buffer.Unbounded
|
||||
done *grpcsync.Event
|
||||
|
||||
mu sync.Mutex
|
||||
subConns map[*acBalancerWrapper]struct{}
|
||||
@ -97,11 +52,10 @@ type ccBalancerWrapper struct {
|
||||
|
||||
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
ccb := &ccBalancerWrapper{
|
||||
cc: cc,
|
||||
stateChangeQueue: newSCStateUpdateBuffer(),
|
||||
ccUpdateCh: make(chan *balancer.ClientConnState, 1),
|
||||
done: make(chan struct{}),
|
||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
||||
cc: cc,
|
||||
scBuffer: buffer.NewUnbounded(),
|
||||
done: grpcsync.NewEvent(),
|
||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
||||
}
|
||||
go ccb.watcher()
|
||||
ccb.balancer = b.Build(ccb, bopts)
|
||||
@ -113,36 +67,23 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
|
||||
func (ccb *ccBalancerWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case t := <-ccb.stateChangeQueue.get():
|
||||
ccb.stateChangeQueue.load()
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
case t := <-ccb.scBuffer.Get():
|
||||
ccb.scBuffer.Load()
|
||||
if ccb.done.HasFired() {
|
||||
break
|
||||
}
|
||||
ccb.balancerMu.Lock()
|
||||
su := t.(*scStateUpdate)
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state})
|
||||
ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
|
||||
} else {
|
||||
ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
|
||||
ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
|
||||
}
|
||||
case s := <-ccb.ccUpdateCh:
|
||||
select {
|
||||
case <-ccb.done:
|
||||
ccb.balancer.Close()
|
||||
return
|
||||
default:
|
||||
}
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ub.UpdateClientConnState(*s)
|
||||
} else {
|
||||
ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil)
|
||||
}
|
||||
case <-ccb.done:
|
||||
ccb.balancerMu.Unlock()
|
||||
case <-ccb.done.Done():
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ccb.done:
|
||||
if ccb.done.HasFired() {
|
||||
ccb.balancer.Close()
|
||||
ccb.mu.Lock()
|
||||
scs := ccb.subConns
|
||||
@ -151,19 +92,17 @@ func (ccb *ccBalancerWrapper) watcher() {
|
||||
for acbw := range scs {
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
ccb.UpdateBalancerState(connectivity.Connecting, nil)
|
||||
ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
|
||||
return
|
||||
default:
|
||||
}
|
||||
ccb.cc.firstResolveEvent.Fire()
|
||||
}
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) close() {
|
||||
ccb.done.Fire()
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
// When updating addresses for a SubConn, if the address in use is not in
|
||||
// the new addresses, the old ac will be tearDown() and a new ac will be
|
||||
// created. tearDown() generates a state change with Shutdown state, we
|
||||
@ -174,30 +113,29 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
|
||||
if sc == nil {
|
||||
return
|
||||
}
|
||||
ccb.scBuffer.Put(&scStateUpdate{
|
||||
sc: sc,
|
||||
state: s,
|
||||
err: err,
|
||||
})
|
||||
}
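
The hand-rolled scStateUpdateBuffer removed above is replaced by gRPC's internal buffer.Unbounded, used here via Put, Get and Load. Since that package is internal and cannot be imported by user code, the following self-contained sketch only mirrors the same contract to show how the producer/consumer pattern works; the type and names are hypothetical.

package main

import (
	"fmt"
	"sync"
)

// unbounded mirrors the Put/Get/Load contract used by ccBalancerWrapper:
// Put never blocks, Get returns a channel carrying at most one item, and Load
// moves the next backlogged item onto that channel after each receive.
type unbounded struct {
	c       chan interface{}
	mu      sync.Mutex
	backlog []interface{}
}

func newUnbounded() *unbounded { return &unbounded{c: make(chan interface{}, 1)} }

func (b *unbounded) Put(t interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- t:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, t)
}

func (b *unbounded) Load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = nil
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func (b *unbounded) Get() <-chan interface{} { return b.c }

func main() {
	b := newUnbounded()
	for i := 0; i < 3; i++ {
		b.Put(i) // producer side, like handleSubConnStateChange calling scBuffer.Put
	}
	for i := 0; i < 3; i++ {
		v := <-b.Get()
		b.Load() // consumer side, like the watcher calling scBuffer.Load after each receive
		fmt.Println(v)
	}
}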
|
||||
|
||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) {
|
||||
if ccb.cc.curBalancerName != grpclbName {
|
||||
// Filter any grpclb addresses since we don't have the grpclb balancer.
|
||||
s := &ccs.ResolverState
|
||||
for i := 0; i < len(s.Addresses); {
|
||||
if s.Addresses[i].Type == resolver.GRPCLB {
|
||||
copy(s.Addresses[i:], s.Addresses[i+1:])
|
||||
s.Addresses = s.Addresses[:len(s.Addresses)-1]
|
||||
continue
|
||||
}
|
||||
i++
|
||||
}
|
||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||
ccb.balancerMu.Lock()
|
||||
defer ccb.balancerMu.Unlock()
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
return ub.UpdateClientConnState(*ccs)
|
||||
}
|
||||
select {
|
||||
case <-ccb.ccUpdateCh:
|
||||
default:
|
||||
ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
||||
ccb.balancerMu.Lock()
|
||||
ub.ResolverError(err)
|
||||
ccb.balancerMu.Unlock()
|
||||
}
|
||||
ccb.ccUpdateCh <- ccs
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
@ -250,7 +188,22 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc
|
||||
ccb.cc.csMgr.updateState(s)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return
|
||||
}
|
||||
// Update picker before updating state. Even though the ordering here does
|
||||
// not matter, it can lead to multiple calls of Pick in the common start-up
|
||||
// case where we wait for ready and then perform an RPC. If the picker is
|
||||
// updated later, we could call the "connecting" picker when the state is
|
||||
// updated, and then call the "ready" picker after the picker gets updated.
|
||||
ccb.cc.blockingpicker.updatePickerV2(s.Picker)
|
||||
ccb.cc.csMgr.updateState(s.ConnectivityState)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
|
||||
ccb.cc.resolveNow(o)
|
||||
}
|
||||
|
||||
|
34 changes: vendor/google.golang.org/grpc/balancer_v1_wrapper.go (generated, vendored)
@ -19,7 +19,6 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
@ -49,7 +48,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
state: connectivity.Idle,
|
||||
}
|
||||
cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
|
||||
go bw.lbWatcher()
|
||||
return bw
|
||||
}
|
||||
@ -243,7 +242,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
|
||||
if bw.state != sa {
|
||||
bw.state = sa
|
||||
}
|
||||
bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
|
||||
if s == connectivity.Shutdown {
|
||||
// Remove state for this sc.
|
||||
delete(bw.connSt, sc)
|
||||
@ -275,17 +274,17 @@ func (bw *balancerWrapper) Close() {
|
||||
|
||||
// The picker is the balancerWrapper itself.
|
||||
// It either blocks or returns error, consistent with v1 balancer Get().
|
||||
func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
|
||||
failfast := true // Default failfast is true.
|
||||
if ss, ok := rpcInfoFromContext(info.Ctx); ok {
|
||||
failfast = ss.failfast
|
||||
}
|
||||
a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
|
||||
if err != nil {
|
||||
return balancer.PickResult{}, toRPCErr(err)
|
||||
}
|
||||
if p != nil {
|
||||
result.Done = func(balancer.DoneInfo) { p() }
|
||||
defer func() {
|
||||
if err != nil {
|
||||
p()
|
||||
@ -297,38 +296,39 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
|
||||
defer bw.mu.Unlock()
|
||||
if bw.pickfirst {
|
||||
// Get the first sc in conns.
|
||||
for _, result.SubConn = range bw.conns {
	return result, nil
|
||||
}
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
var ok1 bool
result.SubConn, ok1 = bw.conns[resolver.Address{
|
||||
Addr: a.Addr,
|
||||
Type: resolver.Backend,
|
||||
ServerName: "",
|
||||
Metadata: a.Metadata,
|
||||
}]
|
||||
s, ok2 := bw.connSt[result.SubConn]
|
||||
if !ok1 || !ok2 {
|
||||
// This can only happen due to a race where Get() returned an address
|
||||
// that was subsequently removed by Notify. In this case we should
|
||||
// retry always.
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
switch s.s {
|
||||
case connectivity.Ready, connectivity.Idle:
|
||||
return result, nil
|
||||
case connectivity.Shutdown, connectivity.TransientFailure:
|
||||
// If the returned sc has been shut down or is in transient failure,
|
||||
// return error, and this RPC will fail or wait for another picker (if
|
||||
// non-failfast).
|
||||
return balancer.PickResult{}, balancer.ErrTransientFailure
|
||||
default:
|
||||
// For other states (connecting or unknown), the v1 balancer would
|
||||
// traditionally wait until ready and then issue the RPC. Returning
|
||||
// ErrNoSubConnAvailable will be a slight improvement in that it will
|
||||
// allow the balancer to choose another address in case others are
|
||||
// connected.
|
||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
||||
}
|
||||
}
|
||||
|
311 changes: vendor/google.golang.org/grpc/clientconn.go (generated, vendored)
@ -31,7 +31,7 @@ import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
@ -42,10 +42,12 @@ import (
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/resolver"
|
||||
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
|
||||
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
||||
_ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
|
||||
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
|
||||
)
|
||||
|
||||
const (
|
||||
@ -186,11 +188,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
||||
}
|
||||
|
||||
if cc.dopts.defaultServiceConfigRawJSON != nil {
|
||||
sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err)
|
||||
scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
|
||||
if scpr.Err != nil {
|
||||
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
|
||||
}
|
||||
cc.dopts.defaultServiceConfig = sc
|
||||
cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
|
||||
}
|
||||
cc.mkp = cc.dopts.copts.KeepaliveParams
|
||||
|
||||
@ -235,29 +237,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
||||
}
|
||||
}
|
||||
if cc.dopts.bs == nil {
|
||||
cc.dopts.bs = backoff.Exponential{
|
||||
MaxDelay: DefaultBackoffConfig.MaxDelay,
|
||||
cc.dopts.bs = backoff.DefaultExponential
|
||||
}
|
||||
|
||||
// Determine the resolver to use.
|
||||
cc.parsedTarget = parseTarget(cc.target)
|
||||
grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
|
||||
resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
|
||||
if resolverBuilder == nil {
|
||||
// If resolver builder is still nil, the parsed target's scheme is
|
||||
// not registered. Fallback to default resolver and set Endpoint to
|
||||
// the original target.
|
||||
grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
|
||||
cc.parsedTarget = resolver.Target{
|
||||
Scheme: resolver.GetDefaultScheme(),
|
||||
Endpoint: target,
|
||||
}
|
||||
resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
|
||||
if resolverBuilder == nil {
|
||||
return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
|
||||
}
|
||||
}
|
||||
if cc.dopts.resolverBuilder == nil {
|
||||
// Only try to parse target when resolver builder is not already set.
|
||||
cc.parsedTarget = parseTarget(cc.target)
|
||||
grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
|
||||
cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
|
||||
if cc.dopts.resolverBuilder == nil {
|
||||
// If resolver builder is still nil, the parsed target's scheme is
|
||||
// not registered. Fallback to default resolver and set Endpoint to
|
||||
// the original target.
|
||||
grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
|
||||
cc.parsedTarget = resolver.Target{
|
||||
Scheme: resolver.GetDefaultScheme(),
|
||||
Endpoint: target,
|
||||
}
|
||||
cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
|
||||
}
|
||||
} else {
|
||||
cc.parsedTarget = resolver.Target{Endpoint: target}
|
||||
}
|
||||
|
||||
creds := cc.dopts.copts.TransportCredentials
|
||||
if creds != nil && creds.Info().ServerName != "" {
|
||||
cc.authority = creds.Info().ServerName
|
||||
@ -297,14 +298,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
||||
}
|
||||
|
||||
// Build the resolver.
|
||||
rWrapper, err := newCCResolverWrapper(cc)
|
||||
rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build resolver: %v", err)
|
||||
}
|
||||
|
||||
cc.mu.Lock()
|
||||
cc.resolverWrapper = rWrapper
|
||||
cc.mu.Unlock()
|
||||
|
||||
// A blocking dial blocks until the clientConn is ready.
|
||||
if cc.dopts.block {
|
||||
for {
|
||||
@ -443,7 +444,32 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
|
||||
return csm.notifyChan
|
||||
}
|
||||
|
||||
// ClientConn represents a client connection to an RPC server.
|
||||
// ClientConnInterface defines the functions clients need to perform unary and
|
||||
// streaming RPCs. It is implemented by *ClientConn, and is only intended to
|
||||
// be referenced by generated code.
|
||||
type ClientConnInterface interface {
|
||||
// Invoke performs a unary RPC and returns after the response is received
|
||||
// into reply.
|
||||
Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error
|
||||
// NewStream begins a streaming RPC.
|
||||
NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
|
||||
}
|
||||
|
||||
// Assert *ClientConn implements ClientConnInterface.
|
||||
var _ ClientConnInterface = (*ClientConn)(nil)
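
A short sketch of code written against ClientConnInterface instead of *ClientConn, in the style of generated stubs. The method name and message types are hypothetical; real generated code would use protobuf messages.

package example

import (
	"context"

	"google.golang.org/grpc"
)

type pingRequest struct{}
type pingResponse struct{}

// ping issues a unary RPC through any ClientConnInterface implementation,
// which is what generated client stubs now accept.
func ping(ctx context.Context, cc grpc.ClientConnInterface) (*pingResponse, error) {
	out := new(pingResponse)
	if err := cc.Invoke(ctx, "/example.Pinger/Ping", &pingRequest{}, out); err != nil {
		return nil, err
	}
	return out, nil
}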
|
||||
|
||||
// ClientConn represents a virtual connection to a conceptual endpoint, to
|
||||
// perform RPCs.
|
||||
//
|
||||
// A ClientConn is free to have zero or more actual connections to the endpoint
|
||||
// based on configuration, load, etc. It is also free to determine which actual
|
||||
// endpoints to use and may change it every RPC, permitting client-side load
|
||||
// balancing.
|
||||
//
|
||||
// A ClientConn encapsulates a range of functionality including name
|
||||
// resolution, TCP connection establishment (with retries and backoff) and TLS
|
||||
// handshakes. It also handles errors on established connections by
|
||||
// re-resolving the name and reconnecting.
|
||||
type ClientConn struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
@ -532,58 +558,104 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) updateResolverState(s resolver.State) error {
|
||||
var emptyServiceConfig *ServiceConfig
|
||||
|
||||
func init() {
|
||||
cfg := parseServiceConfig("{}")
|
||||
if cfg.Err != nil {
|
||||
panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
|
||||
}
|
||||
emptyServiceConfig = cfg.Config.(*ServiceConfig)
|
||||
}
|
||||
|
||||
func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
|
||||
if cc.sc != nil {
|
||||
cc.applyServiceConfigAndBalancer(cc.sc, addrs)
|
||||
return
|
||||
}
|
||||
if cc.dopts.defaultServiceConfig != nil {
|
||||
cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
|
||||
} else {
|
||||
cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
|
||||
defer cc.firstResolveEvent.Fire()
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
// Check if the ClientConn is already closed. Some fields (e.g.
|
||||
// balancerWrapper) are set to nil when closing the ClientConn, and could
|
||||
// cause nil pointer panic if we don't have this check.
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
|
||||
if cc.dopts.defaultServiceConfig != nil && cc.sc == nil {
|
||||
cc.applyServiceConfig(cc.dopts.defaultServiceConfig)
|
||||
if err != nil {
|
||||
// May need to apply the initial service config in case the resolver
|
||||
// doesn't support service configs, or doesn't provide a service config
|
||||
// with the new addresses.
|
||||
cc.maybeApplyDefaultServiceConfig(nil)
|
||||
|
||||
if cc.balancerWrapper != nil {
|
||||
cc.balancerWrapper.resolverError(err)
|
||||
}
|
||||
|
||||
// No addresses are valid with err set; return early.
|
||||
cc.mu.Unlock()
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
|
||||
var ret error
|
||||
if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
|
||||
cc.maybeApplyDefaultServiceConfig(s.Addresses)
|
||||
// TODO: do we need to apply a failing LB policy if there is no
|
||||
// default, per the error handling design?
|
||||
} else {
|
||||
if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
|
||||
cc.applyServiceConfigAndBalancer(sc, s.Addresses)
|
||||
} else {
|
||||
ret = balancer.ErrBadResolverState
|
||||
if cc.balancerWrapper == nil {
|
||||
var err error
|
||||
if s.ServiceConfig.Err != nil {
|
||||
err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
|
||||
} else {
|
||||
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
|
||||
}
|
||||
cc.blockingpicker.updatePicker(base.NewErrPicker(err))
|
||||
cc.csMgr.updateState(connectivity.TransientFailure)
|
||||
cc.mu.Unlock()
|
||||
return ret
|
||||
}
|
||||
}
|
||||
} else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok {
|
||||
cc.applyServiceConfig(sc)
|
||||
}
|
||||
|
||||
var balCfg serviceconfig.LoadBalancingConfig
|
||||
if cc.dopts.balancerBuilder == nil {
|
||||
// Only look at balancer types and switch balancer if balancer dial
|
||||
// option is not set.
|
||||
var newBalancerName string
|
||||
if cc.sc != nil && cc.sc.lbConfig != nil {
|
||||
newBalancerName = cc.sc.lbConfig.name
|
||||
balCfg = cc.sc.lbConfig.cfg
|
||||
} else {
|
||||
var isGRPCLB bool
|
||||
for _, a := range s.Addresses {
|
||||
if a.Type == resolver.GRPCLB {
|
||||
isGRPCLB = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if isGRPCLB {
|
||||
newBalancerName = grpclbName
|
||||
} else if cc.sc != nil && cc.sc.LB != nil {
|
||||
newBalancerName = *cc.sc.LB
|
||||
} else {
|
||||
newBalancerName = PickFirstBalancerName
|
||||
}
|
||||
}
|
||||
cc.switchBalancer(newBalancerName)
|
||||
} else if cc.balancerWrapper == nil {
|
||||
// Balancer dial option was set, and this is the first time handling
|
||||
// resolved addresses. Build a balancer with dopts.balancerBuilder.
|
||||
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
|
||||
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
|
||||
if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
|
||||
balCfg = cc.sc.lbConfig.cfg
|
||||
}
|
||||
|
||||
cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
|
||||
return nil
|
||||
cbn := cc.curBalancerName
|
||||
bw := cc.balancerWrapper
|
||||
cc.mu.Unlock()
|
||||
if cbn != grpclbName {
|
||||
// Filter any grpclb addresses since we don't have the grpclb balancer.
|
||||
for i := 0; i < len(s.Addresses); {
|
||||
if s.Addresses[i].Type == resolver.GRPCLB {
|
||||
copy(s.Addresses[i:], s.Addresses[i+1:])
|
||||
s.Addresses = s.Addresses[:len(s.Addresses)-1]
|
||||
continue
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
|
||||
if ret == nil {
|
||||
ret = uccsErr // prefer ErrBadResolver state since any other error is
|
||||
// currently meaningless to the caller.
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// switchBalancer starts the switching from current balancer to the balancer
|
||||
@ -631,7 +703,7 @@ func (cc *ClientConn) switchBalancer(name string) {
|
||||
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
|
||||
}
|
||||
|
||||
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
cc.mu.Lock()
|
||||
if cc.conns == nil {
|
||||
cc.mu.Unlock()
|
||||
@ -639,7 +711,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
|
||||
}
|
||||
// TODO(bar switching) send updates to all balancer wrappers when balancer
|
||||
// gracefully switching is supported.
|
||||
cc.balancerWrapper.handleSubConnStateChange(sc, s)
|
||||
cc.balancerWrapper.handleSubConnStateChange(sc, s, err)
|
||||
cc.mu.Unlock()
|
||||
}
|
||||
|
||||
@ -736,7 +808,7 @@ func (ac *addrConn) connect() error {
|
||||
}
|
||||
// Update connectivity state within the lock to prevent subsequent or
|
||||
// concurrent calls from resetting the transport more than once.
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
ac.mu.Unlock()
|
||||
|
||||
// Start a goroutine connecting to the server asynchronously.
|
||||
@ -822,7 +894,8 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
|
||||
}
|
||||
|
||||
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
|
||||
t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
|
||||
Ctx: ctx,
|
||||
FullMethodName: method,
|
||||
})
|
||||
if err != nil {
|
||||
@ -831,10 +904,10 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
|
||||
return t, done, nil
|
||||
}
|
||||
|
||||
func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
|
||||
func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
|
||||
if sc == nil {
|
||||
// should never reach here.
|
||||
return fmt.Errorf("got nil pointer for service config")
|
||||
return
|
||||
}
|
||||
cc.sc = sc
|
||||
|
||||
@ -850,10 +923,38 @@ func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error {
|
||||
cc.retryThrottler.Store((*retryThrottler)(nil))
|
||||
}
|
||||
|
||||
return nil
|
||||
if cc.dopts.balancerBuilder == nil {
|
||||
// Only look at balancer types and switch balancer if balancer dial
|
||||
// option is not set.
|
||||
var newBalancerName string
|
||||
if cc.sc != nil && cc.sc.lbConfig != nil {
|
||||
newBalancerName = cc.sc.lbConfig.name
|
||||
} else {
|
||||
var isGRPCLB bool
|
||||
for _, a := range addrs {
|
||||
if a.Type == resolver.GRPCLB {
|
||||
isGRPCLB = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if isGRPCLB {
|
||||
newBalancerName = grpclbName
|
||||
} else if cc.sc != nil && cc.sc.LB != nil {
|
||||
newBalancerName = *cc.sc.LB
|
||||
} else {
|
||||
newBalancerName = PickFirstBalancerName
|
||||
}
|
||||
}
|
||||
cc.switchBalancer(newBalancerName)
|
||||
} else if cc.balancerWrapper == nil {
|
||||
// Balancer dial option was set, and this is the first time handling
|
||||
// resolved addresses. Build a balancer with dopts.balancerBuilder.
|
||||
cc.curBalancerName = cc.dopts.balancerBuilder.Name()
|
||||
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
|
||||
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
|
||||
cc.mu.RLock()
|
||||
r := cc.resolverWrapper
|
||||
cc.mu.RUnlock()
|
||||
@ -875,8 +976,9 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
|
||||
// This API is EXPERIMENTAL.
|
||||
func (cc *ClientConn) ResetConnectBackoff() {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
for ac := range cc.conns {
|
||||
conns := cc.conns
|
||||
cc.mu.Unlock()
|
||||
for ac := range conns {
|
||||
ac.resetConnectBackoff()
|
||||
}
|
||||
}
|
||||
@ -962,7 +1064,7 @@ type addrConn struct {
|
||||
}
|
||||
|
||||
// Note: this requires a lock on ac.mu.
|
||||
func (ac *addrConn) updateConnectivityState(s connectivity.State) {
|
||||
func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
|
||||
if ac.state == s {
|
||||
return
|
||||
}
|
||||
@ -975,7 +1077,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State) {
|
||||
Severity: channelz.CtINFO,
|
||||
})
|
||||
}
|
||||
ac.cc.handleSubConnStateChange(ac.acbw, s)
|
||||
ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
|
||||
}
|
||||
|
||||
// adjustParams updates parameters used to create transports upon
|
||||
@ -995,7 +1097,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
|
||||
func (ac *addrConn) resetTransport() {
|
||||
for i := 0; ; i++ {
|
||||
if i > 0 {
|
||||
ac.cc.resolveNow(resolver.ResolveNowOption{})
|
||||
ac.cc.resolveNow(resolver.ResolveNowOptions{})
|
||||
}
|
||||
|
||||
ac.mu.Lock()
|
||||
@ -1024,7 +1126,7 @@ func (ac *addrConn) resetTransport() {
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
|
||||
connectDeadline := time.Now().Add(dialDuration)
|
||||
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
ac.transport = nil
|
||||
ac.mu.Unlock()
|
||||
|
||||
@ -1037,7 +1139,7 @@ func (ac *addrConn) resetTransport() {
|
||||
ac.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ac.updateConnectivityState(connectivity.TransientFailure)
|
||||
ac.updateConnectivityState(connectivity.TransientFailure, err)
|
||||
|
||||
// Backoff.
|
||||
b := ac.resetBackoff
|
||||
@ -1093,6 +1195,7 @@ func (ac *addrConn) resetTransport() {
|
||||
// first successful one. It returns the transport, the address and a Event in
|
||||
// the successful case. The Event fires when the returned transport disconnects.
|
||||
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
|
||||
var firstConnErr error
|
||||
for _, addr := range addrs {
|
||||
ac.mu.Lock()
|
||||
if ac.state == connectivity.Shutdown {
|
||||
@ -1121,11 +1224,14 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
||||
if err == nil {
|
||||
return newTr, addr, reconnect, nil
|
||||
}
|
||||
if firstConnErr == nil {
|
||||
firstConnErr = err
|
||||
}
|
||||
ac.cc.blockingpicker.updateConnectionError(err)
|
||||
}
|
||||
|
||||
// Couldn't connect to any address.
|
||||
return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
|
||||
return nil, resolver.Address{}, nil, firstConnErr
|
||||
}
|
||||
|
||||
// createTransport creates a connection to addr. It returns the transport and a
|
||||
@ -1136,10 +1242,16 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
||||
onCloseCalled := make(chan struct{})
|
||||
reconnect := grpcsync.NewEvent()
|
||||
|
||||
authority := ac.cc.authority
|
||||
// addr.ServerName takes precedent over ClientConn authority, if present.
|
||||
if addr.ServerName != "" {
|
||||
authority = addr.ServerName
|
||||
}
|
||||
|
||||
target := transport.TargetInfo{
|
||||
Addr: addr.Addr,
|
||||
Metadata: addr.Metadata,
|
||||
Authority: ac.cc.authority,
|
||||
Authority: authority,
|
||||
}
|
||||
|
||||
once := sync.Once{}
|
||||
@ -1152,7 +1264,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
||||
// state to Connecting.
|
||||
//
|
||||
// TODO: this should be Idle when grpc-go properly supports it.
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
}
|
||||
})
|
||||
ac.mu.Unlock()
|
||||
@ -1167,7 +1279,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
||||
// state to Connecting.
|
||||
//
|
||||
// TODO: this should be Idle when grpc-go properly supports it.
|
||||
ac.updateConnectivityState(connectivity.Connecting)
|
||||
ac.updateConnectivityState(connectivity.Connecting, nil)
|
||||
}
|
||||
})
|
||||
ac.mu.Unlock()
|
||||
@ -1193,7 +1305,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(connectDeadline.Sub(time.Now())):
|
||||
case <-time.After(time.Until(connectDeadline)):
|
||||
// We didn't get the preface in time.
|
||||
newTr.Close()
|
||||
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
|
||||
@ -1224,7 +1336,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
||||
var healthcheckManagingState bool
|
||||
defer func() {
|
||||
if !healthcheckManagingState {
|
||||
ac.updateConnectivityState(connectivity.Ready)
|
||||
ac.updateConnectivityState(connectivity.Ready, nil)
|
||||
}
|
||||
}()
|
||||
|
||||
@ -1260,13 +1372,13 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
||||
ac.mu.Unlock()
|
||||
return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
|
||||
}
|
||||
setConnectivityState := func(s connectivity.State) {
|
||||
setConnectivityState := func(s connectivity.State, lastErr error) {
|
||||
ac.mu.Lock()
|
||||
defer ac.mu.Unlock()
|
||||
if ac.transport != currentTr {
|
||||
return
|
||||
}
|
||||
ac.updateConnectivityState(s)
|
||||
ac.updateConnectivityState(s, lastErr)
|
||||
}
|
||||
// Start the health checking stream.
|
||||
go func() {
|
||||
@ -1331,8 +1443,8 @@ func (ac *addrConn) tearDown(err error) {
|
||||
curTr := ac.transport
|
||||
ac.transport = nil
|
||||
// We have to set the state to Shutdown before anything else to prevent races
|
||||
// between setting the state and logic that waits on context cancelation / etc.
|
||||
ac.updateConnectivityState(connectivity.Shutdown)
|
||||
// between setting the state and logic that waits on context cancellation / etc.
|
||||
ac.updateConnectivityState(connectivity.Shutdown, nil)
|
||||
ac.cancel()
|
||||
ac.curAddr = resolver.Address{}
|
||||
if err == errConnDrain && curTr != nil {
|
||||
@ -1355,7 +1467,7 @@ func (ac *addrConn) tearDown(err error) {
|
||||
},
|
||||
})
|
||||
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
|
||||
// the entity beng deleted, and thus prevent it from being deleted right away.
|
||||
// the entity being deleted, and thus prevent it from being deleted right away.
|
||||
channelz.RemoveEntry(ac.channelzID)
|
||||
}
|
||||
ac.mu.Unlock()
|
||||
@ -1445,3 +1557,12 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
|
||||
// Deprecated: This error is never returned by grpc and should not be
|
||||
// referenced by users.
|
||||
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
|
||||
|
||||
func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
|
||||
for _, rb := range cc.dopts.resolvers {
|
||||
if cc.parsedTarget.Scheme == rb.Scheme() {
|
||||
return rb
|
||||
}
|
||||
}
|
||||
return resolver.Get(cc.parsedTarget.Scheme)
|
||||
}
|
||||
|
275 changes: vendor/google.golang.org/grpc/credentials/credentials.go (generated, vendored)
@ -24,16 +24,12 @@ package credentials // import "google.golang.org/grpc/credentials"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/credentials/internal"
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
@ -45,7 +41,8 @@ type PerRPCCredentials interface {
|
||||
// context. If a status code is returned, it will be used as the status
|
||||
// for the RPC. uri is the URI of the entry point for the request.
|
||||
// When supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation.
|
||||
// timeout and cancellation. Additionally, RequestInfo data will be
|
||||
// available via ctx to this call.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||
@ -54,6 +51,48 @@ type PerRPCCredentials interface {
|
||||
RequireTransportSecurity() bool
|
||||
}
|
||||
|
||||
// SecurityLevel defines the protection level on an established connection.
|
||||
//
|
||||
// This API is experimental.
|
||||
type SecurityLevel int
|
||||
|
||||
const (
|
||||
// NoSecurity indicates a connection is insecure.
|
||||
// The zero SecurityLevel value is invalid for backward compatibility.
|
||||
NoSecurity SecurityLevel = iota + 1
|
||||
// IntegrityOnly indicates a connection only provides integrity protection.
|
||||
IntegrityOnly
|
||||
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
|
||||
PrivacyAndIntegrity
|
||||
)
|
||||
|
||||
// String returns SecurityLevel in a string format.
|
||||
func (s SecurityLevel) String() string {
|
||||
switch s {
|
||||
case NoSecurity:
|
||||
return "NoSecurity"
|
||||
case IntegrityOnly:
|
||||
return "IntegrityOnly"
|
||||
case PrivacyAndIntegrity:
|
||||
return "PrivacyAndIntegrity"
|
||||
}
|
||||
return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
|
||||
}
|
||||
|
||||
// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
|
||||
// It should be embedded in a struct implementing AuthInfo to provide additional information
|
||||
// about the credentials.
|
||||
//
|
||||
// This API is experimental.
|
||||
type CommonAuthInfo struct {
|
||||
SecurityLevel SecurityLevel
|
||||
}
|
||||
|
||||
// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
|
||||
func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
|
||||
return c
|
||||
}
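
A hedged sketch of an AuthInfo implementation that embeds CommonAuthInfo, which is what lets helpers such as CheckSecurityLevel below discover the connection's SecurityLevel; the fake credential type is hypothetical.

package example

import "google.golang.org/grpc/credentials"

// fakeAuthInfo is a hypothetical AuthInfo carrying a SecurityLevel via the
// embedded CommonAuthInfo struct.
type fakeAuthInfo struct {
	credentials.CommonAuthInfo
}

func (fakeAuthInfo) AuthType() string { return "fake" }

func newFakeAuthInfo() credentials.AuthInfo {
	return fakeAuthInfo{credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
}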
|
||||
|
||||
// ProtocolInfo provides information regarding the gRPC wire protocol version,
|
||||
// security protocol, security protocol version in use, server name, etc.
|
||||
type ProtocolInfo struct {
|
||||
@ -68,6 +107,8 @@ type ProtocolInfo struct {
|
||||
}
|
||||
|
||||
// AuthInfo defines the common interface for the auth information the users are interested in.
|
||||
// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
|
||||
// information about the credentials in it.
|
||||
type AuthInfo interface {
|
||||
AuthType() string
|
||||
}
|
||||
@ -82,7 +123,8 @@ type TransportCredentials interface {
|
||||
// ClientHandshake does the authentication handshake specified by the corresponding
|
||||
// authentication protocol on rawConn for clients. It returns the authenticated
|
||||
// connection and the corresponding auth information about the connection.
|
||||
// Implementations must use the provided context to implement timely cancellation.
|
||||
// The auth information should embed CommonAuthInfo to return additional information about
|
||||
// the credentials. Implementations must use the provided context to implement timely cancellation.
|
||||
// gRPC will try to reconnect if the error returned is a temporary error
|
||||
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
|
||||
// If the returned error is a wrapper error, implementations should make sure that
|
||||
@ -92,7 +134,8 @@ type TransportCredentials interface {
|
||||
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||
// ServerHandshake does the authentication handshake for servers. It returns
|
||||
// the authenticated connection and the corresponding auth information about
|
||||
// the connection.
|
||||
// the connection. The auth information should embed CommonAuthInfo to return additional information
|
||||
// about the credentials.
|
||||
//
|
||||
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
||||
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
|
||||
@ -125,145 +168,63 @@ type Bundle interface {
|
||||
NewWithMode(mode string) (Bundle, error)
|
||||
}
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
|
||||
//
|
||||
// This API is experimental.
|
||||
type RequestInfo struct {
|
||||
// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
|
||||
Method string
|
||||
// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
|
||||
AuthInfo AuthInfo
|
||||
}
|
||||
|
||||
// AuthType returns the type of TLSInfo as a string.
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
|
||||
type requestInfoKey struct{}
|
||||
|
||||
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
|
||||
//
|
||||
// This API is experimental.
|
||||
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
|
||||
ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
|
||||
return
|
||||
}
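
A rough sketch of a PerRPCCredentials implementation that uses the new RequestInfoFromContext helper; the bearer-token scheme and type names are hypothetical.

package example

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// tokenCreds attaches a static bearer token to every RPC and inspects the
// per-RPC RequestInfo that gRPC now places on the context.
type tokenCreds struct {
	token string
}

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	if ri, ok := credentials.RequestInfoFromContext(ctx); ok {
		// ri.Method is the full method name ("/some.Service/Method") and
		// ri.AuthInfo, when set, carries the transport handshake result.
		_ = ri.Method
	}
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c tokenCreds) RequireTransportSecurity() bool { return true }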
|
||||
|
||||
// GetSecurityValue returns security info requested by channelz.
|
||||
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
|
||||
v := &TLSChannelzSecurityValue{
|
||||
StandardName: cipherSuiteLookup[t.State.CipherSuite],
|
||||
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
|
||||
// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
|
||||
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
|
||||
//
|
||||
// This API is experimental.
|
||||
func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
|
||||
type internalInfo interface {
|
||||
GetCommonAuthInfo() *CommonAuthInfo
|
||||
}
|
||||
// Currently there's no way to get LocalCertificate info from tls package.
|
||||
if len(t.State.PeerCertificates) > 0 {
|
||||
v.RemoteCertificate = t.State.PeerCertificates[0].Raw
|
||||
ri, _ := RequestInfoFromContext(ctx)
|
||||
if ri.AuthInfo == nil {
|
||||
return errors.New("unable to obtain SecurityLevel from context")
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config *tls.Config
|
||||
}
|
||||
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
ServerName: c.config.ServerName,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||
cfg := cloneTLSConfig(c.config)
|
||||
if cfg.ServerName == "" {
|
||||
colonPos := strings.LastIndex(authority, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(authority)
|
||||
if ci, ok := ri.AuthInfo.(internalInfo); ok {
|
||||
// CommonAuthInfo.SecurityLevel has an invalid value.
|
||||
if ci.GetCommonAuthInfo().SecurityLevel == 0 {
|
||||
return nil
|
||||
}
|
||||
cfg.ServerName = authority[:colonPos]
|
||||
}
|
||||
conn := tls.Client(rawConn, cfg)
|
||||
errChannel := make(chan error, 1)
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
}()
|
||||
select {
|
||||
case err := <-errChannel:
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
if ci.GetCommonAuthInfo().SecurityLevel < level {
|
||||
return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) Clone() TransportCredentials {
|
||||
return NewTLS(c.config)
|
||||
}
|
||||
|
||||
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
|
||||
c.config.ServerName = serverNameOverride
|
||||
// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
|
||||
return nil
|
||||
}
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
func appendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
func init() {
|
||||
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
|
||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||
func NewTLS(c *tls.Config) TransportCredentials {
|
||||
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
|
||||
return tc
|
||||
}
|
||||
|
||||
// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
|
||||
}
|
||||
|
||||
// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||
}
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
|
||||
}
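
A minimal usage sketch for the client-side TLS helpers above; the CA file path and target address are illustrative.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Empty serverNameOverride means the dialed authority is used for verification.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("loading CA certificate: %v", err)
	}
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer conn.Close()
}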
|
||||
|
||||
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
|
||||
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||
}
|
||||
|
||||
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
|
||||
// file for server.
|
||||
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
}
|
||||
|
||||
// ChannelzSecurityInfo defines the interface that security protocols should implement
|
||||
// in order to provide security info to channelz.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ChannelzSecurityInfo interface {
|
||||
GetSecurityValue() ChannelzSecurityValue
|
||||
}
|
||||
@ -271,66 +232,20 @@ type ChannelzSecurityInfo interface {
|
||||
// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
|
||||
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
|
||||
// and *OtherChannelzSecurityValue.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ChannelzSecurityValue interface {
|
||||
isChannelzSecurityValue()
|
||||
}
|
||||
|
||||
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||||
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||||
type TLSChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
StandardName string
|
||||
LocalCertificate []byte
|
||||
RemoteCertificate []byte
|
||||
}
|
||||
|
||||
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
|
||||
// from GetSecurityValue(), which contains protocol specific security info. Note
|
||||
// the Value field will be sent to users of channelz requesting channel info, and
|
||||
// thus sensitive info should better be avoided.
|
||||
//
|
||||
// This API is experimental.
|
||||
type OtherChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
Name string
|
||||
Value proto.Message
|
||||
}
|
||||
|
||||
var cipherSuiteLookup = map[uint16]string{
|
||||
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
}
|
||||
|
||||
// cloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
|
||||
|
225 vendor/google.golang.org/grpc/credentials/tls.go generated vendored Normal file
@ -0,0 +1,225 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc/credentials/internal"
|
||||
)
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
CommonAuthInfo
|
||||
}
|
||||
|
||||
// AuthType returns the type of TLSInfo as a string.
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
}
|
||||
|
||||
// GetSecurityValue returns security info requested by channelz.
|
||||
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
|
||||
v := &TLSChannelzSecurityValue{
|
||||
StandardName: cipherSuiteLookup[t.State.CipherSuite],
|
||||
}
|
||||
// Currently there's no way to get LocalCertificate info from tls package.
|
||||
if len(t.State.PeerCertificates) > 0 {
|
||||
v.RemoteCertificate = t.State.PeerCertificates[0].Raw
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config *tls.Config
|
||||
}
|
||||
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
ServerName: c.config.ServerName,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||
cfg := cloneTLSConfig(c.config)
|
||||
if cfg.ServerName == "" {
|
||||
serverName, _, err := net.SplitHostPort(authority)
|
||||
if err != nil {
|
||||
// If the authority had no host port or if the authority cannot be parsed, use it as-is.
|
||||
serverName = authority
|
||||
}
|
||||
cfg.ServerName = serverName
|
||||
}
|
||||
conn := tls.Client(rawConn, cfg)
|
||||
errChannel := make(chan error, 1)
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
close(errChannel)
|
||||
}()
|
||||
select {
|
||||
case err := <-errChannel:
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
conn.Close()
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
conn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) Clone() TransportCredentials {
|
||||
return NewTLS(c.config)
|
||||
}
|
||||
|
||||
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
|
||||
c.config.ServerName = serverNameOverride
|
||||
return nil
|
||||
}
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
func appendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||
func NewTLS(c *tls.Config) TransportCredentials {
|
||||
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
|
||||
return tc
|
||||
}
|
||||
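
For orientation, a minimal usage sketch (not part of this diff; values are placeholders): NewTLS works on a clone of the supplied config and appends "h2" to NextProtos, so ALPN always offers HTTP/2 regardless of what the caller configured.

package main

import (
	"crypto/tls"
	"fmt"

	"google.golang.org/grpc/credentials"
)

func main() {
	// NewTLS clones the config, so the caller's tls.Config is left untouched,
	// and "h2" is appended to NextProtos for ALPN negotiation.
	creds := credentials.NewTLS(&tls.Config{MinVersion: tls.VersionTLS12})
	fmt.Println(creds.Info().SecurityProtocol) // prints "tls"
}
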
|
||||
// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
|
||||
}
|
||||
|
||||
// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
|
||||
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||
}
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
|
||||
}
|
||||
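
A hedged client-side sketch of how these constructors are typically wired into a dial call; the CA file path and target address are assumptions, not part of this diff:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Hypothetical CA bundle path; replace with a real PEM file.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("failed to load CA cert: %v", err)
	}
	// Dial a placeholder address using the TLS transport credentials.
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
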
|
||||
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
|
||||
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||
}
|
||||
|
||||
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
|
||||
// file for server.
|
||||
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
}
|
||||
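
The matching server-side sketch, again with assumed file names and port; it only illustrates how NewServerTLSFromFile feeds grpc.Creds:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Hypothetical certificate and key paths; replace with real files.
	creds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		log.Fatalf("failed to load server key pair: %v", err)
	}
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen failed: %v", err)
	}
	s := grpc.NewServer(grpc.Creds(creds))
	// Register services here, then serve.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve failed: %v", err)
	}
}
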
|
||||
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||||
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type TLSChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
StandardName string
|
||||
LocalCertificate []byte
|
||||
RemoteCertificate []byte
|
||||
}
|
||||
|
||||
var cipherSuiteLookup = map[uint16]string{
|
||||
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
}
|
||||
|
||||
// cloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
|
82 vendor/google.golang.org/grpc/dialoptions.go generated vendored
@ -24,11 +24,12 @@ import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
internalbackoff "google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
@ -47,7 +48,7 @@ type dialOptions struct {
|
||||
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
bs backoff.Strategy
|
||||
bs internalbackoff.Strategy
|
||||
block bool
|
||||
insecure bool
|
||||
timeout time.Duration
|
||||
@ -57,9 +58,7 @@ type dialOptions struct {
|
||||
callOptions []CallOption
|
||||
// This is used by v1 balancer dial option WithBalancer to support v1
|
||||
// balancer, and also by WithBalancerName dial option.
|
||||
balancerBuilder balancer.Builder
|
||||
// This is to support grpclb.
|
||||
resolverBuilder resolver.Builder
|
||||
balancerBuilder balancer.Builder
|
||||
channelzParentID int64
|
||||
disableServiceConfig bool
|
||||
disableRetry bool
|
||||
@ -68,6 +67,11 @@ type dialOptions struct {
|
||||
minConnectTimeout func() time.Duration
|
||||
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
||||
defaultServiceConfigRawJSON *string
|
||||
// This is used by ccResolverWrapper to backoff between successive calls to
|
||||
// resolver.ResolveNow(). The user will have no need to configure this, but
|
||||
// we need to be able to configure this in tests.
|
||||
resolveNowBackoff func(int) time.Duration
|
||||
resolvers []resolver.Builder
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
@ -226,13 +230,6 @@ func WithBalancerName(balancerName string) DialOption {
|
||||
})
|
||||
}
|
||||
|
||||
// withResolverBuilder is only for grpclb.
|
||||
func withResolverBuilder(b resolver.Builder) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.resolverBuilder = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithServiceConfig returns a DialOption which has a channel to read the
|
||||
// service configuration.
|
||||
//
|
||||
@ -246,8 +243,28 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
|
||||
})
|
||||
}
|
||||
|
||||
// WithConnectParams configures the dialer to use the provided ConnectParams.
|
||||
//
|
||||
// The backoff configuration specified as part of the ConnectParams overrides
|
||||
// all defaults specified in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
|
||||
// using the backoff.DefaultConfig as a base, in cases where you want to
|
||||
// override only a subset of the backoff configuration.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithConnectParams(p ConnectParams) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.bs = internalbackoff.Exponential{Config: p.Backoff}
|
||||
o.minConnectTimeout = func() time.Duration {
|
||||
return p.MinConnectTimeout
|
||||
}
|
||||
})
|
||||
}
|
||||
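
As the doc comment above suggests, backoff.DefaultConfig can serve as a base when only some knobs need to change. A short sketch (target address and the use of WithInsecure are placeholder assumptions):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func dial() (*grpc.ClientConn, error) {
	// Start from the spec defaults and override only MaxDelay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second
	return grpc.Dial("example.com:443",
		grpc.WithInsecure(), // placeholder; use real credentials in production
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
}
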
|
||||
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
|
||||
// when backing off after failed connection attempts.
|
||||
//
|
||||
// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
|
||||
func WithBackoffMaxDelay(md time.Duration) DialOption {
|
||||
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
|
||||
}
|
||||
@ -255,19 +272,18 @@ func WithBackoffMaxDelay(md time.Duration) DialOption {
|
||||
// WithBackoffConfig configures the dialer to use the provided backoff
|
||||
// parameters after connection failures.
|
||||
//
|
||||
// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
|
||||
// for use.
|
||||
// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
|
||||
func WithBackoffConfig(b BackoffConfig) DialOption {
|
||||
return withBackoff(backoff.Exponential{
|
||||
MaxDelay: b.MaxDelay,
|
||||
})
|
||||
bc := backoff.DefaultConfig
|
||||
bc.MaxDelay = b.MaxDelay
|
||||
return withBackoff(internalbackoff.Exponential{Config: bc})
|
||||
}
|
||||
|
||||
// withBackoff sets the backoff strategy used for connectRetryNum after a failed
|
||||
// connection attempt.
|
||||
//
|
||||
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
|
||||
func withBackoff(bs backoff.Strategy) DialOption {
|
||||
func withBackoff(bs internalbackoff.Strategy) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.bs = bs
|
||||
})
|
||||
@ -322,8 +338,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
|
||||
// WithTimeout returns a DialOption that configures a timeout for dialing a
|
||||
// ClientConn initially. This is valid if and only if WithBlock() is present.
|
||||
//
|
||||
// Deprecated: use DialContext and context.WithTimeout instead. Will be
|
||||
// supported throughout 1.x.
|
||||
// Deprecated: use DialContext instead of Dial and context.WithTimeout
|
||||
// instead. Will be supported throughout 1.x.
|
||||
func WithTimeout(d time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.timeout = d
|
||||
@ -341,7 +357,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp
|
||||
}
|
||||
|
||||
func init() {
|
||||
internal.WithResolverBuilder = withResolverBuilder
|
||||
internal.WithHealthCheckFunc = withHealthCheckFunc
|
||||
}
|
||||
|
||||
@ -455,6 +470,8 @@ func WithAuthority(a string) DialOption {
|
||||
// WithChannelzParentID returns a DialOption that specifies the channelz ID of
|
||||
// current ClientConn's parent. This function is used in nested channel creation
|
||||
// (e.g. grpclb dial).
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithChannelzParentID(id int64) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.channelzParentID = id
|
||||
@ -539,6 +556,7 @@ func defaultDialOptions() dialOptions {
|
||||
WriteBufferSize: defaultWriteBufSize,
|
||||
ReadBufferSize: defaultReadBufSize,
|
||||
},
|
||||
resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
|
||||
}
|
||||
}
|
||||
|
||||
@ -552,3 +570,25 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
|
||||
o.minConnectTimeout = f
|
||||
})
|
||||
}
|
||||
|
||||
// withResolveNowBackoff specifies the function that clientconn uses to backoff
|
||||
// between successive calls to resolver.ResolveNow().
|
||||
//
|
||||
// For testing purpose only.
|
||||
func withResolveNowBackoff(f func(int) time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.resolveNowBackoff = f
|
||||
})
|
||||
}
|
||||
|
||||
// WithResolvers allows a list of resolver implementations to be registered
|
||||
// locally with the ClientConn without needing to be globally registered via
|
||||
// resolver.Register. They will be matched against the scheme used for the
|
||||
// current Dial only, and will take precedence over the global registry.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithResolvers(rs ...resolver.Builder) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.resolvers = append(o.resolvers, rs...)
|
||||
})
|
||||
}
|
||||
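
A sketch of a per-dial resolver registered via WithResolvers; staticBuilder and its fixed address are illustrative only and not part of this diff:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/resolver"
)

// staticBuilder is a hypothetical resolver that always reports one fixed address.
type staticBuilder struct{ addr string }

type staticResolver struct{}

func (staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (staticResolver) Close()                                {}

func (b *staticBuilder) Scheme() string { return "static" }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// Push the single address to the ClientConn once at build time.
	cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: b.addr}}})
	return staticResolver{}, nil
}

func dial() (*grpc.ClientConn, error) {
	// The builder is matched against the "static" scheme for this dial only.
	return grpc.Dial("static:///ignored",
		grpc.WithInsecure(), // placeholder credentials
		grpc.WithResolvers(&staticBuilder{addr: "127.0.0.1:50051"}),
	)
}
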
|
4 vendor/google.golang.org/grpc/encoding/encoding.go generated vendored
@ -46,6 +46,10 @@ type Compressor interface {
|
||||
// coding header. The result must be static; the result cannot change
|
||||
// between calls.
|
||||
Name() string
|
||||
// EXPERIMENTAL: if a Compressor implements
|
||||
// DecompressedSize(compressedBytes []byte) int, gRPC will call it
|
||||
// to determine the size of the buffer allocated for the result of decompression.
|
||||
// Return -1 to indicate unknown size.
|
||||
}
|
||||
|
||||
var registeredCompressor = make(map[string]Compressor)
|
||||
|
15 vendor/google.golang.org/grpc/go.mod generated vendored
@ -1,19 +1,16 @@
|
||||
module google.golang.org/grpc
|
||||
|
||||
go 1.11
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.26.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
|
||||
github.com/golang/mock v1.1.1
|
||||
github.com/golang/protobuf v1.2.0
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/google/go-cmp v0.2.0
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135
|
||||
google.golang.org/appengine v1.1.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
|
||||
)
|
||||
|
36 vendor/google.golang.org/grpc/go.sum generated vendored
@ -1,37 +1,53 @@
|
||||
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
2 vendor/google.golang.org/grpc/grpclog/grpclog.go generated vendored
@ -89,7 +89,7 @@ func Fatal(args ...interface{}) {
|
||||
}
|
||||
|
||||
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
|
||||
// It calles os.Exit() with exit code 1.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
logger.Fatalf(format, args...)
|
||||
// Make sure fatal logs will exit.
|
||||
|
42 vendor/google.golang.org/grpc/health/client.go generated vendored
@ -33,20 +33,20 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const maxDelay = 120 * time.Second
|
||||
|
||||
var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay}
|
||||
var backoffFunc = func(ctx context.Context, retries int) bool {
|
||||
d := backoffStrategy.Backoff(retries)
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-timer.C:
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return false
|
||||
var (
|
||||
backoffStrategy = backoff.DefaultExponential
|
||||
backoffFunc = func(ctx context.Context, retries int) bool {
|
||||
d := backoffStrategy.Backoff(retries)
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-timer.C:
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.HealthCheckFunc = clientHealthCheck
|
||||
@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch"
|
||||
|
||||
// This function implements the protocol defined at:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
|
||||
func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
|
||||
func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error {
|
||||
tryCnt := 0
|
||||
|
||||
retryConnection:
|
||||
@ -70,7 +70,7 @@ retryConnection:
|
||||
if ctx.Err() != nil {
|
||||
return nil
|
||||
}
|
||||
setConnectivityState(connectivity.Connecting)
|
||||
setConnectivityState(connectivity.Connecting, nil)
|
||||
rawS, err := newStream(healthCheckMethod)
|
||||
if err != nil {
|
||||
continue retryConnection
|
||||
@ -79,7 +79,7 @@ retryConnection:
|
||||
s, ok := rawS.(grpc.ClientStream)
|
||||
// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
|
||||
if !ok {
|
||||
setConnectivityState(connectivity.Ready)
|
||||
setConnectivityState(connectivity.Ready, nil)
|
||||
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
|
||||
}
|
||||
|
||||
@ -95,22 +95,22 @@ retryConnection:
|
||||
|
||||
// Reports healthy for the LBing purposes if health check is not implemented in the server.
|
||||
if status.Code(err) == codes.Unimplemented {
|
||||
setConnectivityState(connectivity.Ready)
|
||||
setConnectivityState(connectivity.Ready, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
|
||||
if err != nil {
|
||||
setConnectivityState(connectivity.TransientFailure)
|
||||
setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err))
|
||||
continue retryConnection
|
||||
}
|
||||
|
||||
// As a message has been received, removes the need for backoff for the next retry by reseting the try count.
|
||||
// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
|
||||
tryCnt = 0
|
||||
if resp.Status == healthpb.HealthCheckResponse_SERVING {
|
||||
setConnectivityState(connectivity.Ready)
|
||||
setConnectivityState(connectivity.Ready, nil)
|
||||
} else {
|
||||
setConnectivityState(connectivity.TransientFailure)
|
||||
setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
96 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go generated vendored
@ -1,15 +1,16 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: grpc/health/v1/health.proto
|
||||
|
||||
package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
package grpc_health_v1
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@ -21,7 +22,7 @@ var _ = math.Inf
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type HealthCheckResponse_ServingStatus int32
|
||||
|
||||
@ -38,6 +39,7 @@ var HealthCheckResponse_ServingStatus_name = map[int32]string{
|
||||
2: "NOT_SERVING",
|
||||
3: "SERVICE_UNKNOWN",
|
||||
}
|
||||
|
||||
var HealthCheckResponse_ServingStatus_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"SERVING": 1,
|
||||
@ -48,8 +50,9 @@ var HealthCheckResponse_ServingStatus_value = map[string]int32{
|
||||
func (x HealthCheckResponse_ServingStatus) String() string {
|
||||
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
|
||||
}
|
||||
|
||||
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
|
||||
return fileDescriptor_e265fd9d4e077217, []int{1, 0}
|
||||
}
|
||||
|
||||
type HealthCheckRequest struct {
|
||||
@ -63,16 +66,17 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
|
||||
func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*HealthCheckRequest) ProtoMessage() {}
|
||||
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
|
||||
return fileDescriptor_e265fd9d4e077217, []int{0}
|
||||
}
|
||||
|
||||
func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
|
||||
func (m *HealthCheckRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckRequest.Merge(m, src)
|
||||
}
|
||||
func (m *HealthCheckRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckRequest.Size(m)
|
||||
@ -101,16 +105,17 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
|
||||
func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*HealthCheckResponse) ProtoMessage() {}
|
||||
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
|
||||
return fileDescriptor_e265fd9d4e077217, []int{1}
|
||||
}
|
||||
|
||||
func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
|
||||
func (m *HealthCheckResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_HealthCheckResponse.Merge(m, src)
|
||||
}
|
||||
func (m *HealthCheckResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_HealthCheckResponse.Size(m)
|
||||
@ -129,9 +134,34 @@ func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
|
||||
proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
|
||||
proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse")
|
||||
proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) }
|
||||
|
||||
var fileDescriptor_e265fd9d4e077217 = []byte{
|
||||
// 297 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
||||
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
|
||||
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
|
||||
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
|
||||
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
|
||||
0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
|
||||
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
|
||||
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
|
||||
0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
|
||||
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
|
||||
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
|
||||
0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
|
||||
0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
|
||||
0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
|
||||
0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
|
||||
0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
|
||||
0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
|
||||
0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
|
||||
0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@ -239,6 +269,17 @@ type HealthServer interface {
|
||||
Watch(*HealthCheckRequest, Health_WatchServer) error
|
||||
}
|
||||
|
||||
// UnimplementedHealthServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedHealthServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
|
||||
}
|
||||
func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
|
||||
}
|
||||
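
A brief sketch of the forward-compatibility pattern this enables: an implementation embeds the unimplemented type so that RPCs added later fail with codes.Unimplemented instead of breaking the build. The myHealthServer type and its always-SERVING behavior are assumptions for illustration:

package main

import (
	"context"

	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// myHealthServer overrides only Check; Watch falls through to the embedded
// UnimplementedHealthServer and returns codes.Unimplemented.
type myHealthServer struct {
	healthpb.UnimplementedHealthServer
}

func (s *myHealthServer) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	// Always report SERVING in this toy example.
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}
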
|
||||
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
|
||||
s.RegisterService(&_Health_serviceDesc, srv)
|
||||
}
|
||||
@ -300,28 +341,3 @@ var _Health_serviceDesc = grpc.ServiceDesc{
|
||||
},
|
||||
Metadata: "grpc/health/v1/health.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
|
||||
|
||||
var fileDescriptor_health_6b1a06aa67f91efd = []byte{
|
||||
// 297 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
|
||||
0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
|
||||
0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
|
||||
0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
|
||||
0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
|
||||
0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
|
||||
0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
|
||||
0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
|
||||
0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
|
||||
0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
|
||||
0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
|
||||
0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
|
||||
0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
|
||||
0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
|
||||
0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
|
||||
0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
|
||||
0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
|
||||
0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
|
||||
0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
10 vendor/google.golang.org/grpc/health/server.go generated vendored
@ -35,7 +35,7 @@ import (
|
||||
|
||||
// Server implements `service Health`.
|
||||
type Server struct {
|
||||
mu sync.Mutex
|
||||
mu sync.RWMutex
|
||||
// If shutdown is true, it's expected all serving status is NOT_SERVING, and
|
||||
// will stay in NOT_SERVING.
|
||||
shutdown bool
|
||||
@ -54,8 +54,8 @@ func NewServer() *Server {
|
||||
|
||||
// Check implements `service Health`.
|
||||
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
if servingStatus, ok := s.statusMap[in.Service]; ok {
|
||||
return &healthpb.HealthCheckResponse{
|
||||
Status: servingStatus,
|
||||
@ -139,7 +139,7 @@ func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.H
|
||||
// Shutdown sets all serving status to NOT_SERVING, and configures the server to
|
||||
// ignore all future status changes.
|
||||
//
|
||||
// This changes serving status for all services. To set status for a perticular
|
||||
// This changes serving status for all services. To set status for a particular
|
||||
// services, call SetServingStatus().
|
||||
func (s *Server) Shutdown() {
|
||||
s.mu.Lock()
|
||||
@ -153,7 +153,7 @@ func (s *Server) Shutdown() {
|
||||
// Resume sets all serving status to SERVING, and configures the server to
|
||||
// accept all future status changes.
|
||||
//
|
||||
// This changes serving status for all services. To set status for a perticular
|
||||
// This changes serving status for all services. To set status for a particular
|
||||
// services, call SetServingStatus().
|
||||
func (s *Server) Resume() {
|
||||
s.mu.Lock()
|
||||
|
27 vendor/google.golang.org/grpc/internal/backoff/backoff.go generated vendored
@ -25,44 +25,39 @@ package backoff
|
||||
import (
|
||||
"time"
|
||||
|
||||
grpcbackoff "google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
)
|
||||
|
||||
// Strategy defines the methodology for backing off after a grpc connection
|
||||
// failure.
|
||||
//
|
||||
type Strategy interface {
|
||||
// Backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
Backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
const (
|
||||
// baseDelay is the amount of time to wait before retrying after the first
|
||||
// failure.
|
||||
baseDelay = 1.0 * time.Second
|
||||
// factor is applied to the backoff after each retry.
|
||||
factor = 1.6
|
||||
// jitter provides a range to randomize backoff delays.
|
||||
jitter = 0.2
|
||||
)
|
||||
// DefaultExponential is an exponential backoff implementation using the
|
||||
// default values for all the configurable knobs defined in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
|
||||
|
||||
// Exponential implements exponential backoff algorithm as defined in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
type Exponential struct {
|
||||
// MaxDelay is the upper bound of backoff delay.
|
||||
MaxDelay time.Duration
|
||||
// Config contains all options to configure the backoff algorithm.
|
||||
Config grpcbackoff.Config
|
||||
}
|
||||
|
||||
// Backoff returns the amount of time to wait before the next retry given the
|
||||
// number of retries.
|
||||
func (bc Exponential) Backoff(retries int) time.Duration {
|
||||
if retries == 0 {
|
||||
return baseDelay
|
||||
return bc.Config.BaseDelay
|
||||
}
|
||||
backoff, max := float64(baseDelay), float64(bc.MaxDelay)
|
||||
backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= factor
|
||||
backoff *= bc.Config.Multiplier
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
@ -70,7 +65,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
|
||||
backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
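
The knobs this internal strategy now reads come from the exported backoff package; a small sketch of the mapping (the printed defaults follow the connection-backoff spec: 1s base, 1.6 multiplier, 0.2 jitter, 120s max delay):

package main

import (
	"fmt"

	"google.golang.org/grpc/backoff"
)

func main() {
	// backoff.Config carries the same fields the internal Exponential
	// strategy uses above: BaseDelay, Multiplier, Jitter and MaxDelay.
	cfg := backoff.DefaultConfig
	fmt.Printf("base=%v mult=%v jitter=%v max=%v\n",
		cfg.BaseDelay, cfg.Multiplier, cfg.Jitter, cfg.MaxDelay)
}
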
|
12 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go generated vendored
@ -34,7 +34,7 @@ type Logger interface {
|
||||
}
|
||||
|
||||
// binLogger is the global binary logger for the binary. One of this should be
|
||||
// built at init time from the configuration (environment varialbe or flags).
|
||||
// built at init time from the configuration (environment variable or flags).
|
||||
//
|
||||
// It is used to get a methodLogger for each individual method.
|
||||
var binLogger Logger
|
||||
@ -98,7 +98,7 @@ func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
|
||||
// New methodLogger with same service overrides the old one.
|
||||
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
|
||||
if _, ok := l.services[service]; ok {
|
||||
return fmt.Errorf("conflicting rules for service %v found", service)
|
||||
return fmt.Errorf("conflicting service rules for service %v found", service)
|
||||
}
|
||||
if l.services == nil {
|
||||
l.services = make(map[string]*methodLoggerConfig)
|
||||
@ -112,10 +112,10 @@ func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig)
|
||||
// New methodLogger with same method overrides the old one.
|
||||
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
|
||||
if _, ok := l.blacklist[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
||||
}
|
||||
if _, ok := l.methods[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting method rules for method %v found", method)
|
||||
}
|
||||
if l.methods == nil {
|
||||
l.methods = make(map[string]*methodLoggerConfig)
|
||||
@ -127,10 +127,10 @@ func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) er
|
||||
// Set blacklist method for "-service/method".
|
||||
func (l *logger) setBlacklist(method string) error {
|
||||
if _, ok := l.blacklist[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
||||
}
|
||||
if _, ok := l.methods[method]; ok {
|
||||
return fmt.Errorf("conflicting rules for method %v found", method)
|
||||
return fmt.Errorf("conflicting method rules for method %v found", method)
|
||||
}
|
||||
if l.blacklist == nil {
|
||||
l.blacklist = make(map[string]struct{})
|
||||
|
4 vendor/google.golang.org/grpc/internal/binarylog/env_config.go generated vendored
@ -43,7 +43,7 @@ import (
|
||||
// Foo.
|
||||
//
|
||||
// If two configs exist for one certain method or service, the one specified
|
||||
// later overrides the privous config.
|
||||
// later overrides the previous config.
|
||||
func NewLoggerFromConfigString(s string) Logger {
|
||||
if s == "" {
|
||||
return nil
|
||||
@ -74,7 +74,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||
}
|
||||
if m == "*" {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config")
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
|
||||
}
|
||||
if suffix != "" {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
|
||||
|
2 vendor/google.golang.org/grpc/internal/binarylog/sink.go generated vendored
@ -63,7 +63,7 @@ func (ns *noopSink) Close() error { return nil }
|
||||
|
||||
// newWriterSink creates a binary log sink with the given writer.
|
||||
//
|
||||
// Write() marshalls the proto message and writes it to the given writer. Each
|
||||
// Write() marshals the proto message and writes it to the given writer. Each
|
||||
// message is prefixed with a 4 byte big endian unsigned integer as the length.
|
||||
//
|
||||
// No buffer is done, Close() doesn't try to close the writer.
|
||||
|
85 vendor/google.golang.org/grpc/internal/buffer/unbounded.go generated vendored Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package buffer provides an implementation of an unbounded buffer.
|
||||
package buffer
|
||||
|
||||
import "sync"
|
||||
|
||||
// Unbounded is an implementation of an unbounded buffer which does not use
|
||||
// extra goroutines. This is typically used for passing updates from one entity
|
||||
// to another within gRPC.
|
||||
//
|
||||
// All methods on this type are thread-safe and don't block on anything except
|
||||
// the underlying mutex used for synchronization.
|
||||
//
|
||||
// Unbounded supports values of any type to be stored in it by using a channel
|
||||
// of `interface{}`. This means that a call to Put() incurs an extra memory
|
||||
// allocation, and also that users need a type assertion while reading. For
|
||||
// performance critical code paths, using Unbounded is strongly discouraged and
|
||||
// defining a new type specific implementation of this buffer is preferred. See
|
||||
// internal/transport/transport.go for an example of this.
|
||||
type Unbounded struct {
|
||||
c chan interface{}
|
||||
mu sync.Mutex
|
||||
backlog []interface{}
|
||||
}
|
||||
|
||||
// NewUnbounded returns a new instance of Unbounded.
|
||||
func NewUnbounded() *Unbounded {
|
||||
return &Unbounded{c: make(chan interface{}, 1)}
|
||||
}
|
||||
|
||||
// Put adds t to the unbounded buffer.
|
||||
func (b *Unbounded) Put(t interface{}) {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Load sends the earliest buffered data, if any, onto the read channel
|
||||
// returned by Get(). Users are expected to call this every time they read a
|
||||
// value from the read channel.
|
||||
func (b *Unbounded) Load() {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Get returns a read channel on which values added to the buffer, via Put(),
|
||||
// are sent on.
|
||||
//
|
||||
// Upon reading a value from this channel, users are expected to call Load() to
|
||||
// send the next buffered value onto the channel if there is any.
|
||||
func (b *Unbounded) Get() <-chan interface{} {
|
||||
return b.c
|
||||
}
|
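
A short sketch of the Put/Get/Load contract described above. Since this lives under internal/, it cannot be imported from outside the grpc module; the loop below is purely illustrative of how consumers inside gRPC use it:

package buffer_test // hypothetical in-module test package

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

func Example() {
	b := buffer.NewUnbounded()
	// Producer side: Put never blocks; overflow goes into the backlog slice.
	for i := 0; i < 3; i++ {
		b.Put(i)
	}
	// Consumer side: read one value, then call Load to release the next one.
	for i := 0; i < 3; i++ {
		v := <-b.Get()
		b.Load()
		fmt.Println(v.(int))
	}
}
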
7 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go generated vendored
@ -25,11 +25,14 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
|
||||
)
|
||||
|
||||
var (
|
||||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
|
||||
)
|
||||
|
15 vendor/google.golang.org/grpc/internal/internal.go generated vendored
@ -28,9 +28,7 @@ import (
)

var (
	// WithResolverBuilder is exported by dialoptions.go
	WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
	// WithHealthCheckFunc is not exported by dialoptions.go
	// WithHealthCheckFunc is set by dialoptions.go
	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
	// HealthCheckFunc is used to provide client-side LB channel health checking
	HealthCheckFunc HealthChecker
@ -39,14 +37,17 @@ var (
	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
	// default, but tests may wish to set it lower for convenience.
	KeepaliveMinPingTime = 10 * time.Second
	// ParseServiceConfig is a function to parse JSON service configs into
	// opaque data structures.
	ParseServiceConfig func(sc string) (interface{}, error)
	// StatusRawProto is exported by status/status.go. This func returns a
	// pointer to the wrapped Status proto for a given status.Status without a
	// call to proto.Clone(). The returned Status proto should not be mutated by
	// the caller.
	StatusRawProto interface{} // func (*status.Status) *spb.Status
	// NewRequestInfoContext creates a new context based on the argument context attaching
	// the passed in RequestInfo to the new context.
	NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
	// ParseServiceConfigForTesting is for creating a fake
	// ClientConn for resolver testing only
	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
)

// HealthChecker defines the signature of the client-side LB channel health checking function.
@ -57,7 +58,7 @@ var (
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error

const (
	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
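The HealthChecker signature change above means the connectivity callback now also receives an error. A self-contained stub written against that shape could look like the sketch below; the service name is made up and the stub just reports READY instead of running the real health-checking RPC:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// healthChecker matches the updated function shape: setState takes both a
// connectivity.State and an error.
func healthChecker(ctx context.Context, newStream func(string) (interface{}, error), setState func(connectivity.State, error), service string) error {
	setState(connectivity.Ready, nil)
	return nil
}

func main() {
	err := healthChecker(context.Background(), nil, func(s connectivity.State, err error) {
		fmt.Println("state:", s, "err:", err)
	}, "example.Service")
	fmt.Println("checker returned:", err)
}
```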
@ -33,18 +33,22 @@ import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
|
||||
// addresses from SRV records. Must not be changed after init time.
|
||||
var EnableSRVLookups = false
|
||||
|
||||
func init() {
|
||||
resolver.Register(NewBuilder())
|
||||
}
|
||||
|
||||
const (
|
||||
defaultPort = "443"
|
||||
defaultFreq = time.Minute * 30
|
||||
defaultDNSSvrPort = "53"
|
||||
golang = "GO"
|
||||
// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
|
||||
@ -94,47 +98,33 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {
|
||||
|
||||
// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
|
||||
func NewBuilder() resolver.Builder {
|
||||
return &dnsBuilder{minFreq: defaultFreq}
|
||||
return &dnsBuilder{}
|
||||
}
|
||||
|
||||
type dnsBuilder struct {
|
||||
// minimum frequency of polling the DNS server.
|
||||
minFreq time.Duration
|
||||
}
|
||||
type dnsBuilder struct{}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
host, port, err := parseTarget(target.Endpoint, defaultPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// IP address.
|
||||
if net.ParseIP(host) != nil {
|
||||
host, _ = formatIP(host)
|
||||
addr := []resolver.Address{{Addr: host + ":" + port}}
|
||||
i := &ipResolver{
|
||||
cc: cc,
|
||||
ip: addr,
|
||||
rn: make(chan struct{}, 1),
|
||||
q: make(chan struct{}),
|
||||
}
|
||||
cc.NewAddress(addr)
|
||||
go i.watcher()
|
||||
return i, nil
|
||||
if ipAddr, ok := formatIP(host); ok {
|
||||
addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
|
||||
cc.UpdateState(resolver.State{Addresses: addr})
|
||||
return deadResolver{}, nil
|
||||
}
|
||||
|
||||
// DNS address (non-IP).
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
d := &dnsResolver{
|
||||
freq: b.minFreq,
|
||||
backoff: backoff.Exponential{MaxDelay: b.minFreq},
|
||||
host: host,
|
||||
port: port,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cc: cc,
|
||||
t: time.NewTimer(0),
|
||||
rn: make(chan struct{}, 1),
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
@ -150,6 +140,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||
|
||||
d.wg.Add(1)
|
||||
go d.watcher()
|
||||
d.ResolveNow(resolver.ResolveNowOptions{})
|
||||
return d, nil
|
||||
}
|
||||
|
||||
@ -164,53 +155,23 @@ type netResolver interface {
|
||||
LookupTXT(ctx context.Context, name string) (txts []string, err error)
|
||||
}
|
||||
|
||||
// ipResolver watches for the name resolution update for an IP address.
|
||||
type ipResolver struct {
|
||||
cc resolver.ClientConn
|
||||
ip []resolver.Address
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
q chan struct{}
|
||||
}
|
||||
// deadResolver is a resolver that does nothing.
|
||||
type deadResolver struct{}
|
||||
|
||||
// ResolveNow resend the address it stores, no resolution is needed.
|
||||
func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
select {
|
||||
case i.rn <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
|
||||
|
||||
// Close closes the ipResolver.
|
||||
func (i *ipResolver) Close() {
|
||||
close(i.q)
|
||||
}
|
||||
|
||||
func (i *ipResolver) watcher() {
|
||||
for {
|
||||
select {
|
||||
case <-i.rn:
|
||||
i.cc.NewAddress(i.ip)
|
||||
case <-i.q:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
func (deadResolver) Close() {}
|
||||
|
||||
// dnsResolver watches for the name resolution update for a non-IP target.
|
||||
type dnsResolver struct {
|
||||
freq time.Duration
|
||||
backoff backoff.Exponential
|
||||
retryCount int
|
||||
host string
|
||||
port string
|
||||
resolver netResolver
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
host string
|
||||
port string
|
||||
resolver netResolver
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
cc resolver.ClientConn
|
||||
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
|
||||
rn chan struct{}
|
||||
t *time.Timer
|
||||
// wg is used to enforce Close() to return after the watcher() goroutine has finished.
|
||||
// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
|
||||
// replace the real lookup functions with mocked ones to facilitate testing.
|
||||
@ -222,7 +183,7 @@ type dnsResolver struct {
|
||||
}
|
||||
|
||||
// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
|
||||
func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
|
||||
select {
|
||||
case d.rn <- struct{}{}:
|
||||
default:
|
||||
@ -233,7 +194,6 @@ func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
|
||||
func (d *dnsResolver) Close() {
|
||||
d.cancel()
|
||||
d.wg.Wait()
|
||||
d.t.Stop()
|
||||
}
|
||||
|
||||
func (d *dnsResolver) watcher() {
|
||||
@ -242,27 +202,15 @@ func (d *dnsResolver) watcher() {
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
return
|
||||
case <-d.t.C:
|
||||
case <-d.rn:
|
||||
if !d.t.Stop() {
|
||||
// Before resetting a timer, it should be stopped to prevent racing with
|
||||
// reads on it's channel.
|
||||
<-d.t.C
|
||||
}
|
||||
}
|
||||
|
||||
result, sc := d.lookup()
|
||||
// Next lookup should happen within an interval defined by d.freq. It may be
|
||||
// more often due to exponential retry on empty address list.
|
||||
if len(result) == 0 {
|
||||
d.retryCount++
|
||||
d.t.Reset(d.backoff.Backoff(d.retryCount))
|
||||
state, err := d.lookup()
|
||||
if err != nil {
|
||||
d.cc.ReportError(err)
|
||||
} else {
|
||||
d.retryCount = 0
|
||||
d.t.Reset(d.freq)
|
||||
d.cc.UpdateState(*state)
|
||||
}
|
||||
d.cc.NewServiceConfig(sc)
|
||||
d.cc.NewAddress(result)
|
||||
|
||||
// Sleep to prevent excessive re-resolutions. Incoming resolution requests
|
||||
// will be queued in d.rn.
|
||||
@ -276,37 +224,68 @@ func (d *dnsResolver) watcher() {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupSRV() []resolver.Address {
|
||||
func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||
if !EnableSRVLookups {
|
||||
return nil, nil
|
||||
}
|
||||
var newAddrs []resolver.Address
|
||||
_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
|
||||
return nil
|
||||
err = handleDNSError(err, "SRV") // may become nil
|
||||
return nil, err
|
||||
}
|
||||
for _, s := range srvs {
|
||||
lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
|
||||
continue
|
||||
}
|
||||
for _, a := range lbAddrs {
|
||||
a, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
err = handleDNSError(err, "A") // may become nil
|
||||
if err == nil {
|
||||
// If there are other SRV records, look them up and ignore this
|
||||
// one that does not exist.
|
||||
continue
|
||||
}
|
||||
addr := a + ":" + strconv.Itoa(int(s.Port))
|
||||
return nil, err
|
||||
}
|
||||
for _, a := range lbAddrs {
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||
}
|
||||
addr := ip + ":" + strconv.Itoa(int(s.Port))
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
|
||||
}
|
||||
}
|
||||
return newAddrs
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() string {
|
||||
var filterError = func(err error) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
// Timeouts and temporary errors should be communicated to gRPC to
|
||||
// attempt another DNS query (with backoff). Other errors should be
|
||||
// suppressed (they may represent the absence of a TXT record).
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
err = filterError(err)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
|
||||
grpclog.Infoln(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
|
||||
if err != nil {
|
||||
grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
|
||||
return ""
|
||||
if envconfig.TXTErrIgnore {
|
||||
return nil
|
||||
}
|
||||
if err = handleDNSError(err, "TXT"); err != nil {
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
var res string
|
||||
for _, s := range ss {
|
||||
@ -315,40 +294,45 @@ func (d *dnsResolver) lookupTXT() string {
|
||||
|
||||
// TXT record must have "grpc_config=" attribute in order to be used as service config.
|
||||
if !strings.HasPrefix(res, txtAttribute) {
|
||||
grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
return ""
|
||||
grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
|
||||
// This is not an error; it is the equivalent of not having a service config.
|
||||
return nil
|
||||
}
|
||||
return strings.TrimPrefix(res, txtAttribute)
|
||||
sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
|
||||
return d.cc.ParseServiceConfig(sc)
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() []resolver.Address {
|
||||
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
|
||||
var newAddrs []resolver.Address
|
||||
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
|
||||
return nil
|
||||
err = handleDNSError(err, "A")
|
||||
return nil, err
|
||||
}
|
||||
for _, a := range addrs {
|
||||
a, ok := formatIP(a)
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
|
||||
continue
|
||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||
}
|
||||
addr := a + ":" + d.port
|
||||
addr := ip + ":" + d.port
|
||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
|
||||
}
|
||||
return newAddrs
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookup() ([]resolver.Address, string) {
|
||||
newAddrs := d.lookupSRV()
|
||||
// Support fallback to non-balancer address.
|
||||
newAddrs = append(newAddrs, d.lookupHost()...)
|
||||
if d.disableServiceConfig {
|
||||
return newAddrs, ""
|
||||
func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||
srv, srvErr := d.lookupSRV()
|
||||
addrs, hostErr := d.lookupHost()
|
||||
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
|
||||
return nil, hostErr
|
||||
}
|
||||
sc := d.lookupTXT()
|
||||
return newAddrs, canaryingSC(sc)
|
||||
state := &resolver.State{
|
||||
Addresses: append(addrs, srv...),
|
||||
}
|
||||
if !d.disableServiceConfig {
|
||||
state.ServiceConfig = d.lookupTXT()
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||
@ -434,12 +418,12 @@ func canaryingSC(js string) string {
|
||||
var rcs []rawChoice
|
||||
err := json.Unmarshal([]byte(js), &rcs)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
|
||||
grpclog.Warningf("dns: error parsing service config json: %v", err)
|
||||
return ""
|
||||
}
|
||||
cliHostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
|
||||
grpclog.Warningf("dns: error getting client hostname: %v", err)
|
||||
return ""
|
||||
}
|
||||
var sc string
|
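Several hunks above rely on formatIP to normalize an address before appending ":port". A standalone sketch of that rule, matching the behaviour described in the formatIP comment (the helper here is a re-implementation for illustration, not the vendored function):

```go
package main

import (
	"fmt"
	"net"
)

// formatIP returns ok=false for non-IP input, passes IPv4 literals through,
// and brackets IPv6 literals so a ":port" suffix can be appended safely.
func formatIP(addr string) (string, bool) {
	ip := net.ParseIP(addr)
	if ip == nil {
		return "", false
	}
	if ip.To4() != nil {
		return addr, true
	}
	return "[" + addr + "]", true
}

func main() {
	for _, a := range []string{"10.0.0.1", "2001:db8::1", "not-an-ip"} {
		ip, ok := formatIP(a)
		fmt.Println(a, "->", ip, ok)
	}
}
```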
33 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go generated vendored Normal file
@ -0,0 +1,33 @@
// +build go1.13

/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package dns

import "net"

func init() {
	filterError = func(err error) error {
		if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
			// The name does not exist; not an error.
			return nil
		}
		return err
	}
}
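To make the build-tag split above concrete, here is a self-contained sketch of the same filtering rule: timeouts and temporary DNS failures are surfaced so gRPC can retry with backoff, while "record absent" failures are suppressed. It uses errors.As where the vendored code uses a direct type assertion, and the sample errors are fabricated for the demo:

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// filterDNSError suppresses non-timeout, non-temporary DNS errors, which are
// treated as the absence of a record rather than a failure.
func filterDNSError(err error) error {
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
		return nil
	}
	return err
}

func main() {
	notFound := &net.DNSError{Err: "no such host", IsNotFound: true}
	timeout := &net.DNSError{Err: "i/o timeout", IsTimeout: true}

	fmt.Println(filterDNSError(notFound)) // <nil> — suppressed
	fmt.Println(filterDNSError(timeout))  // reported to the caller
}
```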
@ -26,7 +26,7 @@ const scheme = "passthrough"

type passthroughBuilder struct{}

func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	r := &passthroughResolver{
		target: target,
		cc:     cc,
@ -48,7 +48,7 @@ func (r *passthroughResolver) start() {
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
}

func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}

func (*passthroughResolver) Close() {}
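Since Build and ResolveNow were renamed to take BuildOptions and ResolveNowOptions in this release, a minimal custom resolver written against the new option types might look like the sketch below. The scheme name and address are invented for the example and this is not part of the vendored code:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// staticBuilder always reports a single fixed backend address.
type staticBuilder struct{}

type staticResolver struct{}

func (staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// Push one static address to the ClientConn and return a no-op resolver.
	cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
	return staticResolver{}, nil
}

func (staticBuilder) Scheme() string { return "static" }

func (staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (staticResolver) Close()                                {}

func main() {
	resolver.Register(staticBuilder{})
	fmt.Println("registered resolver for scheme:", resolver.Get("static").Scheme())
}
```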
12 vendor/google.golang.org/grpc/internal/transport/controlbuf.go generated vendored
@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false }
type headerFrame struct {
	streamID uint32
	hf []hpack.HeaderField
	endStream bool // Valid on server side.
	initStream func(uint32) (bool, error) // Used only on the client side.
	endStream bool // Valid on server side.
	initStream func(uint32) error // Used only on the client side.
	onWrite func()
	wq *writeQuota // write quota for the stream created.
	cleanup *cleanupStream // Valid on the server side.
@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {

func (l *loopyWriter) originateStream(str *outStream) error {
	hdr := str.itl.dequeue().(*headerFrame)
	sendPing, err := hdr.initStream(str.id)
	if err != nil {
	if err := hdr.initStream(str.id); err != nil {
		if err == ErrConnClosing {
			return err
		}
		// Other errors(errStreamDrain) need not close transport.
		return nil
	}
	if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
		return err
	}
	l.estdStreams[str.id] = str
	if sendPing {
		return l.pingHandler(&ping{data: [8]byte{}})
	}
	return nil
}
10 vendor/google.golang.org/grpc/internal/transport/handler_server.go generated vendored
@ -227,7 +227,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro

	if err == nil { // transport has not been closed
		if ht.stats != nil {
			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
				Trailer: s.trailer.Copy(),
			})
		}
	}
	ht.Close()
@ -289,7 +291,9 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {

	if err == nil {
		if ht.stats != nil {
			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
				Header: md.Copy(),
			})
		}
	}
	return err
@ -334,7 +338,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
		Addr: ht.RemoteAddr(),
	}
	if req.TLS != nil {
		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}}
	}
	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
	s.ctx = peer.NewContext(ctx, pr)
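The stats hunks above now attach copies of the header and trailer metadata to the reported events. A tiny sketch of why Copy() matters here (the key and values are arbitrary): without the copy, a later mutation of the metadata would also change what the stats handler already recorded.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("x-request-id", "abc123")

	// Snapshot the metadata as the stats handlers above do.
	reported := md.Copy()

	// Later mutation of the original does not leak into the snapshot.
	md.Set("x-request-id", "mutated")
	fmt.Println(reported.Get("x-request-id")) // [abc123]
}
```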
203 vendor/google.golang.org/grpc/internal/transport/http2_client.go generated vendored
@ -35,6 +35,7 @@ import (
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
@ -44,8 +45,14 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// clientConnectionCounter counts the number of connections a client has
|
||||
// initiated (equal to the number of http2Clients created). Must be accessed
|
||||
// atomically.
|
||||
var clientConnectionCounter uint64
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
type http2Client struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
@ -62,8 +69,6 @@ type http2Client struct {
|
||||
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
|
||||
// that the server sent GoAway on this transport.
|
||||
goAway chan struct{}
|
||||
// awakenKeepalive is used to wake up keepalive when after it has gone dormant.
|
||||
awakenKeepalive chan struct{}
|
||||
|
||||
framer *framer
|
||||
// controlBuf delivers all the control related tasks (e.g., window
|
||||
@ -77,9 +82,6 @@ type http2Client struct {
|
||||
|
||||
perRPCCreds []credentials.PerRPCCredentials
|
||||
|
||||
// Boolean to keep track of reading activity on transport.
|
||||
// 1 is true and 0 is false.
|
||||
activity uint32 // Accessed atomically.
|
||||
kp keepalive.ClientParameters
|
||||
keepaliveEnabled bool
|
||||
|
||||
@ -110,6 +112,16 @@ type http2Client struct {
|
||||
// goAwayReason records the http2.ErrCode and debug data received with the
|
||||
// GoAway frame.
|
||||
goAwayReason GoAwayReason
|
||||
// A condition variable used to signal when the keepalive goroutine should
|
||||
// go dormant. The condition for dormancy is based on the number of active
|
||||
// streams and the `PermitWithoutStream` keepalive client parameter. And
|
||||
// since the number of active streams is guarded by the above mutex, we use
|
||||
// the same for this condition variable as well.
|
||||
kpDormancyCond *sync.Cond
|
||||
// A boolean to track whether the keepalive goroutine is dormant or not.
|
||||
// This is checked before attempting to signal the above condition
|
||||
// variable.
|
||||
kpDormant bool
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID int64 // channelz unique identification number
|
||||
@ -119,6 +131,8 @@ type http2Client struct {
|
||||
onClose func()
|
||||
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
}
|
||||
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
|
||||
@ -232,7 +246,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
goAway: make(chan struct{}),
|
||||
awakenKeepalive: make(chan struct{}, 1),
|
||||
framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
scheme: scheme,
|
||||
@ -264,9 +277,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
// Make sure awakenKeepalive can't be written upon.
|
||||
// keepalive routine will make it writable, if need be.
|
||||
t.awakenKeepalive <- struct{}{}
|
||||
if t.statsHandler != nil {
|
||||
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
@ -281,6 +291,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
}
|
||||
if t.keepaliveEnabled {
|
||||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||||
go t.keepalive()
|
||||
}
|
||||
// Start the reader goroutine for incoming message. Each transport has
|
||||
@ -325,6 +336,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
}
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
|
||||
|
||||
if err := t.framer.writer.Flush(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -347,6 +360,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
|
||||
s := &Stream{
|
||||
ct: t,
|
||||
done: make(chan struct{}),
|
||||
method: callHdr.Method,
|
||||
sendCompress: callHdr.SendCompress,
|
||||
@ -380,23 +394,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
}
|
||||
|
||||
func (t *http2Client) getPeer() *peer.Peer {
|
||||
pr := &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo,
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
return pr
|
||||
}
|
||||
|
||||
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
|
||||
aud := t.createAudience(callHdr)
|
||||
authData, err := t.getTrAuthData(ctx, aud)
|
||||
ri := credentials.RequestInfo{
|
||||
Method: callHdr.Method,
|
||||
AuthInfo: t.authInfo,
|
||||
}
|
||||
ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
|
||||
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
|
||||
callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -419,6 +434,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
|
||||
if callHdr.SendCompress != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
|
||||
}
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||||
@ -564,7 +580,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
hdr := &headerFrame{
|
||||
hf: headerFields,
|
||||
endStream: false,
|
||||
initStream: func(id uint32) (bool, error) {
|
||||
initStream: func(id uint32) error {
|
||||
t.mu.Lock()
|
||||
if state := t.state; state != reachable {
|
||||
t.mu.Unlock()
|
||||
@ -574,29 +590,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
err = ErrConnClosing
|
||||
}
|
||||
cleanup(err)
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
t.activeStreams[id] = s
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||||
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
|
||||
}
|
||||
var sendPing bool
|
||||
// If the number of active streams change from 0 to 1, then check if keepalive
|
||||
// has gone dormant. If so, wake it up.
|
||||
if len(t.activeStreams) == 1 && t.keepaliveEnabled {
|
||||
select {
|
||||
case t.awakenKeepalive <- struct{}{}:
|
||||
sendPing = true
|
||||
// Fill the awakenKeepalive channel again as this channel must be
|
||||
// kept non-writable except at the point that the keepalive()
|
||||
// goroutine is waiting either to be awaken or shutdown.
|
||||
t.awakenKeepalive <- struct{}{}
|
||||
default:
|
||||
}
|
||||
// If the keepalive goroutine has gone dormant, wake it up.
|
||||
if t.kpDormant {
|
||||
t.kpDormancyCond.Signal()
|
||||
}
|
||||
t.mu.Unlock()
|
||||
return sendPing, nil
|
||||
return nil
|
||||
},
|
||||
onOrphaned: cleanup,
|
||||
wq: s.wq,
|
||||
@ -674,12 +680,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
header, _, _ := metadata.FromOutgoingContextRaw(ctx)
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
@ -778,6 +786,11 @@ func (t *http2Client) Close() error {
|
||||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
if t.kpDormant {
|
||||
// If the keepalive goroutine is blocked on this condition variable, we
|
||||
// should unblock it so that the goroutine eventually exits.
|
||||
t.kpDormancyCond.Signal()
|
||||
}
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
t.cancel()
|
||||
@ -853,11 +866,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
return t.controlBuf.put(df)
|
||||
}
|
||||
|
||||
func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
|
||||
func (t *http2Client) getStream(f http2.Frame) *Stream {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
s, ok := t.activeStreams[f.Header().StreamID]
|
||||
return s, ok
|
||||
s := t.activeStreams[f.Header().StreamID]
|
||||
t.mu.Unlock()
|
||||
return s
|
||||
}
|
||||
|
||||
// adjustWindow sends out extra window update over the initial window size
|
||||
@ -937,8 +950,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
||||
t.controlBuf.put(bdpPing)
|
||||
}
|
||||
// Select the right stream to dispatch.
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
s := t.getStream(f)
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
if size > 0 {
|
||||
@ -969,8 +982,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
||||
}
|
||||
|
||||
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
s, ok := t.getStream(f)
|
||||
if !ok {
|
||||
s := t.getStream(f)
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeRefusedStream {
|
||||
@ -1147,8 +1160,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
||||
|
||||
// operateHeaders takes action on the decoded headers.
|
||||
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
s, ok := t.getStream(frame)
|
||||
if !ok {
|
||||
s := t.getStream(frame)
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
endStream := frame.StreamEnded()
|
||||
@ -1177,12 +1190,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: s.header.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: s.trailer.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
@ -1191,6 +1206,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
|
||||
// If headerChan hasn't been closed yet
|
||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||
s.headerValid = true
|
||||
if !endStream {
|
||||
// HEADERS frame block carries a Response-Headers.
|
||||
isHeader = true
|
||||
@ -1233,7 +1249,7 @@ func (t *http2Client) reader() {
|
||||
}
|
||||
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
@ -1248,7 +1264,7 @@ func (t *http2Client) reader() {
|
||||
t.controlBuf.throttle()
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
if t.keepaliveEnabled {
|
||||
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
if err != nil {
|
||||
// Abort an active stream if the http2.Framer returns a
|
||||
@ -1292,56 +1308,83 @@ func (t *http2Client) reader() {
|
||||
}
|
||||
}
|
||||
|
||||
func minTime(a, b time.Duration) time.Duration {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
|
||||
func (t *http2Client) keepalive() {
|
||||
p := &ping{data: [8]byte{}}
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
outstandingPing := false
|
||||
// Amount of time remaining before which we should receive an ACK for the
|
||||
// last sent ping.
|
||||
timeoutLeft := time.Duration(0)
|
||||
// Records the last value of t.lastRead before we go block on the timer.
|
||||
// This is required to check for read activity since then.
|
||||
prevNano := time.Now().UnixNano()
|
||||
timer := time.NewTimer(t.kp.Time)
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
timer.Reset(t.kp.Time)
|
||||
lastRead := atomic.LoadInt64(&t.lastRead)
|
||||
if lastRead > prevNano {
|
||||
// There has been read activity since the last time we were here.
|
||||
outstandingPing = false
|
||||
// Next timer should fire at kp.Time seconds from lastRead time.
|
||||
timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
|
||||
prevNano = lastRead
|
||||
continue
|
||||
}
|
||||
// Check if keepalive should go dormant.
|
||||
if outstandingPing && timeoutLeft <= 0 {
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
t.mu.Lock()
|
||||
if t.state == closing {
|
||||
// If the transport is closing, we should exit from the
|
||||
// keepalive goroutine here. If not, we could have a race
|
||||
// between the call to Signal() from Close() and the call to
|
||||
// Wait() here, whereby the keepalive goroutine ends up
|
||||
// blocking on the condition variable which will never be
|
||||
// signalled again.
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
|
||||
// Make awakenKeepalive writable.
|
||||
<-t.awakenKeepalive
|
||||
t.mu.Unlock()
|
||||
select {
|
||||
case <-t.awakenKeepalive:
|
||||
// If the control gets here a ping has been sent
|
||||
// need to reset the timer with keepalive.Timeout.
|
||||
case <-t.ctx.Done():
|
||||
return
|
||||
}
|
||||
} else {
|
||||
t.mu.Unlock()
|
||||
// If a ping was sent out previously (because there were active
|
||||
// streams at that point) which wasn't acked and its timeout
|
||||
// hadn't fired, but we got here and are about to go dormant,
|
||||
// we should make sure that we unconditionally send a ping once
|
||||
// we awaken.
|
||||
outstandingPing = false
|
||||
t.kpDormant = true
|
||||
t.kpDormancyCond.Wait()
|
||||
}
|
||||
t.kpDormant = false
|
||||
t.mu.Unlock()
|
||||
|
||||
// We get here either because we were dormant and a new stream was
|
||||
// created which unblocked the Wait() call, or because the
|
||||
// keepalive timer expired. In both cases, we need to send a ping.
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
}
|
||||
// Send ping.
|
||||
t.controlBuf.put(p)
|
||||
timeoutLeft = t.kp.Timeout
|
||||
outstandingPing = true
|
||||
}
|
||||
|
||||
// By the time control gets here a ping has been sent one way or the other.
|
||||
timer.Reset(t.kp.Timeout)
|
||||
select {
|
||||
case <-timer.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
timer.Reset(t.kp.Time)
|
||||
continue
|
||||
}
|
||||
infof("transport: closing client transport due to idleness.")
|
||||
t.Close()
|
||||
return
|
||||
case <-t.ctx.Done():
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
return
|
||||
}
|
||||
// The amount of time to sleep here is the minimum of kp.Time and
|
||||
// timeoutLeft. This will ensure that we wait only for kp.Time
|
||||
// before sending out the next ping (for cases where the ping is
|
||||
// acked).
|
||||
sleepDuration := minTime(t.kp.Time, timeoutLeft)
|
||||
timeoutLeft -= sleepDuration
|
||||
timer.Reset(sleepDuration)
|
||||
case <-t.ctx.Done():
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
|
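A worked example of the client keepalive arithmetic used above: the timer is re-armed to fire kp.Time after the last observed read, and while a ping is outstanding the goroutine never sleeps longer than the remaining timeout. minTime follows the helper in the hunk; the durations are arbitrary illustrations:

```go
package main

import (
	"fmt"
	"time"
)

// minTime returns the smaller of two durations, as in the hunk above.
func minTime(a, b time.Duration) time.Duration {
	if a < b {
		return a
	}
	return b
}

func main() {
	kpTime := 10 * time.Second
	timeoutLeft := 3 * time.Second

	// Pretend the last read happened 4 seconds ago.
	lastRead := time.Now().Add(-4 * time.Second).UnixNano()

	// Next fire: kp.Time measured from lastRead, i.e. roughly 6s from now.
	next := time.Duration(lastRead) + kpTime - time.Duration(time.Now().UnixNano())
	fmt.Println("reset timer to ~", next.Round(time.Second))

	// With a ping outstanding, sleep only the smaller of kp.Time and the
	// time left before the ping is declared lost.
	fmt.Println("sleep:", minTime(kpTime, timeoutLeft))
}
```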
152 vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored
@ -62,11 +62,15 @@ var (
|
||||
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
|
||||
)
|
||||
|
||||
// serverConnectionCounter counts the number of connections a server has seen
|
||||
// (equal to the number of http2Servers created). Must be accessed atomically.
|
||||
var serverConnectionCounter uint64
|
||||
|
||||
// http2Server implements the ServerTransport interface with HTTP2.
|
||||
type http2Server struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
ctxDone <-chan struct{} // Cache the context.Done() chan
|
||||
cancel context.CancelFunc
|
||||
done chan struct{}
|
||||
conn net.Conn
|
||||
loopy *loopyWriter
|
||||
readerDone chan struct{} // sync point to enable testing.
|
||||
@ -84,12 +88,8 @@ type http2Server struct {
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
stats stats.Handler
|
||||
// Flag to keep track of reading activity on transport.
|
||||
// 1 is true and 0 is false.
|
||||
activity uint32 // Accessed atomically.
|
||||
// Keepalive and max-age parameters for the server.
|
||||
kp keepalive.ServerParameters
|
||||
|
||||
// Keepalive enforcement policy.
|
||||
kep keepalive.EnforcementPolicy
|
||||
// The time instance last ping was received.
|
||||
@ -125,6 +125,8 @@ type http2Server struct {
|
||||
channelzID int64 // channelz unique identification number
|
||||
czData *channelzData
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
}
|
||||
|
||||
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
|
||||
@ -175,6 +177,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
Val: *config.MaxHeaderListSize,
|
||||
})
|
||||
}
|
||||
if config.HeaderTableSize != nil {
|
||||
isettings = append(isettings, http2.Setting{
|
||||
ID: http2.SettingHeaderTableSize,
|
||||
Val: *config.HeaderTableSize,
|
||||
})
|
||||
}
|
||||
if err := framer.fr.WriteSettings(isettings...); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: %v", err)
|
||||
}
|
||||
@ -206,11 +214,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
if kep.MinTime == 0 {
|
||||
kep.MinTime = defaultKeepalivePolicyMinTime
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
done := make(chan struct{})
|
||||
t := &http2Server{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
ctxDone: ctx.Done(),
|
||||
ctx: context.Background(),
|
||||
done: done,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
@ -231,7 +238,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
czData: new(channelzData),
|
||||
bufferPool: newBufferPool(),
|
||||
}
|
||||
t.controlBuf = newControlBuffer(t.ctxDone)
|
||||
t.controlBuf = newControlBuffer(t.done)
|
||||
if dynamicWindow {
|
||||
t.bdpEst = &bdpEstimator{
|
||||
bdp: initialWindowSize,
|
||||
@ -249,6 +256,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||
|
||||
t.framer.writer.Flush()
|
||||
|
||||
defer func() {
|
||||
@ -273,7 +283,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
if err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
|
||||
}
|
||||
atomic.StoreUint32(&t.activity, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
|
||||
@ -362,12 +372,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
rstCode: http2.ErrCodeRefusedStream,
|
||||
onWrite: func() {},
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
}
|
||||
}
|
||||
t.mu.Lock()
|
||||
if t.state != reachable {
|
||||
t.mu.Unlock()
|
||||
s.cancel()
|
||||
return false
|
||||
}
|
||||
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||||
@ -378,12 +390,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
rstCode: http2.ErrCodeRefusedStream,
|
||||
onWrite: func() {},
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
}
|
||||
if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||||
t.mu.Unlock()
|
||||
// illegal gRPC stream id.
|
||||
errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
|
||||
s.cancel()
|
||||
return true
|
||||
}
|
||||
t.maxStreamID = streamID
|
||||
@ -408,6 +422,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: s.recvCompress,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(state.data.mdata).Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
@ -441,7 +456,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
for {
|
||||
t.controlBuf.throttle()
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
atomic.StoreUint32(&t.activity, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
||||
@ -749,7 +764,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteHeader sends the header metedata md back to the client.
|
||||
// WriteHeader sends the header metadata md back to the client.
|
||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if s.updateHeaderSent() || s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
@ -800,7 +815,9 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
if t.stats != nil {
|
||||
// Note: WireLength is not set in outHeader.
|
||||
// TODO(mmukhi): Revisit this later, if needed.
|
||||
outHeader := &stats.OutHeader{}
|
||||
outHeader := &stats.OutHeader{
|
||||
Header: s.header.Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.Context(), outHeader)
|
||||
}
|
||||
return nil
|
||||
@ -863,7 +880,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
rst := s.getState() == streamActive
|
||||
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
|
||||
if t.stats != nil {
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -885,7 +904,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
|
||||
s.cancel()
|
||||
select {
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
}
|
||||
@ -907,7 +926,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
}
|
||||
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
|
||||
select {
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
}
|
||||
@ -924,32 +943,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
// after an additional duration of keepalive.Timeout.
|
||||
func (t *http2Server) keepalive() {
|
||||
p := &ping{}
|
||||
var pingSent bool
|
||||
maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
|
||||
maxAge := time.NewTimer(t.kp.MaxConnectionAge)
|
||||
keepalive := time.NewTimer(t.kp.Time)
|
||||
// NOTE: All exit paths of this function should reset their
|
||||
// respective timers. A failure to do so will cause the
|
||||
// following clean-up to deadlock and eventually leak.
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
outstandingPing := false
|
||||
// Amount of time remaining before which we should receive an ACK for the
|
||||
// last sent ping.
|
||||
kpTimeoutLeft := time.Duration(0)
|
||||
// Records the last value of t.lastRead before we go block on the timer.
|
||||
// This is required to check for read activity since then.
|
||||
prevNano := time.Now().UnixNano()
|
||||
// Initialize the different timers to their default values.
|
||||
idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
|
||||
ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
|
||||
kpTimer := time.NewTimer(t.kp.Time)
|
||||
defer func() {
|
||||
if !maxIdle.Stop() {
|
||||
<-maxIdle.C
|
||||
}
|
||||
if !maxAge.Stop() {
|
||||
<-maxAge.C
|
||||
}
|
||||
if !keepalive.Stop() {
|
||||
<-keepalive.C
|
||||
}
|
||||
// We need to drain the underlying channel in these timers after a call
|
||||
// to Stop(), only if we are interested in resetting them. Clearly we
|
||||
// are not interested in resetting them here.
|
||||
idleTimer.Stop()
|
||||
ageTimer.Stop()
|
||||
kpTimer.Stop()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-maxIdle.C:
|
||||
case <-idleTimer.C:
|
||||
t.mu.Lock()
|
||||
idle := t.idle
|
||||
if idle.IsZero() { // The connection is non-idle.
|
||||
t.mu.Unlock()
|
||||
maxIdle.Reset(t.kp.MaxConnectionIdle)
|
||||
idleTimer.Reset(t.kp.MaxConnectionIdle)
|
||||
continue
|
||||
}
|
||||
val := t.kp.MaxConnectionIdle - time.Since(idle)
|
||||
@ -958,44 +980,52 @@ func (t *http2Server) keepalive() {
|
||||
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
|
||||
// Gracefully close the connection.
|
||||
t.drain(http2.ErrCodeNo, []byte{})
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
maxIdle.Reset(infinity)
|
||||
return
|
||||
}
|
||||
maxIdle.Reset(val)
|
||||
case <-maxAge.C:
|
||||
idleTimer.Reset(val)
|
||||
case <-ageTimer.C:
|
||||
t.drain(http2.ErrCodeNo, []byte{})
|
||||
maxAge.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
select {
|
||||
case <-maxAge.C:
|
||||
case <-ageTimer.C:
|
||||
// Close the connection after grace period.
|
||||
infof("transport: closing server transport due to maximum connection age.")
|
||||
t.Close()
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
maxAge.Reset(infinity)
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
}
|
||||
return
|
||||
case <-keepalive.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
pingSent = false
|
||||
keepalive.Reset(t.kp.Time)
|
||||
case <-kpTimer.C:
|
||||
lastRead := atomic.LoadInt64(&t.lastRead)
|
||||
if lastRead > prevNano {
|
||||
// There has been read activity since the last time we were
|
||||
// here. Setup the timer to fire at kp.Time seconds from
|
||||
// lastRead time and continue.
|
||||
outstandingPing = false
|
||||
kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
|
||||
prevNano = lastRead
|
||||
continue
|
||||
}
|
||||
if pingSent {
|
||||
if outstandingPing && kpTimeoutLeft <= 0 {
|
||||
infof("transport: closing server transport due to idleness.")
|
||||
t.Close()
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
keepalive.Reset(infinity)
|
||||
return
|
||||
}
|
||||
pingSent = true
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
kpTimeoutLeft = t.kp.Timeout
|
||||
outstandingPing = true
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
keepalive.Reset(t.kp.Timeout)
|
||||
case <-t.ctx.Done():
|
||||
// The amount of time to sleep here is the minimum of kp.Time and
|
||||
// timeoutLeft. This will ensure that we wait only for kp.Time
|
||||
// before sending out the next ping (for cases where the ping is
|
||||
// acked).
|
||||
sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
|
||||
kpTimeoutLeft -= sleepDuration
|
||||
kpTimer.Reset(sleepDuration)
|
||||
case <-t.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -1015,7 +1045,7 @@ func (t *http2Server) Close() error {
|
||||
t.activeStreams = nil
|
||||
t.mu.Unlock()
|
||||
t.controlBuf.finish()
|
||||
t.cancel()
|
||||
close(t.done)
|
||||
err := t.conn.Close()
|
||||
if channelz.IsOn() {
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
@ -1155,7 +1185,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||||
select {
|
||||
case <-t.drainChan:
|
||||
case <-timer.C:
|
||||
case <-t.ctx.Done():
|
||||
case <-t.done:
|
||||
return
|
||||
}
|
||||
t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
|
||||
@ -1205,7 +1235,7 @@ func (t *http2Server) getOutFlowWindow() int64 {
|
||||
select {
|
||||
case sz := <-resp:
|
||||
return int64(sz)
|
||||
case <-t.ctxDone:
|
||||
case <-t.done:
|
||||
return -1
|
||||
case <-timer.C:
|
||||
return -2
|
||||
|
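The server-side keepalive rewrite above is driven by the public keepalive parameters. Assuming a plain gRPC server, wiring them up looks roughly like the sketch below; the values are arbitrary examples, not recommendations:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	srv := grpc.NewServer(
		// ServerParameters feed MaxConnectionIdle/Age and the ping Time/Timeout
		// handled by the keepalive goroutine in the hunks above.
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,
			MaxConnectionAge:  30 * time.Minute,
			Time:              2 * time.Minute,
			Timeout:           20 * time.Second,
		}),
		// EnforcementPolicy bounds how aggressively clients may ping.
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             time.Minute,
			PermitWithoutStream: false,
		}),
	)
	fmt.Println("server configured:", srv != nil)
}
```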
70 vendor/google.golang.org/grpc/internal/transport/transport.go generated vendored
@ -73,10 +73,11 @@ type recvMsg struct {
|
||||
}
|
||||
|
||||
// recvBuffer is an unbounded channel of recvMsg structs.
|
||||
// Note recvBuffer differs from controlBuffer only in that recvBuffer
|
||||
// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
|
||||
// recvBuffer is written to much more often than
|
||||
// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
|
||||
//
|
||||
// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
|
||||
// holds a channel of recvMsg structs instead of objects implementing "item"
|
||||
// interface. recvBuffer is written to much more often and using strict recvMsg
|
||||
// structs helps avoid allocation in "recvBuffer.put"
|
||||
type recvBuffer struct {
|
||||
c chan recvMsg
|
||||
mu sync.Mutex
|
||||
@ -233,6 +234,7 @@ const (
|
||||
type Stream struct {
|
||||
id uint32
|
||||
st ServerTransport // nil for client side Stream
|
||||
ct *http2Client // nil for server side Stream
|
||||
ctx context.Context // the associated context of the stream
|
||||
cancel context.CancelFunc // always nil for client side Stream
|
||||
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
|
||||
@ -251,6 +253,10 @@ type Stream struct {
|
||||
|
||||
headerChan chan struct{} // closed to indicate the end of header metadata.
|
||||
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||
// headerValid indicates whether a valid header was received. Only
|
||||
// meaningful after headerChan is closed (always call waitOnHeader() before
|
||||
// reading its value). Not valid on server side.
|
||||
headerValid bool
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
@ -303,34 +309,28 @@ func (s *Stream) getState() streamState {
|
||||
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
|
||||
}
|
||||
|
||||
func (s *Stream) waitOnHeader() error {
|
||||
func (s *Stream) waitOnHeader() {
|
||||
if s.headerChan == nil {
|
||||
// On the server headerChan is always nil since a stream originates
|
||||
// only after having received headers.
|
||||
return nil
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
// We prefer success over failure when reading messages because we delay
|
||||
// context error in stream.Read(). To keep behavior consistent, we also
|
||||
// prefer success here.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
return ContextErr(s.ctx.Err())
|
||||
// Close the stream to prevent headers/trailers from changing after
|
||||
// this function returns.
|
||||
s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
|
||||
// headerChan could possibly not be closed yet if closeStream raced
|
||||
// with operateHeaders; wait until it is closed explicitly here.
|
||||
<-s.headerChan
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is empty string if there is no compression applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
if err := s.waitOnHeader(); err != nil {
|
||||
return ""
|
||||
}
|
||||
s.waitOnHeader()
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
@ -351,36 +351,27 @@ func (s *Stream) Done() <-chan struct{} {
|
||||
// available. It blocks until i) the metadata is ready or ii) there is no header
|
||||
// metadata or iii) the stream is canceled/expired.
|
||||
//
|
||||
// On server side, it returns the out header after t.WriteHeader is called.
|
||||
// On server side, it returns the out header after t.WriteHeader is called. It
|
||||
// does not block and must not be called until after WriteHeader.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
if s.headerChan == nil && s.header != nil {
|
||||
if s.headerChan == nil {
|
||||
// On server side, return the header in stream. It will be the out
|
||||
// header after t.WriteHeader is called.
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
err := s.waitOnHeader()
|
||||
// Even if the stream is closed, header is returned if available.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
if s.header == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return s.header.Copy(), nil
|
||||
default:
|
||||
s.waitOnHeader()
|
||||
if !s.headerValid {
|
||||
return nil, s.status.Err()
|
||||
}
|
||||
return nil, err
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
|
||||
// TrailersOnly blocks until a header or trailers-only frame is received and
|
||||
// then returns true if the stream was trailers-only. If the stream ends
|
||||
// before headers are received, returns true, nil. If a context error happens
|
||||
// first, returns it as a status error. Client-side only.
|
||||
func (s *Stream) TrailersOnly() (bool, error) {
|
||||
err := s.waitOnHeader()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return s.noHeaders, nil
|
||||
// before headers are received, returns true, nil. Client-side only.
|
||||
func (s *Stream) TrailersOnly() bool {
|
||||
s.waitOnHeader()
|
||||
return s.noHeaders
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metedata. Note that if it is not called
|
||||
@ -534,6 +525,7 @@ type ServerConfig struct {
|
||||
ReadBufferSize int
|
||||
ChannelzParentID int64
|
||||
MaxHeaderListSize *uint32
|
||||
HeaderTableSize *uint32
|
||||
}
|
||||
|
||||
// NewServerTransport creates a ServerTransport with conn or non-nil error
|
||||
|
172 vendor/google.golang.org/grpc/picker_wrapper.go generated vendored
@ -20,6 +20,7 @@ package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
@ -31,49 +32,78 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// v2PickerWrapper wraps a balancer.Picker while providing the
|
||||
// balancer.V2Picker API. It requires a pickerWrapper to generate errors
|
||||
// including the latest connectionError. To be deleted when balancer.Picker is
|
||||
// updated to the balancer.V2Picker API.
|
||||
type v2PickerWrapper struct {
|
||||
picker balancer.Picker
|
||||
connErr *connErr
|
||||
}
|
||||
|
||||
func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
sc, done, err := v.picker.Pick(info.Ctx, info)
|
||||
if err != nil {
|
||||
if err == balancer.ErrTransientFailure {
|
||||
return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
|
||||
}
|
||||
return balancer.PickResult{}, err
|
||||
}
|
||||
return balancer.PickResult{SubConn: sc, Done: done}, nil
|
||||
}
|
||||
|
||||
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
|
||||
// actions and unblock when there's a picker update.
|
||||
type pickerWrapper struct {
|
||||
mu sync.Mutex
|
||||
done bool
|
||||
blockingCh chan struct{}
|
||||
picker balancer.Picker
|
||||
picker balancer.V2Picker
|
||||
|
||||
// The latest connection happened.
|
||||
connErrMu sync.Mutex
|
||||
connErr error
|
||||
// The latest connection error. TODO: remove when V1 picker is deprecated;
|
||||
// balancer should be responsible for providing the error.
|
||||
*connErr
|
||||
}
|
||||
|
||||
func newPickerWrapper() *pickerWrapper {
|
||||
bp := &pickerWrapper{blockingCh: make(chan struct{})}
|
||||
return bp
|
||||
type connErr struct {
|
||||
mu sync.Mutex
|
||||
err error
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) updateConnectionError(err error) {
|
||||
bp.connErrMu.Lock()
|
||||
bp.connErr = err
|
||||
bp.connErrMu.Unlock()
|
||||
func (c *connErr) updateConnectionError(err error) {
|
||||
c.mu.Lock()
|
||||
c.err = err
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) connectionError() error {
|
||||
bp.connErrMu.Lock()
|
||||
err := bp.connErr
|
||||
bp.connErrMu.Unlock()
|
||||
func (c *connErr) connectionError() error {
|
||||
c.mu.Lock()
|
||||
err := c.err
|
||||
c.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func newPickerWrapper() *pickerWrapper {
|
||||
return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
|
||||
func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
bp.mu.Lock()
|
||||
if bp.done {
|
||||
bp.mu.Unlock()
|
||||
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
|
||||
}
|
||||
|
||||
// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
|
||||
func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
pw.mu.Unlock()
|
||||
return
|
||||
}
|
||||
bp.picker = p
|
||||
// bp.blockingCh should never be nil.
|
||||
close(bp.blockingCh)
|
||||
bp.blockingCh = make(chan struct{})
|
||||
bp.mu.Unlock()
|
||||
pw.picker = p
|
||||
// pw.blockingCh should never be nil.
|
||||
close(pw.blockingCh)
|
||||
pw.blockingCh = make(chan struct{})
|
||||
pw.mu.Unlock()
|
||||
}
|
||||
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
||||
@@ -100,83 +130,85 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
|
||||
// - the current picker returns other errors and failfast is false.
|
||||
// - the subConn returned by the current picker is not READY
|
||||
// When one of these situations happens, pick blocks until the picker gets updated.
|
||||
func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
var ch chan struct{}
|
||||
|
||||
var lastPickErr error
|
||||
for {
|
||||
bp.mu.Lock()
|
||||
if bp.done {
|
||||
bp.mu.Unlock()
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
pw.mu.Unlock()
|
||||
return nil, nil, ErrClientConnClosing
|
||||
}
|
||||
|
||||
if bp.picker == nil {
|
||||
ch = bp.blockingCh
|
||||
if pw.picker == nil {
|
||||
ch = pw.blockingCh
|
||||
}
|
||||
if ch == bp.blockingCh {
|
||||
if ch == pw.blockingCh {
|
||||
// This could happen when either:
|
||||
// - bp.picker is nil (the previous if condition), or
|
||||
// - pw.picker is nil (the previous if condition), or
|
||||
// - has called pick on the current picker.
|
||||
bp.mu.Unlock()
|
||||
pw.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if connectionErr := bp.connectionError(); connectionErr != nil {
|
||||
switch ctx.Err() {
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
|
||||
}
|
||||
var errStr string
|
||||
if lastPickErr != nil {
|
||||
errStr = "latest balancer error: " + lastPickErr.Error()
|
||||
} else if connectionErr := pw.connectionError(); connectionErr != nil {
|
||||
errStr = "latest connection error: " + connectionErr.Error()
|
||||
} else {
|
||||
errStr = ctx.Err().Error()
|
||||
}
|
||||
switch ctx.Err() {
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Error(codes.Canceled, errStr)
|
||||
}
|
||||
return nil, nil, ctx.Err()
|
||||
case <-ch:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch = bp.blockingCh
|
||||
p := bp.picker
|
||||
bp.mu.Unlock()
|
||||
ch = pw.blockingCh
|
||||
p := pw.picker
|
||||
pw.mu.Unlock()
|
||||
|
||||
subConn, done, err := p.Pick(ctx, opts)
|
||||
pickResult, err := p.Pick(info)
|
||||
|
||||
if err != nil {
|
||||
switch err {
|
||||
case balancer.ErrNoSubConnAvailable:
|
||||
if err == balancer.ErrNoSubConnAvailable {
|
||||
continue
|
||||
case balancer.ErrTransientFailure:
|
||||
}
|
||||
if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() {
|
||||
if !failfast {
|
||||
lastPickErr = err
|
||||
continue
|
||||
}
|
||||
return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Error(codes.Canceled, err.Error())
|
||||
default:
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return nil, nil, err
|
||||
}
|
||||
// err is some other error.
|
||||
return nil, nil, status.Error(codes.Unknown, err.Error())
|
||||
return nil, nil, status.Error(codes.Unavailable, err.Error())
|
||||
}
|
||||
if _, ok := status.FromError(err); ok {
|
||||
return nil, nil, err
|
||||
}
|
||||
// err is some other error.
|
||||
return nil, nil, status.Error(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
acw, ok := subConn.(*acBalancerWrapper)
|
||||
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
|
||||
continue
|
||||
}
|
||||
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
|
||||
if channelz.IsOn() {
|
||||
return t, doneChannelzWrapper(acw, done), nil
|
||||
return t, doneChannelzWrapper(acw, pickResult.Done), nil
|
||||
}
|
||||
return t, done, nil
|
||||
return t, pickResult.Done, nil
|
||||
}
|
||||
if done != nil {
|
||||
if pickResult.Done != nil {
|
||||
// Calling done with nil error, no bytes sent and no bytes received.
|
||||
// DoneInfo with default value works.
|
||||
done(balancer.DoneInfo{})
|
||||
pickResult.Done(balancer.DoneInfo{})
|
||||
}
|
||||
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
|
||||
// If ok == false, ac.state is not READY.
|
||||
@@ -186,12 +218,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *pickerWrapper) close() {
|
||||
bp.mu.Lock()
|
||||
defer bp.mu.Unlock()
|
||||
if bp.done {
|
||||
func (pw *pickerWrapper) close() {
|
||||
pw.mu.Lock()
|
||||
defer pw.mu.Unlock()
|
||||
if pw.done {
|
||||
return
|
||||
}
|
||||
bp.done = true
|
||||
close(bp.blockingCh)
|
||||
pw.done = true
|
||||
close(pw.blockingCh)
|
||||
}
|
||||
|
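The picker_wrapper.go hunks above move pickerWrapper from the v1 balancer.Picker API (Pick(ctx, opts) returning a SubConn) to the v2 API, where Pick takes a balancer.PickInfo and returns a balancer.PickResult. As a rough, non-authoritative sketch of a picker against that v2 surface (assuming the grpc v1.27-era balancer package this change vendors; singlePicker is an illustrative name, not code from the diff):

```go
package pickerexample

import "google.golang.org/grpc/balancer"

// singlePicker always hands out one SubConn, or a sticky error.
type singlePicker struct {
	sc  balancer.SubConn
	err error
}

// Compile-time assertion that the v2 Pick signature is implemented.
var _ balancer.V2Picker = (*singlePicker)(nil)

func (p *singlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.err != nil {
		// TransientFailureError marks the error so the wrapper's
		// IsTransientFailure() check treats it as retriable for
		// wait-for-ready RPCs instead of failing them immediately.
		return balancer.PickResult{}, balancer.TransientFailureError(p.err)
	}
	return balancer.PickResult{SubConn: p.sc}, nil
}
```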
107 vendor/google.golang.org/grpc/pickfirst.go (generated, vendored)
@@ -19,12 +19,14 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// PickFirstBalancerName is the name of the pick_first balancer.
|
||||
@@ -45,35 +47,67 @@ func (*pickfirstBuilder) Name() string {
|
||||
}
|
||||
|
||||
type pickfirstBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
cc balancer.ClientConn
|
||||
sc balancer.SubConn
|
||||
}
|
||||
|
||||
var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2
|
||||
|
||||
func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
||||
if err != nil {
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
|
||||
}
|
||||
b.ResolverError(err)
|
||||
return
|
||||
}
|
||||
if b.sc == nil {
|
||||
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
//TODO(yuxuanli): why not change the cc state to Idle?
|
||||
if grpclog.V(2) {
|
||||
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
|
||||
b.sc.Connect()
|
||||
} else {
|
||||
b.sc.UpdateAddresses(addrs)
|
||||
b.sc.Connect()
|
||||
}
|
||||
b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||
b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s})
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||
switch b.state {
|
||||
case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting:
|
||||
// Set a failing picker if we don't have a good picker.
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
|
||||
Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}},
|
||||
)
|
||||
}
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error {
|
||||
if len(cs.ResolverState.Addresses) == 0 {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
if b.sc == nil {
|
||||
var err error
|
||||
b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{})
|
||||
if err != nil {
|
||||
if grpclog.V(2) {
|
||||
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
|
||||
}
|
||||
b.state = connectivity.TransientFailure
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure,
|
||||
Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}},
|
||||
)
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
b.state = connectivity.Idle
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}})
|
||||
b.sc.Connect()
|
||||
} else {
|
||||
b.sc.UpdateAddresses(cs.ResolverState.Addresses)
|
||||
b.sc.Connect()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
|
||||
if grpclog.V(2) {
|
||||
grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
|
||||
}
|
||||
@@ -83,18 +117,28 @@ func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s conn
|
||||
}
|
||||
return
|
||||
}
|
||||
if s == connectivity.Shutdown {
|
||||
b.state = s.ConnectivityState
|
||||
if s.ConnectivityState == connectivity.Shutdown {
|
||||
b.sc = nil
|
||||
return
|
||||
}
|
||||
|
||||
switch s {
|
||||
switch s.ConnectivityState {
|
||||
case connectivity.Ready, connectivity.Idle:
|
||||
b.cc.UpdateBalancerState(s, &picker{sc: sc})
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
|
||||
case connectivity.Connecting:
|
||||
b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
|
||||
case connectivity.TransientFailure:
|
||||
b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
|
||||
err := balancer.ErrTransientFailure
|
||||
// TODO: this can be unconditional after the V1 API is removed, as
|
||||
// SubConnState will always contain a connection error.
|
||||
if s.ConnectionError != nil {
|
||||
err = balancer.TransientFailureError(s.ConnectionError)
|
||||
}
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: s.ConnectivityState,
|
||||
Picker: &picker{err: err},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,15 +146,12 @@ func (b *pickfirstBalancer) Close() {
|
||||
}
|
||||
|
||||
type picker struct {
|
||||
err error
|
||||
sc balancer.SubConn
|
||||
result balancer.PickResult
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
||||
if p.err != nil {
|
||||
return nil, nil, p.err
|
||||
}
|
||||
return p.sc, nil, nil
|
||||
func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
return p.result, p.err
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
18 vendor/google.golang.org/grpc/reflection/README.md (generated, vendored, new file)
@@ -0,0 +1,18 @@
# Reflection

Package reflection implements server reflection service.

The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.

To register server reflection on a gRPC server:
```go
import "google.golang.org/grpc/reflection"

s := grpc.NewServer()
pb.RegisterYourOwnServer(s, &server{})

// Register reflection service on gRPC server.
reflection.Register(s)

s.Serve(lis)
```
750 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go (generated, vendored, new file)
@@ -0,0 +1,750 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: grpc_reflection_v1alpha/reflection.proto
|
||||
|
||||
package grpc_reflection_v1alpha
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// The message sent by the client when calling ServerReflectionInfo method.
|
||||
type ServerReflectionRequest struct {
|
||||
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
|
||||
// To use reflection service, the client should set one of the following
|
||||
// fields in message_request. The server distinguishes requests by their
|
||||
// defined field and then handles them using corresponding methods.
|
||||
//
|
||||
// Types that are valid to be assigned to MessageRequest:
|
||||
// *ServerReflectionRequest_FileByFilename
|
||||
// *ServerReflectionRequest_FileContainingSymbol
|
||||
// *ServerReflectionRequest_FileContainingExtension
|
||||
// *ServerReflectionRequest_AllExtensionNumbersOfType
|
||||
// *ServerReflectionRequest_ListServices
|
||||
MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} }
|
||||
func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ServerReflectionRequest) ProtoMessage() {}
|
||||
func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{0}
|
||||
}
|
||||
|
||||
func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ServerReflectionRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServerReflectionRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ServerReflectionRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ServerReflectionRequest.Size(m)
|
||||
}
|
||||
func (m *ServerReflectionRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServerReflectionRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServerReflectionRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ServerReflectionRequest) GetHost() string {
|
||||
if m != nil {
|
||||
return m.Host
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type isServerReflectionRequest_MessageRequest interface {
|
||||
isServerReflectionRequest_MessageRequest()
|
||||
}
|
||||
|
||||
type ServerReflectionRequest_FileByFilename struct {
|
||||
FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ServerReflectionRequest_FileContainingSymbol struct {
|
||||
FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ServerReflectionRequest_FileContainingExtension struct {
|
||||
FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ServerReflectionRequest_AllExtensionNumbersOfType struct {
|
||||
AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ServerReflectionRequest_ListServices struct {
|
||||
ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
|
||||
|
||||
func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
|
||||
|
||||
func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
|
||||
|
||||
func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {}
|
||||
|
||||
func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
|
||||
|
||||
func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
|
||||
if m != nil {
|
||||
return m.MessageRequest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ServerReflectionRequest) GetFileByFilename() string {
|
||||
if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok {
|
||||
return x.FileByFilename
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ServerReflectionRequest) GetFileContainingSymbol() string {
|
||||
if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok {
|
||||
return x.FileContainingSymbol
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
|
||||
if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok {
|
||||
return x.FileContainingExtension
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
|
||||
if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
|
||||
return x.AllExtensionNumbersOfType
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ServerReflectionRequest) GetListServices() string {
|
||||
if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok {
|
||||
return x.ListServices
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*ServerReflectionRequest) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*ServerReflectionRequest_FileByFilename)(nil),
|
||||
(*ServerReflectionRequest_FileContainingSymbol)(nil),
|
||||
(*ServerReflectionRequest_FileContainingExtension)(nil),
|
||||
(*ServerReflectionRequest_AllExtensionNumbersOfType)(nil),
|
||||
(*ServerReflectionRequest_ListServices)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
// The type name and extension number sent by the client when requesting
|
||||
// file_containing_extension.
|
||||
type ExtensionRequest struct {
|
||||
// Fully-qualified type name. The format should be <package>.<type>
|
||||
ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
|
||||
ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} }
|
||||
func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtensionRequest) ProtoMessage() {}
|
||||
func (*ExtensionRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{1}
|
||||
}
|
||||
|
||||
func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ExtensionRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ExtensionRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ExtensionRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ExtensionRequest.Size(m)
|
||||
}
|
||||
func (m *ExtensionRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ExtensionRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ExtensionRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ExtensionRequest) GetContainingType() string {
|
||||
if m != nil {
|
||||
return m.ContainingType
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ExtensionRequest) GetExtensionNumber() int32 {
|
||||
if m != nil {
|
||||
return m.ExtensionNumber
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// The message sent by the server to answer ServerReflectionInfo method.
|
||||
type ServerReflectionResponse struct {
|
||||
ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
|
||||
OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
|
||||
// The server sets one of the following fields according to the
|
||||
// message_request in the request.
|
||||
//
|
||||
// Types that are valid to be assigned to MessageResponse:
|
||||
// *ServerReflectionResponse_FileDescriptorResponse
|
||||
// *ServerReflectionResponse_AllExtensionNumbersResponse
|
||||
// *ServerReflectionResponse_ListServicesResponse
|
||||
// *ServerReflectionResponse_ErrorResponse
|
||||
MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} }
|
||||
func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ServerReflectionResponse) ProtoMessage() {}
|
||||
func (*ServerReflectionResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{2}
|
||||
}
|
||||
|
||||
func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ServerReflectionResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServerReflectionResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ServerReflectionResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ServerReflectionResponse.Size(m)
|
||||
}
|
||||
func (m *ServerReflectionResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServerReflectionResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServerReflectionResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ServerReflectionResponse) GetValidHost() string {
|
||||
if m != nil {
|
||||
return m.ValidHost
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
|
||||
if m != nil {
|
||||
return m.OriginalRequest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isServerReflectionResponse_MessageResponse interface {
|
||||
isServerReflectionResponse_MessageResponse()
|
||||
}
|
||||
|
||||
type ServerReflectionResponse_FileDescriptorResponse struct {
|
||||
FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ServerReflectionResponse_AllExtensionNumbersResponse struct {
|
||||
AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ServerReflectionResponse_ListServicesResponse struct {
|
||||
ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ServerReflectionResponse_ErrorResponse struct {
|
||||
ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {}
|
||||
|
||||
func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
|
||||
}
|
||||
|
||||
func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {}
|
||||
|
||||
func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
|
||||
|
||||
func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
|
||||
if m != nil {
|
||||
return m.MessageResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
|
||||
if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok {
|
||||
return x.FileDescriptorResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse {
|
||||
if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok {
|
||||
return x.AllExtensionNumbersResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse {
|
||||
if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok {
|
||||
return x.ListServicesResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
|
||||
if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok {
|
||||
return x.ErrorResponse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*ServerReflectionResponse) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*ServerReflectionResponse_FileDescriptorResponse)(nil),
|
||||
(*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
|
||||
(*ServerReflectionResponse_ListServicesResponse)(nil),
|
||||
(*ServerReflectionResponse_ErrorResponse)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
// Serialized FileDescriptorProto messages sent by the server answering
|
||||
// a file_by_filename, file_containing_symbol, or file_containing_extension
|
||||
// request.
|
||||
type FileDescriptorResponse struct {
|
||||
// Serialized FileDescriptorProto messages. We avoid taking a dependency on
|
||||
// descriptor.proto, which uses proto2 only features, by making them opaque
|
||||
// bytes instead.
|
||||
FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} }
|
||||
func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*FileDescriptorResponse) ProtoMessage() {}
|
||||
func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{3}
|
||||
}
|
||||
|
||||
func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *FileDescriptorResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FileDescriptorResponse.Merge(m, src)
|
||||
}
|
||||
func (m *FileDescriptorResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_FileDescriptorResponse.Size(m)
|
||||
}
|
||||
func (m *FileDescriptorResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FileDescriptorResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FileDescriptorResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
|
||||
if m != nil {
|
||||
return m.FileDescriptorProto
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A list of extension numbers sent by the server answering
|
||||
// all_extension_numbers_of_type request.
|
||||
type ExtensionNumberResponse struct {
|
||||
// Full name of the base type, including the package name. The format
|
||||
// is <package>.<type>
|
||||
BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
|
||||
ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} }
|
||||
func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ExtensionNumberResponse) ProtoMessage() {}
|
||||
func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{4}
|
||||
}
|
||||
|
||||
func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ExtensionNumberResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ExtensionNumberResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ExtensionNumberResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ExtensionNumberResponse.Size(m)
|
||||
}
|
||||
func (m *ExtensionNumberResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ExtensionNumberResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ExtensionNumberResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ExtensionNumberResponse) GetBaseTypeName() string {
|
||||
if m != nil {
|
||||
return m.BaseTypeName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 {
|
||||
if m != nil {
|
||||
return m.ExtensionNumber
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A list of ServiceResponse sent by the server answering list_services request.
|
||||
type ListServiceResponse struct {
|
||||
// The information of each service may be expanded in the future, so we use
|
||||
// ServiceResponse message to encapsulate it.
|
||||
Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} }
|
||||
func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListServiceResponse) ProtoMessage() {}
|
||||
func (*ListServiceResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{5}
|
||||
}
|
||||
|
||||
func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ListServiceResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListServiceResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ListServiceResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListServiceResponse.Size(m)
|
||||
}
|
||||
func (m *ListServiceResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListServiceResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListServiceResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListServiceResponse) GetService() []*ServiceResponse {
|
||||
if m != nil {
|
||||
return m.Service
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The information of a single service used by ListServiceResponse to answer
|
||||
// list_services request.
|
||||
type ServiceResponse struct {
|
||||
// Full name of a registered service, including its package name. The format
|
||||
// is <package>.<service>
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ServiceResponse) Reset() { *m = ServiceResponse{} }
|
||||
func (m *ServiceResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ServiceResponse) ProtoMessage() {}
|
||||
func (*ServiceResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{6}
|
||||
}
|
||||
|
||||
func (m *ServiceResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ServiceResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ServiceResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServiceResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ServiceResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ServiceResponse.Size(m)
|
||||
}
|
||||
func (m *ServiceResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServiceResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServiceResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ServiceResponse) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// The error code and error message sent by the server when an error occurs.
|
||||
type ErrorResponse struct {
|
||||
// This field uses the error codes defined in grpc::StatusCode.
|
||||
ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
|
||||
ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ErrorResponse) Reset() { *m = ErrorResponse{} }
|
||||
func (m *ErrorResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ErrorResponse) ProtoMessage() {}
|
||||
func (*ErrorResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_42a8ac412db3cb03, []int{7}
|
||||
}
|
||||
|
||||
func (m *ErrorResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ErrorResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ErrorResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ErrorResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ErrorResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ErrorResponse.Size(m)
|
||||
}
|
||||
func (m *ErrorResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ErrorResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ErrorResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ErrorResponse) GetErrorCode() int32 {
|
||||
if m != nil {
|
||||
return m.ErrorCode
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ErrorResponse) GetErrorMessage() string {
|
||||
if m != nil {
|
||||
return m.ErrorMessage
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest")
|
||||
proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest")
|
||||
proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse")
|
||||
proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse")
|
||||
proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse")
|
||||
proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse")
|
||||
proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse")
|
||||
proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03)
|
||||
}
|
||||
|
||||
var fileDescriptor_42a8ac412db3cb03 = []byte{
|
||||
// 656 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40,
|
||||
0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75,
|
||||
0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e,
|
||||
0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e,
|
||||
0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7,
|
||||
0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7,
|
||||
0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e,
|
||||
0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94,
|
||||
0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43,
|
||||
0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36,
|
||||
0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d,
|
||||
0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3,
|
||||
0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd,
|
||||
0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde,
|
||||
0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd,
|
||||
0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc,
|
||||
0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8,
|
||||
0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f,
|
||||
0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33,
|
||||
0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb,
|
||||
0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58,
|
||||
0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a,
|
||||
0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce,
|
||||
0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e,
|
||||
0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25,
|
||||
0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d,
|
||||
0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8,
|
||||
0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62,
|
||||
0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77,
|
||||
0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97,
|
||||
0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0,
|
||||
0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39,
|
||||
0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2,
|
||||
0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad,
|
||||
0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29,
|
||||
0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0,
|
||||
0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73,
|
||||
0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d,
|
||||
0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6,
|
||||
0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5,
|
||||
0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// ServerReflectionClient is the client API for ServerReflection service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type ServerReflectionClient interface {
|
||||
// The reflection service is structured as a bidirectional stream, ensuring
|
||||
// all related requests go to a single server.
|
||||
ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error)
|
||||
}
|
||||
|
||||
type serverReflectionClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient {
|
||||
return &serverReflectionClient{cc}
|
||||
}
|
||||
|
||||
func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &serverReflectionServerReflectionInfoClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type ServerReflection_ServerReflectionInfoClient interface {
|
||||
Send(*ServerReflectionRequest) error
|
||||
Recv() (*ServerReflectionResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type serverReflectionServerReflectionInfoClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) {
|
||||
m := new(ServerReflectionResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ServerReflectionServer is the server API for ServerReflection service.
|
||||
type ServerReflectionServer interface {
|
||||
// The reflection service is structured as a bidirectional stream, ensuring
|
||||
// all related requests go to a single server.
|
||||
ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error
|
||||
}
|
||||
|
||||
// UnimplementedServerReflectionServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedServerReflectionServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedServerReflectionServer) ServerReflectionInfo(srv ServerReflection_ServerReflectionInfoServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented")
|
||||
}
|
||||
|
||||
func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) {
|
||||
s.RegisterService(&_ServerReflection_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream})
|
||||
}
|
||||
|
||||
type ServerReflection_ServerReflectionInfoServer interface {
|
||||
Send(*ServerReflectionResponse) error
|
||||
Recv() (*ServerReflectionRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type serverReflectionServerReflectionInfoServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) {
|
||||
m := new(ServerReflectionRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var _ServerReflection_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "grpc.reflection.v1alpha.ServerReflection",
|
||||
HandlerType: (*ServerReflectionServer)(nil),
|
||||
Methods: []grpc.MethodDesc{},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "ServerReflectionInfo",
|
||||
Handler: _ServerReflection_ServerReflectionInfo_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "grpc_reflection_v1alpha/reflection.proto",
|
||||
}
|
136
vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
generated
vendored
Normal file
136
vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
generated
vendored
Normal file
@ -0,0 +1,136 @@
|
||||
// Copyright 2016 gRPC authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Service exported by server reflection
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package grpc.reflection.v1alpha;
|
||||
|
||||
service ServerReflection {
|
||||
// The reflection service is structured as a bidirectional stream, ensuring
|
||||
// all related requests go to a single server.
|
||||
rpc ServerReflectionInfo(stream ServerReflectionRequest)
|
||||
returns (stream ServerReflectionResponse);
|
||||
}
|
||||
|
||||
// The message sent by the client when calling ServerReflectionInfo method.
|
||||
message ServerReflectionRequest {
|
||||
string host = 1;
|
||||
// To use reflection service, the client should set one of the following
|
||||
// fields in message_request. The server distinguishes requests by their
|
||||
// defined field and then handles them using corresponding methods.
|
||||
oneof message_request {
|
||||
// Find a proto file by the file name.
|
||||
string file_by_filename = 3;
|
||||
|
||||
// Find the proto file that declares the given fully-qualified symbol name.
|
||||
// This field should be a fully-qualified symbol name
|
||||
// (e.g. <package>.<service>[.<method>] or <package>.<type>).
|
||||
string file_containing_symbol = 4;
|
||||
|
||||
// Find the proto file which defines an extension extending the given
|
||||
// message type with the given field number.
|
||||
ExtensionRequest file_containing_extension = 5;
|
||||
|
||||
// Finds the tag numbers used by all known extensions of extendee_type, and
|
||||
// appends them to ExtensionNumberResponse in an undefined order.
|
||||
// Its corresponding method is best-effort: it's not guaranteed that the
|
||||
// reflection service will implement this method, and it's not guaranteed
|
||||
// that this method will provide all extensions. Returns
|
||||
// StatusCode::UNIMPLEMENTED if it's not implemented.
|
||||
// This field should be a fully-qualified type name. The format is
|
||||
// <package>.<type>
|
||||
string all_extension_numbers_of_type = 6;
|
||||
|
||||
// List the full names of registered services. The content will not be
|
||||
// checked.
|
||||
string list_services = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// The type name and extension number sent by the client when requesting
|
||||
// file_containing_extension.
|
||||
message ExtensionRequest {
|
||||
// Fully-qualified type name. The format should be <package>.<type>
|
||||
string containing_type = 1;
|
||||
int32 extension_number = 2;
|
||||
}
|
||||
|
||||
// The message sent by the server to answer ServerReflectionInfo method.
|
||||
message ServerReflectionResponse {
|
||||
string valid_host = 1;
|
||||
ServerReflectionRequest original_request = 2;
|
||||
// The server sets one of the following fields according to the
|
||||
// message_request in the request.
|
||||
oneof message_response {
|
||||
// This message is used to answer file_by_filename, file_containing_symbol,
|
||||
// file_containing_extension requests with transitive dependencies.
|
||||
// As the repeated label is not allowed in oneof fields, we use a
|
||||
// FileDescriptorResponse message to encapsulate the repeated fields.
|
||||
// The reflection service is allowed to avoid sending FileDescriptorProtos
|
||||
// that were previously sent in response to earlier requests in the stream.
|
||||
FileDescriptorResponse file_descriptor_response = 4;
|
||||
|
||||
// This message is used to answer all_extension_numbers_of_type requests.
|
||||
ExtensionNumberResponse all_extension_numbers_response = 5;
|
||||
|
||||
// This message is used to answer list_services requests.
|
||||
ListServiceResponse list_services_response = 6;
|
||||
|
||||
// This message is used when an error occurs.
|
||||
ErrorResponse error_response = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// Serialized FileDescriptorProto messages sent by the server answering
|
||||
// a file_by_filename, file_containing_symbol, or file_containing_extension
|
||||
// request.
|
||||
message FileDescriptorResponse {
|
||||
// Serialized FileDescriptorProto messages. We avoid taking a dependency on
|
||||
// descriptor.proto, which uses proto2 only features, by making them opaque
|
||||
// bytes instead.
|
||||
repeated bytes file_descriptor_proto = 1;
|
||||
}
|
||||
|
||||
// A list of extension numbers sent by the server answering
|
||||
// all_extension_numbers_of_type request.
|
||||
message ExtensionNumberResponse {
|
||||
// Full name of the base type, including the package name. The format
|
||||
// is <package>.<type>
|
||||
string base_type_name = 1;
|
||||
repeated int32 extension_number = 2;
|
||||
}
|
||||
|
||||
// A list of ServiceResponse sent by the server answering list_services request.
|
||||
message ListServiceResponse {
|
||||
// The information of each service may be expanded in the future, so we use
|
||||
// ServiceResponse message to encapsulate it.
|
||||
repeated ServiceResponse service = 1;
|
||||
}
|
||||
|
||||
// The information of a single service used by ListServiceResponse to answer
|
||||
// list_services request.
|
||||
message ServiceResponse {
|
||||
// Full name of a registered service, including its package name. The format
|
||||
// is <package>.<service>
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// The error code and error message sent by the server when an error occurs.
|
||||
message ErrorResponse {
|
||||
// This field uses the error codes defined in grpc::StatusCode.
|
||||
int32 error_code = 1;
|
||||
string error_message = 2;
|
||||
}
|
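To make the message flow above concrete, here is a hedged client-side sketch (not part of this change) that opens the ServerReflectionInfo stream through the generated grpc_reflection_v1alpha package and issues a list_services request; the target address is an assumption.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	// Assumed address of a server that has reflection.Register(s) enabled.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	stream, err := rpb.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
	if err != nil {
		log.Fatalf("open stream: %v", err)
	}

	// Set the list_services member of the message_request oneof.
	if err := stream.Send(&rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_ListServices{ListServices: ""},
	}); err != nil {
		log.Fatalf("send: %v", err)
	}

	resp, err := stream.Recv()
	if err != nil {
		log.Fatalf("recv: %v", err)
	}
	// The server answers with list_services_response containing ServiceResponse entries.
	for _, svc := range resp.GetListServicesResponse().GetService() {
		fmt.Println(svc.GetName())
	}
}
```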
454 vendor/google.golang.org/grpc/reflection/serverreflection.go (generated, vendored, new file)
@@ -0,0 +1,454 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto
|
||||
|
||||
/*
|
||||
Package reflection implements server reflection service.
|
||||
|
||||
The service implemented is defined in:
|
||||
https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.
|
||||
|
||||
To register server reflection on a gRPC server:
|
||||
import "google.golang.org/grpc/reflection"
|
||||
|
||||
s := grpc.NewServer()
|
||||
pb.RegisterYourOwnServer(s, &server{})
|
||||
|
||||
// Register reflection service on gRPC server.
|
||||
reflection.Register(s)
|
||||
|
||||
s.Serve(lis)
|
||||
|
||||
*/
|
||||
package reflection // import "google.golang.org/grpc/reflection"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type serverReflectionServer struct {
|
||||
s *grpc.Server
|
||||
|
||||
initSymbols sync.Once
|
||||
serviceNames []string
|
||||
symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files
|
||||
}
|
||||
|
||||
// Register registers the server reflection service on the given gRPC server.
|
||||
func Register(s *grpc.Server) {
|
||||
rpb.RegisterServerReflectionServer(s, &serverReflectionServer{
|
||||
s: s,
|
||||
})
|
||||
}
|
||||
|
||||
// protoMessage is used for type assertion on proto messages.
|
||||
// Generated proto message implements function Descriptor(), but Descriptor()
|
||||
// is not part of interface proto.Message. This interface is needed to
|
||||
// call Descriptor().
|
||||
type protoMessage interface {
|
||||
Descriptor() ([]byte, []int)
|
||||
}
|
||||
|
||||
func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) {
|
||||
s.initSymbols.Do(func() {
|
||||
serviceInfo := s.s.GetServiceInfo()
|
||||
|
||||
s.symbols = map[string]*dpb.FileDescriptorProto{}
|
||||
s.serviceNames = make([]string, 0, len(serviceInfo))
|
||||
processed := map[string]struct{}{}
|
||||
for svc, info := range serviceInfo {
|
||||
s.serviceNames = append(s.serviceNames, svc)
|
||||
fdenc, ok := parseMetadata(info.Metadata)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
fd, err := decodeFileDesc(fdenc)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
s.processFile(fd, processed)
|
||||
}
|
||||
sort.Strings(s.serviceNames)
|
||||
})
|
||||
|
||||
return s.serviceNames, s.symbols
|
||||
}
|
||||
|
||||
func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) {
|
||||
filename := fd.GetName()
|
||||
if _, ok := processed[filename]; ok {
|
||||
return
|
||||
}
|
||||
processed[filename] = struct{}{}
|
||||
|
||||
prefix := fd.GetPackage()
|
||||
|
||||
for _, msg := range fd.MessageType {
|
||||
s.processMessage(fd, prefix, msg)
|
||||
}
|
||||
for _, en := range fd.EnumType {
|
||||
s.processEnum(fd, prefix, en)
|
||||
}
|
||||
for _, ext := range fd.Extension {
|
||||
s.processField(fd, prefix, ext)
|
||||
}
|
||||
for _, svc := range fd.Service {
|
||||
svcName := fqn(prefix, svc.GetName())
|
||||
s.symbols[svcName] = fd
|
||||
for _, meth := range svc.Method {
|
||||
name := fqn(svcName, meth.GetName())
|
||||
s.symbols[name] = fd
|
||||
}
|
||||
}
|
||||
|
||||
for _, dep := range fd.Dependency {
|
||||
fdenc := proto.FileDescriptor(dep)
|
||||
fdDep, err := decodeFileDesc(fdenc)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
s.processFile(fdDep, processed)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) {
|
||||
msgName := fqn(prefix, msg.GetName())
|
||||
s.symbols[msgName] = fd
|
||||
|
||||
for _, nested := range msg.NestedType {
|
||||
s.processMessage(fd, msgName, nested)
|
||||
}
|
||||
for _, en := range msg.EnumType {
|
||||
s.processEnum(fd, msgName, en)
|
||||
}
|
||||
for _, ext := range msg.Extension {
|
||||
s.processField(fd, msgName, ext)
|
||||
}
|
||||
for _, fld := range msg.Field {
|
||||
s.processField(fd, msgName, fld)
|
||||
}
|
||||
for _, oneof := range msg.OneofDecl {
|
||||
oneofName := fqn(msgName, oneof.GetName())
|
||||
s.symbols[oneofName] = fd
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) {
|
||||
enName := fqn(prefix, en.GetName())
|
||||
s.symbols[enName] = fd
|
||||
|
||||
for _, val := range en.Value {
|
||||
valName := fqn(enName, val.GetName())
|
||||
s.symbols[valName] = fd
|
||||
}
|
||||
}
|
||||
|
||||
func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) {
|
||||
fldName := fqn(prefix, fld.GetName())
|
||||
s.symbols[fldName] = fd
|
||||
}
|
||||
|
||||
func fqn(prefix, name string) string {
|
||||
if prefix == "" {
|
||||
return name
|
||||
}
|
||||
return prefix + "." + name
|
||||
}
|
||||
|
||||
// fileDescForType gets the file descriptor for the given type.
|
||||
// The given type should be a proto message.
|
||||
func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) {
|
||||
m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to create message from type: %v", st)
|
||||
}
|
||||
enc, _ := m.Descriptor()
|
||||
|
||||
return decodeFileDesc(enc)
|
||||
}
|
||||
|
||||
// decodeFileDesc does decompression and unmarshalling on the given
|
||||
// file descriptor byte slice.
|
||||
func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) {
|
||||
raw, err := decompress(enc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decompress enc: %v", err)
|
||||
}
|
||||
|
||||
fd := new(dpb.FileDescriptorProto)
|
||||
if err := proto.Unmarshal(raw, fd); err != nil {
|
||||
return nil, fmt.Errorf("bad descriptor: %v", err)
|
||||
}
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
// decompress does gzip decompression.
|
||||
func decompress(b []byte) ([]byte, error) {
|
||||
r, err := gzip.NewReader(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
|
||||
}
|
||||
out, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func typeForName(name string) (reflect.Type, error) {
|
||||
pt := proto.MessageType(name)
|
||||
if pt == nil {
|
||||
return nil, fmt.Errorf("unknown type: %q", name)
|
||||
}
|
||||
st := pt.Elem()
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) {
|
||||
m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to create message from type: %v", st)
|
||||
}
|
||||
|
||||
var extDesc *proto.ExtensionDesc
|
||||
for id, desc := range proto.RegisteredExtensions(m) {
|
||||
if id == ext {
|
||||
extDesc = desc
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if extDesc == nil {
|
||||
return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext)
|
||||
}
|
||||
|
||||
return decodeFileDesc(proto.FileDescriptor(extDesc.Filename))
|
||||
}
|
||||
|
||||
func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) {
|
||||
m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to create message from type: %v", st)
|
||||
}
|
||||
|
||||
exts := proto.RegisteredExtensions(m)
|
||||
out := make([]int32, 0, len(exts))
|
||||
for id := range exts {
|
||||
out = append(out, id)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// fileDescEncodingByFilename finds the file descriptor for given filename,
|
||||
// does marshalling on it and returns the marshalled result.
|
||||
func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) {
|
||||
enc := proto.FileDescriptor(name)
|
||||
if enc == nil {
|
||||
return nil, fmt.Errorf("unknown file: %v", name)
|
||||
}
|
||||
fd, err := decodeFileDesc(enc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Marshal(fd)
|
||||
}
|
||||
|
||||
// parseMetadata finds the file descriptor bytes specified by meta.
|
||||
// For SupportPackageIsVersion4, m is the name of the proto file, we
|
||||
// call proto.FileDescriptor to get the byte slice.
|
||||
// For SupportPackageIsVersion3, m is a byte slice itself.
|
||||
func parseMetadata(meta interface{}) ([]byte, bool) {
|
||||
// Check if meta is the file name.
|
||||
if fileNameForMeta, ok := meta.(string); ok {
|
||||
return proto.FileDescriptor(fileNameForMeta), true
|
||||
}
|
||||
|
||||
// Check if meta is the byte slice.
|
||||
if enc, ok := meta.([]byte); ok {
|
||||
return enc, true
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol,
|
||||
// does marshalling on it and returns the marshalled result.
|
||||
// The given symbol can be a type, a service or a method.
|
||||
func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) {
|
||||
_, symbols := s.getSymbols()
|
||||
fd := symbols[name]
|
||||
if fd == nil {
|
||||
// Check if it's a type name that was not present in the
|
||||
// transitive dependencies of the registered services.
|
||||
if st, err := typeForName(name); err == nil {
|
||||
fd, err = s.fileDescForType(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if fd == nil {
|
||||
return nil, fmt.Errorf("unknown symbol: %v", name)
|
||||
}
|
||||
|
||||
return proto.Marshal(fd)
|
||||
}
|
||||
|
||||
// fileDescEncodingContainingExtension finds the file descriptor containing given extension,
|
||||
// does marshalling on it and returns the marshalled result.
|
||||
func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) {
|
||||
st, err := typeForName(typeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fd, err := fileDescContainingExtension(st, extNum)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Marshal(fd)
|
||||
}
|
||||
|
||||
// allExtensionNumbersForTypeName returns all extension numbers for the given type.
|
||||
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) {
|
||||
st, err := typeForName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extNums, err := s.allExtensionNumbersForType(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return extNums, nil
|
||||
}
|
||||
|
||||
// ServerReflectionInfo is the reflection service handler.
|
||||
func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error {
|
||||
for {
|
||||
in, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out := &rpb.ServerReflectionResponse{
|
||||
ValidHost: in.Host,
|
||||
OriginalRequest: in,
|
||||
}
|
||||
switch req := in.MessageRequest.(type) {
|
||||
case *rpb.ServerReflectionRequest_FileByFilename:
|
||||
b, err := s.fileDescEncodingByFilename(req.FileByFilename)
|
||||
if err != nil {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
|
||||
ErrorResponse: &rpb.ErrorResponse{
|
||||
ErrorCode: int32(codes.NotFound),
|
||||
ErrorMessage: err.Error(),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
|
||||
FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
|
||||
}
|
||||
}
|
||||
case *rpb.ServerReflectionRequest_FileContainingSymbol:
|
||||
b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol)
|
||||
if err != nil {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
|
||||
ErrorResponse: &rpb.ErrorResponse{
|
||||
ErrorCode: int32(codes.NotFound),
|
||||
ErrorMessage: err.Error(),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
|
||||
FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
|
||||
}
|
||||
}
|
||||
case *rpb.ServerReflectionRequest_FileContainingExtension:
|
||||
typeName := req.FileContainingExtension.ContainingType
|
||||
extNum := req.FileContainingExtension.ExtensionNumber
|
||||
b, err := s.fileDescEncodingContainingExtension(typeName, extNum)
|
||||
if err != nil {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
|
||||
ErrorResponse: &rpb.ErrorResponse{
|
||||
ErrorCode: int32(codes.NotFound),
|
||||
ErrorMessage: err.Error(),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
|
||||
FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
|
||||
}
|
||||
}
|
||||
case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType:
|
||||
extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType)
|
||||
if err != nil {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
|
||||
ErrorResponse: &rpb.ErrorResponse{
|
||||
ErrorCode: int32(codes.NotFound),
|
||||
ErrorMessage: err.Error(),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{
|
||||
AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{
|
||||
BaseTypeName: req.AllExtensionNumbersOfType,
|
||||
ExtensionNumber: extNums,
|
||||
},
|
||||
}
|
||||
}
|
||||
case *rpb.ServerReflectionRequest_ListServices:
|
||||
svcNames, _ := s.getSymbols()
|
||||
serviceResponses := make([]*rpb.ServiceResponse, len(svcNames))
|
||||
for i, n := range svcNames {
|
||||
serviceResponses[i] = &rpb.ServiceResponse{
|
||||
Name: n,
|
||||
}
|
||||
}
|
||||
out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{
|
||||
ListServicesResponse: &rpb.ListServiceResponse{
|
||||
Service: serviceResponses,
|
||||
},
|
||||
}
|
||||
default:
|
||||
return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest)
|
||||
}
|
||||
|
||||
if err := stream.Send(out); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
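The package comment in the file above shows reflection being registered inline. As a rough, self-contained sketch (the listen address and the absence of any application service registration are assumptions for illustration, not part of the vendored code), the same setup looks like this:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // assumed address
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	// Register the reflection service so tools such as grpcurl can list
	// services and fetch descriptors over the ServerReflectionInfo stream.
	reflection.Register(s)
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}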
90
vendor/google.golang.org/grpc/resolver/resolver.go
generated
vendored
@ -21,6 +21,11 @@
|
||||
package resolver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
@ -69,12 +74,18 @@ func GetDefaultScheme() string {
|
||||
}
|
||||
|
||||
// AddressType indicates the address type returned by name resolution.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
type AddressType uint8
|
||||
|
||||
const (
|
||||
// Backend indicates the address is for a backend server.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
Backend AddressType = iota
|
||||
// GRPCLB indicates the address is for a grpclb load balancer.
|
||||
//
|
||||
// Deprecated: use Attributes in Address instead.
|
||||
GRPCLB
|
||||
)
|
||||
|
||||
@ -83,33 +94,75 @@ const (
|
||||
type Address struct {
|
||||
// Addr is the server address on which a connection will be established.
|
||||
Addr string
|
||||
// Type is the type of this address.
|
||||
Type AddressType
|
||||
|
||||
// ServerName is the name of this address.
|
||||
// If non-empty, the ServerName is used as the transport certification authority for
|
||||
// the address, instead of the hostname from the Dial target string. In most cases,
|
||||
// this should not be set.
|
||||
//
|
||||
// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
|
||||
// If Type is GRPCLB, ServerName should be the name of the remote load
|
||||
// balancer, not the name of the backend.
|
||||
//
|
||||
// WARNING: ServerName must only be populated with trusted values. It
|
||||
// is insecure to populate it with data from untrusted inputs since untrusted
|
||||
// values could be used to bypass the authority checks performed by TLS.
|
||||
ServerName string
|
||||
|
||||
// Attributes contains arbitrary data about this address intended for
|
||||
// consumption by the load balancing policy.
|
||||
Attributes *attributes.Attributes
|
||||
|
||||
// Type is the type of this address.
|
||||
//
|
||||
// Deprecated: use Attributes instead.
|
||||
Type AddressType
|
||||
|
||||
// Metadata is the information associated with Addr, which may be used
|
||||
// to make load balancing decision.
|
||||
//
|
||||
// Deprecated: use Attributes instead.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// BuildOption includes additional information for the builder to create
|
||||
// BuildOptions includes additional information for the builder to create
|
||||
// the resolver.
|
||||
type BuildOption struct {
|
||||
// DisableServiceConfig indicates whether resolver should fetch service config data.
|
||||
type BuildOptions struct {
|
||||
// DisableServiceConfig indicates whether a resolver implementation should
|
||||
// fetch service config data.
|
||||
DisableServiceConfig bool
|
||||
// DialCreds is the transport credentials used by the ClientConn for
|
||||
// communicating with the target gRPC service (set via
|
||||
// WithTransportCredentials). In cases where a name resolution service
|
||||
// requires the same credentials, the resolver may use this field. In most
|
||||
// cases though, it is not appropriate, and this field may be ignored.
|
||||
DialCreds credentials.TransportCredentials
|
||||
// CredsBundle is the credentials bundle used by the ClientConn for
|
||||
// communicating with the target gRPC service (set via
|
||||
// WithCredentialsBundle). In cases where a name resolution service
|
||||
// requires the same credentials, the resolver may use this field. In most
|
||||
// cases though, it is not appropriate, and this field may be ignored.
|
||||
CredsBundle credentials.Bundle
|
||||
// Dialer is the custom dialer used by the ClientConn for dialling the
|
||||
// target gRPC service (set via WithDialer). In cases where a name
|
||||
// resolution service requires the same dialer, the resolver may use this
|
||||
// field. In most cases though, it is not appropriate, and this field may
|
||||
// be ignored.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
}
|
||||
|
||||
// State contains the current Resolver state relevant to the ClientConn.
|
||||
type State struct {
|
||||
Addresses []Address // Resolved addresses for the target
|
||||
// ServiceConfig is the parsed service config; obtained from
|
||||
// serviceconfig.Parse.
|
||||
ServiceConfig serviceconfig.Config
|
||||
// Addresses is the latest set of resolved addresses for the target.
|
||||
Addresses []Address
|
||||
|
||||
// TODO: add Err error
|
||||
// ServiceConfig contains the result from parsing the latest service
|
||||
// config. If it is nil, it indicates no service config is present or the
|
||||
// resolver does not provide service configs.
|
||||
ServiceConfig *serviceconfig.ParseResult
|
||||
|
||||
// Attributes contains arbitrary data about the resolver intended for
|
||||
// consumption by the load balancing policy.
|
||||
Attributes *attributes.Attributes
|
||||
}
|
||||
|
||||
// ClientConn contains the callbacks for resolver to notify any updates
|
||||
@ -122,6 +175,10 @@ type State struct {
|
||||
type ClientConn interface {
|
||||
// UpdateState updates the state of the ClientConn appropriately.
|
||||
UpdateState(State)
|
||||
// ReportError notifies the ClientConn that the Resolver encountered an
|
||||
// error. The ClientConn will notify the load balancer and begin calling
|
||||
// ResolveNow on the Resolver with exponential backoff.
|
||||
ReportError(error)
|
||||
// NewAddress is called by resolver to notify ClientConn a new list
|
||||
// of resolved addresses.
|
||||
// The address list should be the complete list of resolved addresses.
|
||||
@ -133,6 +190,9 @@ type ClientConn interface {
|
||||
//
|
||||
// Deprecated: Use UpdateState instead.
|
||||
NewServiceConfig(serviceConfig string)
|
||||
// ParseServiceConfig parses the provided service config and returns an
|
||||
// object that provides the parsed config.
|
||||
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
|
||||
}
|
||||
|
||||
// Target represents a target for gRPC, as specified in:
|
||||
@ -164,14 +224,14 @@ type Builder interface {
|
||||
//
|
||||
// gRPC dial calls Build synchronously, and fails if the returned error is
|
||||
// not nil.
|
||||
Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
|
||||
Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
|
||||
// Scheme returns the scheme supported by this resolver.
|
||||
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
|
||||
Scheme() string
|
||||
}
|
||||
|
||||
// ResolveNowOption includes additional information for ResolveNow.
|
||||
type ResolveNowOption struct{}
|
||||
// ResolveNowOptions includes additional information for ResolveNow.
|
||||
type ResolveNowOptions struct{}
|
||||
|
||||
// Resolver watches for the updates on the specified target.
|
||||
// Updates include address updates and service config updates.
|
||||
@ -180,7 +240,7 @@ type Resolver interface {
|
||||
// again. It's just a hint, resolver can ignore this if it's not necessary.
|
||||
//
|
||||
// It could be called multiple times concurrently.
|
||||
ResolveNow(ResolveNowOption)
|
||||
ResolveNow(ResolveNowOptions)
|
||||
// Close closes the resolver.
|
||||
Close()
|
||||
}
|
||||
|
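The resolver.go changes above rename BuildOption to BuildOptions and ResolveNowOption to ResolveNowOptions, and add ReportError and ParseServiceConfig to the ClientConn interface. A minimal custom resolver written against the updated interfaces might look like the sketch below; the "static" scheme and the hard-coded backend address are assumptions for illustration only.

package staticresolver

import "google.golang.org/grpc/resolver"

type builder struct{}

// Scheme names the (assumed) URI scheme this builder handles.
func (builder) Scheme() string { return "static" }

// Build now receives resolver.BuildOptions instead of BuildOption.
func (builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	r.resolve()
	return r, nil
}

type staticResolver struct {
	cc resolver.ClientConn
}

func (r *staticResolver) resolve() {
	// Push a fixed address list; a real resolver would consult DNS or
	// another naming system here.
	r.cc.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}, // assumed backend
	})
}

// ResolveNow now takes resolver.ResolveNowOptions.
func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) { r.resolve() }

func (r *staticResolver) Close() {}

func init() { resolver.Register(builder{}) }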
175
vendor/google.golang.org/grpc/resolver_conn_wrapper.go
generated
vendored
@ -21,22 +21,29 @@ package grpc
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
// ccResolverWrapper is a wrapper on top of cc for resolvers.
|
||||
// It implements resolver.ClientConnection interface.
|
||||
// It implements resolver.ClientConn interface.
|
||||
type ccResolverWrapper struct {
|
||||
cc *ClientConn
|
||||
resolver resolver.Resolver
|
||||
addrCh chan []resolver.Address
|
||||
scCh chan string
|
||||
done uint32 // accessed atomically; set to 1 when closed.
|
||||
curState resolver.State
|
||||
cc *ClientConn
|
||||
resolverMu sync.Mutex
|
||||
resolver resolver.Resolver
|
||||
done *grpcsync.Event
|
||||
curState resolver.State
|
||||
|
||||
pollingMu sync.Mutex
|
||||
polling chan struct{}
|
||||
}
|
||||
|
||||
// split2 returns the values from strings.SplitN(s, sep, 2).
|
||||
@ -67,60 +74,126 @@ func parseTarget(target string) (ret resolver.Target) {
|
||||
return ret
|
||||
}
|
||||
|
||||
// newCCResolverWrapper parses cc.target for scheme and gets the resolver
|
||||
// builder for this scheme and builds the resolver. The monitoring goroutine
|
||||
// for it is not started yet and can be created by calling start().
|
||||
//
|
||||
// If withResolverBuilder dial option is set, the specified resolver will be
|
||||
// used instead.
|
||||
func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
|
||||
rb := cc.dopts.resolverBuilder
|
||||
if rb == nil {
|
||||
return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
|
||||
// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
|
||||
// returns a ccResolverWrapper object which wraps the newly built resolver.
|
||||
func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) {
|
||||
ccr := &ccResolverWrapper{
|
||||
cc: cc,
|
||||
done: grpcsync.NewEvent(),
|
||||
}
|
||||
|
||||
ccr := &ccResolverWrapper{
|
||||
cc: cc,
|
||||
addrCh: make(chan []resolver.Address, 1),
|
||||
scCh: make(chan string, 1),
|
||||
var credsClone credentials.TransportCredentials
|
||||
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
|
||||
credsClone = creds.Clone()
|
||||
}
|
||||
rbo := resolver.BuildOptions{
|
||||
DisableServiceConfig: cc.dopts.disableServiceConfig,
|
||||
DialCreds: credsClone,
|
||||
CredsBundle: cc.dopts.copts.CredsBundle,
|
||||
Dialer: cc.dopts.copts.Dialer,
|
||||
}
|
||||
|
||||
var err error
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
|
||||
// We need to hold the lock here while we assign to the ccr.resolver field
|
||||
// to guard against a data race caused by the following code path,
|
||||
// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
|
||||
// accessing ccr.resolver which is being assigned here.
|
||||
ccr.resolverMu.Lock()
|
||||
defer ccr.resolverMu.Unlock()
|
||||
ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ccr, nil
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
|
||||
ccr.resolverMu.Lock()
|
||||
if !ccr.done.HasFired() {
|
||||
ccr.resolver.ResolveNow(o)
|
||||
}
|
||||
ccr.resolverMu.Unlock()
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) close() {
|
||||
ccr.resolverMu.Lock()
|
||||
ccr.resolver.Close()
|
||||
atomic.StoreUint32(&ccr.done, 1)
|
||||
ccr.done.Fire()
|
||||
ccr.resolverMu.Unlock()
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) isDone() bool {
|
||||
return atomic.LoadUint32(&ccr.done) == 1
|
||||
// poll begins or ends asynchronous polling of the resolver based on whether
|
||||
// err is ErrBadResolverState.
|
||||
func (ccr *ccResolverWrapper) poll(err error) {
|
||||
ccr.pollingMu.Lock()
|
||||
defer ccr.pollingMu.Unlock()
|
||||
if err != balancer.ErrBadResolverState {
|
||||
// stop polling
|
||||
if ccr.polling != nil {
|
||||
close(ccr.polling)
|
||||
ccr.polling = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
if ccr.polling != nil {
|
||||
// already polling
|
||||
return
|
||||
}
|
||||
p := make(chan struct{})
|
||||
ccr.polling = p
|
||||
go func() {
|
||||
for i := 0; ; i++ {
|
||||
ccr.resolveNow(resolver.ResolveNowOptions{})
|
||||
t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
|
||||
select {
|
||||
case <-p:
|
||||
t.Stop()
|
||||
return
|
||||
case <-ccr.done.Done():
|
||||
// Resolver has been closed.
|
||||
t.Stop()
|
||||
return
|
||||
case <-t.C:
|
||||
select {
|
||||
case <-p:
|
||||
return
|
||||
default:
|
||||
}
|
||||
// Timer expired; re-resolve.
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
|
||||
if ccr.isDone() {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
|
||||
if channelz.IsOn() {
|
||||
ccr.addChannelzTraceEvent(s)
|
||||
}
|
||||
ccr.cc.updateResolverState(s)
|
||||
ccr.curState = s
|
||||
ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) ReportError(err error) {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err)
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Resolver reported error: %v", err),
|
||||
Severity: channelz.CtWarning,
|
||||
})
|
||||
}
|
||||
ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
|
||||
}
|
||||
|
||||
// NewAddress is called by the resolver implementation to send addresses to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
if ccr.isDone() {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
|
||||
@ -128,31 +201,53 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
|
||||
}
|
||||
ccr.curState.Addresses = addrs
|
||||
ccr.cc.updateResolverState(ccr.curState)
|
||||
ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
|
||||
}
|
||||
|
||||
// NewServiceConfig is called by the resolver implementation to send service
|
||||
// configs to gRPC.
|
||||
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
|
||||
if ccr.isDone() {
|
||||
if ccr.done.HasFired() {
|
||||
return
|
||||
}
|
||||
grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
|
||||
c, err := parseServiceConfig(sc)
|
||||
if err != nil {
|
||||
if ccr.cc.dopts.disableServiceConfig {
|
||||
grpclog.Infof("Service config lookups disabled; ignoring config")
|
||||
return
|
||||
}
|
||||
scpr := parseServiceConfig(sc)
|
||||
if scpr.Err != nil {
|
||||
grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err)
|
||||
if channelz.IsOn() {
|
||||
channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
|
||||
Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err),
|
||||
Severity: channelz.CtWarning,
|
||||
})
|
||||
}
|
||||
ccr.poll(balancer.ErrBadResolverState)
|
||||
return
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c})
|
||||
ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
|
||||
}
|
||||
ccr.curState.ServiceConfig = c
|
||||
ccr.cc.updateResolverState(ccr.curState)
|
||||
ccr.curState.ServiceConfig = scpr
|
||||
ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
|
||||
return parseServiceConfig(scJSON)
|
||||
}
|
||||
|
||||
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
|
||||
var updates []string
|
||||
oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig)
|
||||
newSC, newOK := s.ServiceConfig.(*ServiceConfig)
|
||||
var oldSC, newSC *ServiceConfig
|
||||
var oldOK, newOK bool
|
||||
if ccr.curState.ServiceConfig != nil {
|
||||
oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if s.ServiceConfig != nil {
|
||||
newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
|
||||
}
|
||||
if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
|
||||
updates = append(updates, "service config updated")
|
||||
}
|
||||
|
56
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored
@ -648,35 +648,58 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||||
return nil, st.Err()
|
||||
}
|
||||
|
||||
var size int
|
||||
if pf == compressionMade {
|
||||
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
|
||||
// use this decompressor as the default.
|
||||
if dc != nil {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
size = len(d)
|
||||
} else {
|
||||
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
// Read from LimitReader with limit max+1. So if the underlying
|
||||
// reader is over limit, the result will be bigger than max.
|
||||
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
} else {
|
||||
size = len(d)
|
||||
}
|
||||
if len(d) > maxReceiveMessageSize {
|
||||
if size > maxReceiveMessageSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
// implementation.
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Using compressor, decompress d, returning data and size.
|
||||
// Optionally, if data will be over maxReceiveMessageSize, just return the size.
|
||||
func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
|
||||
dcReader, err := compressor.Decompress(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if sizer, ok := compressor.(interface {
|
||||
DecompressedSize(compressedBytes []byte) int
|
||||
}); ok {
|
||||
if size := sizer.DecompressedSize(d); size >= 0 {
|
||||
if size > maxReceiveMessageSize {
|
||||
return nil, size, nil
|
||||
}
|
||||
// size is used as an estimate to size the buffer, but we
|
||||
// will read more data if available.
|
||||
// +MinRead so ReadFrom will not reallocate if size is correct.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
|
||||
bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
return buf.Bytes(), int(bytesRead), err
|
||||
}
|
||||
}
|
||||
// Read from LimitReader with limit max+1. So if the underlying
|
||||
// reader is over limit, the result will be bigger than max.
|
||||
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
return d, len(d), err
|
||||
}
|
||||
|
||||
// For the two compressor parameters, both should not be set, but if they are,
|
||||
// dc takes precedence over compressor.
|
||||
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
|
||||
@ -848,7 +871,7 @@ type channelzData struct {
|
||||
|
||||
// The SupportPackageIsVersion variables are referenced from generated protocol
|
||||
// buffer files to ensure compatibility with the gRPC version used. The latest
|
||||
// support package version is 5.
|
||||
// support package version is 6.
|
||||
//
|
||||
// Older versions are kept for compatibility. They may be removed if
|
||||
// compatibility cannot be maintained.
|
||||
@ -858,6 +881,7 @@ const (
|
||||
SupportPackageIsVersion3 = true
|
||||
SupportPackageIsVersion4 = true
|
||||
SupportPackageIsVersion5 = true
|
||||
SupportPackageIsVersion6 = true
|
||||
)
|
||||
|
||||
const grpcUA = "grpc-go/" + Version
|
||||
|
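The new decompress helper in rpc_util.go above probes the compressor for an optional DecompressedSize method so the server can reject an oversized message before inflating it. A gzip-backed compressor satisfying that optional interface could look roughly like this; the "gzip-sized" codec name is an assumption, and grpc's own encoding/gzip package already ships an equivalent implementation.

package gzipsized

import (
	"compress/gzip"
	"encoding/binary"
	"io"

	"google.golang.org/grpc/encoding"
)

type compressor struct{}

func (compressor) Name() string { return "gzip-sized" } // assumed codec name

func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

func (compressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

// DecompressedSize reads the gzip ISIZE trailer (last four bytes, size mod
// 2^32) to report the inflated size without actually decompressing.
func (compressor) DecompressedSize(buf []byte) int {
	if len(buf) < 4 {
		return -1 // unknown; grpc falls back to the LimitReader path
	}
	return int(binary.LittleEndian.Uint32(buf[len(buf)-4:]))
}

func init() { encoding.RegisterCompressor(compressor{}) }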
160
vendor/google.golang.org/grpc/server.go
generated
vendored
@ -130,6 +130,7 @@ type serverOptions struct {
|
||||
readBufferSize int
|
||||
connectionTimeout time.Duration
|
||||
maxHeaderListSize *uint32
|
||||
headerTableSize *uint32
|
||||
}
|
||||
|
||||
var defaultServerOptions = serverOptions{
|
||||
@ -343,8 +344,8 @@ func StatsHandler(h stats.Handler) ServerOption {
|
||||
// unknown service handler. The provided method is a bidi-streaming RPC service
|
||||
// handler that will be invoked instead of returning the "unimplemented" gRPC
|
||||
// error whenever a request is received for an unregistered service or method.
|
||||
// The handling function has full access to the Context of the request and the
|
||||
// stream, and the invocation bypasses interceptors.
|
||||
// The handling function and stream interceptor (if set) have full access to
|
||||
// the ServerStream, including its Context.
|
||||
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.unknownStreamDesc = &StreamDesc{
|
||||
@ -377,6 +378,16 @@ func MaxHeaderListSize(s uint32) ServerOption {
|
||||
})
|
||||
}
|
||||
|
||||
// HeaderTableSize returns a ServerOption that sets the size of dynamic
|
||||
// header table for stream.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func HeaderTableSize(s uint32) ServerOption {
|
||||
return newFuncServerOption(func(o *serverOptions) {
|
||||
o.headerTableSize = &s
|
||||
})
|
||||
}
|
||||
|
||||
// NewServer creates a gRPC server which has no service registered and has not
|
||||
// started to accept requests yet.
|
||||
func NewServer(opt ...ServerOption) *Server {
|
||||
@ -686,6 +697,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
|
||||
ReadBufferSize: s.opts.readBufferSize,
|
||||
ChannelzParentID: s.channelzID,
|
||||
MaxHeaderListSize: s.opts.maxHeaderListSize,
|
||||
HeaderTableSize: s.opts.headerTableSize,
|
||||
}
|
||||
st, err := transport.NewServerTransport("http2", c, config)
|
||||
if err != nil {
|
||||
@ -853,41 +865,58 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
|
||||
}
|
||||
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}()
|
||||
}
|
||||
sh := s.opts.statsHandler
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
begin := &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
if sh != nil || trInfo != nil || channelz.IsOn() {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
var statsBegin *stats.Begin
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
statsBegin = &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}()
|
||||
}
|
||||
if trInfo != nil {
|
||||
defer trInfo.tr.Finish()
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
sh.HandleRPC(stream.Context(), statsBegin)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
}
|
||||
// The deferred error handling for tracing, stats handler and channelz are
|
||||
// combined into one function to reduce stack usage -- a defer takes ~56-64
|
||||
// bytes on the stack, so overflowing the stack will require a stack
|
||||
// re-allocation, which is expensive.
|
||||
//
|
||||
// To maintain behavior similar to separate deferred statements, statements
|
||||
// should be executed in the reverse order. That is, tracing first, stats
|
||||
// handler second, and channelz last. Note that panics *within* defers will
|
||||
// lead to different behavior, but that's an acceptable compromise; that
|
||||
// would be undefined behavior territory anyway.
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
if trInfo != nil {
|
||||
if err != nil && err != io.EOF {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
|
||||
if sh != nil {
|
||||
end := &stats.End{
|
||||
BeginTime: statsBegin.BeginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}
|
||||
|
||||
if channelz.IsOn() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
@ -1087,31 +1116,15 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||
if channelz.IsOn() {
|
||||
s.incrCallsStarted()
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}()
|
||||
}
|
||||
sh := s.opts.statsHandler
|
||||
var statsBegin *stats.Begin
|
||||
if sh != nil {
|
||||
beginTime := time.Now()
|
||||
begin := &stats.Begin{
|
||||
statsBegin = &stats.Begin{
|
||||
BeginTime: beginTime,
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), begin)
|
||||
defer func() {
|
||||
end := &stats.End{
|
||||
BeginTime: beginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}()
|
||||
sh.HandleRPC(stream.Context(), statsBegin)
|
||||
}
|
||||
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||
ss := &serverStream{
|
||||
@ -1126,6 +1139,41 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
||||
statsHandler: sh,
|
||||
}
|
||||
|
||||
if sh != nil || trInfo != nil || channelz.IsOn() {
|
||||
// See comment in processUnaryRPC on defers.
|
||||
defer func() {
|
||||
if trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if err != nil && err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
ss.trInfo.tr.Finish()
|
||||
ss.trInfo.tr = nil
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
|
||||
if sh != nil {
|
||||
end := &stats.End{
|
||||
BeginTime: statsBegin.BeginTime,
|
||||
EndTime: time.Now(),
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
end.Error = toRPCErr(err)
|
||||
}
|
||||
sh.HandleRPC(stream.Context(), end)
|
||||
}
|
||||
|
||||
if channelz.IsOn() {
|
||||
if err != nil && err != io.EOF {
|
||||
s.incrCallsFailed()
|
||||
} else {
|
||||
s.incrCallsSucceeded()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
ss.binlog = binarylog.GetMethodLogger(stream.Method())
|
||||
if ss.binlog != nil {
|
||||
md, _ := metadata.FromIncomingContext(ctx)
|
||||
@ -1179,16 +1227,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
||||
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
ss.mu.Lock()
|
||||
if err != nil && err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
ss.trInfo.tr.Finish()
|
||||
ss.trInfo.tr = nil
|
||||
ss.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
var appErr error
|
||||
var server interface{}
|
||||
|
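Among the server.go changes above is the new, experimental HeaderTableSize option. Wiring it into server construction is a one-liner; the values below are arbitrary example settings, not recommendations.

package server

import "google.golang.org/grpc"

// newServer builds a server with the new HeaderTableSize option alongside
// the existing MaxHeaderListSize option.
func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.HeaderTableSize(4096),    // HTTP/2 dynamic header table size in bytes (EXPERIMENTAL)
		grpc.MaxHeaderListSize(1<<20), // cap on received header list size
	)
}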
39
vendor/google.golang.org/grpc/service_config.go
generated
vendored
@ -136,9 +136,9 @@ type retryPolicy struct {
|
||||
maxAttempts int
|
||||
|
||||
// Exponential backoff parameters. The initial retry attempt will occur at
|
||||
// random(0, initialBackoffMS). In general, the nth attempt will occur at
|
||||
// random(0, initialBackoff). In general, the nth attempt will occur at
|
||||
// random(0,
|
||||
// min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
|
||||
// min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
|
||||
//
|
||||
// These fields are required and must be greater than zero.
|
||||
initialBackoff time.Duration
|
||||
@ -261,20 +261,17 @@ type jsonSC struct {
|
||||
}
|
||||
|
||||
func init() {
|
||||
internal.ParseServiceConfig = func(sc string) (interface{}, error) {
|
||||
return parseServiceConfig(sc)
|
||||
}
|
||||
internal.ParseServiceConfigForTesting = parseServiceConfig
|
||||
}
|
||||
|
||||
func parseServiceConfig(js string) (*ServiceConfig, error) {
|
||||
func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
if len(js) == 0 {
|
||||
return nil, fmt.Errorf("no JSON service config provided")
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
|
||||
}
|
||||
var rsc jsonSC
|
||||
err := json.Unmarshal([]byte(js), &rsc)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
sc := ServiceConfig{
|
||||
LB: rsc.LoadBalancingPolicy,
|
||||
@ -288,7 +285,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
||||
if len(lbcfg) != 1 {
|
||||
err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
|
||||
grpclog.Warningf(err.Error())
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
var name string
|
||||
var jsonCfg json.RawMessage
|
||||
@ -303,17 +300,25 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
||||
var err error
|
||||
sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
|
||||
}
|
||||
} else if string(jsonCfg) != "{}" {
|
||||
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
|
||||
}
|
||||
break
|
||||
}
|
||||
if sc.lbConfig == nil {
|
||||
// We had a loadBalancingConfig field but did not encounter a
|
||||
// supported policy. The config is considered invalid in this
|
||||
// case.
|
||||
err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
|
||||
grpclog.Warningf(err.Error())
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
}
|
||||
|
||||
if rsc.MethodConfig == nil {
|
||||
return &sc, nil
|
||||
return &serviceconfig.ParseResult{Config: &sc}
|
||||
}
|
||||
for _, m := range *rsc.MethodConfig {
|
||||
if m.Name == nil {
|
||||
@ -322,7 +327,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
||||
d, err := parseDuration(m.Timeout)
|
||||
if err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
mc := MethodConfig{
|
||||
@ -331,7 +336,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
||||
}
|
||||
if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
return nil, err
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
if m.MaxRequestMessageBytes != nil {
|
||||
if *m.MaxRequestMessageBytes > int64(maxInt) {
|
||||
@ -356,13 +361,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) {
|
||||
|
||||
if sc.retryThrottling != nil {
|
||||
if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
|
||||
return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
|
||||
}
|
||||
if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
|
||||
return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)
|
||||
return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)}
|
||||
}
|
||||
}
|
||||
return &sc, nil
|
||||
return &serviceconfig.ParseResult{Config: &sc}
|
||||
}
|
||||
|
||||
func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
|
||||
|
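The retryPolicy comment near the top of this file now refers to initialBackoff and maxBackoff as durations. For reference, a service config JSON exercising those fields might look like the sketch below; the service name and numbers are illustrative, and such a string can be supplied at dial time via grpc.WithDefaultServiceConfig.

package example

// exampleServiceConfig is an assumed, illustrative service config.
const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "example.Echo"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`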
21
vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
generated
vendored
@ -22,27 +22,20 @@
// This package is EXPERIMENTAL.
package serviceconfig

import (
	"google.golang.org/grpc/internal"
)

// Config represents an opaque data structure holding a service config.
type Config interface {
	isConfig()
	isServiceConfig()
}

// LoadBalancingConfig represents an opaque data structure holding a load
// balancer config.
// balancing config.
type LoadBalancingConfig interface {
	isLoadBalancingConfig()
}

// Parse parses the JSON service config provided into an internal form or
// returns an error if the config is invalid.
func Parse(ServiceConfigJSON string) (Config, error) {
	c, err := internal.ParseServiceConfig(ServiceConfigJSON)
	if err != nil {
		return nil, err
	}
	return c.(Config), err
// ParseResult contains a service config or an error. Exactly one must be
// non-nil.
type ParseResult struct {
	Config Config
	Err    error
}
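With serviceconfig.Parse removed in favor of ParseResult, a resolver now asks the ClientConn to parse the config and attaches the result to the State it pushes. A hedged sketch (the JSON literal is an illustrative round-robin config):

package example

import "google.golang.org/grpc/resolver"

// pushConfig shows the ParseResult flow from inside a resolver implementation.
func pushConfig(cc resolver.ClientConn, addrs []resolver.Address) {
	pr := cc.ParseServiceConfig(`{"loadBalancingPolicy":"round_robin"}`)
	if pr.Err != nil {
		cc.ReportError(pr.Err) // ReportError is new on the ClientConn interface
		return
	}
	cc.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: pr})
}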
11
vendor/google.golang.org/grpc/stats/stats.go
generated
vendored
@ -91,6 +91,8 @@ type InHeader struct {
	LocalAddr net.Addr
	// Compression is the compression algorithm used for the RPC.
	Compression string
	// Header contains the header metadata received.
	Header metadata.MD
}

// IsClient indicates if the stats information is from client side.
@ -104,6 +106,9 @@ type InTrailer struct {
	Client bool
	// WireLength is the wire length of trailer.
	WireLength int
	// Trailer contains the trailer metadata received from the server. This
	// field is only valid if this InTrailer is from the client side.
	Trailer metadata.MD
}

// IsClient indicates if the stats information is from client side.
@ -146,6 +151,8 @@ type OutHeader struct {
	LocalAddr net.Addr
	// Compression is the compression algorithm used for the RPC.
	Compression string
	// Header contains the header metadata sent.
	Header metadata.MD
}

// IsClient indicates if this stats information is from client side.
@ -159,6 +166,9 @@ type OutTrailer struct {
	Client bool
	// WireLength is the wire length of trailer.
	WireLength int
	// Trailer contains the trailer metadata sent to the client. This
	// field is only valid if this OutTrailer is from the server side.
	Trailer metadata.MD
}

// IsClient indicates if this stats information is from client side.
@ -176,6 +186,7 @@ type End struct {
	EndTime time.Time
	// Trailer contains the trailer metadata received from the server. This
	// field is only valid if this End is from the client side.
	// Deprecated: use Trailer in InTrailer instead.
	Trailer metadata.MD
	// Error is the error the RPC ended with. It is an error generated from
	// status.Status and can be converted back to status.Status using
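The stats changes above add the received metadata itself (Header, Trailer) to the corresponding events. A stats.Handler that logs client-side trailer metadata could look roughly like the sketch below; install it with grpc.WithStatsHandler when dialing. The log format is illustrative.

package trailerstats

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

type Handler struct{}

func (Handler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

// HandleRPC inspects InTrailer events, which now carry the trailer metadata.
func (Handler) HandleRPC(_ context.Context, s stats.RPCStats) {
	if t, ok := s.(*stats.InTrailer); ok && s.IsClient() {
		log.Printf("received trailer metadata: %v", t.Trailer)
	}
}

func (Handler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (Handler) HandleConn(context.Context, stats.ConnStats) {}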
2
vendor/google.golang.org/grpc/stream.go
generated
vendored
@ -488,7 +488,7 @@ func (cs *clientStream) shouldRetry(err error) error {
	pushback := 0
	hasPushback := false
	if cs.attempt.s != nil {
		if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to {
		if !cs.attempt.s.TrailersOnly() {
			return err
		}
3
vendor/google.golang.org/grpc/trace.go
generated
vendored
@ -41,9 +41,6 @@ func methodFamily(m string) string {
	if i := strings.Index(m, "/"); i >= 0 {
		m = m[:i] // remove everything from second slash
	}
	if i := strings.LastIndex(m, "."); i >= 0 {
		m = m[i+1:] // cut down to last dotted component
	}
	return m
}
2
vendor/google.golang.org/grpc/version.go
generated
vendored
@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
const Version = "1.23.1"
const Version = "1.27.1"
87
vendor/google.golang.org/grpc/vet.sh
generated
vendored
@ -31,12 +31,15 @@ PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"
|
||||
if [[ "$1" = "-install" ]]; then
|
||||
# Check for module support
|
||||
if go help mod >& /dev/null; then
|
||||
# Install the pinned versions as defined in module tools.
|
||||
pushd ./test/tools
|
||||
go install \
|
||||
golang.org/x/lint/golint \
|
||||
golang.org/x/tools/cmd/goimports \
|
||||
honnef.co/go/tools/cmd/staticcheck \
|
||||
github.com/client9/misspell/cmd/misspell \
|
||||
github.com/golang/protobuf/protoc-gen-go
|
||||
popd
|
||||
else
|
||||
# Ye olde `go get` incantation.
|
||||
# Note: this gets the latest version of all tools (vs. the pinned versions
|
||||
@ -67,18 +70,21 @@ elif [[ "$#" -ne 0 ]]; then
|
||||
fi
|
||||
|
||||
# - Ensure all source files contain a copyright message.
|
||||
git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output
|
||||
(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go')
|
||||
|
||||
# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
|
||||
(! grep 'func Test[^(]' *_test.go)
|
||||
(! grep 'func Test[^(]' test/*.go)
|
||||
|
||||
# - Do not import x/net/context.
|
||||
(! git grep -l 'x/net/context' -- "*.go")
|
||||
|
||||
# - Do not import math/rand for real library code. Use internal/grpcrand for
|
||||
# thread safety.
|
||||
git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test')
|
||||
git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test')
|
||||
|
||||
# - Ensure all ptypes proto packages are renamed when importing.
|
||||
git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/")
|
||||
(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go")
|
||||
|
||||
# - Check imports that are illegal in appengine (until Go 1.11).
|
||||
# TODO: Remove when we drop Go 1.10 support
|
||||
@ -86,10 +92,12 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
|
||||
|
||||
# - gofmt, goimports, golint (with exceptions for generated code), go vet.
|
||||
gofmt -s -d -l . 2>&1 | fail_on_output
|
||||
goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output
|
||||
goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go")
|
||||
golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:")
|
||||
go vet -all .
|
||||
|
||||
misspell -error .
|
||||
|
||||
# - Check that generated proto files are up to date.
|
||||
if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||||
PATH="/home/travis/bin:${PATH}" make proto && \
|
||||
@ -105,30 +113,47 @@ if go help mod >& /dev/null; then
fi

# - Collection of static analysis checks
# TODO(dfawley): don't use deprecated functions in examples.
staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore '
google.golang.org/grpc/balancer.go:SA1019
google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019
google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019
google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019
google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019
google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019
google.golang.org/grpc/balancer_conn_wrappers.go:SA1019
google.golang.org/grpc/balancer_test.go:SA1019
google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019
google.golang.org/grpc/clientconn.go:S1024
google.golang.org/grpc/clientconn_state_transition_test.go:SA1019
google.golang.org/grpc/clientconn_test.go:SA1019
google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019
google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019
google.golang.org/grpc/internal/transport/handler_server.go:SA1019
google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019
google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019
google.golang.org/grpc/stats/stats_test.go:SA1019
google.golang.org/grpc/test/balancer_test.go:SA1019
google.golang.org/grpc/test/channelz_test.go:SA1019
google.golang.org/grpc/test/end2end_test.go:SA1019
google.golang.org/grpc/test/healthcheck_test.go:SA1019
' ./...
misspell -error .
#
# TODO(dfawley): don't use deprecated functions in examples or first-party
# plugins.
SC_OUT="$(mktemp)"
staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
# Error if anything other than deprecation warnings are printed.
(! grep -v "is deprecated:.*SA1019" "${SC_OUT}")
# Only ignore the following deprecated types/fields/functions.
(! grep -Fv '.HandleResolvedAddrs
.HandleSubConnStateChange
.HeaderMap
.NewAddress
.NewServiceConfig
.Metadata is deprecated: use Attributes
.Type is deprecated: use Attributes
.UpdateBalancerState
balancer.Picker
grpc.CallCustomCodec
grpc.Code
grpc.Compressor
grpc.Decompressor
grpc.MaxMsgSize
grpc.MethodConfig
grpc.NewGZIPCompressor
grpc.NewGZIPDecompressor
grpc.RPCCompressor
grpc.RPCDecompressor
grpc.RoundRobin
grpc.ServiceConfig
grpc.WithBalancer
grpc.WithBalancerName
grpc.WithCompressor
grpc.WithDecompressor
grpc.WithDialer
grpc.WithMaxMsgSize
grpc.WithServiceConfig
grpc.WithTimeout
http.CloseNotifier
naming.Resolver
naming.Update
naming.Watcher
resolver.Backend
resolver.GRPCLB' "${SC_OUT}"
)
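
For context on the ptypes rule enforced by the check above: the grep flags any Go file that imports a github.com/golang/protobuf/ptypes/* proto package without a named alias. Below is a minimal sketch of the import shape that passes the check; the alias durpb, the file name, and the values are illustrative and not part of this commit.

// example_ptypes.go (illustrative only; not part of the vendored diff)
package main

import (
	"fmt"
	"time"

	// Renamed import: a bare `"github.com/golang/protobuf/ptypes/duration"`
	// line with no alias is what the vet check rejects.
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Build a protobuf Duration message by hand from a time.Duration.
	d := 1500 * time.Millisecond
	msg := &durpb.Duration{
		Seconds: int64(d / time.Second),
		Nanos:   int32(d % time.Second),
	}
	fmt.Println(msg.Seconds, msg.Nanos)
}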
22
vendor/modules.txt
vendored
22
vendor/modules.txt
vendored
@ -143,11 +143,13 @@ github.com/gobwas/glob/syntax/ast
github.com/gobwas/glob/syntax/lexer
github.com/gobwas/glob/util/runes
github.com/gobwas/glob/util/strings
# github.com/golang/protobuf v1.3.2
# github.com/golang/protobuf v1.3.4
github.com/golang/protobuf/proto
github.com/golang/protobuf/protoc-gen-go/descriptor
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/empty
github.com/golang/protobuf/ptypes/timestamp
# github.com/golang/snappy v0.0.1
github.com/golang/snappy
@ -173,7 +175,7 @@ github.com/gosimple/slug
## explicit
github.com/grafana/grafana-plugin-model/go/datasource
github.com/grafana/grafana-plugin-model/go/renderer
# github.com/grafana/grafana-plugin-sdk-go v0.35.0
# github.com/grafana/grafana-plugin-sdk-go v0.39.0
## explicit
github.com/grafana/grafana-plugin-sdk-go/backend/grpcplugin
github.com/grafana/grafana-plugin-sdk-go/data
@ -181,7 +183,7 @@ github.com/grafana/grafana-plugin-sdk-go/genproto/pluginv2
# github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
## explicit
github.com/hashicorp/go-hclog
# github.com/hashicorp/go-plugin v1.0.1
# github.com/hashicorp/go-plugin v1.2.2
## explicit
github.com/hashicorp/go-plugin
github.com/hashicorp/go-plugin/internal/plugin
@ -426,12 +428,13 @@ google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873
## explicit
# google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.23.1
# google.golang.org/grpc v1.27.1
## explicit
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/roundrobin
@ -449,19 +452,22 @@ google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
google.golang.org/grpc/internal/balancerload
google.golang.org/grpc/internal/binarylog
google.golang.org/grpc/internal/buffer
google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/resolver/dns
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
google.golang.org/grpc/keepalive
google.golang.org/grpc/metadata
google.golang.org/grpc/naming
google.golang.org/grpc/peer
google.golang.org/grpc/reflection
google.golang.org/grpc/reflection/grpc_reflection_v1alpha
google.golang.org/grpc/resolver
google.golang.org/grpc/resolver/dns
google.golang.org/grpc/resolver/passthrough
google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
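
The bumps recorded above (grafana-plugin-sdk-go v0.39.0, hashicorp/go-plugin v1.2.2, grpc v1.27.1, plus matching protobuf and genproto revisions) determine which vendored packages are listed in this file. As a rough illustration of how the bumped SDK's data package is typically consumed, here is a minimal sketch; the NewFrame/NewField signatures are assumed to match this SDK line, and the frame contents are made up.

// example_frame.go (illustrative only; not part of the vendored diff)
package main

import (
	"fmt"
	"time"

	"github.com/grafana/grafana-plugin-sdk-go/data"
)

func main() {
	// A frame is a named collection of typed columns (fields).
	frame := data.NewFrame("example",
		data.NewField("time", nil, []time.Time{time.Now()}),
		data.NewField("value", nil, []float64{42}),
	)
	fmt.Println(frame.Name, len(frame.Fields))
}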