Merge branch 'backend_plugins'

* backend_plugins: (34 commits)
  code style fixes
  Adds Table in backend datasource contract.
  Adds Tables types to protobuf
  Review tsdb protobuf contract
  supports windows compatible plugin binaries
  moves plugin proxy to plugin package
  improves name for plugin logger
  uses pluginmanagers log instead of global
  removes commented code
  makes datasource handshake more explicit
  backend plugins: improves logging
  dont spawn new subprocess while shutting down
  plugins: restart killed plugins
  query result should be a map
  test for plugin path builder
  merge backend datasources and datasources
  use int64 for timestamps
  fixes invalid value/timestamp order
  merge backend-datasource and datasource type
  fixes broken unit test
  ...
This commit is contained in:
bergquist 2018-01-15 10:48:06 +01:00
commit 3951df136c
210 changed files with 62654 additions and 996 deletions

View File

@@ -26,3 +26,6 @@ test: test-go test-js
run:
./bin/grafana-server
protoc:
protoc -I pkg/tsdb/models pkg/tsdb/models/*.proto --go_out=plugins=grpc:pkg/tsdb/models/.

View File

@@ -62,17 +62,22 @@ func (g *GrafanaServerImpl) Start() error {
search.Init()
login.Init()
social.NewOAuthService()
plugins.Init()
pluginManager, err := plugins.NewPluginManager(g.context)
if err != nil {
return fmt.Errorf("Failed to start plugins. error: %v", err)
}
g.childRoutines.Go(func() error { return pluginManager.Run(g.context) })
if err := provisioning.Init(g.context, setting.HomePath, setting.Cfg); err != nil {
return fmt.Errorf("Failed to provision Grafana from config. error: %v", err)
}
closer, err := tracing.Init(setting.Cfg)
tracingCloser, err := tracing.Init(setting.Cfg)
if err != nil {
return fmt.Errorf("Tracing settings is not valid. error: %v", err)
}
defer closer.Close()
defer tracingCloser.Close()
// init alerting
if setting.AlertingEnabled && setting.ExecuteAlerts {

View File

@@ -236,8 +236,8 @@ func LogFilterHandler(maxLevel log15.Lvl, filters map[string]log15.Lvl, h log15.
if len(filters) > 0 {
for i := 0; i < len(r.Ctx); i += 2 {
key := r.Ctx[i].(string)
if key == "logger" {
key, ok := r.Ctx[i].(string)
if ok && key == "logger" {
loggerName, strOk := r.Ctx[i+1].(string)
if strOk {
if filterLevel, ok := filters[loggerName]; ok {
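
The fix above replaces an unchecked type assertion with the comma-ok form. A minimal sketch (not from the diff) of the difference, assuming a log15-style context slice where an even-index "key" slot may hold something other than a string:

package main

import "fmt"

func main() {
	ctx := []interface{}{42, "value"} // key slot holds an int, not a string
	// key := ctx[0].(string)  // unchecked form panics: interface conversion
	key, ok := ctx[0].(string) // comma-ok form degrades gracefully
	fmt.Println(key, ok)       // prints an empty key and false
}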

View File

@@ -1,6 +1,7 @@
package plugins
import (
"context"
"io/ioutil"
"testing"
@@ -91,7 +92,7 @@ func pluginScenario(desc string, t *testing.T, fn func()) {
setting.Cfg = ini.Empty()
sec, _ := setting.Cfg.NewSection("plugin.test-app")
sec.NewKey("path", "../../tests/test-app")
err := Init()
err := initPlugins(context.Background())
So(err, ShouldBeNil)

View File

@@ -1,6 +1,7 @@
package plugins
import (
"context"
"testing"
"github.com/grafana/grafana/pkg/bus"
@@ -17,7 +18,7 @@ func TestPluginDashboards(t *testing.T) {
setting.Cfg = ini.Empty()
sec, _ := setting.Cfg.NewSection("plugin.test-app")
sec.NewKey("path", "../../tests/test-app")
err := Init()
err := initPlugins(context.Background())
So(err, ShouldBeNil)

View File

@@ -0,0 +1,150 @@
package tsdb
import (
"fmt"
"github.com/grafana/grafana/pkg/components/null"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/tsdb"
proto "github.com/grafana/grafana/pkg/tsdb/models"
"golang.org/x/net/context"
)
func NewDatasourcePluginWrapper(log log.Logger, plugin TsdbPlugin) *DatasourcePluginWrapper {
return &DatasourcePluginWrapper{TsdbPlugin: plugin, logger: log}
}
type DatasourcePluginWrapper struct {
TsdbPlugin
logger log.Logger
}
func (tw *DatasourcePluginWrapper) Query(ctx context.Context, ds *models.DataSource, query *tsdb.TsdbQuery) (*tsdb.Response, error) {
jsonData, err := ds.JsonData.MarshalJSON()
if err != nil {
return nil, err
}
pbQuery := &proto.TsdbQuery{
Datasource: &proto.DatasourceInfo{
JsonData: string(jsonData),
Name: ds.Name,
Type: ds.Type,
Url: ds.Url,
Id: ds.Id,
OrgId: ds.OrgId,
},
TimeRange: &proto.TimeRange{
FromRaw: query.TimeRange.From,
ToRaw: query.TimeRange.To,
ToEpochMs: query.TimeRange.GetToAsMsEpoch(),
FromEpochMs: query.TimeRange.GetFromAsMsEpoch(),
},
Queries: []*proto.Query{},
}
for _, q := range query.Queries {
modelJson, err := q.Model.MarshalJSON()
if err != nil {
return nil, err
}
pbQuery.Queries = append(pbQuery.Queries, &proto.Query{
ModelJson: string(modelJson),
IntervalMs: q.IntervalMs,
RefId: q.RefId,
MaxDataPoints: q.MaxDataPoints,
})
}
pbres, err := tw.TsdbPlugin.Query(ctx, pbQuery)
if err != nil {
return nil, err
}
res := &tsdb.Response{
Results: map[string]*tsdb.QueryResult{},
}
for _, r := range pbres.Results {
res.Results[r.RefId] = &tsdb.QueryResult{
RefId: r.RefId,
Series: []*tsdb.TimeSeries{},
}
for _, s := range r.GetSeries() {
points := tsdb.TimeSeriesPoints{}
for _, p := range s.Points {
po := tsdb.NewTimePoint(null.FloatFrom(p.Value), float64(p.Timestamp))
points = append(points, po)
}
res.Results[r.RefId].Series = append(res.Results[r.RefId].Series, &tsdb.TimeSeries{
Name: s.Name,
Tags: s.Tags,
Points: points,
})
}
mappedTables, err := tw.mapTables(r)
if err != nil {
return nil, err
}
res.Results[r.RefId].Tables = mappedTables
}
return res, nil
}
func (tw *DatasourcePluginWrapper) mapTables(r *proto.QueryResult) ([]*tsdb.Table, error) {
var tables []*tsdb.Table
for _, t := range r.GetTables() {
mappedTable, err := tw.mapTable(t)
if err != nil {
return nil, err
}
tables = append(tables, mappedTable)
}
return tables, nil
}
func (tw *DatasourcePluginWrapper) mapTable(t *proto.Table) (*tsdb.Table, error) {
table := &tsdb.Table{}
for _, c := range t.GetColumns() {
table.Columns = append(table.Columns, tsdb.TableColumn{
Text: c.Name,
})
}
for _, r := range t.GetRows() {
row := tsdb.RowValues{}
for _, rv := range r.Values {
mappedRw, err := tw.mapRowValue(rv)
if err != nil {
return nil, err
}
row = append(row, mappedRw)
}
table.Rows = append(table.Rows, row)
}
return table, nil
}
func (tw *DatasourcePluginWrapper) mapRowValue(rv *proto.RowValue) (interface{}, error) {
switch rv.Kind {
case proto.RowValue_TYPE_NULL:
return nil, nil
case proto.RowValue_TYPE_INT64:
return rv.Int64Value, nil
case proto.RowValue_TYPE_BOOL:
return rv.BoolValue, nil
case proto.RowValue_TYPE_STRING:
return rv.StringValue, nil
case proto.RowValue_TYPE_DOUBLE:
return rv.DoubleValue, nil
case proto.RowValue_TYPE_BYTES:
return rv.BytesValue, nil
default:
return nil, fmt.Errorf("Unsupported row value %v from plugin", rv.Kind)
}
}

View File

@@ -0,0 +1,108 @@
package tsdb
import (
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/tsdb"
"github.com/grafana/grafana/pkg/tsdb/models"
"testing"
)
func TestMapTables(t *testing.T) {
dpw := NewDatasourcePluginWrapper(log.New("test-logger"), nil)
var qr = &proto.QueryResult{}
qr.Tables = append(qr.Tables, &proto.Table{
Columns: []*proto.TableColumn{},
Rows: nil,
})
want := []*tsdb.Table{{}}
have, err := dpw.mapTables(qr)
if err != nil {
t.Errorf("failed to map tables. error: %v", err)
}
if len(want) != len(have) {
t.Errorf("could not map all tables")
}
}
func TestMapTable(t *testing.T) {
dpw := NewDatasourcePluginWrapper(log.New("test-logger"), nil)
source := &proto.Table{
Columns: []*proto.TableColumn{{Name: "column1"}, {Name: "column2"}},
Rows: []*proto.TableRow{{
Values: []*proto.RowValue{
{
Kind: proto.RowValue_TYPE_BOOL,
BoolValue: true,
},
{
Kind: proto.RowValue_TYPE_INT64,
Int64Value: 42,
},
},
}},
}
want := &tsdb.Table{
Columns: []tsdb.TableColumn{{Text: "column1"}, {Text: "column2"}},
}
have, err := dpw.mapTable(source)
if err != nil {
t.Fatalf("failed to map table. error: %v", err)
}
for i := range have.Columns {
if want.Columns[i] != have.Columns[i] {
t.Fatalf("have column: %s, want %s", have, want)
}
}
if len(have.Rows) != 1 {
t.Fatalf("Expects one row but got %d", len(have.Rows))
}
rowValuesCount := len(have.Rows[0])
if rowValuesCount != 2 {
t.Fatalf("Expects two row values, got %d", rowValuesCount)
}
}
func TestMappingRowValue(t *testing.T) {
dpw := NewDatasourcePluginWrapper(log.New("test-logger"), nil)
boolRowValue, _ := dpw.mapRowValue(&proto.RowValue{Kind: proto.RowValue_TYPE_BOOL, BoolValue: true})
haveBool, ok := boolRowValue.(bool)
if !ok || haveBool != true {
t.Fatalf("Expected true, was %s", haveBool)
}
intRowValue, _ := dpw.mapRowValue(&proto.RowValue{Kind: proto.RowValue_TYPE_INT64, Int64Value: 42})
haveInt, ok := intRowValue.(int64)
if !ok || haveInt != 42 {
t.Fatalf("Expected %d, was %d", 42, haveInt)
}
stringRowValue, _ := dpw.mapRowValue(&proto.RowValue{Kind: proto.RowValue_TYPE_STRING, StringValue: "grafana"})
haveString, ok := stringRowValue.(string)
if !ok || haveString != "grafana" {
t.Fatalf("Expected %s, was %s", "grafana", haveString)
}
doubleRowValue, _ := dpw.mapRowValue(&proto.RowValue{Kind: proto.RowValue_TYPE_DOUBLE, DoubleValue: 1.5})
haveDouble, ok := doubleRowValue.(float64)
if !ok || haveDouble != 1.5 {
t.Fatalf("Expected %v, was %v", 1.5, haveDouble)
}
bytesRowValue, _ := dpw.mapRowValue(&proto.RowValue{Kind: proto.RowValue_TYPE_BYTES, BytesValue: []byte{66}})
haveBytes, ok := bytesRowValue.([]byte)
if !ok || len(haveBytes) != 1 || haveBytes[0] != 66 {
t.Fatalf("Expected %v, was %v", []byte{66}, haveBytes)
}
haveNil, _ := dpw.mapRowValue(&proto.RowValue{Kind: proto.RowValue_TYPE_NULL})
if haveNil != nil {
t.Fatalf("Expected %v, was %v", nil, haveNil)
}
}

View File

@@ -0,0 +1,22 @@
package tsdb
import (
proto "github.com/grafana/grafana/pkg/tsdb/models"
"golang.org/x/net/context"
)
type GRPCClient struct {
proto.TsdbPluginClient
}
func (m *GRPCClient) Query(ctx context.Context, req *proto.TsdbQuery) (*proto.Response, error) {
return m.TsdbPluginClient.Query(ctx, req)
}
type GRPCServer struct {
TsdbPlugin
}
func (m *GRPCServer) Query(ctx context.Context, req *proto.TsdbQuery) (*proto.Response, error) {
return m.TsdbPlugin.Query(ctx, req)
}

View File

@@ -0,0 +1,27 @@
package tsdb
import (
"golang.org/x/net/context"
proto "github.com/grafana/grafana/pkg/tsdb/models"
plugin "github.com/hashicorp/go-plugin"
"google.golang.org/grpc"
)
type TsdbPlugin interface {
Query(ctx context.Context, req *proto.TsdbQuery) (*proto.Response, error)
}
type TsdbPluginImpl struct {
plugin.NetRPCUnsupportedPlugin
Plugin TsdbPlugin
}
func (p *TsdbPluginImpl) GRPCServer(s *grpc.Server) error {
proto.RegisterTsdbPluginServer(s, &GRPCServer{p.Plugin})
return nil
}
func (p *TsdbPluginImpl) GRPCClient(c *grpc.ClientConn) (interface{}, error) {
return &GRPCClient{proto.NewTsdbPluginClient(c)}, nil
}
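
These adapters cover the host side; the matching plugin binary is not part of this diff. A minimal sketch of one, assuming the hashicorp/go-plugin Serve API and a hypothetical plugin id "test-datasource" (the map key must equal the datasource plugin Id that the host dispenses in datasource_plugin.go); the handshake values mirror handshakeConfig there:

package main

import (
	shared "github.com/grafana/grafana/pkg/plugins/datasource/tsdb"
	proto "github.com/grafana/grafana/pkg/tsdb/models"
	plugin "github.com/hashicorp/go-plugin"
	"golang.org/x/net/context"
)

// myDatasource is a hypothetical TsdbPlugin implementation.
type myDatasource struct{}

func (d *myDatasource) Query(ctx context.Context, req *proto.TsdbQuery) (*proto.Response, error) {
	res := &proto.Response{}
	for _, q := range req.Queries {
		// Return one empty result per query; a real plugin fills Series or Tables.
		res.Results = append(res.Results, &proto.QueryResult{RefId: q.RefId})
	}
	return res, nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		// Must mirror the host's handshakeConfig in datasource_plugin.go.
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "grafana_plugin_type",
			MagicCookieValue: "datasource",
		},
		Plugins: map[string]plugin.Plugin{
			"test-datasource": &shared.TsdbPluginImpl{Plugin: &myDatasource{}},
		},
		// go-plugin needs a gRPC server factory when serving ProtocolGRPC.
		GRPCServer: plugin.DefaultGRPCServer,
	})
}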

View File

@@ -1,9 +1,22 @@
package plugins
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/models"
shared "github.com/grafana/grafana/pkg/plugins/datasource/tsdb"
"github.com/grafana/grafana/pkg/tsdb"
plugin "github.com/hashicorp/go-plugin"
)
type DataSourcePlugin struct {
@@ -16,6 +29,12 @@ type DataSourcePlugin struct {
Mixed bool `json:"mixed,omitempty"`
HasQueryHelp bool `json:"hasQueryHelp,omitempty"`
Routes []*AppPluginRoute `json:"routes"`
Backend bool `json:"backend,omitempty"`
Executable string `json:"executable,omitempty"`
log log.Logger
client *plugin.Client
}
func (p *DataSourcePlugin) Load(decoder *json.Decoder, pluginDir string) error {
@@ -39,3 +58,87 @@ func (p *DataSourcePlugin) Load(decoder *json.Decoder, pluginDir string) error {
DataSources[p.Id] = p
return nil
}
var handshakeConfig = plugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: "grafana_plugin_type",
MagicCookieValue: "datasource",
}
func composeBinaryName(executable, os, arch string) string {
var extension string
os = strings.ToLower(os)
if os == "windows" {
extension = ".exe"
}
return fmt.Sprintf("%s_%s_%s%s", executable, os, strings.ToLower(arch), extension)
}
func (p *DataSourcePlugin) initBackendPlugin(ctx context.Context, log log.Logger) error {
p.log = log.New("plugin-id", p.Id)
err := p.spawnSubProcess()
if err == nil {
go p.restartKilledProcess(ctx)
}
return err
}
func (p *DataSourcePlugin) spawnSubProcess() error {
cmd := composeBinaryName(p.Executable, runtime.GOOS, runtime.GOARCH)
fullpath := path.Join(p.PluginDir, cmd)
p.client = plugin.NewClient(&plugin.ClientConfig{
HandshakeConfig: handshakeConfig,
Plugins: map[string]plugin.Plugin{p.Id: &shared.TsdbPluginImpl{}},
Cmd: exec.Command(fullpath),
AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
Logger: LogWrapper{Logger: p.log},
})
rpcClient, err := p.client.Client()
if err != nil {
return err
}
raw, err := rpcClient.Dispense(p.Id)
if err != nil {
return err
}
tsdbPlugin := raw.(shared.TsdbPlugin)
tsdb.RegisterTsdbQueryEndpoint(p.Id, func(dsInfo *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
return shared.NewDatasourcePluginWrapper(p.log, tsdbPlugin), nil
})
return nil
}
func (p *DataSourcePlugin) restartKilledProcess(ctx context.Context) error {
ticker := time.NewTicker(time.Second * 1)
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
if p.client.Exited() {
p.log.Debug("Spawning new sub process", "name", p.Name, "id", p.Id)
err := p.spawnSubProcess()
if err != nil {
p.log.Error("Failed to spawn subprocess", "error", err)
}
}
}
}
}
func (p *DataSourcePlugin) Kill() {
if p.client != nil {
p.log.Debug("Killing subprocess ", "name", p.Name)
p.client.Kill()
}
}

View File

@@ -0,0 +1,35 @@
package plugins
import (
"testing"
)
func TestComposeBinaryName(t *testing.T) {
tests := []struct {
name string
os string
arch string
expectedPath string
}{
{
name: "simple-json",
os: "linux",
arch: "amd64",
expectedPath: `simple-json_linux_amd64`,
},
{
name: "simple-json",
os: "windows",
arch: "amd64",
expectedPath: `simple-json_windows_amd64.exe`,
},
}
for _, v := range tests {
have := composeBinaryName(v.name, v.os, v.arch)
if have != v.expectedPath {
t.Errorf("expected %s got %s", v.expectedPath, have)
}
}
}

View File

@@ -0,0 +1,49 @@
package plugins
import (
"log"
glog "github.com/grafana/grafana/pkg/log"
hclog "github.com/hashicorp/go-hclog"
)
type LogWrapper struct {
Logger glog.Logger
}
func (lw LogWrapper) Trace(msg string, args ...interface{}) {
lw.Logger.Debug(msg, args...)
}
func (lw LogWrapper) Debug(msg string, args ...interface{}) {
lw.Logger.Debug(msg, args...)
}
func (lw LogWrapper) Info(msg string, args ...interface{}) {
lw.Logger.Info(msg, args...)
}
func (lw LogWrapper) Warn(msg string, args ...interface{}) {
lw.Logger.Warn(msg, args...)
}
func (lw LogWrapper) Error(msg string, args ...interface{}) {
lw.Logger.Error(msg, args...)
}
func (lw LogWrapper) IsTrace() bool { return true }
func (lw LogWrapper) IsDebug() bool { return true }
func (lw LogWrapper) IsInfo() bool { return true }
func (lw LogWrapper) IsWarn() bool { return true }
func (lw LogWrapper) IsError() bool { return true }
func (lw LogWrapper) With(args ...interface{}) hclog.Logger {
return LogWrapper{Logger: lw.Logger.New(args...)}
}
func (lw LogWrapper) Named(name string) hclog.Logger {
return LogWrapper{Logger: lw.Logger.New()}
}
func (lw LogWrapper) ResetNamed(name string) hclog.Logger {
return LogWrapper{Logger: lw.Logger.New()}
}
func (lw LogWrapper) StandardLogger(ops *hclog.StandardLoggerOptions) *log.Logger {
return nil
}
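
Since go-plugin receives this wrapper through plugin.ClientConfig.Logger, a compile-time assertion (not in the diff, and assuming the vendored hclog interface matches the methods above) would catch any drift:

package plugins

import hclog "github.com/hashicorp/go-hclog"

// Fails to compile if LogWrapper ever stops satisfying hclog.Logger.
var _ hclog.Logger = LogWrapper{}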

View File

@@ -1,6 +1,7 @@
package plugins
import (
"context"
"encoding/json"
"errors"
"fmt"
@@ -34,14 +35,41 @@ type PluginScanner struct {
errors []error
}
func Init() error {
type PluginManager struct {
log log.Logger
}
func NewPluginManager(ctx context.Context) (*PluginManager, error) {
err := initPlugins(ctx)
if err != nil {
return nil, err
}
return &PluginManager{
log: log.New("plugins"),
}, nil
}
func (p *PluginManager) Run(ctx context.Context) error {
<-ctx.Done()
for _, ds := range DataSources {
ds.Kill()
}
p.log.Info("Stopped Plugins", "error", ctx.Err())
return ctx.Err()
}
func initPlugins(ctx context.Context) error {
plog = log.New("plugins")
DataSources = make(map[string]*DataSourcePlugin)
StaticRoutes = make([]*PluginStaticRoute, 0)
Panels = make(map[string]*PanelPlugin)
Apps = make(map[string]*AppPlugin)
Plugins = make(map[string]*PluginBase)
DataSources = map[string]*DataSourcePlugin{}
StaticRoutes = []*PluginStaticRoute{}
Panels = map[string]*PanelPlugin{}
Apps = map[string]*AppPlugin{}
Plugins = map[string]*PluginBase{}
PluginTypes = map[string]interface{}{
"panel": PanelPlugin{},
"datasource": DataSourcePlugin{},
@@ -53,9 +81,8 @@ func Init() error {
// check if plugins dir exists
if _, err := os.Stat(setting.PluginsPath); os.IsNotExist(err) {
plog.Warn("Plugin dir does not exist", "dir", setting.PluginsPath)
if err = os.MkdirAll(setting.PluginsPath, os.ModePerm); err != nil {
plog.Warn("Failed to create plugin dir", "dir", setting.PluginsPath, "error", err)
plog.Error("Failed to create plugin dir", "dir", setting.PluginsPath, "error", err)
} else {
plog.Info("Plugin dir created", "dir", setting.PluginsPath)
scan(setting.PluginsPath)
@@ -70,9 +97,18 @@ func Init() error {
for _, panel := range Panels {
panel.initFrontendPlugin()
}
for _, panel := range DataSources {
panel.initFrontendPlugin()
for _, ds := range DataSources {
if ds.Backend {
err := ds.initBackendPlugin(ctx, plog)
if err != nil {
plog.Error("Failed to init plugin.", "error", err, "plugin", ds.Id)
}
}
ds.initFrontendPlugin()
}
for _, app := range Apps {
app.initApp()
}

View File

@@ -1,6 +1,7 @@
package plugins
import (
"context"
"path/filepath"
"testing"
@@ -14,7 +15,7 @@ func TestPluginScans(t *testing.T) {
Convey("When scaning for plugins", t, func() {
setting.StaticRootPath, _ = filepath.Abs("../../public/")
setting.Cfg = ini.Empty()
err := Init()
err := initPlugins(context.Background())
So(err, ShouldBeNil)
So(len(DataSources), ShouldBeGreaterThan, 1)
@@ -29,7 +30,7 @@ func TestPluginScans(t *testing.T) {
setting.Cfg = ini.Empty()
sec, _ := setting.Cfg.NewSection("plugin.nginx-app")
sec.NewKey("path", "../../tests/test-app")
err := Init()
err := initPlugins(context.Background())
So(err, ShouldBeNil)
So(len(Apps), ShouldBeGreaterThan, 0)

View File

@@ -14,9 +14,7 @@ type TsdbQuery struct {
type Query struct {
RefId string
Model *simplejson.Json
Depends []string
DataSource *models.DataSource
Results []*TimeSeries
MaxDataPoints int64
IntervalMs int64
}

View File

@@ -0,0 +1,636 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: tsdb_plugin.proto
/*
Package proto is a generated protocol buffer package.
It is generated from these files:
tsdb_plugin.proto
It has these top-level messages:
TsdbQuery
Query
TimeRange
Response
QueryResult
Table
TableColumn
TableRow
RowValue
DatasourceInfo
TimeSeries
Point
*/
package proto
import proto1 "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto1.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package
type RowValue_Kind int32
const (
// Field type null.
RowValue_TYPE_NULL RowValue_Kind = 0
// Field type double.
RowValue_TYPE_DOUBLE RowValue_Kind = 1
// Field type int64.
RowValue_TYPE_INT64 RowValue_Kind = 2
// Field type bool.
RowValue_TYPE_BOOL RowValue_Kind = 3
// Field type string.
RowValue_TYPE_STRING RowValue_Kind = 4
// Field type bytes.
RowValue_TYPE_BYTES RowValue_Kind = 5
)
var RowValue_Kind_name = map[int32]string{
0: "TYPE_NULL",
1: "TYPE_DOUBLE",
2: "TYPE_INT64",
3: "TYPE_BOOL",
4: "TYPE_STRING",
5: "TYPE_BYTES",
}
var RowValue_Kind_value = map[string]int32{
"TYPE_NULL": 0,
"TYPE_DOUBLE": 1,
"TYPE_INT64": 2,
"TYPE_BOOL": 3,
"TYPE_STRING": 4,
"TYPE_BYTES": 5,
}
func (x RowValue_Kind) String() string {
return proto1.EnumName(RowValue_Kind_name, int32(x))
}
func (RowValue_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} }
type TsdbQuery struct {
TimeRange *TimeRange `protobuf:"bytes,1,opt,name=timeRange" json:"timeRange,omitempty"`
Datasource *DatasourceInfo `protobuf:"bytes,2,opt,name=datasource" json:"datasource,omitempty"`
Queries []*Query `protobuf:"bytes,3,rep,name=queries" json:"queries,omitempty"`
}
func (m *TsdbQuery) Reset() { *m = TsdbQuery{} }
func (m *TsdbQuery) String() string { return proto1.CompactTextString(m) }
func (*TsdbQuery) ProtoMessage() {}
func (*TsdbQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *TsdbQuery) GetTimeRange() *TimeRange {
if m != nil {
return m.TimeRange
}
return nil
}
func (m *TsdbQuery) GetDatasource() *DatasourceInfo {
if m != nil {
return m.Datasource
}
return nil
}
func (m *TsdbQuery) GetQueries() []*Query {
if m != nil {
return m.Queries
}
return nil
}
type Query struct {
RefId string `protobuf:"bytes,1,opt,name=refId" json:"refId,omitempty"`
MaxDataPoints int64 `protobuf:"varint,2,opt,name=maxDataPoints" json:"maxDataPoints,omitempty"`
IntervalMs int64 `protobuf:"varint,3,opt,name=intervalMs" json:"intervalMs,omitempty"`
ModelJson string `protobuf:"bytes,4,opt,name=modelJson" json:"modelJson,omitempty"`
}
func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto1.CompactTextString(m) }
func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Query) GetRefId() string {
if m != nil {
return m.RefId
}
return ""
}
func (m *Query) GetMaxDataPoints() int64 {
if m != nil {
return m.MaxDataPoints
}
return 0
}
func (m *Query) GetIntervalMs() int64 {
if m != nil {
return m.IntervalMs
}
return 0
}
func (m *Query) GetModelJson() string {
if m != nil {
return m.ModelJson
}
return ""
}
type TimeRange struct {
FromRaw string `protobuf:"bytes,1,opt,name=fromRaw" json:"fromRaw,omitempty"`
ToRaw string `protobuf:"bytes,2,opt,name=toRaw" json:"toRaw,omitempty"`
FromEpochMs int64 `protobuf:"varint,3,opt,name=fromEpochMs" json:"fromEpochMs,omitempty"`
ToEpochMs int64 `protobuf:"varint,4,opt,name=toEpochMs" json:"toEpochMs,omitempty"`
}
func (m *TimeRange) Reset() { *m = TimeRange{} }
func (m *TimeRange) String() string { return proto1.CompactTextString(m) }
func (*TimeRange) ProtoMessage() {}
func (*TimeRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeRange) GetFromRaw() string {
if m != nil {
return m.FromRaw
}
return ""
}
func (m *TimeRange) GetToRaw() string {
if m != nil {
return m.ToRaw
}
return ""
}
func (m *TimeRange) GetFromEpochMs() int64 {
if m != nil {
return m.FromEpochMs
}
return 0
}
func (m *TimeRange) GetToEpochMs() int64 {
if m != nil {
return m.ToEpochMs
}
return 0
}
type Response struct {
Results []*QueryResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto1.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Response) GetResults() []*QueryResult {
if m != nil {
return m.Results
}
return nil
}
type QueryResult struct {
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
RefId string `protobuf:"bytes,2,opt,name=refId" json:"refId,omitempty"`
MetaJson string `protobuf:"bytes,3,opt,name=metaJson" json:"metaJson,omitempty"`
Series []*TimeSeries `protobuf:"bytes,4,rep,name=series" json:"series,omitempty"`
Tables []*Table `protobuf:"bytes,5,rep,name=tables" json:"tables,omitempty"`
}
func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto1.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *QueryResult) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *QueryResult) GetRefId() string {
if m != nil {
return m.RefId
}
return ""
}
func (m *QueryResult) GetMetaJson() string {
if m != nil {
return m.MetaJson
}
return ""
}
func (m *QueryResult) GetSeries() []*TimeSeries {
if m != nil {
return m.Series
}
return nil
}
func (m *QueryResult) GetTables() []*Table {
if m != nil {
return m.Tables
}
return nil
}
type Table struct {
Columns []*TableColumn `protobuf:"bytes,1,rep,name=columns" json:"columns,omitempty"`
Rows []*TableRow `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"`
}
func (m *Table) Reset() { *m = Table{} }
func (m *Table) String() string { return proto1.CompactTextString(m) }
func (*Table) ProtoMessage() {}
func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Table) GetColumns() []*TableColumn {
if m != nil {
return m.Columns
}
return nil
}
func (m *Table) GetRows() []*TableRow {
if m != nil {
return m.Rows
}
return nil
}
type TableColumn struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *TableColumn) Reset() { *m = TableColumn{} }
func (m *TableColumn) String() string { return proto1.CompactTextString(m) }
func (*TableColumn) ProtoMessage() {}
func (*TableColumn) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *TableColumn) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type TableRow struct {
Values []*RowValue `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
}
func (m *TableRow) Reset() { *m = TableRow{} }
func (m *TableRow) String() string { return proto1.CompactTextString(m) }
func (*TableRow) ProtoMessage() {}
func (*TableRow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *TableRow) GetValues() []*RowValue {
if m != nil {
return m.Values
}
return nil
}
type RowValue struct {
Kind RowValue_Kind `protobuf:"varint,1,opt,name=kind,enum=plugins.RowValue_Kind" json:"kind,omitempty"`
DoubleValue float64 `protobuf:"fixed64,2,opt,name=doubleValue" json:"doubleValue,omitempty"`
Int64Value int64 `protobuf:"varint,3,opt,name=int64Value" json:"int64Value,omitempty"`
BoolValue bool `protobuf:"varint,4,opt,name=boolValue" json:"boolValue,omitempty"`
StringValue string `protobuf:"bytes,5,opt,name=stringValue" json:"stringValue,omitempty"`
BytesValue []byte `protobuf:"bytes,6,opt,name=bytesValue,proto3" json:"bytesValue,omitempty"`
}
func (m *RowValue) Reset() { *m = RowValue{} }
func (m *RowValue) String() string { return proto1.CompactTextString(m) }
func (*RowValue) ProtoMessage() {}
func (*RowValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *RowValue) GetKind() RowValue_Kind {
if m != nil {
return m.Kind
}
return RowValue_TYPE_NULL
}
func (m *RowValue) GetDoubleValue() float64 {
if m != nil {
return m.DoubleValue
}
return 0
}
func (m *RowValue) GetInt64Value() int64 {
if m != nil {
return m.Int64Value
}
return 0
}
func (m *RowValue) GetBoolValue() bool {
if m != nil {
return m.BoolValue
}
return false
}
func (m *RowValue) GetStringValue() string {
if m != nil {
return m.StringValue
}
return ""
}
func (m *RowValue) GetBytesValue() []byte {
if m != nil {
return m.BytesValue
}
return nil
}
type DatasourceInfo struct {
Id int64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
OrgId int64 `protobuf:"varint,2,opt,name=orgId" json:"orgId,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
Type string `protobuf:"bytes,4,opt,name=type" json:"type,omitempty"`
Url string `protobuf:"bytes,5,opt,name=url" json:"url,omitempty"`
JsonData string `protobuf:"bytes,6,opt,name=jsonData" json:"jsonData,omitempty"`
SecureJsonData string `protobuf:"bytes,7,opt,name=secureJsonData" json:"secureJsonData,omitempty"`
}
func (m *DatasourceInfo) Reset() { *m = DatasourceInfo{} }
func (m *DatasourceInfo) String() string { return proto1.CompactTextString(m) }
func (*DatasourceInfo) ProtoMessage() {}
func (*DatasourceInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *DatasourceInfo) GetId() int64 {
if m != nil {
return m.Id
}
return 0
}
func (m *DatasourceInfo) GetOrgId() int64 {
if m != nil {
return m.OrgId
}
return 0
}
func (m *DatasourceInfo) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *DatasourceInfo) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *DatasourceInfo) GetUrl() string {
if m != nil {
return m.Url
}
return ""
}
func (m *DatasourceInfo) GetJsonData() string {
if m != nil {
return m.JsonData
}
return ""
}
func (m *DatasourceInfo) GetSecureJsonData() string {
if m != nil {
return m.SecureJsonData
}
return ""
}
type TimeSeries struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Tags map[string]string `protobuf:"bytes,2,rep,name=tags" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Points []*Point `protobuf:"bytes,3,rep,name=points" json:"points,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto1.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *TimeSeries) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *TimeSeries) GetTags() map[string]string {
if m != nil {
return m.Tags
}
return nil
}
func (m *TimeSeries) GetPoints() []*Point {
if m != nil {
return m.Points
}
return nil
}
type Point struct {
Timestamp int64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"`
Value float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
}
func (m *Point) Reset() { *m = Point{} }
func (m *Point) String() string { return proto1.CompactTextString(m) }
func (*Point) ProtoMessage() {}
func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (m *Point) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *Point) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
func init() {
proto1.RegisterType((*TsdbQuery)(nil), "plugins.TsdbQuery")
proto1.RegisterType((*Query)(nil), "plugins.Query")
proto1.RegisterType((*TimeRange)(nil), "plugins.TimeRange")
proto1.RegisterType((*Response)(nil), "plugins.Response")
proto1.RegisterType((*QueryResult)(nil), "plugins.QueryResult")
proto1.RegisterType((*Table)(nil), "plugins.Table")
proto1.RegisterType((*TableColumn)(nil), "plugins.TableColumn")
proto1.RegisterType((*TableRow)(nil), "plugins.TableRow")
proto1.RegisterType((*RowValue)(nil), "plugins.RowValue")
proto1.RegisterType((*DatasourceInfo)(nil), "plugins.DatasourceInfo")
proto1.RegisterType((*TimeSeries)(nil), "plugins.TimeSeries")
proto1.RegisterType((*Point)(nil), "plugins.Point")
proto1.RegisterEnum("plugins.RowValue_Kind", RowValue_Kind_name, RowValue_Kind_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for TsdbPlugin service
type TsdbPluginClient interface {
Query(ctx context.Context, in *TsdbQuery, opts ...grpc.CallOption) (*Response, error)
}
type tsdbPluginClient struct {
cc *grpc.ClientConn
}
func NewTsdbPluginClient(cc *grpc.ClientConn) TsdbPluginClient {
return &tsdbPluginClient{cc}
}
func (c *tsdbPluginClient) Query(ctx context.Context, in *TsdbQuery, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/plugins.TsdbPlugin/Query", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for TsdbPlugin service
type TsdbPluginServer interface {
Query(context.Context, *TsdbQuery) (*Response, error)
}
func RegisterTsdbPluginServer(s *grpc.Server, srv TsdbPluginServer) {
s.RegisterService(&_TsdbPlugin_serviceDesc, srv)
}
func _TsdbPlugin_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TsdbQuery)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(TsdbPluginServer).Query(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/plugins.TsdbPlugin/Query",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(TsdbPluginServer).Query(ctx, req.(*TsdbQuery))
}
return interceptor(ctx, in, info, handler)
}
var _TsdbPlugin_serviceDesc = grpc.ServiceDesc{
ServiceName: "plugins.TsdbPlugin",
HandlerType: (*TsdbPluginServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Query",
Handler: _TsdbPlugin_Query_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "tsdb_plugin.proto",
}
func init() { proto1.RegisterFile("tsdb_plugin.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 811 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x55, 0x5d, 0x8f, 0x1b, 0x35,
0x14, 0x65, 0xbe, 0x92, 0xcc, 0x0d, 0x0d, 0xa9, 0xa9, 0x20, 0x5a, 0x01, 0x5a, 0x46, 0x50, 0x05,
0x90, 0x22, 0x08, 0xa5, 0x45, 0x85, 0xa7, 0xd0, 0x08, 0xa5, 0x84, 0xdd, 0xc5, 0x3b, 0x45, 0x2a,
0x0f, 0x54, 0x93, 0x8c, 0x13, 0x86, 0xce, 0x8c, 0x83, 0xed, 0xd9, 0x10, 0xf1, 0xc4, 0x3f, 0xe1,
0x99, 0x67, 0x7e, 0x00, 0x3f, 0xad, 0xf2, 0x1d, 0xcf, 0x47, 0xd2, 0x7d, 0x9a, 0xb9, 0xe7, 0x1c,
0x5f, 0x5f, 0x5f, 0x1f, 0xdb, 0x70, 0x57, 0xc9, 0x78, 0xf5, 0x62, 0x97, 0x16, 0xdb, 0x24, 0x9f,
0xec, 0x04, 0x57, 0x9c, 0x74, 0xcb, 0x48, 0x06, 0xff, 0x58, 0xe0, 0x87, 0x32, 0x5e, 0xfd, 0x54,
0x30, 0x71, 0x20, 0x9f, 0x83, 0xaf, 0x92, 0x8c, 0xd1, 0x28, 0xdf, 0xb2, 0x91, 0x75, 0x6e, 0x8d,
0xfb, 0x53, 0x32, 0x31, 0xd2, 0x49, 0x58, 0x31, 0xb4, 0x11, 0x91, 0x47, 0x00, 0x71, 0xa4, 0x22,
0xc9, 0x0b, 0xb1, 0x66, 0x23, 0x1b, 0x87, 0xbc, 0x5b, 0x0f, 0x79, 0x52, 0x53, 0x8b, 0x7c, 0xc3,
0x69, 0x4b, 0x4a, 0xc6, 0xd0, 0xfd, 0xa3, 0x60, 0x22, 0x61, 0x72, 0xe4, 0x9c, 0x3b, 0xe3, 0xfe,
0x74, 0x50, 0x8f, 0xc2, 0x5a, 0x68, 0x45, 0x07, 0x7f, 0x5b, 0xe0, 0x95, 0xe5, 0xdd, 0x03, 0x4f,
0xb0, 0xcd, 0x22, 0xc6, 0xd2, 0x7c, 0x5a, 0x06, 0xe4, 0x23, 0xb8, 0x93, 0x45, 0x7f, 0xea, 0xa9,
0xae, 0x78, 0x92, 0x2b, 0x89, 0x55, 0x38, 0xf4, 0x18, 0x24, 0x1f, 0x00, 0x24, 0xb9, 0x62, 0xe2,
0x26, 0x4a, 0x7f, 0xd4, 0x53, 0x6a, 0x49, 0x0b, 0x21, 0xef, 0x81, 0x9f, 0xf1, 0x98, 0xa5, 0x4f,
0x25, 0xcf, 0x47, 0x2e, 0xe6, 0x6f, 0x80, 0xe0, 0x2f, 0xf0, 0xeb, 0xe5, 0x93, 0x11, 0x74, 0x37,
0x82, 0x67, 0x34, 0xda, 0x9b, 0x42, 0xaa, 0x50, 0x17, 0xa8, 0xb8, 0xc6, 0xed, 0xb2, 0x40, 0x0c,
0xc8, 0x39, 0xf4, 0xb5, 0x60, 0xbe, 0xe3, 0xeb, 0xdf, 0xea, 0xb9, 0xdb, 0x90, 0x9e, 0x5c, 0xf1,
0x8a, 0x77, 0x91, 0x6f, 0x80, 0xe0, 0x31, 0xf4, 0x28, 0x93, 0x3b, 0x9e, 0x4b, 0x46, 0x26, 0xd0,
0x15, 0x4c, 0x16, 0xa9, 0x92, 0x23, 0x0b, 0xdb, 0x76, 0xef, 0xa4, 0x6d, 0x48, 0xd2, 0x4a, 0x14,
0xfc, 0x6b, 0x41, 0xbf, 0x45, 0xe8, 0x0a, 0x99, 0x10, 0x5c, 0x54, 0x2d, 0xc4, 0xa0, 0x69, 0xac,
0xdd, 0x6e, 0xec, 0x19, 0xf4, 0x32, 0xa6, 0x22, 0xec, 0x88, 0x83, 0x44, 0x1d, 0x93, 0xcf, 0xa0,
0x23, 0xcb, 0xdd, 0x73, 0xb1, 0x8c, 0xb7, 0x8f, 0x6c, 0x72, 0x8d, 0x14, 0x35, 0x12, 0x72, 0x1f,
0x3a, 0x2a, 0x5a, 0xa5, 0x4c, 0x8e, 0xbc, 0x93, 0xad, 0x0e, 0x35, 0x4c, 0x0d, 0x1b, 0xfc, 0x0a,
0x1e, 0x02, 0x7a, 0x95, 0x6b, 0x9e, 0x16, 0x59, 0xfe, 0xfa, 0x2a, 0x51, 0xf0, 0x1d, 0x92, 0xb4,
0x12, 0x91, 0x8f, 0xc1, 0x15, 0x7c, 0xaf, 0x77, 0x5e, 0x8b, 0xef, 0x9e, 0xa4, 0xe7, 0x7b, 0x8a,
0x74, 0xf0, 0x21, 0xf4, 0x5b, 0xc3, 0x09, 0x01, 0x37, 0x8f, 0x32, 0x66, 0x5a, 0x81, 0xff, 0xc1,
0x57, 0xd0, 0xab, 0x06, 0x91, 0x4f, 0xa0, 0x73, 0x13, 0xa5, 0x05, 0xab, 0x8a, 0x68, 0xf2, 0x52,
0xbe, 0xff, 0x59, 0x33, 0xd4, 0x08, 0x82, 0xff, 0x6d, 0xe8, 0x55, 0x20, 0xf9, 0x14, 0xdc, 0x97,
0x49, 0x5e, 0xba, 0x74, 0x30, 0x7d, 0xe7, 0xb5, 0x51, 0x93, 0x1f, 0x92, 0x3c, 0xa6, 0xa8, 0xd1,
0xde, 0x88, 0x79, 0xb1, 0x4a, 0x19, 0x32, 0xd8, 0x7f, 0x8b, 0xb6, 0x21, 0x63, 0xdc, 0x87, 0x0f,
0x4a, 0x41, 0x63, 0x5c, 0x83, 0x68, 0xef, 0xac, 0x38, 0x4f, 0x4b, 0x5a, 0x7b, 0xa7, 0x47, 0x1b,
0x40, 0xe7, 0x97, 0x4a, 0x24, 0xf9, 0xb6, 0xe4, 0x3d, 0x5c, 0x6a, 0x1b, 0xd2, 0xf9, 0x57, 0x07,
0xc5, 0x64, 0x29, 0xe8, 0x9c, 0x5b, 0xe3, 0x37, 0x69, 0x0b, 0x09, 0x36, 0xe0, 0xea, 0x7a, 0xc9,
0x1d, 0xf0, 0xc3, 0xe7, 0x57, 0xf3, 0x17, 0x17, 0xcf, 0x96, 0xcb, 0xe1, 0x1b, 0xe4, 0x2d, 0xe8,
0x63, 0xf8, 0xe4, 0xf2, 0xd9, 0x6c, 0x39, 0x1f, 0x5a, 0x64, 0x00, 0x80, 0xc0, 0xe2, 0x22, 0x7c,
0xf8, 0x60, 0x68, 0xd7, 0xfa, 0xd9, 0xe5, 0xe5, 0x72, 0xe8, 0xd4, 0xfa, 0xeb, 0x90, 0x2e, 0x2e,
0xbe, 0x1f, 0xba, 0xb5, 0x7e, 0xf6, 0x3c, 0x9c, 0x5f, 0x0f, 0xbd, 0xe0, 0x3f, 0x0b, 0x06, 0xc7,
0xf7, 0x05, 0x19, 0x80, 0x9d, 0x94, 0x6d, 0x74, 0xa8, 0x9d, 0xc4, 0xda, 0xa6, 0x5c, 0x6c, 0x8d,
0x4d, 0x1d, 0x5a, 0x06, 0xf5, 0x36, 0x3a, 0xcd, 0x36, 0x6a, 0x4c, 0x1d, 0x76, 0xcc, 0x1c, 0x64,
0xfc, 0x27, 0x43, 0x70, 0x0a, 0x91, 0x9a, 0x16, 0xe8, 0x5f, 0x6d, 0xf0, 0xdf, 0x25, 0xcf, 0xf5,
0xac, 0xb8, 0x70, 0x9f, 0xd6, 0x31, 0xb9, 0x0f, 0x03, 0xc9, 0xd6, 0x85, 0x60, 0x4f, 0x2b, 0x45,
0x17, 0x15, 0x27, 0xa8, 0x2e, 0x1b, 0x1a, 0xcb, 0xdf, 0xe6, 0x29, 0xf2, 0x05, 0xb8, 0x2a, 0xda,
0x56, 0xee, 0x7c, 0xff, 0x96, 0x93, 0x32, 0x09, 0xa3, 0xad, 0x9c, 0xe7, 0x4a, 0x1c, 0x28, 0x4a,
0xf5, 0x89, 0xd9, 0x95, 0x97, 0xd9, 0xe9, 0xe5, 0x88, 0xd7, 0x19, 0x35, 0xec, 0xd9, 0x23, 0xf0,
0xeb, 0xa1, 0x7a, 0x81, 0x2f, 0xd9, 0xc1, 0x4c, 0xad, 0x7f, 0x75, 0xc3, 0x6e, 0x6a, 0x5f, 0xf9,
0xb4, 0x0c, 0x1e, 0xdb, 0x5f, 0x5b, 0xc1, 0x37, 0xe0, 0x61, 0x26, 0xbc, 0x7a, 0x92, 0x8c, 0x49,
0x15, 0x65, 0x3b, 0xd3, 0xea, 0x06, 0x38, 0x4e, 0x60, 0x99, 0x04, 0xd3, 0x6f, 0x01, 0xf4, 0x9b,
0x71, 0x85, 0x25, 0x91, 0x49, 0x75, 0x3d, 0xb7, 0x9e, 0x8a, 0xea, 0x45, 0x39, 0x6b, 0x9d, 0x19,
0x73, 0x85, 0xcd, 0xba, 0xbf, 0x78, 0xf8, 0x08, 0xad, 0x3a, 0xf8, 0xf9, 0xf2, 0x55, 0x00, 0x00,
0x00, 0xff, 0xff, 0x58, 0xae, 0x00, 0xd6, 0xa0, 0x06, 0x00, 0x00,
}

View File

@@ -0,0 +1,98 @@
syntax = "proto3";
option go_package = "proto";
package plugins;
message TsdbQuery {
TimeRange timeRange = 1;
DatasourceInfo datasource = 2;
repeated Query queries = 3;
}
message Query {
string refId = 1;
int64 maxDataPoints = 2;
int64 intervalMs = 3;
string modelJson = 4;
}
message TimeRange {
string fromRaw = 1;
string toRaw = 2;
int64 fromEpochMs = 3;
int64 toEpochMs = 4;
}
message Response {
repeated QueryResult results = 1;
}
message QueryResult {
string error = 1;
string refId = 2;
string metaJson = 3;
repeated TimeSeries series = 4;
repeated Table tables = 5;
}
message Table {
repeated TableColumn columns = 1;
repeated TableRow rows = 2;
}
message TableColumn {
string name = 1;
}
message TableRow {
repeated RowValue values = 1;
}
message RowValue {
enum Kind {
// Field type null.
TYPE_NULL = 0;
// Field type double.
TYPE_DOUBLE = 1;
// Field type int64.
TYPE_INT64 = 2;
// Field type bool.
TYPE_BOOL = 3;
// Field type string.
TYPE_STRING = 4;
// Field type bytes.
TYPE_BYTES = 5;
};
Kind kind = 1;
double doubleValue = 2;
int64 int64Value = 3;
bool boolValue = 4;
string stringValue = 5;
bytes bytesValue = 6;
}
message DatasourceInfo {
int64 id = 1;
int64 orgId = 2;
string name = 3;
string type = 4;
string url = 5;
string jsonData = 6;
string secureJsonData = 7;
}
message TimeSeries {
string name = 1;
map<string, string> tags = 2;
repeated Point points = 3;
}
message Point {
int64 timestamp = 1;
double value = 2;
}
service TsdbPlugin {
rpc Query(TsdbQuery) returns (Response);
}

View File

@@ -11,14 +11,14 @@ func NewTimeRange(from, to string) *TimeRange {
return &TimeRange{
From: from,
To: to,
Now: time.Now(),
now: time.Now(),
}
}
type TimeRange struct {
From string
To string
Now time.Time
now time.Time
}
func (tr *TimeRange) GetFromAsMsEpoch() int64 {
@@ -65,12 +65,12 @@ func (tr *TimeRange) ParseFrom() (time.Time, error) {
return time.Time{}, err
}
return tr.Now.Add(diff), nil
return tr.now.Add(diff), nil
}
func (tr *TimeRange) ParseTo() (time.Time, error) {
if tr.To == "now" {
return tr.Now, nil
return tr.now, nil
} else if strings.HasPrefix(tr.To, "now-") {
withoutNow := strings.Replace(tr.To, "now-", "", 1)
@@ -79,7 +79,7 @@ func (tr *TimeRange) ParseTo() (time.Time, error) {
return time.Time{}, nil
}
return tr.Now.Add(diff), nil
return tr.now.Add(diff), nil
}
if res, ok := tryParseUnixMsEpoch(tr.To); ok {
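
With Now unexported, code outside the tsdb package can no longer set the clock field directly. A short sketch of the constructor path external callers use instead; NewTimeRange stamps time.Now() internally, so only package-internal tests (like the ones below) can still inject a fixed clock:

package main

import (
	"fmt"

	"github.com/grafana/grafana/pkg/tsdb"
)

func main() {
	tr := tsdb.NewTimeRange("now-5m", "now")
	from, err := tr.ParseFrom()
	if err != nil {
		panic(err)
	}
	to, _ := tr.ParseTo()
	fmt.Println(from, to)
}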

View File

@@ -16,7 +16,7 @@ func TestTimeRange(t *testing.T) {
tr := TimeRange{
From: "5m",
To: "now",
Now: now,
now: now,
}
Convey("5m ago ", func() {
@@ -39,7 +39,7 @@ func TestTimeRange(t *testing.T) {
tr := TimeRange{
From: "5h",
To: "now-10m",
Now: now,
now: now,
}
Convey("5h ago ", func() {
@@ -65,7 +65,7 @@ func TestTimeRange(t *testing.T) {
tr := TimeRange{
From: "1474973725473",
To: "1474975757930",
Now: now,
now: now,
}
res, err := tr.ParseFrom()
@@ -82,7 +82,7 @@ func TestTimeRange(t *testing.T) {
tr := TimeRange{
From: "asdf",
To: "asdf",
Now: now,
now: now,
}
_, err = tr.ParseFrom()

View File

@@ -0,0 +1,37 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
# at src/google/protobuf/descriptor.proto
regenerate:
@echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto .
protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto

File diff suppressed because it is too large

View File

@@ -0,0 +1,849 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
// Based on original Protocol Buffers design by
// Sanjay Ghemawat, Jeff Dean, and others.
//
// The messages in this file describe the definitions found in .proto files.
// A valid .proto file can be translated directly to a FileDescriptorProto
// without any other information (e.g. without reading its imports).
syntax = "proto2";
package google.protobuf;
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
option java_package = "com.google.protobuf";
option java_outer_classname = "DescriptorProtos";
option csharp_namespace = "Google.Protobuf.Reflection";
option objc_class_prefix = "GPB";
// descriptor.proto must be optimized for speed because reflection-based
// algorithms don't work during bootstrapping.
option optimize_for = SPEED;
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
message FileDescriptorSet {
repeated FileDescriptorProto file = 1;
}
// Describes a complete .proto file.
message FileDescriptorProto {
optional string name = 1; // file name, relative to root of source tree
optional string package = 2; // e.g. "foo", "foo.bar", etc.
// Names of files imported by this file.
repeated string dependency = 3;
// Indexes of the public imported files in the dependency list above.
repeated int32 public_dependency = 10;
// Indexes of the weak imported files in the dependency list.
// For Google-internal migration only. Do not use.
repeated int32 weak_dependency = 11;
// All top-level definitions in this file.
repeated DescriptorProto message_type = 4;
repeated EnumDescriptorProto enum_type = 5;
repeated ServiceDescriptorProto service = 6;
repeated FieldDescriptorProto extension = 7;
optional FileOptions options = 8;
// This field contains optional information about the original source code.
// You may safely remove this entire field without harming runtime
// functionality of the descriptors -- the information is needed only by
// development tools.
optional SourceCodeInfo source_code_info = 9;
// The syntax of the proto file.
// The supported values are "proto2" and "proto3".
optional string syntax = 12;
}
// Describes a message type.
message DescriptorProto {
optional string name = 1;
repeated FieldDescriptorProto field = 2;
repeated FieldDescriptorProto extension = 6;
repeated DescriptorProto nested_type = 3;
repeated EnumDescriptorProto enum_type = 4;
message ExtensionRange {
optional int32 start = 1;
optional int32 end = 2;
optional ExtensionRangeOptions options = 3;
}
repeated ExtensionRange extension_range = 5;
repeated OneofDescriptorProto oneof_decl = 8;
optional MessageOptions options = 7;
// Range of reserved tag numbers. Reserved tag numbers may not be used by
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
message ReservedRange {
optional int32 start = 1; // Inclusive.
optional int32 end = 2; // Exclusive.
}
repeated ReservedRange reserved_range = 9;
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
repeated string reserved_name = 10;
}
message ExtensionRangeOptions {
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
// Describes a field within a message.
message FieldDescriptorProto {
enum Type {
// 0 is reserved for errors.
// Order is weird for historical reasons.
TYPE_DOUBLE = 1;
TYPE_FLOAT = 2;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
// negative values are likely.
TYPE_INT64 = 3;
TYPE_UINT64 = 4;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
// negative values are likely.
TYPE_INT32 = 5;
TYPE_FIXED64 = 6;
TYPE_FIXED32 = 7;
TYPE_BOOL = 8;
TYPE_STRING = 9;
// Tag-delimited aggregate.
// Group type is deprecated and not supported in proto3. However, Proto3
// implementations should still be able to parse the group wire format and
// treat group fields as unknown fields.
TYPE_GROUP = 10;
TYPE_MESSAGE = 11; // Length-delimited aggregate.
// New in version 2.
TYPE_BYTES = 12;
TYPE_UINT32 = 13;
TYPE_ENUM = 14;
TYPE_SFIXED32 = 15;
TYPE_SFIXED64 = 16;
TYPE_SINT32 = 17; // Uses ZigZag encoding.
TYPE_SINT64 = 18; // Uses ZigZag encoding.
};
enum Label {
// 0 is reserved for errors
LABEL_OPTIONAL = 1;
LABEL_REQUIRED = 2;
LABEL_REPEATED = 3;
};
optional string name = 1;
optional int32 number = 3;
optional Label label = 4;
// If type_name is set, this need not be set. If both this and type_name
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
optional Type type = 5;
// For message and enum types, this is the name of the type. If the name
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
// rules are used to find the type (i.e. first the nested types within this
// message are searched, then within the parent, on up to the root
// namespace).
optional string type_name = 6;
// For extensions, this is the name of the type being extended. It is
// resolved in the same manner as type_name.
optional string extendee = 2;
// For numeric types, contains the original text representation of the value.
// For booleans, "true" or "false".
// For strings, contains the default text contents (not escaped in any way).
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
// TODO(kenton): Base-64 encode?
optional string default_value = 7;
// If set, gives the index of a oneof in the containing type's oneof_decl
// list. This field is a member of that oneof.
optional int32 oneof_index = 9;
// JSON name of this field. The value is set by protocol compiler. If the
// user has set a "json_name" option on this field, that option's value
// will be used. Otherwise, it's deduced from the field's name by converting
// it to camelCase.
optional string json_name = 10;
optional FieldOptions options = 8;
}
// Describes a oneof.
message OneofDescriptorProto {
optional string name = 1;
optional OneofOptions options = 2;
}
// Describes an enum type.
message EnumDescriptorProto {
optional string name = 1;
repeated EnumValueDescriptorProto value = 2;
optional EnumOptions options = 3;
}
// Describes a value within an enum.
message EnumValueDescriptorProto {
optional string name = 1;
optional int32 number = 2;
optional EnumValueOptions options = 3;
}
// Describes a service.
message ServiceDescriptorProto {
optional string name = 1;
repeated MethodDescriptorProto method = 2;
optional ServiceOptions options = 3;
}
// Describes a method of a service.
message MethodDescriptorProto {
optional string name = 1;
// Input and output type names. These are resolved in the same way as
// FieldDescriptorProto.type_name, but must refer to a message type.
optional string input_type = 2;
optional string output_type = 3;
optional MethodOptions options = 4;
// Identifies if client streams multiple client messages
optional bool client_streaming = 5 [default=false];
// Identifies if server streams multiple server messages
optional bool server_streaming = 6 [default=false];
}
// ===================================================================
// Options
// Each of the definitions above may have "options" attached. These are
// just annotations which may cause code to be generated slightly differently
// or may contain hints for code that manipulates protocol messages.
//
// Clients may define custom options as extensions of the *Options messages.
// These extensions may not yet be known at parsing time, so the parser cannot
// store the values in them. Instead it stores them in a field in the *Options
// message called uninterpreted_option. This field must have the same name
// across all *Options messages. We then use this field to populate the
// extensions when we build a descriptor, at which point all protos have been
// parsed and so all extensions are known.
//
// Extension numbers for custom options may be chosen as follows:
// * For options which will only be used within a single application or
// organization, or for experimental options, use field numbers 50000
// through 99999. It is up to you to ensure that you do not use the
// same number for multiple options.
// * For options which will be published and used publicly by multiple
// independent entities, e-mail protobuf-global-extension-registry@google.com
// to reserve extension numbers. Simply provide your project name (e.g.
// Objective-C plugin) and your project website (if available) -- there's no
// need to explain how you intend to use them. Usually you only need one
// extension number. You can declare multiple options with only one extension
// number by putting them in a sub-message. See the Custom Options section of
// the docs for examples:
// https://developers.google.com/protocol-buffers/docs/proto#options
// If this turns out to be popular, a web service will be set up
// to automatically assign option numbers.
message FileOptions {
// Sets the Java package where classes generated from this .proto will be
// placed. By default, the proto package is used, but this is often
// inappropriate because proto packages do not normally start with backwards
// domain names.
optional string java_package = 1;
// If set, all the classes from the .proto file are wrapped in a single
// outer class with the given name. This applies to both Proto1
// (equivalent to the old "--one_java_file" option) and Proto2 (where
// a .proto always translates to a single class, but you may want to
// explicitly choose the class name).
optional string java_outer_classname = 8;
// If set true, then the Java code generator will generate a separate .java
// file for each top-level message, enum, and service defined in the .proto
// file. Thus, these types will *not* be nested inside the outer class
// named by java_outer_classname. However, the outer class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
optional bool java_multiple_files = 10 [default=false];
// This option does nothing.
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
// If set true, then the Java2 code generator will generate code that
// throws an exception whenever an attempt is made to assign a non-UTF-8
// byte sequence to a string field.
// Message reflection will do the same.
// However, an extension field still accepts non-UTF-8 byte sequences.
// This option has no effect on when used with the lite runtime.
optional bool java_string_check_utf8 = 27 [default=false];
// Generated classes can be optimized for speed or code size.
enum OptimizeMode {
SPEED = 1; // Generate complete code for parsing, serialization,
// etc.
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
}
optional OptimizeMode optimize_for = 9 [default=SPEED];
// Sets the Go package where structs generated from this .proto will be
// placed. If omitted, the Go package will be derived from the following:
// - The basename of the package import path, if provided.
// - Otherwise, the package statement in the .proto file, if present.
// - Otherwise, the basename of the .proto file, without extension.
optional string go_package = 11;
// Should generic services be generated in each language? "Generic" services
// are not specific to any particular RPC system. They are generated by the
// main code generators in each language (without additional plugins).
// Generic services were the only kind of service generation supported by
// early versions of google.protobuf.
//
// Generic services are now considered deprecated in favor of using plugins
// that generate code specific to your particular RPC system. Therefore,
// these default to false. Old code which depends on generic services should
// explicitly set them to true.
optional bool cc_generic_services = 16 [default=false];
optional bool java_generic_services = 17 [default=false];
optional bool py_generic_services = 18 [default=false];
optional bool php_generic_services = 42 [default=false];
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
// least, this is a formalization for deprecating files.
optional bool deprecated = 23 [default=false];
// Enables the use of arenas for the proto messages in this file. This applies
// only to generated classes for C++.
optional bool cc_enable_arenas = 31 [default=false];
// Sets the Objective-C class prefix which is prepended to all Objective-C
// generated classes from this .proto. There is no default.
optional string objc_class_prefix = 36;
// Namespace for generated classes; defaults to the package.
optional string csharp_namespace = 37;
// By default Swift generators will take the proto package and CamelCase it
// replacing '.' with underscore and use that to prefix the types/symbols
// defined. When this option is provided, they will use this value instead
// to prefix the types/symbols defined.
optional string swift_prefix = 39;
// Sets the PHP class prefix which is prepended to all PHP generated classes
// from this .proto. Default is empty.
optional string php_class_prefix = 40;
// Use this option to change the namespace of PHP generated classes. Default
// is empty. When this option is empty, the package name will be used for
// determining the namespace.
optional string php_namespace = 41;
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
reserved 38;
}
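// As an illustrative example (not part of this file), a .proto file might
// combine several of the options above as follows:
//
//   option java_package = "com.example.foo";
//   option java_outer_classname = "FooProtos";
//   option java_multiple_files = true;
//   option optimize_for = LITE_RUNTIME;
//   option go_package = "github.com/example/foo";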
message MessageOptions {
// Set true to use the old proto1 MessageSet wire format for extensions.
// This is provided for backwards-compatibility with the MessageSet wire
// format. You should not use this for any other reason: It's less
// efficient, has fewer features, and is more complicated.
//
// The message must be defined exactly as follows:
// message Foo {
// option message_set_wire_format = true;
// extensions 4 to max;
// }
// Note that the message cannot have any defined fields; MessageSets only
// have extensions.
//
// All extensions of your type must be singular messages; e.g. they cannot
// be int32s, enums, or repeated messages.
//
// Because this is an option, the above two restrictions are not enforced by
// the protocol compiler.
optional bool message_set_wire_format = 1 [default=false];
// Disables the generation of the standard "descriptor()" accessor, which can
// conflict with a field of the same name. This is meant to make migration
// from proto1 easier; new code should avoid fields named "descriptor".
optional bool no_standard_descriptor_accessor = 2 [default=false];
// Is this message deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the message, or it will be completely ignored; at the very least,
// this is a formalization for deprecating messages.
optional bool deprecated = 3 [default=false];
// Whether the message is an automatically generated map entry type for the
// maps field.
//
// For maps fields:
// map<KeyType, ValueType> map_field = 1;
// The parsed descriptor looks like:
// message MapFieldEntry {
// option map_entry = true;
// optional KeyType key = 1;
// optional ValueType value = 2;
// }
// repeated MapFieldEntry map_field = 1;
//
// Implementations may choose not to generate the map_entry=true message, but
// use a native map in the target language to hold the keys and values.
// The reflection APIs in such implementations still need to work as
// if the field is a repeated message field.
//
// NOTE: Do not set the option in .proto files. Always use the maps syntax
// instead. The option should only be implicitly set by the proto compiler
// parser.
optional bool map_entry = 7;
reserved 8; // javalite_serializable
reserved 9; // javanano_as_lite
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message FieldOptions {
// The ctype option instructs the C++ code generator to use a different
// representation of the field than it normally would. See the specific
// options below. This option is not yet implemented in the open source
// release -- sorry, we'll try to include it in a future version!
optional CType ctype = 1 [default = STRING];
enum CType {
// Default mode.
STRING = 0;
CORD = 1;
STRING_PIECE = 2;
}
// The packed option can be enabled for repeated primitive fields to enable
// a more efficient representation on the wire. Rather than repeatedly
// writing the tag and type for each element, the entire array is encoded as
// a single length-delimited blob. In proto3, only explicitly setting it to
// false will avoid using packed encoding.
optional bool packed = 2;
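// For example (illustrative), a proto2 field can opt in to packed encoding:
//
//   repeated int32 samples = 4 [packed=true];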
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
// is represented as a JavaScript string, which avoids loss of precision that
// can happen when a large value is converted to a floating point JavaScript
// number.
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
// use the JavaScript "number" type. The behavior of the default option
// JS_NORMAL is implementation dependent.
//
// This option is an enum to permit additional types to be added, e.g.
// goog.math.Integer.
optional JSType jstype = 6 [default = JS_NORMAL];
enum JSType {
// Use the default type.
JS_NORMAL = 0;
// Use JavaScript strings.
JS_STRING = 1;
// Use JavaScript numbers.
JS_NUMBER = 2;
}
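// For example (illustrative), a 64-bit id that must not lose precision in
// JavaScript can be declared as:
//
//   optional fixed64 id = 1 [jstype = JS_STRING];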
// Should this field be parsed lazily? Lazy applies only to message-type
// fields. It means that when the outer message is initially parsed, the
// inner message's contents will not be parsed but instead stored in encoded
// form. The inner message will actually be parsed when it is first accessed.
//
// This is only a hint. Implementations are free to choose whether to use
// eager or lazy parsing regardless of the value of this option. However,
// setting this option true suggests that the protocol author believes that
// using lazy parsing on this field is worth the additional bookkeeping
// overhead typically needed to implement it.
//
// This option does not affect the public interface of any generated code;
// all method signatures remain the same. Furthermore, thread-safety of the
// interface is not affected by this option; const methods remain safe to
// call from multiple threads concurrently, while non-const methods continue
// to require exclusive access.
//
//
// Note that implementations may choose not to check required fields within
// a lazy sub-message. That is, calling IsInitialized() on the outer message
// may return true even if the inner message has missing required fields.
// This is necessary because otherwise the inner message would have to be
// parsed in order to perform the check, defeating the purpose of lazy
// parsing. An implementation which chooses not to check required fields
// must be consistent about it. That is, for any particular sub-message, the
// implementation must either *always* check its required fields, or *never*
// check its required fields, regardless of whether or not the message has
// been parsed.
optional bool lazy = 5 [default=false];
// Is this field deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for accessors, or it will be completely ignored; at the very least, this
// is a formalization for deprecating fields.
optional bool deprecated = 3 [default=false];
// For Google-internal migration only. Do not use.
optional bool weak = 10 [default=false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
reserved 4; // removed jtype
}
message OneofOptions {
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message EnumOptions {
// Set this option to true to allow mapping different tag names to the same
// value.
optional bool allow_alias = 2;
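// For example (illustrative), allow_alias permits two names for one value:
//
//   enum Status {
//     option allow_alias = true;
//     UNKNOWN = 0;
//     STARTED = 1;
//     RUNNING = 1;  // alias for STARTED
//   }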
// Is this enum deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum, or it will be completely ignored; at the very least, this
// is a formalization for deprecating enums.
optional bool deprecated = 3 [default=false];
reserved 5; // javanano_as_lite
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message EnumValueOptions {
// Is this enum value deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum value, or it will be completely ignored; at the very least,
// this is a formalization for deprecating enum values.
optional bool deprecated = 1 [default=false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message ServiceOptions {
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
// framework. We apologize for hoarding these numbers to ourselves, but
// we were already using them long before we decided to release Protocol
// Buffers.
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the service, or it will be completely ignored; at the very least,
// this is a formalization for deprecating services.
optional bool deprecated = 33 [default=false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message MethodOptions {
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
// framework. We apologize for hoarding these numbers to ourselves, but
// we were already using them long before we decided to release Protocol
// Buffers.
// Is this method deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; at the very least,
// this is a formalization for deprecating methods.
optional bool deprecated = 33 [default=false];
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
// or neither? HTTP-based RPC implementations may choose the GET verb for safe
// methods, and the PUT verb for idempotent methods, instead of the default POST.
enum IdempotencyLevel {
IDEMPOTENCY_UNKNOWN = 0;
NO_SIDE_EFFECTS = 1; // implies idempotent
IDEMPOTENT = 2; // idempotent, but may have side effects
}
optional IdempotencyLevel idempotency_level =
34 [default=IDEMPOTENCY_UNKNOWN];
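// For example (illustrative), a read-only method can be marked as safe:
//
//   rpc GetFoo(GetFooRequest) returns (GetFooResponse) {
//     option idempotency_level = NO_SIDE_EFFECTS;
//   }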
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
// A message representing an option the parser does not recognize. This only
// appears in options protos created by the compiler::Parser class.
// DescriptorPool resolves these when building Descriptor objects. Therefore,
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
// in them.
message UninterpretedOption {
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
// "foo.(bar.baz).qux".
message NamePart {
required string name_part = 1;
required bool is_extension = 2;
}
repeated NamePart name = 2;
// The value of the uninterpreted option, in whatever type the tokenizer
// identified it as during parsing. Exactly one of these should be set.
optional string identifier_value = 3;
optional uint64 positive_int_value = 4;
optional int64 negative_int_value = 5;
optional double double_value = 6;
optional bytes string_value = 7;
optional string aggregate_value = 8;
}
// ===================================================================
// Optional source code info
// Encapsulates information about the original source file from which a
// FileDescriptorProto was generated.
message SourceCodeInfo {
// A Location identifies a piece of source code in a .proto file which
// corresponds to a particular definition. This information is intended
// to be useful to IDEs, code indexers, documentation generators, and similar
// tools.
//
// For example, say we have a file like:
// message Foo {
// optional string foo = 1;
// }
// Let's look at just the field definition:
// optional string foo = 1;
// ^ ^^ ^^ ^ ^^^
// a bc de f ghi
// We have the following locations:
// span path represents
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
//
// Notes:
// - A location may refer to a repeated field itself (i.e. not to any
// particular index within it). This is used whenever a set of elements are
// logically enclosed in a single code segment. For example, an entire
// extend block (possibly containing multiple extension definitions) will
// have an outer location whose path refers to the "extensions" repeated
// field without an index.
// - Multiple locations may have the same path. This happens when a single
// logical declaration is spread out across multiple places. The most
// obvious example is the "extend" block again -- there may be multiple
// extend blocks in the same scope, each of which will have the same path.
// - A location's span is not always a subset of its parent's span. For
// example, the "extendee" of an extension declaration appears at the
// beginning of the "extend" block and is shared by all extensions within
// the block.
// - Just because a location's span is a subset of some other location's span
// does not mean that it is a descendant. For example, a "group" defines
// both a type and a field in a single declaration. Thus, the locations
// corresponding to the type and field and their components will overlap.
// - Code which tries to interpret locations should probably be designed to
// ignore those that it doesn't understand, as more types of locations could
// be recorded in the future.
repeated Location location = 1;
message Location {
// Identifies which part of the FileDescriptorProto was defined at this
// location.
//
// Each element is a field number or an index. They form a path from
// the root FileDescriptorProto to the place where the definition appears. For
// example, this path:
// [ 4, 3, 2, 7, 1 ]
// refers to:
// file.message_type(3) // 4, 3
// .field(7) // 2, 7
// .name() // 1
// This is because FileDescriptorProto.message_type has field number 4:
// repeated DescriptorProto message_type = 4;
// and DescriptorProto.field has field number 2:
// repeated FieldDescriptorProto field = 2;
// and FieldDescriptorProto.name has field number 1:
// optional string name = 1;
//
// Thus, the above path gives the location of a field name. If we removed
// the last element:
// [ 4, 3, 2, 7 ]
// this path refers to the whole field declaration (from the beginning
// of the label to the terminating semicolon).
repeated int32 path = 1 [packed=true];
// Always has exactly three or four elements: start line, start column,
// end line (optional, otherwise assumed same as start line), end column.
// These are packed into a single field for efficiency. Note that line
// and column numbers are zero-based -- typically you will want to add
// 1 to each before displaying to a user.
repeated int32 span = 2 [packed=true];
// If this SourceCodeInfo represents a complete declaration, these are any
// comments appearing before and after the declaration which appear to be
// attached to the declaration.
//
// A series of line comments appearing on consecutive lines, with no other
// tokens appearing on those lines, will be treated as a single comment.
//
// leading_detached_comments will keep paragraphs of comments that appear
// before (but not connected to) the current element. Each paragraph,
// separated by empty lines, will be one comment element in the repeated
// field.
//
// Only the comment content is provided; comment markers (e.g. //) are
// stripped out. For block comments, leading whitespace and an asterisk
// will be stripped from the beginning of each line other than the first.
// Newlines are included in the output.
//
// Examples:
//
// optional int32 foo = 1; // Comment attached to foo.
// // Comment attached to bar.
// optional int32 bar = 2;
//
// optional string baz = 3;
// // Comment attached to baz.
// // Another line attached to baz.
//
// // Comment attached to qux.
// //
// // Another line attached to qux.
// optional double qux = 4;
//
// // Detached comment for corge. This is not leading or trailing comments
// // to qux or corge because there are blank lines separating it from
// // both.
//
// // Detached comment for corge paragraph 2.
//
// optional string corge = 5;
// /* Block comment attached
// * to corge. Leading asterisks
// * will be removed. */
// /* Block comment attached to
// * grault. */
// optional int32 grault = 6;
//
// // ignored detached comments.
optional string leading_comments = 3;
optional string trailing_comments = 4;
repeated string leading_detached_comments = 6;
}
}
// Describes the relationship between generated code and its original source
// file. A GeneratedCodeInfo message is associated with only one generated
// source file, but may contain references to different source .proto files.
message GeneratedCodeInfo {
// An Annotation connects some span of text in generated code to an element
// of its generating .proto file.
repeated Annotation annotation = 1;
message Annotation {
// Identifies the element in the original source .proto file. This field
// is formatted the same as SourceCodeInfo.Location.path.
repeated int32 path = 1 [packed=true];
// Identifies the filesystem path to the original source .proto.
optional string source_file = 2;
// Identifies the starting offset in bytes in the generated code
// that relates to the identified object.
optional int32 begin = 3;
// Identifies the ending offset in bytes in the generated code that
// relates to the identified offset. The end offset should be one past
// the last relevant byte (so the length of the text = end - begin).
optional int32 end = 4;
}
}

139
vendor/github.com/golang/protobuf/ptypes/any.go generated vendored Normal file
View File

@ -0,0 +1,139 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
)
const googleApis = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
value, err := proto.Marshal(pb)
if err != nil {
return nil, err
}
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
proto.Message
}
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if the corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if the type
// of the contents of the Any message does not match the type of the pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
var err error
d.Message, err = Empty(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
}
aname, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
}
return proto.Unmarshal(any.Value, pb)
}
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
return aname == proto.MessageName(pb)
}
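// A minimal usage sketch (illustrative; pb.Foo stands in for any generated
// message type that is linked into the binary):
//
//  foo := &pb.Foo{}
//  a, err := MarshalAny(foo)           // wrap foo in an Any
//  ...
//  got := &pb.Foo{}
//  if err := UnmarshalAny(a, got); err != nil { ... }
//  Is(a, got)                          // true: type names match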

178
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go generated vendored Normal file
View File

@ -0,0 +1,178 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
google/protobuf/any.proto
It has these top-level messages:
Any
*/
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// The Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go.
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by the protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
}

149
vendor/github.com/golang/protobuf/ptypes/any/any.proto generated vendored Normal file
View File

@ -0,0 +1,149 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// The Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go.
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by the protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
string type_url = 1;
// Must be a valid serialized protocol buffer of the above specified type.
bytes value = 2;
}
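// For example (illustrative), a message can carry arbitrary typed payloads:
//
//   message ErrorStatus {
//     string message = 1;
//     repeated google.protobuf.Any details = 2;
//   }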

35
vendor/github.com/golang/protobuf/ptypes/doc.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes

102
vendor/github.com/golang/protobuf/ptypes/duration.go generated vendored Normal file
View File

@ -0,0 +1,102 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements conversions between google.protobuf.Duration
// and time.Duration.
import (
"errors"
"fmt"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, while the range of time.Duration is about 290 years).
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durpb.Duration{
Seconds: secs,
Nanos: int32(nanos),
}
}
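// A minimal round-trip sketch (illustrative):
//
//  p := DurationProto(90 * time.Second) // &durpb.Duration{Seconds: 90}
//  d, err := Duration(p)                // d == 90s, err == nil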

144
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go generated vendored Normal file
View File

@ -0,0 +1,144 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
/*
Package duration is a generated protocol buffer package.
It is generated from these files:
google/protobuf/duration.proto
It has these top-level messages:
Duration
*/
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
}

117
vendor/github.com/golang/protobuf/ptypes/duration/duration.proto generated vendored Normal file
View File

@ -0,0 +1,117 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}
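// For example (illustrative), a message can carry a time-to-live:
//
//   message CacheEntry {
//     bytes payload = 1;
//     google.protobuf.Duration ttl = 2;
//   }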

43
vendor/github.com/golang/protobuf/ptypes/regen.sh generated vendored Executable file
View File

@ -0,0 +1,43 @@
#!/bin/bash -e
#
# This script fetches and rebuilds the "well-known types" protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.
# You also need Go and Git installed.
PKG=github.com/golang/protobuf/ptypes
UPSTREAM=https://github.com/google/protobuf
UPSTREAM_SUBDIR=src/google/protobuf
PROTO_FILES=(any duration empty struct timestamp wrappers)
function die() {
echo 1>&2 $*
exit 1
}
# Sanity check that the right tools are accessible.
for tool in go git protoc protoc-gen-go; do
q=$(which $tool) || die "didn't find $tool"
echo 1>&2 "$tool: $q"
done
tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
trap 'rm -rf $tmpdir' EXIT
echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd "$base"
echo 1>&2 "fetching latest protos... "
git clone -q $UPSTREAM $tmpdir
for file in ${PROTO_FILES[@]}; do
echo 1>&2 "* $file"
protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die
cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file
done
echo 1>&2 "All OK"

134
vendor/github.com/golang/protobuf/ptypes/timestamp.go generated vendored Normal file
View File

@ -0,0 +1,134 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements operations on google.protobuf.Timestamp.
import (
"errors"
"fmt"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
Seconds: seconds,
Nanos: nanos,
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
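// Example (illustrative): round-tripping between time.Time and
// *tspb.Timestamp with the helpers above.
//
//	now := time.Now()
//	pb, _ := TimestampProto(now) // error only if now falls outside [0001-01-01, 10000-01-01)
//	t, _ := Timestamp(pb)        // back to a time.Time, always in the UTC location
//	// t.Equal(now) is true: seconds and nanos survive the round trip, and
//	// TimestampString(pb) yields the RFC 3339 form of the same instant.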

View File

@ -0,0 +1,160 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
/*
Package timestamp is a generated protocol buffer package.
It is generated from these files:
google/protobuf/timestamp.proto
It has these top-level messages:
Timestamp
*/
package timestamp
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
}

View File

@ -0,0 +1,133 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
// to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
int32 nanos = 2;
}

21
vendor/github.com/hashicorp/go-hclog/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 HashiCorp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

123
vendor/github.com/hashicorp/go-hclog/README.md generated vendored Normal file
View File

@ -0,0 +1,123 @@
# go-hclog
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
`go-hclog` is a package for Go that provides a simple key/value logging
interface for use in development and production environments.
It provides logging levels so that output can be decreased to the desired
amount, unlike the standard library `log` package.
It does not provide `Printf` style logging, only key/value logging that is
exposed as arguments to the logging functions for simplicity.
It provides a human readable output mode for use in development as well as
JSON output mode for production.
## Stability Note
While this library is fully open source and HashiCorp will be maintaining it
(since we are and will be making extensive use of it), the API and output
format is subject to minor changes as we fully bake and vet it in our projects.
This notice will be removed once it's fully integrated into our major projects
and no further changes are anticipated.
## Installation and Docs
Install using `go get github.com/hashicorp/go-hclog`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/go-hclog
## Usage
### Use the global logger
```go
hclog.Default().Info("hello world")
```
```text
2017-07-05T16:15:55.167-0700 [INFO ] hello world
```
(Note timestamps are removed in future examples for brevity.)
### Create a new logger
```go
appLogger := hclog.New(&hclog.LoggerOptions{
Name: "my-app",
Level: hclog.LevelFromString("DEBUG"),
})
```
### Emit an Info level message with 2 key/value pairs
```go
input := "5.5"
_, err := strconv.ParseInt(input, 10, 32)
if err != nil {
appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
}
```
```text
... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
```
### Create a new Logger for a major subsystem
```go
subsystemLogger := appLogger.Named("transport")
subsystemLogger.Info("we are transporting something")
```
```text
... [INFO ] my-app.transport: we are transporting something
```
Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
reflecting both the application and subsystem names.
### Create a new Logger with fixed key/value pairs
Using `With()` will include a specific key-value pair in all messages emitted
by that logger.
```go
requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
requestLogger := subsystemLogger.With("request", requestID)
requestLogger.Info("we are transporting a request")
```
```text
... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
```
This allows sub Loggers to be context specific without having to thread that
into all the callers.
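### Attach a stacktrace
A stacktrace captured with `Stacktrace()` can be passed as the final argument;
the logger detects it and appends it after the message (a minimal sketch,
reusing `appLogger` from above):
```go
appLogger.Error("something went wrong", hclog.Stacktrace())
```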
### Use this with code that uses the standard library logger
If you want to use the standard library's `log.Logger` interface you can wrap
`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
it with the familiar `Println()`, `Printf()`, etc. For example:
```go
stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
InferLevels: true,
})
// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
stdLogger.Printf("[DEBUG] %+v", stdLogger)
```
```text
... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
```
Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
specify `InferLevels: true`, you will not see any output here. You must change
`appLogger` to `DEBUG` to see output. See the docs for more information.
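### Emit JSON for production
The JSON output mode mentioned above is enabled with the `JSONFormat` option;
a minimal sketch:
```go
jsonLogger := hclog.New(&hclog.LoggerOptions{
	Name:       "my-app",
	JSONFormat: true,
})
jsonLogger.Info("request served", "path", "/metrics")
```
Each message is then written as one JSON object per line, with the message,
timestamp, level, and logger name carried under the `@message`, `@timestamp`,
`@level`, and `@module` keys respectively.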

34
vendor/github.com/hashicorp/go-hclog/global.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
package hclog
import (
"sync"
)
var (
protect sync.Once
def Logger
// The options used to create the Default logger. These are
// read only when the Default logger is created, so set them
// as soon as the process starts.
DefaultOptions = &LoggerOptions{
Level: DefaultLevel,
Output: DefaultOutput,
}
)
// Return a logger that is held globally. This can be a good starting
// place, and then you can use .With() and .Named() to create sub-loggers
// to be used in more specific contexts.
func Default() Logger {
protect.Do(func() {
def = New(DefaultOptions)
})
return def
}
// A short alias for Default()
func L() Logger {
return Default()
}

397
vendor/github.com/hashicorp/go-hclog/int.go generated vendored Normal file
View File

@ -0,0 +1,397 @@
package hclog
import (
"bufio"
"encoding"
"encoding/json"
"fmt"
"log"
"os"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
var (
_levelToBracket = map[Level]string{
Debug: "[DEBUG]",
Trace: "[TRACE]",
Info: "[INFO ]",
Warn: "[WARN ]",
Error: "[ERROR]",
}
)
// Given the options (nil for defaults), create a new Logger
func New(opts *LoggerOptions) Logger {
if opts == nil {
opts = &LoggerOptions{}
}
output := opts.Output
if output == nil {
output = os.Stderr
}
level := opts.Level
if level == NoLevel {
level = DefaultLevel
}
return &intLogger{
m: new(sync.Mutex),
json: opts.JSONFormat,
caller: opts.IncludeLocation,
name: opts.Name,
w: bufio.NewWriter(output),
level: level,
}
}
// The internal logger implementation. Internal in that it is defined entirely
// by this package.
type intLogger struct {
json bool
caller bool
name string
// this is a pointer so that it's shared by any derived loggers, since
// those derived loggers share the bufio.Writer as well.
m *sync.Mutex
w *bufio.Writer
level Level
implied []interface{}
}
// Make sure that intLogger is a Logger
var _ Logger = &intLogger{}
// The time format to use for logging. This is a version of RFC3339 that
// contains millisecond precision
const TimeFormat = "2006-01-02T15:04:05.000Z0700"
// Log a message and a set of key/value pairs if the given level is at
// or more severe than the threshold configured in the Logger.
func (z *intLogger) Log(level Level, msg string, args ...interface{}) {
if level < z.level {
return
}
t := time.Now()
z.m.Lock()
defer z.m.Unlock()
if z.json {
z.logJson(t, level, msg, args...)
} else {
z.log(t, level, msg, args...)
}
z.w.Flush()
}
// Clean up a path by returning only the last two segments of the path.
func trimCallerPath(path string) string {
// lovely borrowed from zap
// nb. To make sure we trim the path correctly on Windows too, we
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
// because the path given originates from Go stdlib, specifically
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
// Windows.
//
// See https://github.com/golang/go/issues/3335
// and https://github.com/golang/go/issues/18151
//
// for discussion on the issue on Go side.
//
// Find the last separator.
//
idx := strings.LastIndexByte(path, '/')
if idx == -1 {
return path
}
// Find the penultimate separator.
idx = strings.LastIndexByte(path[:idx], '/')
if idx == -1 {
return path
}
return path[idx+1:]
}
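// For example (illustrative):
//
//	trimCallerPath("github.com/hashicorp/go-hclog/int.go") // "go-hclog/int.go"
//
// while a path containing fewer than two separators is returned unchanged.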
// Non-JSON logging format function
func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) {
z.w.WriteString(t.Format(TimeFormat))
z.w.WriteByte(' ')
s, ok := _levelToBracket[level]
if ok {
z.w.WriteString(s)
} else {
z.w.WriteString("[UNKN ]")
}
if z.caller {
if _, file, line, ok := runtime.Caller(3); ok {
z.w.WriteByte(' ')
z.w.WriteString(trimCallerPath(file))
z.w.WriteByte(':')
z.w.WriteString(strconv.Itoa(line))
z.w.WriteByte(':')
}
}
z.w.WriteByte(' ')
if z.name != "" {
z.w.WriteString(z.name)
z.w.WriteString(": ")
}
z.w.WriteString(msg)
args = append(z.implied, args...)
var stacktrace CapturedStacktrace
if len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
stacktrace = cs
} else {
args = append(args, "<unknown>")
}
}
z.w.WriteByte(':')
FOR:
for i := 0; i < len(args); i = i + 2 {
var val string
switch st := args[i+1].(type) {
case string:
val = st
case int:
val = strconv.FormatInt(int64(st), 10)
case int64:
val = strconv.FormatInt(int64(st), 10)
case int32:
val = strconv.FormatInt(int64(st), 10)
case int16:
val = strconv.FormatInt(int64(st), 10)
case int8:
val = strconv.FormatInt(int64(st), 10)
case uint:
val = strconv.FormatUint(uint64(st), 10)
case uint64:
val = strconv.FormatUint(uint64(st), 10)
case uint32:
val = strconv.FormatUint(uint64(st), 10)
case uint16:
val = strconv.FormatUint(uint64(st), 10)
case uint8:
val = strconv.FormatUint(uint64(st), 10)
case CapturedStacktrace:
stacktrace = st
continue FOR
default:
val = fmt.Sprintf("%v", st)
}
z.w.WriteByte(' ')
z.w.WriteString(args[i].(string))
z.w.WriteByte('=')
if strings.ContainsAny(val, " \t\n\r") {
z.w.WriteByte('"')
z.w.WriteString(val)
z.w.WriteByte('"')
} else {
z.w.WriteString(val)
}
}
}
z.w.WriteString("\n")
if stacktrace != "" {
z.w.WriteString(string(stacktrace))
}
}
// JSON logging function
func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) {
vals := map[string]interface{}{
"@message": msg,
"@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"),
}
var levelStr string
switch level {
case Error:
levelStr = "error"
case Warn:
levelStr = "warn"
case Info:
levelStr = "info"
case Debug:
levelStr = "debug"
case Trace:
levelStr = "trace"
default:
levelStr = "all"
}
vals["@level"] = levelStr
if z.name != "" {
vals["@module"] = z.name
}
if z.caller {
if _, file, line, ok := runtime.Caller(3); ok {
vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
}
}
if len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
vals["stacktrace"] = cs
} else {
args = append(args, "<unknown>")
}
}
for i := 0; i < len(args); i = i + 2 {
if _, ok := args[i].(string); !ok {
// As this is the logging function, there's not much we can do here
// without injecting into the logs...
continue
}
val := args[i+1]
// Check if val is of type error. If the error type doesn't
// implement json.Marshaler or encoding.TextMarshaler
// then set val to err.Error() so that it gets marshaled
if err, ok := val.(error); ok {
switch err.(type) {
case json.Marshaler, encoding.TextMarshaler:
default:
val = err.Error()
}
}
vals[args[i].(string)] = val
}
}
err := json.NewEncoder(z.w).Encode(vals)
if err != nil {
panic(err)
}
}
// Emit the message and args at DEBUG level
func (z *intLogger) Debug(msg string, args ...interface{}) {
z.Log(Debug, msg, args...)
}
// Emit the message and args at TRACE level
func (z *intLogger) Trace(msg string, args ...interface{}) {
z.Log(Trace, msg, args...)
}
// Emit the message and args at INFO level
func (z *intLogger) Info(msg string, args ...interface{}) {
z.Log(Info, msg, args...)
}
// Emit the message and args at WARN level
func (z *intLogger) Warn(msg string, args ...interface{}) {
z.Log(Warn, msg, args...)
}
// Emit the message and args at ERROR level
func (z *intLogger) Error(msg string, args ...interface{}) {
z.Log(Error, msg, args...)
}
// Indicate that the logger would emit TRACE level logs
func (z *intLogger) IsTrace() bool {
return z.level == Trace
}
// Indicate that the logger would emit DEBUG level logs
func (z *intLogger) IsDebug() bool {
return z.level <= Debug
}
// Indicate that the logger would emit INFO level logs
func (z *intLogger) IsInfo() bool {
return z.level <= Info
}
// Indicate that the logger would emit WARN level logs
func (z *intLogger) IsWarn() bool {
return z.level <= Warn
}
// Indicate that the logger would emit ERROR level logs
func (z *intLogger) IsError() bool {
return z.level <= Error
}
// Return a sub-Logger for which every emitted log message will contain
// the given key/value pairs. This is used to create a context specific
// Logger.
func (z *intLogger) With(args ...interface{}) Logger {
var nz intLogger = *z
nz.implied = append(nz.implied, args...)
return &nz
}
// Create a new sub-Logger with a name descending from the current name.
// This is used to create a subsystem specific Logger.
func (z *intLogger) Named(name string) Logger {
var nz intLogger = *z
if nz.name != "" {
nz.name = nz.name + "." + name
}
return &nz
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy.
func (z *intLogger) ResetNamed(name string) Logger {
var nz intLogger = *z
nz.name = name
return &nz
}
// Create a *log.Logger that will send its data through this Logger. This
// allows packages that expect to be using the standard library log to actually
// use this logger.
func (z *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
if opts == nil {
opts = &StandardLoggerOptions{}
}
return log.New(&stdlogAdapter{z, opts.InferLevels}, "", 0)
}

138
vendor/github.com/hashicorp/go-hclog/log.go generated vendored Normal file
View File

@ -0,0 +1,138 @@
package hclog
import (
"io"
"log"
"os"
"strings"
)
var (
DefaultOutput = os.Stderr
DefaultLevel = Info
)
type Level int
const (
// This is a special level used to indicate that no level has been
// set and allow for a default to be used.
NoLevel Level = 0
// The most verbose level. Intended to be used for the tracing of actions
// in code, such as function enters/exits, etc.
Trace Level = 1
// For programmer lowlevel analysis.
Debug Level = 2
// For information about steady state operations.
Info Level = 3
// For information about rare but handled events.
Warn Level = 4
// For information about unrecoverable events.
Error Level = 5
)
// LevelFromString returns a Level type for the named log level, or "NoLevel" if
// the level string is invalid. This facilitates setting the log level via
// config or environment variable by name in a predictable way.
func LevelFromString(levelStr string) Level {
// We don't care about case. Accept "INFO" or "info"
levelStr = strings.ToLower(strings.TrimSpace(levelStr))
switch levelStr {
case "trace":
return Trace
case "debug":
return Debug
case "info":
return Info
case "warn":
return Warn
case "error":
return Error
default:
return NoLevel
}
}
// The main Logger interface. All code should be written against this interface only.
type Logger interface {
// Args are alternating key, val pairs
// keys must be strings
// vals can be any type, but display is implementation specific
// Emit a message and key/value pairs at the TRACE level
Trace(msg string, args ...interface{})
// Emit a message and key/value pairs at the DEBUG level
Debug(msg string, args ...interface{})
// Emit a message and key/value pairs at the INFO level
Info(msg string, args ...interface{})
// Emit a message and key/value pairs at the WARN level
Warn(msg string, args ...interface{})
// Emit a message and key/value pairs at the ERROR level
Error(msg string, args ...interface{})
// Indicate if TRACE logs would be emitted. This and the other Is* guards
// are used to elide expensive logging code based on the current level.
IsTrace() bool
// Indicate if DEBUG logs would be emitted. This and the other Is* guards
IsDebug() bool
// Indicate if INFO logs would be emitted. This and the other Is* guards
IsInfo() bool
// Indicate if WARN logs would be emitted. This and the other Is* guards
IsWarn() bool
// Indicate if ERROR logs would be emitted. This and the other Is* guards
IsError() bool
// Creates a sublogger that will always have the given key/value pairs
With(args ...interface{}) Logger
// Create a logger that will prepend the name string on the front of all messages.
// If the logger already has a name, the new value will be appended to the current
// name. That way, a major subsystem can use this to decorate all its own logs
// without losing context.
Named(name string) Logger
// Create a logger that will prepend the name string on the front of all messages.
// This sets the name of the logger to the value directly, unlike Named which honors
// the current name as well.
ResetNamed(name string) Logger
// Return a value that conforms to the stdlib log.Logger interface
StandardLogger(opts *StandardLoggerOptions) *log.Logger
}
type StandardLoggerOptions struct {
// Indicate that some minimal parsing should be done on strings to try
// and detect their level and re-emit them.
// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
// [DEBUG], stripping the prefix off before re-emitting the message.
InferLevels bool
}
type LoggerOptions struct {
// Name of the subsystem to prefix logs with
Name string
// The threshold for the logger. Anything less severe is suppressed
Level Level
// Where to write the logs to. Defaults to os.Stderr if nil
Output io.Writer
// Control if the output should be in JSON.
JSONFormat bool
// Include file and line information in each log line
IncludeLocation bool
}

108
vendor/github.com/hashicorp/go-hclog/stacktrace.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package hclog
import (
"bytes"
"runtime"
"strconv"
"strings"
"sync"
)
var (
_stacktraceIgnorePrefixes = []string{
"runtime.goexit",
"runtime.main",
}
_stacktracePool = sync.Pool{
New: func() interface{} {
return newProgramCounters(64)
},
}
)
// A stacktrace gathered by a previous call to Stacktrace. If passed
// to a logging function, the stacktrace will be appended.
type CapturedStacktrace string
// Gather a stacktrace of the current goroutine and return it to be passed
// to a logging function.
func Stacktrace() CapturedStacktrace {
return CapturedStacktrace(takeStacktrace())
}
func takeStacktrace() string {
programCounters := _stacktracePool.Get().(*programCounters)
defer _stacktracePool.Put(programCounters)
var buffer bytes.Buffer
for {
// Skip the call to runtime.Callers and takeStacktrace so that the
// program counters start at the caller of takeStacktrace.
n := runtime.Callers(2, programCounters.pcs)
if n < cap(programCounters.pcs) {
programCounters.pcs = programCounters.pcs[:n]
break
}
// Don't put the too-short counter slice back into the pool; this lets
// the pool adjust if we consistently take deep stacktraces.
programCounters = newProgramCounters(len(programCounters.pcs) * 2)
}
i := 0
frames := runtime.CallersFrames(programCounters.pcs)
for frame, more := frames.Next(); more; frame, more = frames.Next() {
if shouldIgnoreStacktraceFunction(frame.Function) {
continue
}
if i != 0 {
buffer.WriteByte('\n')
}
i++
buffer.WriteString(frame.Function)
buffer.WriteByte('\n')
buffer.WriteByte('\t')
buffer.WriteString(frame.File)
buffer.WriteByte(':')
buffer.WriteString(strconv.Itoa(int(frame.Line)))
}
return buffer.String()
}
func shouldIgnoreStacktraceFunction(function string) bool {
for _, prefix := range _stacktraceIgnorePrefixes {
if strings.HasPrefix(function, prefix) {
return true
}
}
return false
}
type programCounters struct {
pcs []uintptr
}
func newProgramCounters(size int) *programCounters {
return &programCounters{make([]uintptr, size)}
}

62
vendor/github.com/hashicorp/go-hclog/stdlog.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
package hclog
import (
"bytes"
"strings"
)
// Provides an io.Writer to shim the data out of *log.Logger
// and back into our Logger. This is basically the only way to
// build upon *log.Logger.
type stdlogAdapter struct {
hl Logger
inferLevels bool
}
// Take the data, infer the levels if configured, and send it through
// a regular Logger
func (s *stdlogAdapter) Write(data []byte) (int, error) {
str := string(bytes.TrimRight(data, " \t\n"))
if s.inferLevels {
level, str := s.pickLevel(str)
switch level {
case Trace:
s.hl.Trace(str)
case Debug:
s.hl.Debug(str)
case Info:
s.hl.Info(str)
case Warn:
s.hl.Warn(str)
case Error:
s.hl.Error(str)
default:
s.hl.Info(str)
}
} else {
s.hl.Info(str)
}
return len(data), nil
}
// Detect, based on conventions, what log level this is
func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
switch {
case strings.HasPrefix(str, "[DEBUG]"):
return Debug, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[TRACE]"):
return Trace, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[INFO]"):
return Info, strings.TrimSpace(str[6:])
case strings.HasPrefix(str, "[WARN]"):
return Warn, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[ERROR]"):
return Error, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[ERR]"):
return Error, strings.TrimSpace(str[5:])
default:
return Info, str
}
}
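// For example (illustrative): a standard library logger writing
// "[WARN] disk space low" through this adapter with inferLevels enabled
// is re-emitted as hl.Warn("disk space low").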

353
vendor/github.com/hashicorp/go-plugin/LICENSE generated vendored Normal file
View File

@ -0,0 +1,353 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

168
vendor/github.com/hashicorp/go-plugin/README.md generated vendored Normal file
View File

@ -0,0 +1,168 @@
# Go Plugin System over RPC
`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
that has been in use by HashiCorp tooling for over 4 years. While initially
created for [Packer](https://www.packer.io), it is additionally in use by
[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and
[Vault](https://www.vaultproject.io).
While the plugin system is over RPC, it is currently only designed to work
over a local [reliable] network. Plugins over a real network are not supported
and will lead to unexpected behavior.
This plugin system has been used on millions of machines across many different
projects and has proven to be battle hardened and ready for production use.
## Features
The HashiCorp plugin system supports a number of features:
**Plugins are Go interface implementations.** This makes writing and consuming
plugins feel very natural. To a plugin author: you just implement an
interface as if it were going to run in the same process. For a plugin user:
you just use and call functions on an interface as if it were in the same
process. This plugin system handles the communication in between.
**Cross-language support.** Plugins can be written (and consumed) by
almost every major language. This library supports serving plugins via
[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written
in any language.
**Complex arguments and return values are supported.** This library
provides APIs for handling complex arguments and return values such
as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
(`MuxBroker`) for creating new connections between the client/server to
serve additional interfaces or transfer raw data.
**Bidirectional communication.** Because the plugin system supports
complex arguments, the host process can send it interface implementations
and the plugin can call back into the host process.
**Built-in Logging.** Any plugins that use the `log` standard library
will have log data automatically sent to the host process. The host
process will mirror this output prefixed with the path to the plugin
binary. This makes debugging with plugins simple. If the host system
uses [hclog](https://github.com/hashicorp/go-hclog) then the log data
will be structured. If the plugin also uses hclog, logs from the plugin
will be sent to the host hclog and be structured.
**Protocol Versioning.** A very basic "protocol version" is supported that
can be incremented to invalidate any previous plugins. This is useful when
interface signatures are changing, protocol level changes are necessary,
etc. When a protocol version is incompatible, a human friendly error
message is shown to the end user.
**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue
to use stdout/stderr as usual and the output will get mirrored back to
the host process. The host process can control what `io.Writer` these
streams go to, to prevent this from happening.
**TTY Preservation.** Plugin subprocesses are connected to the identical
stdin file descriptor as the host process, allowing software that requires
a TTY to work. For example, a plugin can execute `ssh` and even though there
are multiple subprocesses and RPC happening, it will look and act perfectly
to the end user.
**Host upgrade while a plugin is running.** Plugins can be "reattached"
so that the host process can be upgraded while the plugin is still running.
This requires the host/plugin to know this is possible and daemonize
properly. `NewClient` takes a `ReattachConfig` to determine if and how to
reattach.
**Cryptographically Secure Plugins.** Plugins can be verified with an expected
checksum and RPC communications can be configured to use TLS. The host process
must be properly secured to protect this configuration.
## Architecture
The HashiCorp plugin system works by launching subprocesses and communicating
over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single
connection is made between any plugin and the host process. For net/rpc-based
plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux)
library to multiplex any other connections on top. For gRPC-based plugins,
the HTTP2 protocol handles multiplexing.
This architecture has a number of benefits:
* Plugins can't crash your host process: A panic in a plugin doesn't
panic the plugin user.
* Plugins are very easy to write: just write a Go application and `go build`.
Or use any other language to write a gRPC server with a tiny amount of
boilerplate to support go-plugin.
* Plugins are very easy to install: just put the binary in a location where
the host will find it (depends on the host but this library also provides
helpers), and the plugin host handles the rest.
* Plugins can be relatively secure: The plugin only has access to the
interfaces and args given to it, not to the entire memory space of the
process. Additionally, go-plugin can communicate with the plugin over
TLS.
## Usage
To use the plugin system, you must take the following steps. These are
high-level steps that must be done. Examples are available in the
`examples/` directory.
1. Choose the interface(s) you want to expose for plugins.
2. For each interface, implement an implementation of that interface
that communicates over a `net/rpc` connection or over a
[gRPC](http://www.grpc.io) connection or both. You'll have to implement
both a client and server implementation.
3. Create a `Plugin` implementation that knows how to create the RPC
client/server for a given plugin type.
4. Plugin authors call `plugin.Serve` to serve a plugin from the
`main` function.
5. Plugin users use `plugin.Client` to launch a subprocess and request
an interface implementation over RPC.
That's it! In practice, step 2 is the most tedious and time-consuming step.
Even so, it isn't very difficult and you can see examples in the `examples/`
directory as well as throughout our various open source projects.
For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
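A rough sketch of steps 4 and 5 (`handshake`, `pluginMap`, and the `"greeter"`
name here are placeholders you would define as part of steps 1-3):
```go
// Plugin author (step 4): serve the plugin from main().
func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshake, // a shared plugin.HandshakeConfig
		Plugins:         pluginMap, // map[string]plugin.Plugin
	})
}
```
```go
// Plugin user (step 5): launch the subprocess and dispense an implementation.
client := plugin.NewClient(&plugin.ClientConfig{
	Cmd:             exec.Command("./my-plugin"),
	HandshakeConfig: handshake,
	Plugins:         pluginMap,
})
defer client.Kill()
rpcClient, err := client.Client()
if err != nil {
	log.Fatal(err)
}
greeter, err := rpcClient.Dispense("greeter")
if err != nil {
	log.Fatal(err)
}
_ = greeter // type-assert to your plugin interface and call it
```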
## Roadmap
Our plugin system is constantly evolving. As we use the plugin system for
new projects or for new features in existing projects, we constantly find
improvements we can make.
At this point in time, the roadmap for the plugin system is:
**Semantic Versioning.** Plugins will be able to declare a semantic version.
The plugin system will give host processes a way to constrain plugin versions.
This is in addition to the protocol versioning already present, which is
more for larger underlying changes.
**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter)
to support automatic download + install of plugins. Paired with cryptographically
secure plugins (above), we can make this a safe operation for an amazing
user experience.
## What About Shared Libraries?
When we started using plugins (late 2012, early 2013), plugins over RPC
were the only option since Go didn't support dynamic library loading. Today,
Go still doesn't support dynamic library loading, though the Go team intends to add it.
Since 2012, our plugin system has been stabilized by millions of users,
and has many benefits we've come to value greatly.
For example, we intend to use this plugin system in
[Vault](https://www.vaultproject.io), and dynamic library loading will
simply never be acceptable in Vault for security reasons. That is an extreme
example, but we believe our plugin system has more upsides than downsides
compared to dynamic library loading, and since we've had it built and tested
for years, we'll likely continue to use it.
Shared libraries have one major advantage over our system: much higher
performance. In real-world scenarios across our various tools, however,
we've never needed more performance out of our plugin system, and it has
sustained very high throughput, so this isn't a concern for us at the moment.

772
vendor/github.com/hashicorp/go-plugin/client.go generated vendored Normal file
View File

@ -0,0 +1,772 @@
package plugin
import (
"bufio"
"crypto/subtle"
"crypto/tls"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
hclog "github.com/hashicorp/go-hclog"
)
// If this is 1, then we've called CleanupClients. This can be used
// by plugin RPC implementations to change error behavior since you
// can expect network connection errors at this point. This should be
// read by using sync/atomic.
var Killed uint32 = 0
// This is a slice of the "managed" clients which are cleaned up when
// calling Cleanup
var managedClients = make([]*Client, 0, 5)
var managedClientsLock sync.Mutex
// Error types
var (
// ErrProcessNotFound is returned when a client is instantiated to
// reattach to an existing process and it isn't found.
ErrProcessNotFound = errors.New("Reattachment process not found")
// ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match
// the one provided in the SecureConfig.
ErrChecksumsDoNotMatch = errors.New("checksums did not match")
// ErrSecureNoChecksum is returned when an empty checksum is provided to the
// SecureConfig.
ErrSecureConfigNoChecksum = errors.New("no checksum provided")
// ErrSecureNoHash is returned when a nil Hash object is provided to the
// SecureConfig.
ErrSecureConfigNoHash = errors.New("no hash implementation provided")
// ErrSecureConfigAndReattach is returned when both Reattach and
// SecureConfig are set.
ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
)
// Client handles the lifecycle of a plugin application. It launches
// plugins, connects to them, dispenses interface implementations, and handles
// killing the process.
//
// Plugin hosts should use one Client for each plugin executable. To
// dispense a plugin type, use the `Client.Client` function, and then
// call `Dispense`. This awkward API is mostly historical but is used to split
// the client that deals with subprocess management and the client that
// does RPC management.
//
// See NewClient and ClientConfig for using a Client.
type Client struct {
config *ClientConfig
exited bool
doneLogging chan struct{}
l sync.Mutex
address net.Addr
process *os.Process
client ClientProtocol
protocol Protocol
logger hclog.Logger
}
// ClientConfig is the configuration used to initialize a new
// plugin client. After being used to initialize a plugin client,
// that configuration must not be modified again.
type ClientConfig struct {
// HandshakeConfig is the configuration that must match servers.
HandshakeConfig
// Plugins are the plugins that can be consumed.
Plugins map[string]Plugin
// One of the following must be set, but not both.
//
// Cmd is the unstarted subprocess for starting the plugin. If this is
// set, then the Client starts the plugin process on its own and connects
// to it.
//
// Reattach is configuration for reattaching to an existing plugin process
// that is already running. This isn't common.
Cmd *exec.Cmd
Reattach *ReattachConfig
// SecureConfig is configuration for verifying the integrity of the
// executable. It can not be used with Reattach.
SecureConfig *SecureConfig
// TLSConfig is used to enable TLS on the RPC client.
TLSConfig *tls.Config
// Managed represents if the client should be managed by the
// plugin package or not. If true, then by calling CleanupClients,
// it will automatically be cleaned up. Otherwise, the client
// user is fully responsible for making sure to Kill all plugin
// clients. By default the client is _not_ managed.
Managed bool
// The minimum and maximum port to use for communicating with
// the subprocess. If not set, this defaults to 10,000 and 25,000
// respectively.
MinPort, MaxPort uint
// StartTimeout is the timeout to wait for the plugin to say it
// has started successfully.
StartTimeout time.Duration
// If non-nil, then the stderr of the client will be written to here
// (as well as the log). This is the original os.Stderr of the subprocess.
// This isn't the output of synced stderr.
Stderr io.Writer
// SyncStdout, SyncStderr can be set to override the
// respective os.Std* values in the plugin. Care should be taken to
// avoid races here.
//
// If the default values (nil) are used, then this package will not
// sync any of these streams; the plugin's synced output is discarded.
SyncStdout io.Writer
SyncStderr io.Writer
// AllowedProtocols is a list of allowed protocols. If this isn't set,
// then only netrpc is allowed. This is so that older go-plugin systems
// can show friendly errors if they see a plugin with an unknown
// protocol.
//
// By setting this, you can cause an error immediately on plugin start
// if an unsupported protocol is used with a good error message.
//
// If this isn't set at all (nil value), then only net/rpc is accepted.
// This is done for legacy reasons. You must explicitly opt-in to
// new protocols.
AllowedProtocols []Protocol
// Logger is the logger that the client will use. If none is provided,
// it will default to hclog's default logger.
Logger hclog.Logger
}
// ReattachConfig is used to configure a client to reattach to an
// already-running plugin process. You can retrieve this information by
// calling ReattachConfig on Client.
type ReattachConfig struct {
Protocol Protocol
Addr net.Addr
Pid int
}
// SecureConfig is used to configure a client to verify the integrity of an
// executable before running. It does this by verifying the checksum is
// expected. Hash is used to specify the hashing method to use when checksumming
// the file. The configuration is verified by the client by calling the
// SecureConfig.Check() function.
//
// The host process should ensure the checksum was provided by a trusted and
// authoritative source. The binary should be installed in such a way that it
// can not be modified by an unauthorized user between the time of this check
// and the time of execution.
type SecureConfig struct {
Checksum []byte
Hash hash.Hash
}
// Check takes the filepath to an executable and returns true if the checksum of
// the file matches the checksum provided in the SecureConfig.
func (s *SecureConfig) Check(filePath string) (bool, error) {
if len(s.Checksum) == 0 {
return false, ErrSecureConfigNoChecksum
}
if s.Hash == nil {
return false, ErrSecureConfigNoHash
}
file, err := os.Open(filePath)
if err != nil {
return false, err
}
defer file.Close()
_, err = io.Copy(s.Hash, file)
if err != nil {
return false, err
}
sum := s.Hash.Sum(nil)
return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
}
// This makes sure all the managed subprocesses are killed and properly
// logged. This should be called before the parent process running the
// plugins exits.
//
// This must only be called _once_.
func CleanupClients() {
// Set the killed to true so that we don't get unexpected panics
atomic.StoreUint32(&Killed, 1)
// Kill all the managed clients in parallel and use a WaitGroup
// to wait for them all to finish up.
var wg sync.WaitGroup
managedClientsLock.Lock()
for _, client := range managedClients {
wg.Add(1)
go func(client *Client) {
client.Kill()
wg.Done()
}(client)
}
managedClientsLock.Unlock()
log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...")
wg.Wait()
}
// Creates a new plugin client which manages the lifecycle of an external
// plugin and gets the address for the RPC connection.
//
// The client must be cleaned up at some point by calling Kill(). If
// the client is a managed client (ClientConfig.Managed set to true) you
// can just call CleanupClients at the end of your program and they will
// be properly cleaned.
func NewClient(config *ClientConfig) (c *Client) {
if config.MinPort == 0 && config.MaxPort == 0 {
config.MinPort = 10000
config.MaxPort = 25000
}
if config.StartTimeout == 0 {
config.StartTimeout = 1 * time.Minute
}
if config.Stderr == nil {
config.Stderr = ioutil.Discard
}
if config.SyncStdout == nil {
config.SyncStdout = ioutil.Discard
}
if config.SyncStderr == nil {
config.SyncStderr = ioutil.Discard
}
if config.AllowedProtocols == nil {
config.AllowedProtocols = []Protocol{ProtocolNetRPC}
}
if config.Logger == nil {
config.Logger = hclog.New(&hclog.LoggerOptions{
Output: hclog.DefaultOutput,
Level: hclog.Trace,
Name: "plugin",
})
}
c = &Client{
config: config,
logger: config.Logger,
}
if config.Managed {
managedClientsLock.Lock()
managedClients = append(managedClients, c)
managedClientsLock.Unlock()
}
return
}
// Client returns the protocol client for this connection.
//
// Subsequent calls to this will return the same client.
func (c *Client) Client() (ClientProtocol, error) {
_, err := c.Start()
if err != nil {
return nil, err
}
c.l.Lock()
defer c.l.Unlock()
if c.client != nil {
return c.client, nil
}
switch c.protocol {
case ProtocolNetRPC:
c.client, err = newRPCClient(c)
case ProtocolGRPC:
c.client, err = newGRPCClient(c)
default:
return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
}
if err != nil {
c.client = nil
return nil, err
}
return c.client, nil
}
// Tells whether or not the underlying process has exited.
func (c *Client) Exited() bool {
c.l.Lock()
defer c.l.Unlock()
return c.exited
}
// End the executing subprocess (if it is running) and perform any cleanup
// tasks necessary such as capturing any remaining logs and so on.
//
// This method blocks until the process successfully exits.
//
// This method can safely be called multiple times.
func (c *Client) Kill() {
// Grab a lock to read some private fields.
c.l.Lock()
process := c.process
addr := c.address
doneCh := c.doneLogging
c.l.Unlock()
// If there is no process, we never started anything. Nothing to kill.
if process == nil {
return
}
// We need to check for address here. It is possible that the plugin
// started (process != nil) but has no address (addr == nil) if the
// plugin failed at startup. If we do have an address, we need to close
// the plugin net connections.
graceful := false
if addr != nil {
// Close the client to cleanly exit the process.
client, err := c.Client()
if err == nil {
err = client.Close()
// If there is no error, then we attempt to wait for a graceful
// exit. If there was an error, we assume that graceful cleanup
// won't happen and just force kill.
graceful = err == nil
if err != nil {
// If there was an error just log it. We're going to force
// kill in a moment anyways.
c.logger.Warn("error closing client during Kill", "err", err)
}
}
}
// If we're attempting a graceful exit, then we wait for a short period
// of time to allow that to happen. To wait for this we just wait on the
// doneCh which would be closed if the process exits.
if graceful {
select {
case <-doneCh:
return
case <-time.After(250 * time.Millisecond):
}
}
// If graceful exiting failed, just kill it
process.Kill()
// Wait for the client to finish logging so we have a complete log
<-doneCh
}
// Starts the underlying subprocess, communicating with it to negotiate
// a port for RPC connections, and returning the address to connect via RPC.
//
// This method is safe to call multiple times. Subsequent calls have no effect.
// Once a client has been started once, it cannot be started again, even if
// it was killed.
func (c *Client) Start() (addr net.Addr, err error) {
c.l.Lock()
defer c.l.Unlock()
if c.address != nil {
return c.address, nil
}
// Exactly one of Cmd or Reattach must be set, otherwise it is an error. We
// wrap this in a {} for scoping reasons, hopeful that the escape
// analysis will pop the stack here.
{
cmdSet := c.config.Cmd != nil
attachSet := c.config.Reattach != nil
secureSet := c.config.SecureConfig != nil
if cmdSet == attachSet {
return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
}
if secureSet && attachSet {
return nil, ErrSecureConfigAndReattach
}
}
// Create the logging channel for when we kill
c.doneLogging = make(chan struct{})
if c.config.Reattach != nil {
// Verify the process still exists. If not, then it is an error
p, err := os.FindProcess(c.config.Reattach.Pid)
if err != nil {
return nil, err
}
// Attempt to connect to the addr since on Unix systems FindProcess
// doesn't actually return an error if it can't find the process.
conn, err := net.Dial(
c.config.Reattach.Addr.Network(),
c.config.Reattach.Addr.String())
if err != nil {
p.Kill()
return nil, ErrProcessNotFound
}
conn.Close()
// Goroutine to mark exit status
go func(pid int) {
// Wait for the process to die
pidWait(pid)
// Log so we can see it
c.logger.Debug("reattached plugin process exited")
// Mark it
c.l.Lock()
defer c.l.Unlock()
c.exited = true
// Close the logging channel since that doesn't work on reattach
close(c.doneLogging)
}(p.Pid)
// Set the address and process
c.address = c.config.Reattach.Addr
c.process = p
c.protocol = c.config.Reattach.Protocol
if c.protocol == "" {
// Default the protocol to net/rpc for backwards compatibility
c.protocol = ProtocolNetRPC
}
return c.address, nil
}
env := []string{
fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
}
stdout_r, stdout_w := io.Pipe()
stderr_r, stderr_w := io.Pipe()
cmd := c.config.Cmd
cmd.Env = append(cmd.Env, os.Environ()...)
cmd.Env = append(cmd.Env, env...)
cmd.Stdin = os.Stdin
cmd.Stderr = stderr_w
cmd.Stdout = stdout_w
if c.config.SecureConfig != nil {
if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
return nil, fmt.Errorf("error verifying checksum: %s", err)
} else if !ok {
return nil, ErrChecksumsDoNotMatch
}
}
c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args)
err = cmd.Start()
if err != nil {
return
}
// Set the process
c.process = cmd.Process
// Make sure the command is properly cleaned up if there is an error
defer func() {
r := recover()
if err != nil || r != nil {
cmd.Process.Kill()
}
if r != nil {
panic(r)
}
}()
// Start goroutine to wait for process to exit
exitCh := make(chan struct{})
go func() {
// Make sure we close the write end of our stderr/stdout so
// that the readers send EOF properly.
defer stderr_w.Close()
defer stdout_w.Close()
// Wait for the command to end.
cmd.Wait()
// Log and make sure to flush the logs right away
c.logger.Debug("plugin process exited", "path", cmd.Path)
os.Stderr.Sync()
// Mark that we exited
close(exitCh)
// Set that we exited, which takes a lock
c.l.Lock()
defer c.l.Unlock()
c.exited = true
}()
// Start goroutine that logs the stderr
go c.logStderr(stderr_r)
// Start a goroutine that is going to be reading the lines
// out of stdout
linesCh := make(chan []byte)
go func() {
defer close(linesCh)
buf := bufio.NewReader(stdout_r)
for {
line, err := buf.ReadBytes('\n')
if line != nil {
linesCh <- line
}
if err == io.EOF {
return
}
}
}()
// Make sure after we exit we read the lines from stdout forever
// so they don't block since it is an io.Pipe
defer func() {
go func() {
for range linesCh {
}
}()
}()
// Some channels for the next step
timeout := time.After(c.config.StartTimeout)
// Start looking for the address
c.logger.Debug("waiting for RPC address", "path", cmd.Path)
select {
case <-timeout:
err = errors.New("timeout while waiting for plugin to start")
case <-exitCh:
err = errors.New("plugin exited before we could connect")
case lineBytes := <-linesCh:
// Trim the line and split by "|" in order to get the parts of
// the output.
line := strings.TrimSpace(string(lineBytes))
parts := strings.SplitN(line, "|", 6)
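// Illustrative handshake line: "1|1|tcp|127.0.0.1:10001|grpc" yields parts =
// [core-protocol, app-protocol, network, address, server-protocol].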
if len(parts) < 4 {
err = fmt.Errorf(
"Unrecognized remote plugin message: %s\n\n"+
"This usually means that the plugin is either invalid or simply\n"+
"needs to be recompiled to support the latest protocol.", line)
return
}
// Check the core protocol. Wrapped in a {} for scoping.
{
var coreProtocol int64
coreProtocol, err = strconv.ParseInt(parts[0], 10, 0)
if err != nil {
err = fmt.Errorf("Error parsing core protocol version: %s", err)
return
}
if int(coreProtocol) != CoreProtocolVersion {
err = fmt.Errorf("Incompatible core API version with plugin. "+
"Plugin version: %s, Ours: %d\n\n"+
"To fix this, the plugin usually only needs to be recompiled.\n"+
"Please report this to the plugin author.", parts[0], CoreProtocolVersion)
return
}
}
// Parse the protocol version
var protocol int64
protocol, err = strconv.ParseInt(parts[1], 10, 0)
if err != nil {
err = fmt.Errorf("Error parsing protocol version: %s", err)
return
}
// Test the API version
if uint(protocol) != c.config.ProtocolVersion {
err = fmt.Errorf("Incompatible API version with plugin. "+
"Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion)
return
}
switch parts[2] {
case "tcp":
addr, err = net.ResolveTCPAddr("tcp", parts[3])
case "unix":
addr, err = net.ResolveUnixAddr("unix", parts[3])
default:
err = fmt.Errorf("Unknown address type: %s", parts[3])
}
// If we have a server type, then record that. We default to net/rpc
// for backwards compatibility.
c.protocol = ProtocolNetRPC
if len(parts) >= 5 {
c.protocol = Protocol(parts[4])
}
found := false
for _, p := range c.config.AllowedProtocols {
if p == c.protocol {
found = true
break
}
}
if !found {
err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v",
c.protocol, c.config.AllowedProtocols)
return
}
}
c.address = addr
return
}
// ReattachConfig returns the information that must be provided to NewClient
// to reattach to the plugin process that this client started. This is
// useful for plugins that detach from their parent process.
//
// If this returns nil then the process hasn't been started yet. Please
// call Start or Client before calling this.
func (c *Client) ReattachConfig() *ReattachConfig {
c.l.Lock()
defer c.l.Unlock()
if c.address == nil {
return nil
}
if c.config.Cmd != nil && c.config.Cmd.Process == nil {
return nil
}
// If we connected via reattach, just return the information as-is
if c.config.Reattach != nil {
return c.config.Reattach
}
return &ReattachConfig{
Protocol: c.protocol,
Addr: c.address,
Pid: c.config.Cmd.Process.Pid,
}
}
// Protocol returns the protocol of the server on the remote end. This will
// start the plugin process if it isn't already started. Errors from
// starting the plugin are suppressed and ProtocolInvalid is returned. It
// is recommended you call Start explicitly before calling Protocol to ensure
// no errors occur.
func (c *Client) Protocol() Protocol {
_, err := c.Start()
if err != nil {
return ProtocolInvalid
}
return c.protocol
}
// dialer is compatible with grpc.WithDialer and creates the connection
// to the plugin.
func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
// Connect to the client
conn, err := net.Dial(c.address.Network(), c.address.String())
if err != nil {
return nil, err
}
if tcpConn, ok := conn.(*net.TCPConn); ok {
// Make sure to set keep alive so that the connection doesn't die
tcpConn.SetKeepAlive(true)
}
// If we have a TLS config we wrap our connection. We only do this
// for net/rpc since gRPC uses its own mechanism for TLS.
if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {
conn = tls.Client(conn, c.config.TLSConfig)
}
return conn, nil
}
func (c *Client) logStderr(r io.Reader) {
bufR := bufio.NewReader(r)
for {
line, err := bufR.ReadString('\n')
if line != "" {
c.config.Stderr.Write([]byte(line))
line = strings.TrimRightFunc(line, unicode.IsSpace)
l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
entry, err := parseJSON(line)
// If output is not JSON format, print directly to Debug
if err != nil {
l.Debug(line)
} else {
out := flattenKVPairs(entry.KVPairs)
l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat))
switch hclog.LevelFromString(entry.Level) {
case hclog.Trace:
l.Trace(entry.Message, out...)
case hclog.Debug:
l.Debug(entry.Message, out...)
case hclog.Info:
l.Info(entry.Message, out...)
case hclog.Warn:
l.Warn(entry.Message, out...)
case hclog.Error:
l.Error(entry.Message, out...)
}
}
}
if err == io.EOF {
break
}
}
// Flag that we've completed logging for others
close(c.doneLogging)
}

28
vendor/github.com/hashicorp/go-plugin/discover.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
package plugin
import (
"path/filepath"
)
// Discover discovers plugins that are in a given directory.
//
// The directory doesn't need to be absolute. For example, "." will work fine.
//
// This currently assumes any file matching the glob is a plugin.
// In the future this may be smarter about checking that a file is
// executable and so on.
//
// TODO: test
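//
// For example (illustrative): Discover("*-plugin", "./plugins") returns paths
// such as "./plugins/greeter-plugin".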
func Discover(glob, dir string) ([]string, error) {
var err error
// Make the directory absolute if it isn't already
if !filepath.IsAbs(dir) {
dir, err = filepath.Abs(dir)
if err != nil {
return nil, err
}
}
return filepath.Glob(filepath.Join(dir, glob))
}

24
vendor/github.com/hashicorp/go-plugin/error.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package plugin
// This is a type that wraps error types so that they can be messaged
// across RPC channels. Since "error" is an interface, we can't always
// gob-encode the underlying structure. This is a valid error interface
// implementer that we will push across.
type BasicError struct {
Message string
}
// NewBasicError is used to create a BasicError.
//
// err is allowed to be nil.
func NewBasicError(err error) *BasicError {
if err == nil {
return nil
}
return &BasicError{err.Error()}
}
func (e *BasicError) Error() string {
return e.Message
}

83
vendor/github.com/hashicorp/go-plugin/grpc_client.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
package plugin
import (
"fmt"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/health/grpc_health_v1"
)
// newGRPCClient creates a new GRPCClient. The Client argument is expected
// to be successfully started already with a lock held.
func newGRPCClient(c *Client) (*GRPCClient, error) {
// Build dialing options.
opts := make([]grpc.DialOption, 0, 5)
// We use a custom dialer so that we can connect over unix domain sockets
opts = append(opts, grpc.WithDialer(c.dialer))
// go-plugin expects to block the connection
opts = append(opts, grpc.WithBlock())
// Fail right away
opts = append(opts, grpc.FailOnNonTempDialError(true))
// If we have no TLS configuration set, we need to explicitly tell grpc
// that we're connecting with an insecure connection.
if c.config.TLSConfig == nil {
opts = append(opts, grpc.WithInsecure())
} else {
opts = append(opts, grpc.WithTransportCredentials(
credentials.NewTLS(c.config.TLSConfig)))
}
// Connect. Note the first parameter is unused because we use a custom
// dialer that has the state to see the address.
conn, err := grpc.Dial("unused", opts...)
if err != nil {
return nil, err
}
return &GRPCClient{
Conn: conn,
Plugins: c.config.Plugins,
}, nil
}
// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types.
type GRPCClient struct {
Conn *grpc.ClientConn
Plugins map[string]Plugin
}
// ClientProtocol impl.
func (c *GRPCClient) Close() error {
return c.Conn.Close()
}
// ClientProtocol impl.
func (c *GRPCClient) Dispense(name string) (interface{}, error) {
raw, ok := c.Plugins[name]
if !ok {
return nil, fmt.Errorf("unknown plugin type: %s", name)
}
p, ok := raw.(GRPCPlugin)
if !ok {
return nil, fmt.Errorf("plugin %q doesn't support gRPC", name)
}
return p.GRPCClient(c.Conn)
}
// ClientProtocol impl.
func (c *GRPCClient) Ping() error {
client := grpc_health_v1.NewHealthClient(c.Conn)
_, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{
Service: GRPCServiceName,
})
return err
}

115
vendor/github.com/hashicorp/go-plugin/grpc_server.go generated vendored Normal file
View File

@ -0,0 +1,115 @@
package plugin
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
)
// GRPCServiceName is the name of the service that the health check should
// return as passing.
const GRPCServiceName = "plugin"
// DefaultGRPCServer can be used with the "GRPCServer" field for Server
// as a default factory method to create a gRPC server with no extra options.
func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
return grpc.NewServer(opts...)
}
// GRPCServer is a ServerType implementation that serves plugins over
// gRPC. This allows plugins to easily be written for other languages.
//
// The GRPCServer outputs a custom configuration as a base64-encoded
// JSON structure represented by the GRPCServerConfig config structure.
type GRPCServer struct {
// Plugins are the list of plugins to serve.
Plugins map[string]Plugin
// Server is the actual server that will accept connections. This
// will be used for plugin registration as well.
Server func([]grpc.ServerOption) *grpc.Server
// TLS should be the TLS configuration if available. If this is nil,
// the connection will not have transport security.
TLS *tls.Config
// DoneCh is the channel that is closed when this server has exited.
DoneCh chan struct{}
// Stdout/StderrLis are the readers for stdout/stderr that will be copied
// to the stdout/stderr connection that is output.
Stdout io.Reader
Stderr io.Reader
config GRPCServerConfig
server *grpc.Server
}
// ServerProtocol impl.
func (s *GRPCServer) Init() error {
// Create our server
var opts []grpc.ServerOption
if s.TLS != nil {
opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
}
s.server = s.Server(opts)
// Register the health service
healthCheck := health.NewServer()
healthCheck.SetServingStatus(
GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
// Register all our plugins onto the gRPC server.
for k, raw := range s.Plugins {
p, ok := raw.(GRPCPlugin)
if !ok {
return fmt.Errorf("%q is not a GRPC-compatibile plugin", k)
}
if err := p.GRPCServer(s.server); err != nil {
return fmt.Errorf("error registring %q: %s", k, err)
}
}
return nil
}
// Config is the GRPCServerConfig encoded as JSON then base64.
func (s *GRPCServer) Config() string {
// Create a buffer that will contain our final contents
var buf bytes.Buffer
// JSON-encode the config; Serve base64-encodes it before writing it to stdout.
if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
// We panic since this shouldn't happen under any scenario. We
// carefully control the structure being encoded here and it should
// always be successful.
panic(err)
}
return buf.String()
}
func (s *GRPCServer) Serve(lis net.Listener) {
// Start serving in a goroutine
go s.server.Serve(lis)
// Wait until graceful completion
<-s.DoneCh
}
// GRPCServerConfig is the extra configuration passed along for consumers
// to facilitate using GRPC plugins.
type GRPCServerConfig struct {
StdoutAddr string `json:"stdout_addr"`
StderrAddr string `json:"stderr_addr"`
}

73
vendor/github.com/hashicorp/go-plugin/log_entry.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
package plugin
import (
"encoding/json"
"time"
)
// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
type logEntry struct {
Message string `json:"@message"`
Level string `json:"@level"`
Timestamp time.Time `json:"timestamp"`
KVPairs []*logEntryKV `json:"kv_pairs"`
}
// logEntryKV is a key value pair within the Output payload
type logEntryKV struct {
Key string `json:"key"`
Value interface{} `json:"value"`
}
// flattenKVPairs is used to flatten KVPair slice into []interface{}
// for hclog consumption.
func flattenKVPairs(kvs []*logEntryKV) []interface{} {
var result []interface{}
for _, kv := range kvs {
result = append(result, kv.Key)
result = append(result, kv.Value)
}
return result
}
// parseJSON handles parsing JSON output
func parseJSON(input string) (*logEntry, error) {
var raw map[string]interface{}
entry := &logEntry{}
err := json.Unmarshal([]byte(input), &raw)
if err != nil {
return nil, err
}
// Parse hclog-specific objects
if v, ok := raw["@message"]; ok {
entry.Message = v.(string)
delete(raw, "@message")
}
if v, ok := raw["@level"]; ok {
entry.Level = v.(string)
delete(raw, "@level")
}
if v, ok := raw["@timestamp"]; ok {
t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
if err != nil {
return nil, err
}
entry.Timestamp = t
delete(raw, "@timestamp")
}
// Parse dynamic KV args from the hclog payload.
for k, v := range raw {
entry.KVPairs = append(entry.KVPairs, &logEntryKV{
Key: k,
Value: v,
})
}
return entry, nil
}

204
vendor/github.com/hashicorp/go-plugin/mux_broker.go generated vendored Normal file
View File

@ -0,0 +1,204 @@
package plugin
import (
"encoding/binary"
"fmt"
"log"
"net"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/yamux"
)
// MuxBroker is responsible for brokering multiplexed connections by unique ID.
//
// It is used by plugins to multiplex multiple RPC connections and data
// streams on top of a single connection between the plugin process and the
// host process.
//
// This allows a plugin to request a channel with a specific ID to connect to
// or accept a connection from, and the broker handles the details of
// holding these channels open while they're being negotiated.
//
// The Plugin interface has access to these for both Server and Client.
// The broker can be used by either (optionally) to reserve and connect to
// new multiplexed streams. This is useful for complex args and return values,
// or anything else you might need a data stream for.
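//
// A typical (illustrative) pattern: one side reserves an ID with NextId, sends
// it to the peer inside an RPC message, and calls AcceptAndServe(id, impl);
// the peer then calls Dial(id) to obtain the multiplexed stream.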
type MuxBroker struct {
nextId uint32
session *yamux.Session
streams map[uint32]*muxBrokerPending
sync.Mutex
}
type muxBrokerPending struct {
ch chan net.Conn
doneCh chan struct{}
}
func newMuxBroker(s *yamux.Session) *MuxBroker {
return &MuxBroker{
session: s,
streams: make(map[uint32]*muxBrokerPending),
}
}
// Accept accepts a connection by ID.
//
// This should not be called multiple times with the same ID at one time.
func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
var c net.Conn
p := m.getStream(id)
select {
case c = <-p.ch:
close(p.doneCh)
case <-time.After(5 * time.Second):
m.Lock()
defer m.Unlock()
delete(m.streams, id)
return nil, fmt.Errorf("timeout waiting for accept")
}
// Ack our connection
if err := binary.Write(c, binary.LittleEndian, id); err != nil {
c.Close()
return nil, err
}
return c, nil
}
// AcceptAndServe is used to accept a specific stream ID and immediately
// serve an RPC server on that stream ID. This is used to easily serve
// complex arguments.
//
// The served interface is always registered to the "Plugin" name.
func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
conn, err := m.Accept(id)
if err != nil {
log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
return
}
serve(conn, "Plugin", v)
}
// Close closes the connection and all sub-connections.
func (m *MuxBroker) Close() error {
return m.session.Close()
}
// Dial opens a connection by ID.
func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
// Open the stream
stream, err := m.session.OpenStream()
if err != nil {
return nil, err
}
// Write the stream ID onto the wire.
if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
stream.Close()
return nil, err
}
// Read the ack that we connected. Then we're off!
var ack uint32
if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
stream.Close()
return nil, err
}
if ack != id {
stream.Close()
return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
}
return stream, nil
}
// NextId returns a unique ID to use next.
//
// It is possible for very long-running plugin hosts to wrap this value,
// though it would require a very large amount of RPC calls. In practice
// we've never seen it happen.
func (m *MuxBroker) NextId() uint32 {
return atomic.AddUint32(&m.nextId, 1)
}
// Run starts the brokering and should be executed in a goroutine, since it
// blocks forever, or until the session closes.
//
// Uses of MuxBroker never need to call this. It is called internally by
// the plugin host/client.
func (m *MuxBroker) Run() {
for {
stream, err := m.session.AcceptStream()
if err != nil {
// Once we receive an error, just exit
break
}
// Read the stream ID from the stream
var id uint32
if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
stream.Close()
continue
}
// Initialize the waiter
p := m.getStream(id)
select {
case p.ch <- stream:
default:
}
// Wait for a timeout
go m.timeoutWait(id, p)
}
}
func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
m.Lock()
defer m.Unlock()
p, ok := m.streams[id]
if ok {
return p
}
m.streams[id] = &muxBrokerPending{
ch: make(chan net.Conn, 1),
doneCh: make(chan struct{}),
}
return m.streams[id]
}
func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
// Wait for the stream to either be picked up and connected, or
// for a timeout.
timeout := false
select {
case <-p.doneCh:
case <-time.After(5 * time.Second):
timeout = true
}
m.Lock()
defer m.Unlock()
// Delete the stream so no one else can grab it
delete(m.streams, id)
// If we timed out, then check if we have a channel in the buffer,
// and if so, close it.
if timeout {
select {
case s := <-p.ch:
s.Close()
default:
// Nothing was buffered; the stream was already accepted elsewhere.
}
}
}

56
vendor/github.com/hashicorp/go-plugin/plugin.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// The plugin package exposes functions and helpers for communicating with
// plugins which are implemented as standalone binary applications.
//
// plugin.Client fully manages the lifecycle of executing the application,
// connecting to it, and returning the RPC client for dispensing plugins.
//
// plugin.Serve fully manages listeners to expose an RPC server from a binary
// that plugin.Client can connect to.
package plugin
import (
"errors"
"net/rpc"
"google.golang.org/grpc"
)
// Plugin is the interface that is implemented to serve/connect to an
// interface implementation.
type Plugin interface {
// Server should return the RPC server compatible struct to serve
// the methods that the Client calls over net/rpc.
Server(*MuxBroker) (interface{}, error)
// Client returns an interface implementation for the plugin you're
// serving that communicates to the server end of the plugin.
Client(*MuxBroker, *rpc.Client) (interface{}, error)
}
// GRPCPlugin is the interface that is implemented to serve/connect to
// a plugin over gRPC.
type GRPCPlugin interface {
// GRPCServer should register this plugin for serving with the
// given GRPCServer. Unlike Plugin.Server, this is only called once
// since gRPC plugins serve singletons.
GRPCServer(*grpc.Server) error
// GRPCClient should return the interface implementation for the plugin
// you're serving via gRPC.
GRPCClient(*grpc.ClientConn) (interface{}, error)
}
// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
// Server and Client functions. This will effectively disable support for
// net/rpc based plugins.
//
// This struct can be embedded in your struct.
type NetRPCUnsupportedPlugin struct{}
func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) {
return nil, errors.New("net/rpc plugin protocol not supported")
}
func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) {
return nil, errors.New("net/rpc plugin protocol not supported")
}

24
vendor/github.com/hashicorp/go-plugin/process.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package plugin
import (
"time"
)
// pidAlive checks whether a pid is alive.
func pidAlive(pid int) bool {
return _pidAlive(pid)
}
// pidWait blocks for a process to exit.
func pidWait(pid int) error {
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for range ticker.C {
if !pidAlive(pid) {
break
}
}
return nil
}

19
vendor/github.com/hashicorp/go-plugin/process_posix.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
// +build !windows
package plugin
import (
"os"
"syscall"
)
// _pidAlive tests whether a process is alive or not by sending it Signal 0,
// since Go otherwise has no way to test this.
func _pidAlive(pid int) bool {
proc, err := os.FindProcess(pid)
if err == nil {
err = proc.Signal(syscall.Signal(0))
}
return err == nil
}

29
vendor/github.com/hashicorp/go-plugin/process_windows.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
package plugin
import (
"syscall"
)
const (
// Weird name but matches the MSDN docs
exit_STILL_ACTIVE = 259
processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
syscall.PROCESS_QUERY_INFORMATION |
syscall.SYNCHRONIZE
)
// _pidAlive tests whether a process is alive or not
func _pidAlive(pid int) bool {
h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
if err != nil {
return false
}
var ec uint32
if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
return false
}
return ec == exit_STILL_ACTIVE
}

45
vendor/github.com/hashicorp/go-plugin/protocol.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package plugin
import (
"io"
"net"
)
// Protocol is an enum representing the types of protocols.
type Protocol string
const (
ProtocolInvalid Protocol = ""
ProtocolNetRPC Protocol = "netrpc"
ProtocolGRPC Protocol = "grpc"
)
// ServerProtocol is an interface that must be implemented for new plugin
// protocols to be servers.
type ServerProtocol interface {
// Init is called once to configure and initialize the protocol, but
// not start listening. This is the point at which all validation should
// be done and errors returned.
Init() error
// Config is extra configuration to be outputted to stdout. This will
// be automatically base64 encoded to ensure it can be parsed properly.
// This can be an empty string if additional configuration is not needed.
Config() string
// Serve is called to serve connections on the given listener. This should
// continue until the listener is closed.
Serve(net.Listener)
}
// ClientProtocol is an interface that must be implemented for new plugin
// protocols to be clients.
type ClientProtocol interface {
io.Closer
// Dispense dispenses a new instance of the plugin with the given name.
Dispense(string) (interface{}, error)
// Ping checks that the client connection is still healthy.
Ping() error
}

170
vendor/github.com/hashicorp/go-plugin/rpc_client.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
package plugin
import (
"crypto/tls"
"fmt"
"io"
"net"
"net/rpc"
"github.com/hashicorp/yamux"
)
// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
type RPCClient struct {
broker *MuxBroker
control *rpc.Client
plugins map[string]Plugin
// These are the streams used for the various stdout/err overrides
stdout, stderr net.Conn
}
// newRPCClient creates a new RPCClient. The Client argument is expected
// to be successfully started already with a lock held.
func newRPCClient(c *Client) (*RPCClient, error) {
// Connect to the client
conn, err := net.Dial(c.address.Network(), c.address.String())
if err != nil {
return nil, err
}
if tcpConn, ok := conn.(*net.TCPConn); ok {
// Make sure to set keep alive so that the connection doesn't die
tcpConn.SetKeepAlive(true)
}
if c.config.TLSConfig != nil {
conn = tls.Client(conn, c.config.TLSConfig)
}
// Create the actual RPC client
result, err := NewRPCClient(conn, c.config.Plugins)
if err != nil {
conn.Close()
return nil, err
}
// Begin the stream syncing so that stdin, out, err work properly
err = result.SyncStreams(
c.config.SyncStdout,
c.config.SyncStderr)
if err != nil {
result.Close()
return nil, err
}
return result, nil
}
// NewRPCClient creates a client from an already-open connection-like value.
// Dial is typically used instead.
func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
// Create the yamux client so we can multiplex
mux, err := yamux.Client(conn, nil)
if err != nil {
conn.Close()
return nil, err
}
// Connect to the control stream.
control, err := mux.Open()
if err != nil {
mux.Close()
return nil, err
}
// Connect stdout, stderr streams
stdstream := make([]net.Conn, 2)
for i := range stdstream {
stdstream[i], err = mux.Open()
if err != nil {
mux.Close()
return nil, err
}
}
// Create the broker and start it up
broker := newMuxBroker(mux)
go broker.Run()
// Build the client using our broker and control channel.
return &RPCClient{
broker: broker,
control: rpc.NewClient(control),
plugins: plugins,
stdout: stdstream[0],
stderr: stdstream[1],
}, nil
}
// SyncStreams should be called to enable syncing of stdout,
// stderr with the plugin.
//
// This will return immediately and the syncing will continue to happen
// in the background. You do not need to launch this in a goroutine itself.
//
// This should never be called multiple times.
func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
go copyStream("stdout", stdout, c.stdout)
go copyStream("stderr", stderr, c.stderr)
return nil
}
// Close closes the connection. The client is no longer usable after this
// is called.
func (c *RPCClient) Close() error {
// Call the control channel and ask it to gracefully exit. If this
// errors, then we save it so that we always return an error but we
// want to try to close the other channels anyways.
var empty struct{}
returnErr := c.control.Call("Control.Quit", true, &empty)
// Close the other streams we have
if err := c.control.Close(); err != nil {
return err
}
if err := c.stdout.Close(); err != nil {
return err
}
if err := c.stderr.Close(); err != nil {
return err
}
if err := c.broker.Close(); err != nil {
return err
}
// Return back the error we got from Control.Quit. This is very important
// since we MUST return non-nil error if this fails so that Client.Kill
// will properly try a process.Kill.
return returnErr
}
func (c *RPCClient) Dispense(name string) (interface{}, error) {
p, ok := c.plugins[name]
if !ok {
return nil, fmt.Errorf("unknown plugin type: %s", name)
}
var id uint32
if err := c.control.Call(
"Dispenser.Dispense", name, &id); err != nil {
return nil, err
}
conn, err := c.broker.Dial(id)
if err != nil {
return nil, err
}
return p.Client(c.broker, rpc.NewClient(conn))
}
// Ping pings the connection to ensure it is still alive.
//
// The error from the RPC call is returned as-is in case you want to inspect
// it for further error analysis. Any error returned from here would indicate
// that the connection to the plugin is not healthy.
func (c *RPCClient) Ping() error {
var empty struct{}
return c.control.Call("Control.Ping", true, &empty)
}

197
vendor/github.com/hashicorp/go-plugin/rpc_server.go generated vendored Normal file
View File

@ -0,0 +1,197 @@
package plugin
import (
"errors"
"fmt"
"io"
"log"
"net"
"net/rpc"
"sync"
"github.com/hashicorp/yamux"
)
// RPCServer listens for network connections and then dispenses interface
// implementations over net/rpc.
//
// After the fields below are set, they shouldn't be read directly again,
// since the server may be reading/writing them concurrently.
type RPCServer struct {
Plugins map[string]Plugin
// Stdout, Stderr are what this server will use instead of the
// normal stdin/out/err. Due to the multi-process nature of our
// plugin system, we can't use the normal process values, so we
// make our own custom ones and pipe them across.
Stdout io.Reader
Stderr io.Reader
// DoneCh should be set to a non-nil channel that will be closed
// when the control requests the RPC server to end.
DoneCh chan<- struct{}
lock sync.Mutex
}
// ServerProtocol impl.
func (s *RPCServer) Init() error { return nil }
// ServerProtocol impl.
func (s *RPCServer) Config() string { return "" }
// ServerProtocol impl.
func (s *RPCServer) Serve(lis net.Listener) {
for {
conn, err := lis.Accept()
if err != nil {
log.Printf("[ERR] plugin: plugin server: %s", err)
return
}
go s.ServeConn(conn)
}
}
// ServeConn runs a single connection.
//
// ServeConn blocks, serving the connection until the client hangs up.
func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
// First create the yamux server to wrap this connection
mux, err := yamux.Server(conn, nil)
if err != nil {
conn.Close()
log.Printf("[ERR] plugin: error creating yamux server: %s", err)
return
}
// Accept the control connection
control, err := mux.Accept()
if err != nil {
mux.Close()
if err != io.EOF {
log.Printf("[ERR] plugin: error accepting control connection: %s", err)
}
return
}
// Connect the stdstreams (in, out, err)
stdstream := make([]net.Conn, 2)
for i := range stdstream {
stdstream[i], err = mux.Accept()
if err != nil {
mux.Close()
log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
return
}
}
// Copy std streams out to the proper place
go copyStream("stdout", stdstream[0], s.Stdout)
go copyStream("stderr", stdstream[1], s.Stderr)
// Create the broker and start it up
broker := newMuxBroker(mux)
go broker.Run()
// Use the control connection to build the dispenser and serve the
// connection.
server := rpc.NewServer()
server.RegisterName("Control", &controlServer{
server: s,
})
server.RegisterName("Dispenser", &dispenseServer{
broker: broker,
plugins: s.Plugins,
})
server.ServeConn(control)
}
// done is called internally by the control server to trigger the
// doneCh to close which is listened to by the main process to cleanly
// exit.
func (s *RPCServer) done() {
s.lock.Lock()
defer s.lock.Unlock()
if s.DoneCh != nil {
close(s.DoneCh)
s.DoneCh = nil
}
}
// controlServer implements the "Control" RPC endpoints used to ping the
// server and to request a clean shutdown.
type controlServer struct {
server *RPCServer
}
// Ping can be called to verify that the connection to the plugin (and
// likely the plugin binary itself) is still alive.
func (c *controlServer) Ping(
null bool, response *struct{}) error {
*response = struct{}{}
return nil
}
func (c *controlServer) Quit(
null bool, response *struct{}) error {
// End the server
c.server.done()
// Acknowledge the quit request
*response = struct{}{}
return nil
}
// dispenseServer dispenses various interface implementations to clients.
type dispenseServer struct {
broker *MuxBroker
plugins map[string]Plugin
}
func (d *dispenseServer) Dispense(
name string, response *uint32) error {
// Find the function to create this implementation
p, ok := d.plugins[name]
if !ok {
return fmt.Errorf("unknown plugin type: %s", name)
}
// Create the implementation first so we know if there is an error.
impl, err := p.Server(d.broker)
if err != nil {
// Convert to a plain error value so that it can travel across the RPC boundary
return errors.New(err.Error())
}
// Reserve an ID for our implementation
id := d.broker.NextId()
*response = id
// Run the rest in a goroutine since it can only happen once this RPC
// call returns. We wait for a connection for the plugin implementation
// and serve it.
go func() {
conn, err := d.broker.Accept(id)
if err != nil {
log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
return
}
serve(conn, "Plugin", impl)
}()
return nil
}
func serve(conn io.ReadWriteCloser, name string, v interface{}) {
server := rpc.NewServer()
if err := server.RegisterName(name, v); err != nil {
log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
return
}
server.ServeConn(conn)
}

310
vendor/github.com/hashicorp/go-plugin/server.go generated vendored Normal file
View File

@ -0,0 +1,310 @@
package plugin
import (
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"os/signal"
"runtime"
"strconv"
"sync/atomic"
"github.com/hashicorp/go-hclog"
"google.golang.org/grpc"
)
// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
// We will increment this whenever we change any protocol behavior. This
// will invalidate any prior plugins but will at least allow us to iterate
// on the core in a safe way. We will do our best to do this very
// infrequently.
const CoreProtocolVersion = 1
// HandshakeConfig is the configuration used by client and servers to
// handshake before starting a plugin connection. This is embedded by
// both ServeConfig and ClientConfig.
//
// In practice, the plugin host creates a HandshakeConfig that is exported
// and plugins then can easily consume it.
type HandshakeConfig struct {
// ProtocolVersion is the version that clients must match on to
// agree they can communicate. This should match the ProtocolVersion
// set on ClientConfig when using a plugin.
ProtocolVersion uint
// MagicCookieKey and value are used as a very basic verification
// that a plugin is intended to be launched. This is not a security
// measure, just a UX feature. If the magic cookie doesn't match,
// we show human-friendly output.
MagicCookieKey string
MagicCookieValue string
}
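// An illustrative handshake shared by a host and its plugins:
//
//	var Handshake = HandshakeConfig{
//		ProtocolVersion:  1,
//		MagicCookieKey:   "BASIC_PLUGIN",
//		MagicCookieValue: "hello",
//	}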
// ServeConfig configures what sorts of plugins are served.
type ServeConfig struct {
// HandshakeConfig is the configuration that must match clients.
HandshakeConfig
// TLSProvider is a function that returns a configured tls.Config.
TLSProvider func() (*tls.Config, error)
// Plugins are the plugins that are served.
Plugins map[string]Plugin
// GRPCServer should be non-nil to enable serving the plugins over
// gRPC. This is a function to create the server when needed with the
// given server options. The server options populated by go-plugin will
// be for TLS if set. You may modify the input slice.
//
// Note that the grpc.Server will automatically be registered with
// the gRPC health checking service. This is not optional since go-plugin
// relies on this to implement Ping().
GRPCServer func([]grpc.ServerOption) *grpc.Server
}
// Protocol returns the protocol that this server should speak.
func (c *ServeConfig) Protocol() Protocol {
result := ProtocolNetRPC
if c.GRPCServer != nil {
result = ProtocolGRPC
}
return result
}
// Serve serves the plugins given by ServeConfig.
//
// Serve doesn't return until the plugin is done being executed. Any
// errors will be outputted to os.Stderr.
//
// This is the method that plugins should call in their main() functions.
func Serve(opts *ServeConfig) {
// Validate the handshake config
if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
fmt.Fprintf(os.Stderr,
"Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
"key or value was set. Please notify the plugin author and report\n"+
"this as a bug.\n")
os.Exit(1)
}
// First check the cookie
if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
fmt.Fprintf(os.Stderr,
"This binary is a plugin. These are not meant to be executed directly.\n"+
"Please execute the program that consumes these plugins, which will\n"+
"load any plugins automatically\n")
os.Exit(1)
}
// Logging goes to the original stderr
log.SetOutput(os.Stderr)
// internal logger to os.Stderr
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
Output: os.Stderr,
JSONFormat: true,
})
// Create our new stdout, stderr files. These will override our built-in
// stdout/stderr so that it works across the stream boundary.
stdout_r, stdout_w, err := os.Pipe()
if err != nil {
fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
os.Exit(1)
}
stderr_r, stderr_w, err := os.Pipe()
if err != nil {
fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
os.Exit(1)
}
// Register a listener so we can accept a connection
listener, err := serverListener()
if err != nil {
logger.Error("plugin init error", "error", err)
return
}
// Close the listener on return. We wrap this in a func() on purpose
// because the "listener" reference may change to TLS.
defer func() {
listener.Close()
}()
var tlsConfig *tls.Config
if opts.TLSProvider != nil {
tlsConfig, err = opts.TLSProvider()
if err != nil {
logger.Error("plugin tls init", "error", err)
return
}
}
// Create the channel to tell us when we're done
doneCh := make(chan struct{})
// Build the server type
var server ServerProtocol
switch opts.Protocol() {
case ProtocolNetRPC:
// If we have a TLS configuration then we wrap the listener
// ourselves and do it at that level.
if tlsConfig != nil {
listener = tls.NewListener(listener, tlsConfig)
}
// Create the RPC server to dispense
server = &RPCServer{
Plugins: opts.Plugins,
Stdout: stdout_r,
Stderr: stderr_r,
DoneCh: doneCh,
}
case ProtocolGRPC:
// Create the gRPC server
server = &GRPCServer{
Plugins: opts.Plugins,
Server: opts.GRPCServer,
TLS: tlsConfig,
Stdout: stdout_r,
Stderr: stderr_r,
DoneCh: doneCh,
}
default:
panic("unknown server protocol: " + opts.Protocol())
}
// Initialize the servers
if err := server.Init(); err != nil {
logger.Error("protocol init", "error", err)
return
}
// Build the extra configuration
extra := ""
if v := server.Config(); v != "" {
extra = base64.StdEncoding.EncodeToString([]byte(v))
}
if extra != "" {
extra = "|" + extra
}
logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())
// Output the address and service name to stdout so that core can bring it up.
fmt.Printf("%d|%d|%s|%s|%s%s\n",
CoreProtocolVersion,
opts.ProtocolVersion,
listener.Addr().Network(),
listener.Addr().String(),
opts.Protocol(),
extra)
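// The line written above looks like, e.g. (illustrative values):
// "1|1|unix|/tmp/plugin073322128|grpc"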
os.Stdout.Sync()
// Eat the interrupts
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
go func() {
var count int32 = 0
for {
<-ch
newCount := atomic.AddInt32(&count, 1)
logger.Debug("plugin received interrupt signal, ignoring", "count", newCount)
}
}()
// Set our new out, err
os.Stdout = stdout_w
os.Stderr = stderr_w
// Accept connections and wait for completion
go server.Serve(listener)
<-doneCh
}
func serverListener() (net.Listener, error) {
if runtime.GOOS == "windows" {
return serverListener_tcp()
}
return serverListener_unix()
}
func serverListener_tcp() (net.Listener, error) {
minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32)
if err != nil {
return nil, err
}
maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32)
if err != nil {
return nil, err
}
for port := minPort; port <= maxPort; port++ {
address := fmt.Sprintf("127.0.0.1:%d", port)
listener, err := net.Listen("tcp", address)
if err == nil {
return listener, nil
}
}
return nil, errors.New("Couldn't bind plugin TCP listener")
}
func serverListener_unix() (net.Listener, error) {
tf, err := ioutil.TempFile("", "plugin")
if err != nil {
return nil, err
}
path := tf.Name()
// Close the file and remove it because the file must not exist when
// we create the Unix domain socket.
if err := tf.Close(); err != nil {
return nil, err
}
if err := os.Remove(path); err != nil {
return nil, err
}
l, err := net.Listen("unix", path)
if err != nil {
return nil, err
}
// Wrap the listener in rmListener so that the Unix domain socket file
// is removed on close.
return &rmListener{
Listener: l,
Path: path,
}, nil
}
// rmListener is an implementation of net.Listener that forwards most
// calls to the listener but also removes a file as part of the close. We
// use this to cleanup the unix domain socket on close.
type rmListener struct {
net.Listener
Path string
}
func (l *rmListener) Close() error {
// Close the listener itself
if err := l.Listener.Close(); err != nil {
return err
}
// Remove the file
return os.Remove(l.Path)
}

31
vendor/github.com/hashicorp/go-plugin/server_mux.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
package plugin
import (
"fmt"
"os"
)
// ServeMuxMap is the type that is used to configure ServeMux
type ServeMuxMap map[string]*ServeConfig
// ServeMux is like Serve, but serves multiple types of plugins determined
// by the argument given on the command-line.
//
// This command doesn't return until the plugin is done being executed. Any
// errors are logged or output to stderr.
func ServeMux(m ServeMuxMap) {
if len(os.Args) != 2 {
fmt.Fprintf(os.Stderr,
"Invoked improperly. This is an internal command that shouldn't\n"+
"be manually invoked.\n")
os.Exit(1)
}
opts, ok := m[os.Args[1]]
if !ok {
fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
os.Exit(1)
}
Serve(opts)
}
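
For orientation, a minimal sketch of how a plugin binary might use ServeMux. This is hedged: the handshake values and the stubPlugin type below are made-up stand-ins for a real plugin implementation, not part of go-plugin.

```go
package main

import (
	"net/rpc"

	plugin "github.com/hashicorp/go-plugin"
)

// stubPlugin is a hypothetical minimal plugin, just enough to satisfy
// the plugin.Plugin interface for this sketch.
type stubPlugin struct{}

func (stubPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return struct{}{}, nil
}

func (stubPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) {
	return struct{}{}, nil
}

// handshake is an illustrative handshake config shared by host and plugin.
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "EXAMPLE_PLUGIN",
	MagicCookieValue: "example",
}

func main() {
	// The single command-line argument selects which plugin set to serve,
	// e.g. `./mybinary kv` picks the "kv" ServeConfig.
	plugin.ServeMux(plugin.ServeMuxMap{
		"kv":   {HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{"kv": stubPlugin{}}},
		"http": {HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{"http": stubPlugin{}}},
	})
}
```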

vendor/github.com/hashicorp/go-plugin/stream.go generated vendored Normal file

@ -0,0 +1,18 @@
package plugin
import (
"io"
"log"
)
func copyStream(name string, dst io.Writer, src io.Reader) {
if src == nil {
panic(name + ": src is nil")
}
if dst == nil {
panic(name + ": dst is nil")
}
if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
}
}

vendor/github.com/hashicorp/go-plugin/testing.go generated vendored Normal file

@ -0,0 +1,120 @@
package plugin
import (
"bytes"
"net"
"net/rpc"
"github.com/mitchellh/go-testing-interface"
"google.golang.org/grpc"
)
// This file contains test helpers that you can use outside of
// this package to make it easier to test plugins themselves.
// TestConn is a helper function for returning a client and server
// net.Conn connected to each other.
func TestConn(t testing.T) (net.Conn, net.Conn) {
// Listen to any local port. This listener will be closed
// after a single connection is established.
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("err: %s", err)
}
// Start a goroutine to accept our client connection
var serverConn net.Conn
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
defer l.Close()
var err error
serverConn, err = l.Accept()
if err != nil {
t.Fatalf("err: %s", err)
}
}()
// Connect to the server
clientConn, err := net.Dial("tcp", l.Addr().String())
if err != nil {
t.Fatalf("err: %s", err)
}
// Wait for the server side to acknowledge it has connected
<-doneCh
return clientConn, serverConn
}
// TestRPCConn returns a rpc client and server connected to each other.
func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) {
clientConn, serverConn := TestConn(t)
server := rpc.NewServer()
go server.ServeConn(serverConn)
client := rpc.NewClient(clientConn)
return client, server
}
// TestPluginRPCConn returns a plugin RPC client and server that are connected
// together and configured.
func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
// Create two net.Conns we can use to shuttle our control connection
clientConn, serverConn := TestConn(t)
// Start up the server
server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
go server.ServeConn(serverConn)
// Connect the client to the server
client, err := NewRPCClient(clientConn, ps)
if err != nil {
t.Fatalf("err: %s", err)
}
return client, server
}
// TestPluginGRPCConn returns a plugin gRPC client and server that are connected
// together and configured. This is used to test gRPC connections.
func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) {
// Create a listener
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("err: %s", err)
}
// Start up the server
server := &GRPCServer{
Plugins: ps,
Server: DefaultGRPCServer,
Stdout: new(bytes.Buffer),
Stderr: new(bytes.Buffer),
}
if err := server.Init(); err != nil {
t.Fatalf("err: %s", err)
}
go server.Serve(l)
// Connect to the server
conn, err := grpc.Dial(
l.Addr().String(),
grpc.WithBlock(),
grpc.WithInsecure())
if err != nil {
t.Fatalf("err: %s", err)
}
// Connection successful, close the listener
l.Close()
// Create the client
client := &GRPCClient{
Conn: conn,
Plugins: ps,
}
return client, server
}
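
A hedged sketch of using these helpers from a standard test. The stubPlugin and stubImpl types are invented for the example; it relies on *testing.T satisfying the go-testing-interface testing.T these helpers take, and on the RPC server exposing the dispensed implementation under the "Plugin" service name.

```go
package plugintest

import (
	"net/rpc"
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

// stubImpl is the hypothetical backend served over net/rpc.
type stubImpl struct{}

func (*stubImpl) Ping(args string, resp *string) error {
	*resp = "pong"
	return nil
}

// stubPlugin wires stubImpl into the plugin.Plugin interface.
type stubPlugin struct{}

func (stubPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &stubImpl{}, nil
}

func (stubPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return c, nil // hand the raw RPC client back to the caller
}

func TestDispense(t *testing.T) {
	// Client and server run in-process; no subprocess or handshake needed.
	client, _ := plugin.TestPluginRPCConn(t, map[string]plugin.Plugin{
		"stub": stubPlugin{},
	})
	defer client.Close()

	raw, err := client.Dispense("stub")
	if err != nil {
		t.Fatal(err)
	}

	var resp string
	if err := raw.(*rpc.Client).Call("Plugin.Ping", "hi", &resp); err != nil {
		t.Fatal(err)
	}
	if resp != "pong" {
		t.Fatalf("unexpected response: %q", resp)
	}
}
```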

vendor/github.com/hashicorp/yamux/LICENSE generated vendored Normal file

@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

vendor/github.com/hashicorp/yamux/README.md generated vendored Normal file

@ -0,0 +1,86 @@
# Yamux
Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
It relies on an underlying connection to provide reliability
and ordering, such as TCP or Unix domain sockets, and provides
stream-oriented multiplexing. It is inspired by SPDY but is not
interoperable with it.
Yamux features include:
* Bi-directional streams
* Streams can be opened by either client or server
* Useful for NAT traversal
* Server-side push support
* Flow control
* Avoid starvation
* Back-pressure to prevent overwhelming a receiver
* Keep Alives
* Enables persistent connections over a load balancer
* Efficient
* Enables thousands of logical streams with low overhead
## Documentation
For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
## Specification
The full specification for Yamux is provided in the `spec.md` file.
It can be used as a guide to implementors of interoperable libraries.
## Usage
Using Yamux is remarkably simple:
```go
func client() {
	// Get a TCP connection
	conn, err := net.Dial(...)
	if err != nil {
		panic(err)
	}

	// Setup client side of yamux
	session, err := yamux.Client(conn, nil)
	if err != nil {
		panic(err)
	}

	// Open a new stream
	stream, err := session.Open()
	if err != nil {
		panic(err)
	}

	// Stream implements net.Conn
	stream.Write([]byte("ping"))
}

func server() {
	// Accept a TCP connection
	conn, err := listener.Accept()
	if err != nil {
		panic(err)
	}

	// Setup server side of yamux
	session, err := yamux.Server(conn, nil)
	if err != nil {
		panic(err)
	}

	// Accept a stream
	stream, err := session.Accept()
	if err != nil {
		panic(err)
	}

	// Listen for a message
	buf := make([]byte, 4)
	stream.Read(buf)
}
```

vendor/github.com/hashicorp/yamux/addr.go generated vendored Normal file

@ -0,0 +1,60 @@
package yamux
import (
"fmt"
"net"
)
// hasAddr is used to get the address from the underlying connection
type hasAddr interface {
LocalAddr() net.Addr
RemoteAddr() net.Addr
}
// yamuxAddr is used when we cannot get the underlying address
type yamuxAddr struct {
Addr string
}
func (*yamuxAddr) Network() string {
return "yamux"
}
func (y *yamuxAddr) String() string {
return fmt.Sprintf("yamux:%s", y.Addr)
}
// Addr is used to get the address of the listener.
func (s *Session) Addr() net.Addr {
return s.LocalAddr()
}
// LocalAddr is used to get the local address of the
// underlying connection.
func (s *Session) LocalAddr() net.Addr {
addr, ok := s.conn.(hasAddr)
if !ok {
return &yamuxAddr{"local"}
}
return addr.LocalAddr()
}
// RemoteAddr is used to get the address of the remote end
// of the underlying connection.
func (s *Session) RemoteAddr() net.Addr {
addr, ok := s.conn.(hasAddr)
if !ok {
return &yamuxAddr{"remote"}
}
return addr.RemoteAddr()
}
// LocalAddr returns the local address
func (s *Stream) LocalAddr() net.Addr {
return s.session.LocalAddr()
}
// RemoteAddr returns the remote address
func (s *Stream) RemoteAddr() net.Addr {
return s.session.RemoteAddr()
}

vendor/github.com/hashicorp/yamux/const.go generated vendored Normal file

@ -0,0 +1,157 @@
package yamux
import (
"encoding/binary"
"fmt"
)
var (
// ErrInvalidVersion means we received a frame with an
// invalid version
ErrInvalidVersion = fmt.Errorf("invalid protocol version")
// ErrInvalidMsgType means we received a frame with an
// invalid message type
ErrInvalidMsgType = fmt.Errorf("invalid msg type")
// ErrSessionShutdown is used if there is a shutdown during
// an operation
ErrSessionShutdown = fmt.Errorf("session shutdown")
// ErrStreamsExhausted is returned if we have no more
// stream ids to issue
ErrStreamsExhausted = fmt.Errorf("streams exhausted")
// ErrDuplicateStream is used if a duplicate stream is
// opened inbound
ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
// ErrRecvWindowExceeded indicates the receive window was exceeded
ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
// ErrTimeout is used when we reach an IO deadline
ErrTimeout = fmt.Errorf("i/o deadline reached")
// ErrStreamClosed is returned when using a closed stream
ErrStreamClosed = fmt.Errorf("stream closed")
// ErrUnexpectedFlag is set when we get an unexpected flag
ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
// ErrRemoteGoAway is used when we get a go away from the other side
ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
// ErrConnectionReset is sent if a stream is reset. This can happen
// if the backlog is exceeded, or if there was a remote GoAway.
ErrConnectionReset = fmt.Errorf("connection reset")
// ErrConnectionWriteTimeout indicates that we hit the "safety valve"
// timeout writing to the underlying stream connection.
ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
// ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close
ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
)
const (
// protoVersion is the only version we support
protoVersion uint8 = 0
)
const (
// Data is used for data frames. They are followed
// by length bytes worth of payload.
typeData uint8 = iota
// WindowUpdate is used to change the window of
// a given stream. The length indicates the delta
// update to the window.
typeWindowUpdate
// Ping is sent as a keep-alive or to measure
// the RTT. The StreamID and Length value are echoed
// back in the response.
typePing
// GoAway is sent to terminate a session. The StreamID
// should be 0 and the length is an error code.
typeGoAway
)
const (
// SYN is sent to signal a new stream. May
// be sent with a data payload
flagSYN uint16 = 1 << iota
// ACK is sent to acknowledge a new stream. May
// be sent with a data payload
flagACK
// FIN is sent to half-close the given stream.
// May be sent with a data payload.
flagFIN
// RST is used to hard close a given stream.
flagRST
)
const (
// initialStreamWindow is the initial stream window size
initialStreamWindow uint32 = 256 * 1024
)
const (
// goAwayNormal is sent on a normal termination
goAwayNormal uint32 = iota
// goAwayProtoErr sent on a protocol error
goAwayProtoErr
// goAwayInternalErr sent on an internal error
goAwayInternalErr
)
const (
sizeOfVersion = 1
sizeOfType = 1
sizeOfFlags = 2
sizeOfStreamID = 4
sizeOfLength = 4
headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
sizeOfStreamID + sizeOfLength
)
type header []byte
func (h header) Version() uint8 {
return h[0]
}
func (h header) MsgType() uint8 {
return h[1]
}
func (h header) Flags() uint16 {
return binary.BigEndian.Uint16(h[2:4])
}
func (h header) StreamID() uint32 {
return binary.BigEndian.Uint32(h[4:8])
}
func (h header) Length() uint32 {
return binary.BigEndian.Uint32(h[8:12])
}
func (h header) String() string {
return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
}
func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
h[0] = protoVersion
h[1] = msgType
binary.BigEndian.PutUint16(h[2:4], flags)
binary.BigEndian.PutUint32(h[4:8], streamID)
binary.BigEndian.PutUint32(h[8:12], length)
}

vendor/github.com/hashicorp/yamux/mux.go generated vendored Normal file

@ -0,0 +1,87 @@
package yamux
import (
"fmt"
"io"
"os"
"time"
)
// Config is used to tune the Yamux session
type Config struct {
// AcceptBacklog is used to limit how many streams may be
// waiting an accept.
AcceptBacklog int
// EnableKeepAlive is used to send periodic keep-alive
// messages using a ping.
EnableKeepAlive bool
// KeepAliveInterval is how often to perform the keep alive
KeepAliveInterval time.Duration
// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
// which we suspect a problem with the underlying connection and
// close it. This is only applied to writes, where there's generally
// an expectation that things will move along quickly.
ConnectionWriteTimeout time.Duration
// MaxStreamWindowSize is used to control the maximum
// window size that we allow for a stream.
MaxStreamWindowSize uint32
// LogOutput is used to control the log destination
LogOutput io.Writer
}
// DefaultConfig is used to return a default configuration
func DefaultConfig() *Config {
return &Config{
AcceptBacklog: 256,
EnableKeepAlive: true,
KeepAliveInterval: 30 * time.Second,
ConnectionWriteTimeout: 10 * time.Second,
MaxStreamWindowSize: initialStreamWindow,
LogOutput: os.Stderr,
}
}
// VerifyConfig is used to verify the sanity of configuration
func VerifyConfig(config *Config) error {
if config.AcceptBacklog <= 0 {
return fmt.Errorf("backlog must be positive")
}
if config.KeepAliveInterval == 0 {
return fmt.Errorf("keep-alive interval must be positive")
}
if config.MaxStreamWindowSize < initialStreamWindow {
return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
}
return nil
}
// Server is used to initialize a new server-side connection.
// There must be at most one server-side connection. If a nil config is
// provided, DefaultConfig will be used.
func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
if err := VerifyConfig(config); err != nil {
return nil, err
}
return newSession(config, conn, false), nil
}
// Client is used to initialize a new client-side connection.
// There must be at most one client-side connection.
func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
if config == nil {
config = DefaultConfig()
}
if err := VerifyConfig(config); err != nil {
return nil, err
}
return newSession(config, conn, true), nil
}
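
A small usage sketch for the Config above; the address and timeout values are illustrative. It starts from DefaultConfig and overrides fields before handing the result to Client, which validates it via VerifyConfig.

```go
package main

import (
	"net"
	"time"

	"github.com/hashicorp/yamux"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:4000") // illustrative endpoint
	if err != nil {
		panic(err)
	}

	// Start from the defaults and override selectively; Client/Server run
	// VerifyConfig, so invalid settings fail fast.
	cfg := yamux.DefaultConfig()
	cfg.KeepAliveInterval = 10 * time.Second
	cfg.ConnectionWriteTimeout = 5 * time.Second

	session, err := yamux.Client(conn, cfg)
	if err != nil {
		panic(err)
	}
	defer session.Close()
}
```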

vendor/github.com/hashicorp/yamux/session.go generated vendored Normal file

@ -0,0 +1,623 @@
package yamux
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net"
"strings"
"sync"
"sync/atomic"
"time"
)
// Session is used to wrap a reliable ordered connection and to
// multiplex it into multiple streams.
type Session struct {
// remoteGoAway indicates the remote side does
// not want further connections. Must be first for alignment.
remoteGoAway int32
// localGoAway indicates that we should stop
// accepting further connections. Must be first for alignment.
localGoAway int32
// nextStreamID is the next stream ID we should issue.
// This depends on whether we are a client or a server.
nextStreamID uint32
// config holds our configuration
config *Config
// logger is used for our logs
logger *log.Logger
// conn is the underlying connection
conn io.ReadWriteCloser
// bufRead is a buffered reader
bufRead *bufio.Reader
// pings is used to track inflight pings
pings map[uint32]chan struct{}
pingID uint32
pingLock sync.Mutex
// streams maps a stream id to a stream, and inflight has an entry
// for any outgoing stream that has not yet been established. Both are
// protected by streamLock.
streams map[uint32]*Stream
inflight map[uint32]struct{}
streamLock sync.Mutex
// synCh acts like a semaphore. It is sized to the AcceptBacklog which
// is assumed to be symmetric between the client and server. This allows
// the client to avoid exceeding the backlog and instead blocks the open.
synCh chan struct{}
// acceptCh is used to pass ready streams to the client
acceptCh chan *Stream
// sendCh is used to mark a stream as ready to send,
// or to send a header out directly.
sendCh chan sendReady
// recvDoneCh is closed when recv() exits to avoid a race
// between stream registration and stream shutdown
recvDoneCh chan struct{}
// shutdown is used to safely close a session
shutdown bool
shutdownErr error
shutdownCh chan struct{}
shutdownLock sync.Mutex
}
// sendReady is used to either mark a stream as ready
// or to directly send a header
type sendReady struct {
Hdr []byte
Body io.Reader
Err chan error
}
// newSession is used to construct a new session
func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
s := &Session{
config: config,
logger: log.New(config.LogOutput, "", log.LstdFlags),
conn: conn,
bufRead: bufio.NewReader(conn),
pings: make(map[uint32]chan struct{}),
streams: make(map[uint32]*Stream),
inflight: make(map[uint32]struct{}),
synCh: make(chan struct{}, config.AcceptBacklog),
acceptCh: make(chan *Stream, config.AcceptBacklog),
sendCh: make(chan sendReady, 64),
recvDoneCh: make(chan struct{}),
shutdownCh: make(chan struct{}),
}
if client {
s.nextStreamID = 1
} else {
s.nextStreamID = 2
}
go s.recv()
go s.send()
if config.EnableKeepAlive {
go s.keepalive()
}
return s
}
// IsClosed does a safe check to see if we have shutdown
func (s *Session) IsClosed() bool {
select {
case <-s.shutdownCh:
return true
default:
return false
}
}
// NumStreams returns the number of currently open streams
func (s *Session) NumStreams() int {
s.streamLock.Lock()
num := len(s.streams)
s.streamLock.Unlock()
return num
}
// Open is used to create a new stream as a net.Conn
func (s *Session) Open() (net.Conn, error) {
conn, err := s.OpenStream()
if err != nil {
return nil, err
}
return conn, nil
}
// OpenStream is used to create a new stream
func (s *Session) OpenStream() (*Stream, error) {
if s.IsClosed() {
return nil, ErrSessionShutdown
}
if atomic.LoadInt32(&s.remoteGoAway) == 1 {
return nil, ErrRemoteGoAway
}
// Block if we have too many inflight SYNs
select {
case s.synCh <- struct{}{}:
case <-s.shutdownCh:
return nil, ErrSessionShutdown
}
GET_ID:
// Get an ID, and check for stream exhaustion
id := atomic.LoadUint32(&s.nextStreamID)
if id >= math.MaxUint32-1 {
return nil, ErrStreamsExhausted
}
if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
goto GET_ID
}
// Register the stream
stream := newStream(s, id, streamInit)
s.streamLock.Lock()
s.streams[id] = stream
s.inflight[id] = struct{}{}
s.streamLock.Unlock()
// Send the window update to create
if err := stream.sendWindowUpdate(); err != nil {
select {
case <-s.synCh:
default:
s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
}
return nil, err
}
return stream, nil
}
// Accept is used to block until the next available stream
// is ready to be accepted.
func (s *Session) Accept() (net.Conn, error) {
conn, err := s.AcceptStream()
if err != nil {
return nil, err
}
return conn, err
}
// AcceptStream is used to block until the next available stream
// is ready to be accepted.
func (s *Session) AcceptStream() (*Stream, error) {
select {
case stream := <-s.acceptCh:
if err := stream.sendWindowUpdate(); err != nil {
return nil, err
}
return stream, nil
case <-s.shutdownCh:
return nil, s.shutdownErr
}
}
// Close is used to close the session and all streams.
// Attempts to send a GoAway before closing the connection.
func (s *Session) Close() error {
s.shutdownLock.Lock()
defer s.shutdownLock.Unlock()
if s.shutdown {
return nil
}
s.shutdown = true
if s.shutdownErr == nil {
s.shutdownErr = ErrSessionShutdown
}
close(s.shutdownCh)
s.conn.Close()
<-s.recvDoneCh
s.streamLock.Lock()
defer s.streamLock.Unlock()
for _, stream := range s.streams {
stream.forceClose()
}
return nil
}
// exitErr is used to handle an error that is causing the
// session to terminate.
func (s *Session) exitErr(err error) {
s.shutdownLock.Lock()
if s.shutdownErr == nil {
s.shutdownErr = err
}
s.shutdownLock.Unlock()
s.Close()
}
// GoAway can be used to prevent accepting further
// connections. It does not close the underlying conn.
func (s *Session) GoAway() error {
return s.waitForSend(s.goAway(goAwayNormal), nil)
}
// goAway is used to send a goAway message
func (s *Session) goAway(reason uint32) header {
atomic.SwapInt32(&s.localGoAway, 1)
hdr := header(make([]byte, headerSize))
hdr.encode(typeGoAway, 0, 0, reason)
return hdr
}
// Ping is used to measure the RTT response time
func (s *Session) Ping() (time.Duration, error) {
// Get a channel for the ping
ch := make(chan struct{})
// Get a new ping id, mark as pending
s.pingLock.Lock()
id := s.pingID
s.pingID++
s.pings[id] = ch
s.pingLock.Unlock()
// Send the ping request
hdr := header(make([]byte, headerSize))
hdr.encode(typePing, flagSYN, 0, id)
if err := s.waitForSend(hdr, nil); err != nil {
return 0, err
}
// Wait for a response
start := time.Now()
select {
case <-ch:
case <-time.After(s.config.ConnectionWriteTimeout):
s.pingLock.Lock()
delete(s.pings, id) // Ignore it if a response comes later.
s.pingLock.Unlock()
return 0, ErrTimeout
case <-s.shutdownCh:
return 0, ErrSessionShutdown
}
// Compute the RTT
return time.Now().Sub(start), nil
}
// keepalive is a long running goroutine that periodically does
// a ping to keep the connection alive.
func (s *Session) keepalive() {
for {
select {
case <-time.After(s.config.KeepAliveInterval):
_, err := s.Ping()
if err != nil {
s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
s.exitErr(ErrKeepAliveTimeout)
return
}
case <-s.shutdownCh:
return
}
}
}
// waitForSend waits to send a header, checking for a potential shutdown
func (s *Session) waitForSend(hdr header, body io.Reader) error {
errCh := make(chan error, 1)
return s.waitForSendErr(hdr, body, errCh)
}
// waitForSendErr waits to send a header with optional data, checking for a
// potential shutdown. Since there's the expectation that sends can happen
// in a timely manner, we enforce the connection write timeout here.
func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
timer := time.NewTimer(s.config.ConnectionWriteTimeout)
defer timer.Stop()
ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
select {
case s.sendCh <- ready:
case <-s.shutdownCh:
return ErrSessionShutdown
case <-timer.C:
return ErrConnectionWriteTimeout
}
select {
case err := <-errCh:
return err
case <-s.shutdownCh:
return ErrSessionShutdown
case <-timer.C:
return ErrConnectionWriteTimeout
}
}
// sendNoWait does a send without waiting. Since there's the expectation that
// the send happens right here, we enforce the connection write timeout if we
// can't queue the header to be sent.
func (s *Session) sendNoWait(hdr header) error {
timer := time.NewTimer(s.config.ConnectionWriteTimeout)
defer timer.Stop()
select {
case s.sendCh <- sendReady{Hdr: hdr}:
return nil
case <-s.shutdownCh:
return ErrSessionShutdown
case <-timer.C:
return ErrConnectionWriteTimeout
}
}
// send is a long running goroutine that sends data
func (s *Session) send() {
for {
select {
case ready := <-s.sendCh:
// Send a header if ready
if ready.Hdr != nil {
sent := 0
for sent < len(ready.Hdr) {
n, err := s.conn.Write(ready.Hdr[sent:])
if err != nil {
s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
asyncSendErr(ready.Err, err)
s.exitErr(err)
return
}
sent += n
}
}
// Send data from a body if given
if ready.Body != nil {
_, err := io.Copy(s.conn, ready.Body)
if err != nil {
s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
asyncSendErr(ready.Err, err)
s.exitErr(err)
return
}
}
// No error, successful send
asyncSendErr(ready.Err, nil)
case <-s.shutdownCh:
return
}
}
}
// recv is a long running goroutine that accepts new data
func (s *Session) recv() {
if err := s.recvLoop(); err != nil {
s.exitErr(err)
}
}
// recvLoop continues to receive data until a fatal error is encountered
func (s *Session) recvLoop() error {
defer close(s.recvDoneCh)
hdr := header(make([]byte, headerSize))
var handler func(header) error
for {
// Read the header
if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
}
return err
}
// Verify the version
if hdr.Version() != protoVersion {
s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
return ErrInvalidVersion
}
// Switch on the type
switch hdr.MsgType() {
case typeData:
handler = s.handleStreamMessage
case typeWindowUpdate:
handler = s.handleStreamMessage
case typeGoAway:
handler = s.handleGoAway
case typePing:
handler = s.handlePing
default:
return ErrInvalidMsgType
}
// Invoke the handler
if err := handler(hdr); err != nil {
return err
}
}
}
// handleStreamMessage handles either a data or window update frame
func (s *Session) handleStreamMessage(hdr header) error {
// Check for a new stream creation
id := hdr.StreamID()
flags := hdr.Flags()
if flags&flagSYN == flagSYN {
if err := s.incomingStream(id); err != nil {
return err
}
}
// Get the stream
s.streamLock.Lock()
stream := s.streams[id]
s.streamLock.Unlock()
// If we do not have a stream, likely we sent a RST
if stream == nil {
// Drain any data on the wire
if hdr.MsgType() == typeData && hdr.Length() > 0 {
s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
return nil
}
} else {
s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
}
return nil
}
// Check if this is a window update
if hdr.MsgType() == typeWindowUpdate {
if err := stream.incrSendWindow(hdr, flags); err != nil {
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
}
return err
}
return nil
}
// Read the new data
if err := stream.readData(hdr, flags, s.bufRead); err != nil {
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
}
return err
}
return nil
}
// handlePing is invoked for a typePing frame
func (s *Session) handlePing(hdr header) error {
flags := hdr.Flags()
pingID := hdr.Length()
// Check if this is a query, respond back in a separate context so we
// don't interfere with the receiving thread blocking for the write.
if flags&flagSYN == flagSYN {
go func() {
hdr := header(make([]byte, headerSize))
hdr.encode(typePing, flagACK, 0, pingID)
if err := s.sendNoWait(hdr); err != nil {
s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
}
}()
return nil
}
// Handle a response
s.pingLock.Lock()
ch := s.pings[pingID]
if ch != nil {
delete(s.pings, pingID)
close(ch)
}
s.pingLock.Unlock()
return nil
}
// handleGoAway is invoked for a typeGoAway frame
func (s *Session) handleGoAway(hdr header) error {
code := hdr.Length()
switch code {
case goAwayNormal:
atomic.SwapInt32(&s.remoteGoAway, 1)
case goAwayProtoErr:
s.logger.Printf("[ERR] yamux: received protocol error go away")
return fmt.Errorf("yamux protocol error")
case goAwayInternalErr:
s.logger.Printf("[ERR] yamux: received internal error go away")
return fmt.Errorf("remote yamux internal error")
default:
s.logger.Printf("[ERR] yamux: received unexpected go away")
return fmt.Errorf("unexpected go away received")
}
return nil
}
// incomingStream is used to create a new incoming stream
func (s *Session) incomingStream(id uint32) error {
// Reject immediately if we are doing a go away
if atomic.LoadInt32(&s.localGoAway) == 1 {
hdr := header(make([]byte, headerSize))
hdr.encode(typeWindowUpdate, flagRST, id, 0)
return s.sendNoWait(hdr)
}
// Allocate a new stream
stream := newStream(s, id, streamSYNReceived)
s.streamLock.Lock()
defer s.streamLock.Unlock()
// Check if stream already exists
if _, ok := s.streams[id]; ok {
s.logger.Printf("[ERR] yamux: duplicate stream declared")
if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
}
return ErrDuplicateStream
}
// Register the stream
s.streams[id] = stream
// Check if we've exceeded the backlog
select {
case s.acceptCh <- stream:
return nil
default:
// Backlog exceeded! RST the stream
s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
delete(s.streams, id)
stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
return s.sendNoWait(stream.sendHdr)
}
}
// closeStream is used to close a stream once both sides have
// issued a close. If there was an in-flight SYN and the stream
// was not yet established, then this will give the credit back.
func (s *Session) closeStream(id uint32) {
s.streamLock.Lock()
if _, ok := s.inflight[id]; ok {
select {
case <-s.synCh:
default:
s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
}
}
delete(s.streams, id)
s.streamLock.Unlock()
}
// establishStream is used to mark a stream that was in the
// SYN Sent state as established.
func (s *Session) establishStream(id uint32) {
s.streamLock.Lock()
if _, ok := s.inflight[id]; ok {
delete(s.inflight, id)
} else {
s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
}
select {
case <-s.synCh:
default:
s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
}
s.streamLock.Unlock()
}
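
To complement the client-oriented README example, a hedged sketch of a server-side accept loop over an established session; the echo handler is illustrative, not part of yamux.

```go
package muxserver

import (
	"io"
	"log"

	"github.com/hashicorp/yamux"
)

// serve accepts streams until the session shuts down. Each Stream is a
// net.Conn, so it can be handed to any connection-oriented handler.
func serve(session *yamux.Session) error {
	for {
		stream, err := session.AcceptStream()
		if err != nil {
			return err // session closed; the shutdown error is propagated
		}
		go func() {
			defer stream.Close()
			// Illustrative handler: echo everything back to the peer.
			if _, err := io.Copy(stream, stream); err != nil {
				log.Printf("stream %d: %v", stream.StreamID(), err)
			}
		}()
	}
}
```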

vendor/github.com/hashicorp/yamux/spec.md generated vendored Normal file

@ -0,0 +1,140 @@
# Specification
We use this document to detail the internal specification of Yamux.
It serves both as a guide for implementing Yamux and as a reference
for building alternative interoperable libraries.
# Framing
Yamux uses a streaming connection underneath, but imposes a message
framing so that it can be shared between many logical streams. Each
frame contains a header like:
* Version (8 bits)
* Type (8 bits)
* Flags (16 bits)
* StreamID (32 bits)
* Length (32 bits)
This means that each header has a 12 byte overhead.
All fields are encoded in network order (big endian).
Each field is described below:
## Version Field
The version field is used for future backward compatibility. At the
current time, the field is always set to 0, to indicate the initial
version.
## Type Field
The type field is used to switch the frame message type. The following
message types are supported:
* 0x0 Data - Used to transmit data. May transmit zero length payloads
depending on the flags.
* 0x1 Window Update - Used to update the sender's receive window size.
This is used to implement per-session flow control.
* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat
and do keep-alives over TCP.
* 0x3 Go Away - Used to close a session.
## Flag Field
The flags field is used to provide additional information related
to the message type. The following flags are supported:
* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
window update message. Also sent with a ping to indicate outbound.
* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
or window update message. Also sent with a ping to indicate response.
* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
message or window update.
* 0x8 RST - Reset a stream immediately. May be sent with a data or
window update message.
## StreamID Field
The StreamID field is used to identify the logical stream the frame
is addressing. The client side should use odd ID's, and the server even.
This prevents any collisions. Additionally, the 0 ID is reserved to represent
the session.
Both Ping and Go Away messages should always use the 0 StreamID.
## Length Field
The meaning of the length field depends on the message type:
* Data - provides the length of bytes following the header
* Window update - provides a delta update to the window size
* Ping - Contains an opaque value, echoed back
* Go Away - Contains an error code
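
As a worked example of the layout above, here is a minimal Go sketch that packs and prints a header; the function name is ours, not part of any implementation.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeHeader packs the 12-byte Yamux frame header described above,
// with all fields in network order (big endian).
func encodeHeader(version, msgType uint8, flags uint16, streamID, length uint32) []byte {
	h := make([]byte, 12)
	h[0] = version
	h[1] = msgType
	binary.BigEndian.PutUint16(h[2:4], flags)
	binary.BigEndian.PutUint32(h[4:8], streamID)
	binary.BigEndian.PutUint32(h[8:12], length)
	return h
}

func main() {
	// A Window Update (type 0x1) with the SYN flag (0x1) opening stream 1.
	h := encodeHeader(0, 0x1, 0x1, 1, 0)
	fmt.Printf("% x\n", h) // 00 01 00 01 00 00 00 01 00 00 00 00
}
```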
# Message Flow
There is no explicit connection setup, as Yamux relies on an underlying
transport to be provided. However, there is a distinction between client
and server side of the connection.
## Opening a stream
To open a stream, an initial data or window update frame is sent
with a new StreamID. The SYN flag should be set to signal a new stream.
The receiver must then reply with either a data or window update frame
with the StreamID along with the ACK flag to accept the stream or with
the RST flag to reject the stream.
Because we are relying on the reliable stream underneath, a connection
can begin sending data once the SYN flag is sent. The corresponding
ACK does not need to be received. This is particularly well suited
for an RPC system where a client wants to open a stream and immediately
fire a request without waiting for the RTT of the ACK.
This does introduce the possibility of a connection being rejected
after data has been sent already. This is a slight semantic difference
from TCP, where the connection cannot be refused after it is opened.
Clients should be prepared to handle this by checking for an error
that indicates a RST was received.
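For example (stream ID and sizes illustrative): a client can open stream 3
by sending a Data frame carrying the SYN flag and a 100-byte payload in a
single frame. The server either accepts by replying with a Window Update
frame for StreamID 3 carrying the ACK flag, or rejects with the RST flag,
in which case the 100 bytes already sent are simply discarded.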
## Closing a stream
To close a stream, either side sends a data or window update frame
along with the FIN flag. This does a half-close indicating the sender
will send no further data.
Once both sides have closed the connection, the stream is closed.
Alternatively, if an error occurs, the RST flag can be used to
hard close a stream immediately.
## Flow Control
Yamux initially starts each stream with a 256KB window size.
There is no window size for the session.
To prevent the streams from stalling, window update frames should be
sent regularly. Yamux can be configured to provide a larger limit for
window sizes. Both sides assume the initial 256KB window, but can
immediately send a window update as part of the SYN/ACK indicating a
larger window.
Only bytes sent in Data frames count against the window size, so both
sides should track just those.
## Session termination
When a session is being terminated, the Go Away message should
be sent. The Length should be set to one of the following to
provide an error code:
* 0x0 Normal termination
* 0x1 Protocol error
* 0x2 Internal error

vendor/github.com/hashicorp/yamux/stream.go generated vendored Normal file

@ -0,0 +1,457 @@
package yamux
import (
"bytes"
"io"
"sync"
"sync/atomic"
"time"
)
type streamState int
const (
streamInit streamState = iota
streamSYNSent
streamSYNReceived
streamEstablished
streamLocalClose
streamRemoteClose
streamClosed
streamReset
)
// Stream is used to represent a logical stream
// within a session.
type Stream struct {
recvWindow uint32
sendWindow uint32
id uint32
session *Session
state streamState
stateLock sync.Mutex
recvBuf *bytes.Buffer
recvLock sync.Mutex
controlHdr header
controlErr chan error
controlHdrLock sync.Mutex
sendHdr header
sendErr chan error
sendLock sync.Mutex
recvNotifyCh chan struct{}
sendNotifyCh chan struct{}
readDeadline time.Time
writeDeadline time.Time
}
// newStream is used to construct a new stream within
// a given session for an ID
func newStream(session *Session, id uint32, state streamState) *Stream {
s := &Stream{
id: id,
session: session,
state: state,
controlHdr: header(make([]byte, headerSize)),
controlErr: make(chan error, 1),
sendHdr: header(make([]byte, headerSize)),
sendErr: make(chan error, 1),
recvWindow: initialStreamWindow,
sendWindow: initialStreamWindow,
recvNotifyCh: make(chan struct{}, 1),
sendNotifyCh: make(chan struct{}, 1),
}
return s
}
// Session returns the associated stream session
func (s *Stream) Session() *Session {
return s.session
}
// StreamID returns the ID of this stream
func (s *Stream) StreamID() uint32 {
return s.id
}
// Read is used to read from the stream
func (s *Stream) Read(b []byte) (n int, err error) {
defer asyncNotify(s.recvNotifyCh)
START:
s.stateLock.Lock()
switch s.state {
case streamLocalClose:
fallthrough
case streamRemoteClose:
fallthrough
case streamClosed:
s.recvLock.Lock()
if s.recvBuf == nil || s.recvBuf.Len() == 0 {
s.recvLock.Unlock()
s.stateLock.Unlock()
return 0, io.EOF
}
s.recvLock.Unlock()
case streamReset:
s.stateLock.Unlock()
return 0, ErrConnectionReset
}
s.stateLock.Unlock()
// If there is no data available, block
s.recvLock.Lock()
if s.recvBuf == nil || s.recvBuf.Len() == 0 {
s.recvLock.Unlock()
goto WAIT
}
// Read any bytes
n, _ = s.recvBuf.Read(b)
s.recvLock.Unlock()
// Send a window update potentially
err = s.sendWindowUpdate()
return n, err
WAIT:
var timeout <-chan time.Time
var timer *time.Timer
if !s.readDeadline.IsZero() {
delay := s.readDeadline.Sub(time.Now())
timer = time.NewTimer(delay)
timeout = timer.C
}
select {
case <-s.recvNotifyCh:
if timer != nil {
timer.Stop()
}
goto START
case <-timeout:
return 0, ErrTimeout
}
}
// Write is used to write to the stream
func (s *Stream) Write(b []byte) (n int, err error) {
s.sendLock.Lock()
defer s.sendLock.Unlock()
total := 0
for total < len(b) {
n, err := s.write(b[total:])
total += n
if err != nil {
return total, err
}
}
return total, nil
}
// write is used to write to the stream, may return on
// a short write.
func (s *Stream) write(b []byte) (n int, err error) {
var flags uint16
var max uint32
var body io.Reader
START:
s.stateLock.Lock()
switch s.state {
case streamLocalClose:
fallthrough
case streamClosed:
s.stateLock.Unlock()
return 0, ErrStreamClosed
case streamReset:
s.stateLock.Unlock()
return 0, ErrConnectionReset
}
s.stateLock.Unlock()
// If there is no data available, block
window := atomic.LoadUint32(&s.sendWindow)
if window == 0 {
goto WAIT
}
// Determine the flags if any
flags = s.sendFlags()
// Send up to our send window
max = min(window, uint32(len(b)))
body = bytes.NewReader(b[:max])
// Send the header
s.sendHdr.encode(typeData, flags, s.id, max)
if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
return 0, err
}
// Reduce our send window
atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
// Unlock
return int(max), err
WAIT:
var timeout <-chan time.Time
if !s.writeDeadline.IsZero() {
delay := s.writeDeadline.Sub(time.Now())
timeout = time.After(delay)
}
select {
case <-s.sendNotifyCh:
goto START
case <-timeout:
return 0, ErrTimeout
}
return 0, nil
}
// sendFlags determines any flags that are appropriate
// based on the current stream state
func (s *Stream) sendFlags() uint16 {
s.stateLock.Lock()
defer s.stateLock.Unlock()
var flags uint16
switch s.state {
case streamInit:
flags |= flagSYN
s.state = streamSYNSent
case streamSYNReceived:
flags |= flagACK
s.state = streamEstablished
}
return flags
}
// sendWindowUpdate potentially sends a window update enabling
// further writes to take place. Must be invoked with the lock.
func (s *Stream) sendWindowUpdate() error {
s.controlHdrLock.Lock()
defer s.controlHdrLock.Unlock()
// Determine the delta update
max := s.session.config.MaxStreamWindowSize
delta := max - atomic.LoadUint32(&s.recvWindow)
// Determine the flags if any
flags := s.sendFlags()
// Check if we can omit the update
if delta < (max/2) && flags == 0 {
return nil
}
// Update our window
atomic.AddUint32(&s.recvWindow, delta)
// Send the header
s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
return err
}
return nil
}
// sendClose is used to send a FIN
func (s *Stream) sendClose() error {
s.controlHdrLock.Lock()
defer s.controlHdrLock.Unlock()
flags := s.sendFlags()
flags |= flagFIN
s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
return err
}
return nil
}
// Close is used to close the stream
func (s *Stream) Close() error {
closeStream := false
s.stateLock.Lock()
switch s.state {
// Opened means we need to signal a close
case streamSYNSent:
fallthrough
case streamSYNReceived:
fallthrough
case streamEstablished:
s.state = streamLocalClose
goto SEND_CLOSE
case streamLocalClose:
case streamRemoteClose:
s.state = streamClosed
closeStream = true
goto SEND_CLOSE
case streamClosed:
case streamReset:
default:
panic("unhandled state")
}
s.stateLock.Unlock()
return nil
SEND_CLOSE:
s.stateLock.Unlock()
s.sendClose()
s.notifyWaiting()
if closeStream {
s.session.closeStream(s.id)
}
return nil
}
// forceClose is used for when the session is exiting
func (s *Stream) forceClose() {
s.stateLock.Lock()
s.state = streamClosed
s.stateLock.Unlock()
s.notifyWaiting()
}
// processFlags is used to update the state of the stream
// based on set flags, if any. Lock must be held
func (s *Stream) processFlags(flags uint16) error {
// Close the stream without holding the state lock
closeStream := false
defer func() {
if closeStream {
s.session.closeStream(s.id)
}
}()
s.stateLock.Lock()
defer s.stateLock.Unlock()
if flags&flagACK == flagACK {
if s.state == streamSYNSent {
s.state = streamEstablished
}
s.session.establishStream(s.id)
}
if flags&flagFIN == flagFIN {
switch s.state {
case streamSYNSent:
fallthrough
case streamSYNReceived:
fallthrough
case streamEstablished:
s.state = streamRemoteClose
s.notifyWaiting()
case streamLocalClose:
s.state = streamClosed
closeStream = true
s.notifyWaiting()
default:
s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
return ErrUnexpectedFlag
}
}
if flags&flagRST == flagRST {
s.state = streamReset
closeStream = true
s.notifyWaiting()
}
return nil
}
// notifyWaiting notifies all the waiting channels
func (s *Stream) notifyWaiting() {
asyncNotify(s.recvNotifyCh)
asyncNotify(s.sendNotifyCh)
}
// incrSendWindow updates the size of our send window
func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
if err := s.processFlags(flags); err != nil {
return err
}
// Increase window, unblock a sender
atomic.AddUint32(&s.sendWindow, hdr.Length())
asyncNotify(s.sendNotifyCh)
return nil
}
// readData is used to handle a data frame
func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
if err := s.processFlags(flags); err != nil {
return err
}
// Check that our recv window is not exceeded
length := hdr.Length()
if length == 0 {
return nil
}
if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
return ErrRecvWindowExceeded
}
// Wrap in a limited reader
conn = &io.LimitedReader{R: conn, N: int64(length)}
// Copy into buffer
s.recvLock.Lock()
if s.recvBuf == nil {
// Allocate the receive buffer just-in-time to fit the full data frame.
// This way we can read in the whole packet without further allocations.
s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
}
if _, err := io.Copy(s.recvBuf, conn); err != nil {
s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
s.recvLock.Unlock()
return err
}
// Decrement the receive window
atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
s.recvLock.Unlock()
// Unblock any readers
asyncNotify(s.recvNotifyCh)
return nil
}
// SetDeadline sets the read and write deadlines
func (s *Stream) SetDeadline(t time.Time) error {
if err := s.SetReadDeadline(t); err != nil {
return err
}
if err := s.SetWriteDeadline(t); err != nil {
return err
}
return nil
}
// SetReadDeadline sets the deadline for future Read calls.
func (s *Stream) SetReadDeadline(t time.Time) error {
s.readDeadline = t
return nil
}
// SetWriteDeadline sets the deadline for future Write calls
func (s *Stream) SetWriteDeadline(t time.Time) error {
s.writeDeadline = t
return nil
}
// Shrink is used to compact the amount of buffers utilized
// This is useful when using Yamux in a connection pool to reduce
// the idle memory utilization.
func (s *Stream) Shrink() {
s.recvLock.Lock()
if s.recvBuf != nil && s.recvBuf.Len() == 0 {
s.recvBuf = nil
}
s.recvLock.Unlock()
}

vendor/github.com/hashicorp/yamux/util.go
View File

@ -0,0 +1,28 @@
package yamux
// asyncSendErr is used to try an async send of an error
func asyncSendErr(ch chan error, err error) {
if ch == nil {
return
}
select {
case ch <- err:
default:
}
}
// asyncNotify is used to signal a waiting goroutine
func asyncNotify(ch chan struct{}) {
select {
case ch <- struct{}{}:
default:
}
}
// min computes the minimum of two values
func min(a, b uint32) uint32 {
if a < b {
return a
}
return b
}
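The helpers above are best-effort, non-blocking signals: if nothing can accept the signal right now (no waiting receiver and no free buffer space), it is dropped rather than stalling the session loop. A minimal standalone sketch of the same pattern outside yamux (names here are illustrative only):

package main

import (
	"fmt"
	"time"
)

// notify mirrors asyncNotify: it never blocks, so a signal
// sent while the channel is full is silently dropped.
func notify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}

func main() {
	ready := make(chan struct{}, 1) // one pending signal can be buffered

	go func() {
		<-ready
		fmt.Println("worker woken")
	}()

	notify(ready) // delivered or buffered
	notify(ready) // dropped if the first signal is still pending

	time.Sleep(50 * time.Millisecond) // crude wait so the goroutine can print
}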

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1,52 @@
# go-testing-interface
go-testing-interface is a Go library that exports an interface that
`*testing.T` implements as well as a runtime version you can use in its
place.
The purpose of this library is so that you can export test helpers as a
public API without depending on the "testing" package, since you can't
create a `*testing.T` struct manually. This lets you, for example, use the
public testing APIs to generate mock data at runtime, rather than just at
test time.
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface).
Given a test helper written using `go-testing-interface` like this:
import "github.com/mitchellh/go-testing-interface"
func TestHelper(t testing.T) {
t.Fatal("I failed")
}
You can call the test helper in a real test easily:
import "testing"
func TestThing(t *testing.T) {
TestHelper(t)
}
You can also call the test helper at runtime if needed:
import "github.com/mitchellh/go-testing-interface"
func main() {
TestHelper(&testing.RuntimeT{})
}
## Why?!
**Why would I call a test helper that takes a `*testing.T` at runtime?**
You probably shouldn't. The only use case I've seen (and I've had) for this
is to implement a "dev mode" for a service where the test helpers are used
to populate mock data, create a mock DB, perhaps run service dependencies
in-memory, etc.
Outside of a "dev mode", I've never seen a use case for this and I think
there shouldn't be one since the point of the `testing.T` interface is that
you can fail immediately.
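
A hedged sketch of that "dev mode" pattern (the seedTestData helper and its log lines are illustrative, not part of this library):

package main

import (
	"log"

	testing "github.com/mitchellh/go-testing-interface"
)

// seedTestData is a hypothetical helper: in tests it receives *testing.T,
// in dev mode it receives *testing.RuntimeT.
func seedTestData(t testing.T) {
	// ... create a mock DB, insert fixtures; t.Fatal aborts on failure ...
	t.Log("fixtures loaded")
}

func main() {
	// Dev mode: Fatal inside the helper panics rather than failing a test.
	seedTestData(&testing.RuntimeT{})
	log.Println("dev environment ready")
}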

View File

@ -0,0 +1,84 @@
// +build !go1.9
package testing
import (
"fmt"
"log"
)
// T is the interface that mimics the standard library *testing.T.
//
// In unit tests you can just pass a *testing.T struct. At runtime, outside
// of tests, you can pass in a RuntimeT struct from this package.
type T interface {
Error(args ...interface{})
Errorf(format string, args ...interface{})
Fail()
FailNow()
Failed() bool
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Log(args ...interface{})
Logf(format string, args ...interface{})
Name() string
Skip(args ...interface{})
SkipNow()
Skipf(format string, args ...interface{})
Skipped() bool
}
// RuntimeT implements T and can be instantiated and run at runtime to
// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
// for calls to Fatal. For calls to Error, you'll have to check the errors
// list to determine whether to exit yourself. Name and Skip methods are
// unimplemented noops.
type RuntimeT struct {
failed bool
}
func (t *RuntimeT) Error(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.Fail()
}
func (t *RuntimeT) Errorf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.Fail()
}
func (t *RuntimeT) Fatal(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.FailNow()
}
func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.FailNow()
}
func (t *RuntimeT) Fail() {
t.failed = true
}
func (t *RuntimeT) FailNow() {
panic("testing.T failed, see logs for output (if any)")
}
func (t *RuntimeT) Failed() bool {
return t.failed
}
func (t *RuntimeT) Log(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
}
func (t *RuntimeT) Logf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
}
func (t *RuntimeT) Name() string { return "" }
func (t *RuntimeT) Skip(args ...interface{}) {}
func (t *RuntimeT) SkipNow() {}
func (t *RuntimeT) Skipf(format string, args ...interface{}) {}
func (t *RuntimeT) Skipped() bool { return false }

View File

@ -0,0 +1,80 @@
// +build go1.9
// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition
// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC
// and is set for release shortly. We'll support this on master as the default
// as soon as 1.9 is released.
package testing
import (
"fmt"
"log"
)
// T is the interface that mimics the standard library *testing.T.
//
// In unit tests you can just pass a *testing.T struct. At runtime, outside
// of tests, you can pass in a RuntimeT struct from this package.
type T interface {
Error(args ...interface{})
Errorf(format string, args ...interface{})
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fail()
FailNow()
Failed() bool
Helper()
Log(args ...interface{})
Logf(format string, args ...interface{})
}
// RuntimeT implements T and can be instantiated and run at runtime to
// mimic *testing.T behavior. Unlike *testing.T, this will simply panic
// for calls to Fatal. For calls to Error, you'll have to check the errors
// list to determine whether to exit yourself.
type RuntimeT struct {
failed bool
}
func (t *RuntimeT) Error(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.Fail()
}
func (t *RuntimeT) Errorf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.Fail()
}
func (t *RuntimeT) Fatal(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
t.FailNow()
}
func (t *RuntimeT) Fatalf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
t.FailNow()
}
func (t *RuntimeT) Fail() {
t.failed = true
}
func (t *RuntimeT) FailNow() {
panic("testing.T failed, see logs for output (if any)")
}
func (t *RuntimeT) Failed() bool {
return t.failed
}
func (t *RuntimeT) Helper() {}
func (t *RuntimeT) Log(args ...interface{}) {
log.Println(fmt.Sprintln(args...))
}
func (t *RuntimeT) Logf(format string, args ...interface{}) {
log.Println(fmt.Sprintf(format, args...))
}

View File

@ -7,7 +7,7 @@
// and between processes.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
@ -16,14 +16,14 @@
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
@ -34,399 +34,21 @@
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // DoSomething calls DoSomethingSlow and returns as soon as
// // it returns or ctx.Done is closed.
// func DoSomething(ctx context.Context) (Result, error) {
// c := make(chan Result, 1)
// go func() { c <- DoSomethingSlow(ctx) }()
// select {
// case res := <-c:
// return res, nil
// case <-ctx.Done():
// return nil, ctx.Err()
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key return the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
package context // import "golang.org/x/net/context"
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, &c)
return &c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) cancelCtx {
return cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return &c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
if p, ok := parentCancelCtx(c.Context); ok {
p.mu.Lock()
if p.children != nil {
delete(p.children, c)
}
p.mu.Unlock()
}
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with the deadline
// timer, so code should call cancel as soon as the operations running in this
// Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(removeFromParent, err)
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with the deadline
// timer, so code should call cancel as soon as the operations running in this
// Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}

View File

@ -1,553 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context
import (
"fmt"
"math/rand"
"runtime"
"strings"
"sync"
"testing"
"time"
)
// otherContext is a Context that's not one of the types defined in context.go.
// This lets us test code paths that differ based on the underlying type of the
// Context.
type otherContext struct {
Context
}
func TestBackground(t *testing.T) {
c := Background()
if c == nil {
t.Fatalf("Background returned nil")
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
if got, want := fmt.Sprint(c), "context.Background"; got != want {
t.Errorf("Background().String() = %q want %q", got, want)
}
}
func TestTODO(t *testing.T) {
c := TODO()
if c == nil {
t.Fatalf("TODO returned nil")
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
if got, want := fmt.Sprint(c), "context.TODO"; got != want {
t.Errorf("TODO().String() = %q want %q", got, want)
}
}
func TestWithCancel(t *testing.T) {
c1, cancel := WithCancel(Background())
if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
t.Errorf("c1.String() = %q want %q", got, want)
}
o := otherContext{c1}
c2, _ := WithCancel(o)
contexts := []Context{c1, o, c2}
for i, c := range contexts {
if d := c.Done(); d == nil {
t.Errorf("c[%d].Done() == %v want non-nil", i, d)
}
if e := c.Err(); e != nil {
t.Errorf("c[%d].Err() == %v want nil", i, e)
}
select {
case x := <-c.Done():
t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
default:
}
}
cancel()
time.Sleep(100 * time.Millisecond) // let cancelation propagate
for i, c := range contexts {
select {
case <-c.Done():
default:
t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
}
if e := c.Err(); e != Canceled {
t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
}
}
}
func TestParentFinishesChild(t *testing.T) {
// Context tree:
// parent -> cancelChild
// parent -> valueChild -> timerChild
parent, cancel := WithCancel(Background())
cancelChild, stop := WithCancel(parent)
defer stop()
valueChild := WithValue(parent, "key", "value")
timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
defer stop()
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
case x := <-cancelChild.Done():
t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
case x := <-timerChild.Done():
t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
case x := <-valueChild.Done():
t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
default:
}
// The parent's children should contain the two cancelable children.
pc := parent.(*cancelCtx)
cc := cancelChild.(*cancelCtx)
tc := timerChild.(*timerCtx)
pc.mu.Lock()
if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
t.Errorf("bad linkage: pc.children = %v, want %v and %v",
pc.children, cc, tc)
}
pc.mu.Unlock()
if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
}
if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
}
cancel()
pc.mu.Lock()
if len(pc.children) != 0 {
t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
}
pc.mu.Unlock()
// parent and children should all be finished.
check := func(ctx Context, name string) {
select {
case <-ctx.Done():
default:
t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
}
if e := ctx.Err(); e != Canceled {
t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
}
}
check(parent, "parent")
check(cancelChild, "cancelChild")
check(valueChild, "valueChild")
check(timerChild, "timerChild")
// WithCancel should return a canceled context on a canceled parent.
precanceledChild := WithValue(parent, "key", "value")
select {
case <-precanceledChild.Done():
default:
t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
}
if e := precanceledChild.Err(); e != Canceled {
t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
}
}
func TestChildFinishesFirst(t *testing.T) {
cancelable, stop := WithCancel(Background())
defer stop()
for _, parent := range []Context{Background(), cancelable} {
child, cancel := WithCancel(parent)
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
case x := <-child.Done():
t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
default:
}
cc := child.(*cancelCtx)
pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
}
if pcok {
pc.mu.Lock()
if len(pc.children) != 1 || !pc.children[cc] {
t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
}
pc.mu.Unlock()
}
cancel()
if pcok {
pc.mu.Lock()
if len(pc.children) != 0 {
t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
}
pc.mu.Unlock()
}
// child should be finished.
select {
case <-child.Done():
default:
t.Errorf("<-child.Done() blocked, but shouldn't have")
}
if e := child.Err(); e != Canceled {
t.Errorf("child.Err() == %v want %v", e, Canceled)
}
// parent should not be finished.
select {
case x := <-parent.Done():
t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
default:
}
if e := parent.Err(); e != nil {
t.Errorf("parent.Err() == %v want nil", e)
}
}
}
func testDeadline(c Context, wait time.Duration, t *testing.T) {
select {
case <-time.After(wait):
t.Fatalf("context should have timed out")
case <-c.Done():
}
if e := c.Err(); e != DeadlineExceeded {
t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
}
}
func TestDeadline(t *testing.T) {
c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
t.Errorf("c.String() = %q want prefix %q", got, prefix)
}
testDeadline(c, 200*time.Millisecond, t)
c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
o := otherContext{c}
testDeadline(o, 200*time.Millisecond, t)
c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
o = otherContext{c}
c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
testDeadline(c, 200*time.Millisecond, t)
}
func TestTimeout(t *testing.T) {
c, _ := WithTimeout(Background(), 100*time.Millisecond)
if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
t.Errorf("c.String() = %q want prefix %q", got, prefix)
}
testDeadline(c, 200*time.Millisecond, t)
c, _ = WithTimeout(Background(), 100*time.Millisecond)
o := otherContext{c}
testDeadline(o, 200*time.Millisecond, t)
c, _ = WithTimeout(Background(), 100*time.Millisecond)
o = otherContext{c}
c, _ = WithTimeout(o, 300*time.Millisecond)
testDeadline(c, 200*time.Millisecond, t)
}
func TestCanceledTimeout(t *testing.T) {
c, _ := WithTimeout(Background(), 200*time.Millisecond)
o := otherContext{c}
c, cancel := WithTimeout(o, 400*time.Millisecond)
cancel()
time.Sleep(100 * time.Millisecond) // let cancelation propagate
select {
case <-c.Done():
default:
t.Errorf("<-c.Done() blocked, but shouldn't have")
}
if e := c.Err(); e != Canceled {
t.Errorf("c.Err() == %v want %v", e, Canceled)
}
}
type key1 int
type key2 int
var k1 = key1(1)
var k2 = key2(1) // same int as k1, different type
var k3 = key2(3) // same type as k2, different int
func TestValues(t *testing.T) {
check := func(c Context, nm, v1, v2, v3 string) {
if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
}
if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
}
if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
}
}
c0 := Background()
check(c0, "c0", "", "", "")
c1 := WithValue(Background(), k1, "c1k1")
check(c1, "c1", "c1k1", "", "")
if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
t.Errorf("c.String() = %q want %q", got, want)
}
c2 := WithValue(c1, k2, "c2k2")
check(c2, "c2", "c1k1", "c2k2", "")
c3 := WithValue(c2, k3, "c3k3")
check(c3, "c2", "c1k1", "c2k2", "c3k3")
c4 := WithValue(c3, k1, nil)
check(c4, "c4", "", "c2k2", "c3k3")
o0 := otherContext{Background()}
check(o0, "o0", "", "", "")
o1 := otherContext{WithValue(Background(), k1, "c1k1")}
check(o1, "o1", "c1k1", "", "")
o2 := WithValue(o1, k2, "o2k2")
check(o2, "o2", "c1k1", "o2k2", "")
o3 := otherContext{c4}
check(o3, "o3", "", "c2k2", "c3k3")
o4 := WithValue(o3, k3, nil)
check(o4, "o4", "", "c2k2", "")
}
func TestAllocs(t *testing.T) {
bg := Background()
for _, test := range []struct {
desc string
f func()
limit float64
gccgoLimit float64
}{
{
desc: "Background()",
f: func() { Background() },
limit: 0,
gccgoLimit: 0,
},
{
desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
f: func() {
c := WithValue(bg, k1, nil)
c.Value(k1)
},
limit: 3,
gccgoLimit: 3,
},
{
desc: "WithTimeout(bg, 15*time.Millisecond)",
f: func() {
c, _ := WithTimeout(bg, 15*time.Millisecond)
<-c.Done()
},
limit: 8,
gccgoLimit: 13,
},
{
desc: "WithCancel(bg)",
f: func() {
c, cancel := WithCancel(bg)
cancel()
<-c.Done()
},
limit: 5,
gccgoLimit: 8,
},
{
desc: "WithTimeout(bg, 100*time.Millisecond)",
f: func() {
c, cancel := WithTimeout(bg, 100*time.Millisecond)
cancel()
<-c.Done()
},
limit: 8,
gccgoLimit: 25,
},
} {
limit := test.limit
if runtime.Compiler == "gccgo" {
// gccgo does not yet do escape analysis.
// TODO(iant): Remove this when gccgo does do escape analysis.
limit = test.gccgoLimit
}
if n := testing.AllocsPerRun(100, test.f); n > limit {
t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
}
}
}
func TestSimultaneousCancels(t *testing.T) {
root, cancel := WithCancel(Background())
m := map[Context]CancelFunc{root: cancel}
q := []Context{root}
// Create a tree of contexts.
for len(q) != 0 && len(m) < 100 {
parent := q[0]
q = q[1:]
for i := 0; i < 4; i++ {
ctx, cancel := WithCancel(parent)
m[ctx] = cancel
q = append(q, ctx)
}
}
// Start all the cancels in a random order.
var wg sync.WaitGroup
wg.Add(len(m))
for _, cancel := range m {
go func(cancel CancelFunc) {
cancel()
wg.Done()
}(cancel)
}
// Wait on all the contexts in a random order.
for ctx := range m {
select {
case <-ctx.Done():
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
}
}
// Wait for all the cancel functions to return.
done := make(chan struct{})
go func() {
wg.Wait()
close(done)
}()
select {
case <-done:
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
}
}
func TestInterlockedCancels(t *testing.T) {
parent, cancelParent := WithCancel(Background())
child, cancelChild := WithCancel(parent)
go func() {
parent.Done()
cancelChild()
}()
cancelParent()
select {
case <-child.Done():
case <-time.After(1 * time.Second):
buf := make([]byte, 10<<10)
n := runtime.Stack(buf, true)
t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
}
}
func TestLayersCancel(t *testing.T) {
testLayers(t, time.Now().UnixNano(), false)
}
func TestLayersTimeout(t *testing.T) {
testLayers(t, time.Now().UnixNano(), true)
}
func testLayers(t *testing.T, seed int64, testTimeout bool) {
rand.Seed(seed)
errorf := func(format string, a ...interface{}) {
t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
}
const (
timeout = 200 * time.Millisecond
minLayers = 30
)
type value int
var (
vals []*value
cancels []CancelFunc
numTimers int
ctx = Background()
)
for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
switch rand.Intn(3) {
case 0:
v := new(value)
ctx = WithValue(ctx, v, v)
vals = append(vals, v)
case 1:
var cancel CancelFunc
ctx, cancel = WithCancel(ctx)
cancels = append(cancels, cancel)
case 2:
var cancel CancelFunc
ctx, cancel = WithTimeout(ctx, timeout)
cancels = append(cancels, cancel)
numTimers++
}
}
checkValues := func(when string) {
for _, key := range vals {
if val := ctx.Value(key).(*value); key != val {
errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
}
}
}
select {
case <-ctx.Done():
errorf("ctx should not be canceled yet")
default:
}
if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
}
t.Log(ctx)
checkValues("before cancel")
if testTimeout {
select {
case <-ctx.Done():
case <-time.After(timeout + timeout/10):
errorf("ctx should have timed out")
}
checkValues("after timeout")
} else {
cancel := cancels[rand.Intn(len(cancels))]
cancel()
select {
case <-ctx.Done():
default:
errorf("ctx should be canceled")
}
checkValues("after cancel")
}
}

vendor/golang.org/x/net/context/go17.go
View File

@ -0,0 +1,72 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, CancelFunc(f)
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, CancelFunc(f)
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}
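
Since these Go 1.7+ wrappers delegate to the standard library (and, on Go 1.9+, Context is a type alias for the standard type, as shown in go19.go below), values and cancellation flow transparently between the two packages. A small interop sketch, assuming Go 1.7 or newer:

package main

import (
	stdctx "context"
	"fmt"

	"golang.org/x/net/context"
)

type ctxKey int

func main() {
	// A value set through x/net/context...
	ctx := context.WithValue(context.Background(), ctxKey(0), "hello")

	// ...is visible through the standard library interface, because the
	// method sets match (and the types are identical on Go 1.9+).
	var sc stdctx.Context = ctx
	fmt.Println(sc.Value(ctxKey(0))) // hello
}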

vendor/golang.org/x/net/context/go19.go
View File

@ -0,0 +1,20 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc

vendor/golang.org/x/net/context/pre_go17.go
View File

@ -0,0 +1,300 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}

vendor/golang.org/x/net/context/pre_go19.go
View File

@ -0,0 +1,109 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key return the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

View File

@ -1,26 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context_test
import (
"fmt"
"time"
"golang.org/x/net/context"
)
func ExampleWithTimeout() {
// Pass a context with a timeout to tell a blocking function that it
// should abandon its work after the timeout elapses.
ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
select {
case <-time.After(200 * time.Millisecond):
fmt.Println("overslept")
case <-ctx.Done():
fmt.Println(ctx.Err()) // prints "context deadline exceeded"
}
// Output:
// context deadline exceeded
}

vendor/golang.org/x/net/http2/Dockerfile
View File

@ -0,0 +1,51 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image are found, the
# Go tests use this curl binary for integration tests.
#
FROM ubuntu:trusty
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y git-core build-essential wget
RUN apt-get install -y --no-install-recommends \
autotools-dev libtool pkg-config zlib1g-dev \
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
automake autoconf
# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
autoconf automake autotools-dev \
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
cython python3.4-dev python-setuptools
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install
WORKDIR /root
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig
CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]

vendor/golang.org/x/net/http2/Makefile
View File

@ -0,0 +1,3 @@
curlimage:
docker build -t gohttp2/curl .
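# Once built, the image can be exercised directly; arguments are passed
# straight to the curl ENTRYPOINT (demo server URL taken from the README), e.g.:
#   docker run --rm gohttp2/curl -v https://http2.golang.org/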

vendor/golang.org/x/net/http2/README
View File

@ -0,0 +1,20 @@
This is a work-in-progress HTTP/2 implementation for Go.
It will eventually live in the Go standard library and won't require
any changes to your code to use. It will just be automatic.
Status:
* The server support is pretty good. A few things are missing
but are being worked on.
* The client work has just started but, since it shares a lot of code,
is coming along much quicker.
Docs are at https://godoc.org/golang.org/x/net/http2
Demo test server at https://http2.golang.org/
Help & bug reports welcome!
Contributing: https://golang.org/doc/contribute.html
Bugs: https://golang.org/issue/new?title=x/net/http2:+

vendor/golang.org/x/net/http2/ciphers.go
View File

@ -0,0 +1,641 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
// A list of the possible cipher suite ids. Taken from
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
// Reserved uint16 = 0x001C-1D
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
// Reserved uint16 = 0x0047-4F
// Reserved uint16 = 0x0050-58
// Reserved uint16 = 0x0059-5C
// Unassigned uint16 = 0x005D-5F
// Reserved uint16 = 0x0060-66
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
// Unassigned uint16 = 0x006E-83
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
// Unassigned uint16 = 0x00C6-FE
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
// Unassigned uint16 = 0x01-55,*
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
// Unassigned uint16 = 0x5601 - 0xC000
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
// Unassigned uint16 = 0xC0B0-FF
// Unassigned uint16 = 0xC1-CB,*
// Unassigned uint16 = 0xCC00-A7
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
switch cipher {
case cipher_TLS_NULL_WITH_NULL_NULL,
cipher_TLS_RSA_WITH_NULL_MD5,
cipher_TLS_RSA_WITH_NULL_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_RSA_WITH_RC4_128_MD5,
cipher_TLS_RSA_WITH_RC4_128_SHA,
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_RSA_WITH_DES_CBC_SHA,
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_KRB5_WITH_RC4_128_SHA,
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
cipher_TLS_KRB5_WITH_RC4_128_MD5,
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
cipher_TLS_PSK_WITH_NULL_SHA,
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_NULL_SHA256,
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
cipher_TLS_PSK_WITH_RC4_128_SHA,
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_PSK_WITH_NULL_SHA256,
cipher_TLS_PSK_WITH_NULL_SHA384,
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
cipher_TLS_RSA_WITH_AES_128_CCM,
cipher_TLS_RSA_WITH_AES_256_CCM,
cipher_TLS_RSA_WITH_AES_128_CCM_8,
cipher_TLS_RSA_WITH_AES_256_CCM_8,
cipher_TLS_PSK_WITH_AES_128_CCM,
cipher_TLS_PSK_WITH_AES_256_CCM,
cipher_TLS_PSK_WITH_AES_128_CCM_8,
cipher_TLS_PSK_WITH_AES_256_CCM_8:
return true
default:
return false
}
}
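
isBadCipher is unexported, so a server that wants to avoid INADEQUATE_SECURITY errors typically whitelists acceptable suites in its own tls.Config rather than calling into this package. A minimal sketch under that assumption; the port and the cert.pem/key.pem paths are placeholders, not part of the vendored code:

package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	srv := &http.Server{
		Addr:    ":8443",
		Handler: http.DefaultServeMux,
		TLSConfig: &tls.Config{
			// Offer only ephemeral-key AEAD suites, so no client can
			// negotiate a cipher rejected by RFC 7540 Appendix A.
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			},
			MinVersion: tls.VersionTLS12,
		},
	}
	// cert.pem and key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
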

vendor/golang.org/x/net/http2/client_conn_pool.go generated vendored Normal file
@@ -0,0 +1,256 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Transport code's client connection pooling.
package http2
import (
"crypto/tls"
"net/http"
"sync"
)
// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
MarkDead(*ClientConn)
}
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
// implementations which can close their idle connections.
type clientConnPoolIdleCloser interface {
ClientConnPool
closeIdleConnections()
}
var (
_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
_ clientConnPoolIdleCloser = noDialClientConnPool{}
)
// TODO: use singleflight for dialing and addConnCalls?
type clientConnPool struct {
t *Transport
mu sync.Mutex // TODO: maybe switch to RWMutex
// TODO: add support for sharing conns based on cert names
// (e.g. share conn for googleapis.com and appspot.com)
conns map[string][]*ClientConn // key is host:port
dialing map[string]*dialCall // currently in-flight dials
keys map[*ClientConn][]string
addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
}
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
return p.getClientConn(req, addr, dialOnMiss)
}
const (
dialOnMiss = true
noDialOnMiss = false
)
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
if err != nil {
return nil, err
}
return cc, nil
}
p.mu.Lock()
for _, cc := range p.conns[addr] {
if cc.CanTakeNewRequest() {
p.mu.Unlock()
return cc, nil
}
}
if !dialOnMiss {
p.mu.Unlock()
return nil, ErrNoCachedConn
}
call := p.getStartDialLocked(addr)
p.mu.Unlock()
<-call.done
return call.res, call.err
}
// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
p *clientConnPool
done chan struct{} // closed when done
res *ClientConn // valid after done is closed
err error // valid after done is closed
}
// requires p.mu is held.
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
if call, ok := p.dialing[addr]; ok {
// A dial is already in-flight. Don't start another.
return call
}
call := &dialCall{p: p, done: make(chan struct{})}
if p.dialing == nil {
p.dialing = make(map[string]*dialCall)
}
p.dialing[addr] = call
go call.dial(addr)
return call
}
// run in its own goroutine.
func (c *dialCall) dial(addr string) {
const singleUse = false // shared conn
c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
close(c.done)
c.p.mu.Lock()
delete(c.p.dialing, addr)
if c.err == nil {
c.p.addConnLocked(addr, c.res)
}
c.p.mu.Unlock()
}
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The named return value used reports whether c was used.
// c is never closed.
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
p.mu.Lock()
for _, cc := range p.conns[key] {
if cc.CanTakeNewRequest() {
p.mu.Unlock()
return false, nil
}
}
call, dup := p.addConnCalls[key]
if !dup {
if p.addConnCalls == nil {
p.addConnCalls = make(map[string]*addConnCall)
}
call = &addConnCall{
p: p,
done: make(chan struct{}),
}
p.addConnCalls[key] = call
go call.run(t, key, c)
}
p.mu.Unlock()
<-call.done
if call.err != nil {
return false, call.err
}
return !dup, nil
}
type addConnCall struct {
p *clientConnPool
done chan struct{} // closed when done
err error
}
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
cc, err := t.NewClientConn(tc)
p := c.p
p.mu.Lock()
if err != nil {
c.err = err
} else {
p.addConnLocked(key, cc)
}
delete(p.addConnCalls, key)
p.mu.Unlock()
close(c.done)
}
func (p *clientConnPool) addConn(key string, cc *ClientConn) {
p.mu.Lock()
p.addConnLocked(key, cc)
p.mu.Unlock()
}
// p.mu must be held
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
for _, v := range p.conns[key] {
if v == cc {
return
}
}
if p.conns == nil {
p.conns = make(map[string][]*ClientConn)
}
if p.keys == nil {
p.keys = make(map[*ClientConn][]string)
}
p.conns[key] = append(p.conns[key], cc)
p.keys[cc] = append(p.keys[cc], key)
}
func (p *clientConnPool) MarkDead(cc *ClientConn) {
p.mu.Lock()
defer p.mu.Unlock()
for _, key := range p.keys[cc] {
vv, ok := p.conns[key]
if !ok {
continue
}
newList := filterOutClientConn(vv, cc)
if len(newList) > 0 {
p.conns[key] = newList
} else {
delete(p.conns, key)
}
}
delete(p.keys, cc)
}
func (p *clientConnPool) closeIdleConnections() {
p.mu.Lock()
defer p.mu.Unlock()
// TODO: don't close a cc if it was just added to the pool
// milliseconds ago and has never been used. There's currently
// a small race window with the HTTP/1 Transport's integration
// where it can add an idle conn just before using it, and
// somebody else can concurrently call CloseIdleConns and
// break some caller's RoundTrip.
for _, vv := range p.conns {
for _, cc := range vv {
cc.closeIfIdle()
}
}
}
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
out := in[:0]
for _, v := range in {
if v != exclude {
out = append(out, v)
}
}
// If we filtered it out, zero out the last item to prevent
// the GC from seeing it.
if len(in) != len(out) {
in[len(in)-1] = nil
}
return out
}
// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
return p.getClientConn(req, addr, noDialOnMiss)
}
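
The dialing map above is a hand-rolled single-flight: concurrent dials to the same addr share one in-flight call, exactly as the TODO about singleflight hints. The same pattern in isolation, as a minimal standalone sketch (the group and call names here are illustrative, not from this package):

package main

import (
	"fmt"
	"sync"
)

// call tracks one in-flight operation; done is closed once res/err are valid.
type call struct {
	done chan struct{}
	res  string
	err  error
}

type group struct {
	mu       sync.Mutex
	inFlight map[string]*call
}

// Do coalesces concurrent invocations with the same key onto one fn call.
func (g *group) Do(key string, fn func() (string, error)) (string, error) {
	g.mu.Lock()
	if c, ok := g.inFlight[key]; ok {
		g.mu.Unlock()
		<-c.done // someone else is already doing the work; wait for it
		return c.res, c.err
	}
	c := &call{done: make(chan struct{})}
	if g.inFlight == nil {
		g.inFlight = make(map[string]*call)
	}
	g.inFlight[key] = c
	g.mu.Unlock()

	c.res, c.err = fn()
	close(c.done)

	g.mu.Lock()
	delete(g.inFlight, key)
	g.mu.Unlock()
	return c.res, c.err
}

func main() {
	var g group
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			res, _ := g.Do("example.com:443", func() (string, error) {
				return "conn-1", nil // stands in for an expensive dial
			})
			fmt.Println(res)
		}()
	}
	wg.Wait()
}
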

vendor/golang.org/x/net/http2/configure_transport.go generated vendored Normal file
@@ -0,0 +1,80 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.6
package http2
import (
"crypto/tls"
"fmt"
"net/http"
)
func configureTransport(t1 *http.Transport) (*Transport, error) {
connPool := new(clientConnPool)
t2 := &Transport{
ConnPool: noDialClientConnPool{connPool},
t1: t1,
}
connPool.t = t2
if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
return nil, err
}
if t1.TLSClientConfig == nil {
t1.TLSClientConfig = new(tls.Config)
}
if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
}
if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
}
upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
addr := authorityAddr("https", authority)
if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
go c.Close()
return erringRoundTripper{err}
} else if !used {
// Turns out we don't need this c.
// For example, two goroutines made requests to the same host
// at the same time, both kicking off TCP dials. (since protocol
// was unknown)
go c.Close()
}
return t2
}
if m := t1.TLSNextProto; len(m) == 0 {
t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
"h2": upgradeFn,
}
} else {
m["h2"] = upgradeFn
}
return t2, nil
}
// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("%v", e)
}
}()
t.RegisterProtocol("https", rt)
return nil
}
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already a cached connection to the host.
type noDialH2RoundTripper struct{ t *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
res, err := rt.t.RoundTrip(req)
if err == ErrNoCachedConn {
return nil, http.ErrSkipAltProtocol
}
return res, err
}
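
configureTransport is reached through the package's exported http2.ConfigureTransport helper. A brief usage sketch; example.com is a placeholder host:

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t := &http.Transport{}
	// Adds an "h2" TLSNextProto entry so HTTPS requests made through t
	// are upgraded to HTTP/2 when the server negotiates it via ALPN.
	if err := http2.ConfigureTransport(t); err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: t}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when the server speaks h2
}
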

vendor/golang.org/x/net/http2/databuffer.go generated vendored Normal file
@@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
"sync"
)
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
dataChunkSizeClasses = []int{
1 << 10,
2 << 10,
4 << 10,
8 << 10,
16 << 10,
}
dataChunkPools = [...]sync.Pool{
{New: func() interface{} { return make([]byte, 1<<10) }},
{New: func() interface{} { return make([]byte, 2<<10) }},
{New: func() interface{} { return make([]byte, 4<<10) }},
{New: func() interface{} { return make([]byte, 8<<10) }},
{New: func() interface{} { return make([]byte, 16<<10) }},
}
)
func getDataBufferChunk(size int64) []byte {
i := 0
for ; i < len(dataChunkSizeClasses)-1; i++ {
if size <= int64(dataChunkSizeClasses[i]) {
break
}
}
return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
for i, n := range dataChunkSizeClasses {
if len(p) == n {
dataChunkPools[i].Put(p)
return
}
}
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
chunks [][]byte
r int // next byte to read is chunks[0][r]
w int // next byte to write is chunks[len(chunks)-1][w]
size int // total buffered bytes
expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
var errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
if b.size == 0 {
return 0, errReadEmpty
}
var ntotal int
for len(p) > 0 && b.size > 0 {
readFrom := b.bytesFromFirstChunk()
n := copy(p, readFrom)
p = p[n:]
ntotal += n
b.r += n
b.size -= n
// If the first chunk has been consumed, advance to the next chunk.
if b.r == len(b.chunks[0]) {
putDataBufferChunk(b.chunks[0])
end := len(b.chunks) - 1
copy(b.chunks[:end], b.chunks[1:])
b.chunks[end] = nil
b.chunks = b.chunks[:end]
b.r = 0
}
}
return ntotal, nil
}
func (b *dataBuffer) bytesFromFirstChunk() []byte {
if len(b.chunks) == 1 {
return b.chunks[0][b.r:b.w]
}
return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
return b.size
}
// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
ntotal := len(p)
for len(p) > 0 {
// If the last chunk has no free space, allocate a new chunk. Try to allocate
// enough to fully copy p plus any additional bytes we expect to
// receive. However, this may allocate less than len(p).
want := int64(len(p))
if b.expected > want {
want = b.expected
}
chunk := b.lastChunkOrAlloc(want)
n := copy(chunk[b.w:], p)
p = p[n:]
b.w += n
b.size += n
b.expected -= int64(n)
}
return ntotal, nil
}
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
if len(b.chunks) != 0 {
last := b.chunks[len(b.chunks)-1]
if b.w < len(last) {
return last
}
}
chunk := getDataBufferChunk(want)
b.chunks = append(b.chunks, chunk)
b.w = 0
return chunk
}
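
getDataBufferChunk rounds a requested size up to the smallest size class that fits, falling back to the largest class for bigger requests. The same lookup in isolation, as a standalone sketch:

package main

import "fmt"

var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

// classFor returns the index of the smallest size class that fits size,
// or the largest class when size exceeds them all.
func classFor(size int64) int {
	i := 0
	for ; i < len(sizeClasses)-1; i++ {
		if size <= int64(sizeClasses[i]) {
			break
		}
	}
	return i
}

func main() {
	for _, n := range []int64{100, 1024, 1025, 9000, 1 << 20} {
		fmt.Printf("size %7d -> chunk %6d bytes\n", n, sizeClasses[classFor(n)])
	}
}
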

vendor/golang.org/x/net/http2/errors.go generated vendored Normal file
@@ -0,0 +1,133 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"errors"
"fmt"
)
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32
const (
ErrCodeNo ErrCode = 0x0
ErrCodeProtocol ErrCode = 0x1
ErrCodeInternal ErrCode = 0x2
ErrCodeFlowControl ErrCode = 0x3
ErrCodeSettingsTimeout ErrCode = 0x4
ErrCodeStreamClosed ErrCode = 0x5
ErrCodeFrameSize ErrCode = 0x6
ErrCodeRefusedStream ErrCode = 0x7
ErrCodeCancel ErrCode = 0x8
ErrCodeCompression ErrCode = 0x9
ErrCodeConnect ErrCode = 0xa
ErrCodeEnhanceYourCalm ErrCode = 0xb
ErrCodeInadequateSecurity ErrCode = 0xc
ErrCodeHTTP11Required ErrCode = 0xd
)
var errCodeName = map[ErrCode]string{
ErrCodeNo: "NO_ERROR",
ErrCodeProtocol: "PROTOCOL_ERROR",
ErrCodeInternal: "INTERNAL_ERROR",
ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
ErrCodeStreamClosed: "STREAM_CLOSED",
ErrCodeFrameSize: "FRAME_SIZE_ERROR",
ErrCodeRefusedStream: "REFUSED_STREAM",
ErrCodeCancel: "CANCEL",
ErrCodeCompression: "COMPRESSION_ERROR",
ErrCodeConnect: "CONNECT_ERROR",
ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
}
func (e ErrCode) String() string {
if s, ok := errCodeName[e]; ok {
return s
}
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
// StreamError is an error that only affects one stream within an
// HTTP/2 connection.
type StreamError struct {
StreamID uint32
Code ErrCode
Cause error // optional additional detail
}
func streamError(id uint32, code ErrCode) StreamError {
return StreamError{StreamID: id, Code: code}
}
func (e StreamError) Error() string {
if e.Cause != nil {
return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
}
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}
// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
Code ErrCode // the ConnectionError error code
Reason string // additional reason
}
func (e connError) Error() string {
return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}
type pseudoHeaderError string
func (e pseudoHeaderError) Error() string {
return fmt.Sprintf("invalid pseudo-header %q", string(e))
}
type duplicatePseudoHeaderError string
func (e duplicatePseudoHeaderError) Error() string {
return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}
type headerFieldNameError string
func (e headerFieldNameError) Error() string {
return fmt.Sprintf("invalid header field name %q", string(e))
}
type headerFieldValueError string
func (e headerFieldValueError) Error() string {
return fmt.Sprintf("invalid header field value %q", string(e))
}
var (
errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
errPseudoAfterRegular = errors.New("pseudo header field after regular")
)
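
StreamError and ConnectionError are exported, so callers can distinguish a stream reset from a full connection teardown with a type switch. A minimal sketch; classify is an illustrative helper, not part of the package:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

// classify separates stream-level from connection-level failures,
// mirroring how the package itself reacts to each kind.
func classify(err error) string {
	switch e := err.(type) {
	case http2.StreamError:
		return fmt.Sprintf("reset stream %d: %v", e.StreamID, e.Code)
	case http2.ConnectionError:
		return fmt.Sprintf("tear down connection: %v", http2.ErrCode(e))
	default:
		return fmt.Sprintf("other error: %v", err)
	}
}

func main() {
	fmt.Println(classify(http2.StreamError{StreamID: 3, Code: http2.ErrCodeProtocol}))
	fmt.Println(classify(http2.ConnectionError(http2.ErrCodeFlowControl)))
}
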

vendor/golang.org/x/net/http2/flow.go generated vendored Normal file
@@ -0,0 +1,50 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Flow control
package http2
// flow is the flow control window's size.
type flow struct {
// n is the number of DATA bytes we're allowed to send.
// A flow is kept both on a conn and a per-stream.
n int32
// conn points to the shared connection-level flow that is
// shared by all streams on that conn. It is nil for the flow
// that's on the conn directly.
conn *flow
}
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
func (f *flow) available() int32 {
n := f.n
if f.conn != nil && f.conn.n < n {
n = f.conn.n
}
return n
}
func (f *flow) take(n int32) {
if n > f.available() {
panic("internal error: took too much")
}
f.n -= n
if f.conn != nil {
f.conn.n -= n
}
}
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
remain := (1<<31 - 1) - f.n
if n > remain {
return false
}
f.n += n
return true
}
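
Each stream's effective window is the minimum of its own budget and the shared connection budget, and take debits both. A standalone sketch of that interaction; the window type is illustrative, since flow itself is unexported:

package main

import "fmt"

// window mirrors http2's flow: a per-stream budget optionally capped by
// a shared connection-level budget.
type window struct {
	n    int32
	conn *window // nil on the connection-level window itself
}

func (w *window) available() int32 {
	n := w.n
	if w.conn != nil && w.conn.n < n {
		n = w.conn.n
	}
	return n
}

func (w *window) take(n int32) {
	w.n -= n
	if w.conn != nil {
		w.conn.n -= n
	}
}

func main() {
	conn := &window{n: 100}
	s1 := &window{n: 80, conn: conn}
	s2 := &window{n: 80, conn: conn}

	s1.take(70) // consumes the shared connection budget too
	fmt.Println(s1.available(), s2.available()) // 10 30: conn window caps s2
}
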

vendor/golang.org/x/net/http2/frame.go generated vendored Normal file
File diff suppressed because it is too large

vendor/golang.org/x/net/http2/go16.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.6
package http2
import (
"net/http"
"time"
)
func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
return t1.ExpectContinueTimeout
}

vendor/golang.org/x/net/http2/go17.go generated vendored Normal file
@@ -0,0 +1,106 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7
package http2
import (
"context"
"net"
"net/http"
"net/http/httptrace"
"time"
)
type contextContext interface {
context.Context
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
ctx, cancel = context.WithCancel(context.Background())
ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
if hs := opts.baseConfig(); hs != nil {
ctx = context.WithValue(ctx, http.ServerContextKey, hs)
}
return
}
func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
return context.WithCancel(ctx)
}
func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
return req.WithContext(ctx)
}
type clientTrace httptrace.ClientTrace
func reqContext(r *http.Request) context.Context { return r.Context() }
func (t *Transport) idleConnTimeout() time.Duration {
if t.t1 != nil {
return t.t1.IdleConnTimeout
}
return 0
}
func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
func traceGotConn(req *http.Request, cc *ClientConn) {
trace := httptrace.ContextClientTrace(req.Context())
if trace == nil || trace.GotConn == nil {
return
}
ci := httptrace.GotConnInfo{Conn: cc.tconn}
cc.mu.Lock()
ci.Reused = cc.nextStreamID > 1
ci.WasIdle = len(cc.streams) == 0 && ci.Reused
if ci.WasIdle && !cc.lastActive.IsZero() {
ci.IdleTime = time.Now().Sub(cc.lastActive)
}
cc.mu.Unlock()
trace.GotConn(ci)
}
func traceWroteHeaders(trace *clientTrace) {
if trace != nil && trace.WroteHeaders != nil {
trace.WroteHeaders()
}
}
func traceGot100Continue(trace *clientTrace) {
if trace != nil && trace.Got100Continue != nil {
trace.Got100Continue()
}
}
func traceWait100Continue(trace *clientTrace) {
if trace != nil && trace.Wait100Continue != nil {
trace.Wait100Continue()
}
}
func traceWroteRequest(trace *clientTrace, err error) {
if trace != nil && trace.WroteRequest != nil {
trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
}
}
func traceFirstResponseByte(trace *clientTrace) {
if trace != nil && trace.GotFirstResponseByte != nil {
trace.GotFirstResponseByte()
}
}
func requestTrace(req *http.Request) *clientTrace {
trace := httptrace.ContextClientTrace(req.Context())
return (*clientTrace)(trace)
}
// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
return cc.ping(ctx)
}
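
traceGotConn feeds the standard net/http/httptrace hooks, so callers can observe HTTP/2 connection reuse without touching http2 internals. A usage sketch; example.com is a placeholder:

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptrace"
)

func main() {
	trace := &httptrace.ClientTrace{
		// Populated by traceGotConn above for HTTP/2 connections: Reused
		// and WasIdle reveal whether a pooled ClientConn served the request.
		GotConn: func(ci httptrace.GotConnInfo) {
			fmt.Printf("reused=%v wasIdle=%v idle=%v\n", ci.Reused, ci.WasIdle, ci.IdleTime)
		},
	}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
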

vendor/golang.org/x/net/http2/go17_not18.go generated vendored Normal file
@@ -0,0 +1,36 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,!go1.8
package http2
import "crypto/tls"
// temporary copy of Go 1.7's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
}
}

vendor/golang.org/x/net/http2/go18.go generated vendored Normal file
@@ -0,0 +1,56 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.8
package http2
import (
"crypto/tls"
"io"
"net/http"
)
func cloneTLSConfig(c *tls.Config) *tls.Config {
c2 := c.Clone()
c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
return c2
}
var _ http.Pusher = (*responseWriter)(nil)
// Push implements http.Pusher.
func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
internalOpts := pushOptions{}
if opts != nil {
internalOpts.Method = opts.Method
internalOpts.Header = opts.Header
}
return w.push(target, internalOpts)
}
func configureServer18(h1 *http.Server, h2 *Server) error {
if h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
} else {
h2.IdleTimeout = h1.ReadTimeout
}
}
return nil
}
func shouldLogPanic(panicValue interface{}) bool {
return panicValue != nil && panicValue != http.ErrAbortHandler
}
func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
return req.GetBody
}
func reqBodyIsNoBody(body io.ReadCloser) bool {
return body == http.NoBody
}
func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only
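
The Push method above backs the exported http.Pusher interface added in Go 1.8's net/http. A handler-side usage sketch; the certificate paths and the CSS payload are placeholders:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/style.css", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/css")
		fmt.Fprint(w, "body{margin:0}")
	})
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// On an HTTP/2 connection the ResponseWriter implements
		// http.Pusher; the assertion simply fails over HTTP/1.1.
		if p, ok := w.(http.Pusher); ok {
			if err := p.Push("/style.css", nil); err != nil {
				log.Printf("push failed: %v", err)
			}
		}
		fmt.Fprintln(w, `<link rel="stylesheet" href="/style.css">`)
	})
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}
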

vendor/golang.org/x/net/http2/go19.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package http2
import (
"net/http"
)
func configureServer19(s *http.Server, conf *Server) error {
s.RegisterOnShutdown(conf.state.startGracefulShutdown)
return nil
}
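
configureServer19 hooks the http2 server's graceful shutdown into http.Server.RegisterOnShutdown, so a plain srv.Shutdown also sends GOAWAY and drains HTTP/2 streams. A sketch of the caller side, assuming Go 1.8+ net/http:

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080", Handler: http.DefaultServeMux}

	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt)
		<-sig
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		// Shutdown runs the hooks added via RegisterOnShutdown, which is
		// how the http2 server learns to begin its graceful shutdown.
		if err := srv.Shutdown(ctx); err != nil {
			log.Printf("shutdown: %v", err)
		}
	}()

	if err := srv.ListenAndServe(); err != http.ErrServerClosed {
		log.Fatal(err)
	}
}
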

vendor/golang.org/x/net/http2/gotrack.go generated vendored Normal file
@@ -0,0 +1,170 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Defensive debug-only utility to track that functions run on the
// goroutine that they're supposed to.
package http2
import (
"bytes"
"errors"
"fmt"
"os"
"runtime"
"strconv"
"sync"
)
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
if !DebugGoroutines {
return 0
}
return goroutineLock(curGoroutineID())
}
func (g goroutineLock) check() {
if !DebugGoroutines {
return
}
if curGoroutineID() != uint64(g) {
panic("running on the wrong goroutine")
}
}
func (g goroutineLock) checkNotOn() {
if !DebugGoroutines {
return
}
if curGoroutineID() == uint64(g) {
panic("running on the wrong goroutine")
}
}
var goroutineSpace = []byte("goroutine ")
func curGoroutineID() uint64 {
bp := littleBuf.Get().(*[]byte)
defer littleBuf.Put(bp)
b := *bp
b = b[:runtime.Stack(b, false)]
// Parse the 4707 out of "goroutine 4707 ["
b = bytes.TrimPrefix(b, goroutineSpace)
i := bytes.IndexByte(b, ' ')
if i < 0 {
panic(fmt.Sprintf("No space found in %q", b))
}
b = b[:i]
n, err := parseUintBytes(b, 10, 64)
if err != nil {
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
}
return n
}
var littleBuf = sync.Pool{
New: func() interface{} {
buf := make([]byte, 64)
return &buf
},
}
// parseUintBytes is like strconv.ParseUint, but using a []byte.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
var cutoff, maxVal uint64
if bitSize == 0 {
bitSize = int(strconv.IntSize)
}
s0 := s
switch {
case len(s) < 1:
err = strconv.ErrSyntax
goto Error
case 2 <= base && base <= 36:
// valid base; nothing to do
case base == 0:
// Look for octal, hex prefix.
switch {
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
base = 16
s = s[2:]
if len(s) < 1 {
err = strconv.ErrSyntax
goto Error
}
case s[0] == '0':
base = 8
default:
base = 10
}
default:
err = errors.New("invalid base " + strconv.Itoa(base))
goto Error
}
n = 0
cutoff = cutoff64(base)
maxVal = 1<<uint(bitSize) - 1
for i := 0; i < len(s); i++ {
var v byte
d := s[i]
switch {
case '0' <= d && d <= '9':
v = d - '0'
case 'a' <= d && d <= 'z':
v = d - 'a' + 10
case 'A' <= d && d <= 'Z':
v = d - 'A' + 10
default:
n = 0
err = strconv.ErrSyntax
goto Error
}
if int(v) >= base {
n = 0
err = strconv.ErrSyntax
goto Error
}
if n >= cutoff {
// n*base overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
}
n *= uint64(base)
n1 := n + uint64(v)
if n1 < n || n1 > maxVal {
// n+v overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
}
n = n1
}
return n, nil
Error:
return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}
// Return the first number n such that n*base >= 1<<64.
func cutoff64(base int) uint64 {
if base < 2 {
return 0
}
return (1<<64-1)/uint64(base) + 1
}
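
curGoroutineID works by parsing the header line of a runtime.Stack dump. A self-contained version of the same debug-only trick, simplified to use strconv instead of parseUintBytes and no buffer pool:

package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// goid extracts the current goroutine's ID from the first line of a
// stack dump, which looks like "goroutine 4707 [running]:".
func goid() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	i := bytes.IndexByte(buf, ' ')
	if i < 0 {
		panic(fmt.Sprintf("no space in %q", buf))
	}
	n, err := strconv.ParseUint(string(buf[:i]), 10, 64)
	if err != nil {
		panic(err)
	}
	return n
}

func main() {
	fmt.Println("main goroutine id:", goid())
	done := make(chan struct{})
	go func() { fmt.Println("child goroutine id:", goid()); close(done) }()
	<-done
}
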

vendor/golang.org/x/net/http2/headermap.go generated vendored Normal file
@@ -0,0 +1,78 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"net/http"
"strings"
)
var (
commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
)
func init() {
for _, v := range []string{
"accept",
"accept-charset",
"accept-encoding",
"accept-language",
"accept-ranges",
"age",
"access-control-allow-origin",
"allow",
"authorization",
"cache-control",
"content-disposition",
"content-encoding",
"content-language",
"content-length",
"content-location",
"content-range",
"content-type",
"cookie",
"date",
"etag",
"expect",
"expires",
"from",
"host",
"if-match",
"if-modified-since",
"if-none-match",
"if-unmodified-since",
"last-modified",
"link",
"location",
"max-forwards",
"proxy-authenticate",
"proxy-authorization",
"range",
"referer",
"refresh",
"retry-after",
"server",
"set-cookie",
"strict-transport-security",
"trailer",
"transfer-encoding",
"user-agent",
"vary",
"via",
"www-authenticate",
} {
chk := http.CanonicalHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
}
}
func lowerHeader(v string) string {
if s, ok := commonLowerHeader[v]; ok {
return s
}
return strings.ToLower(v)
}
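
lowerHeader exists because HPACK requires lowercase field names while net/http stores them in canonical Go case; the table above just avoids a strings.ToLower for common names. A small demonstration of the round trip using only the standard library:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	for _, canon := range []string{"Content-Type", "X-Custom-Header"} {
		wire := strings.ToLower(canon) // what lowerHeader produces for HPACK
		back := http.CanonicalHeaderKey(wire)
		fmt.Printf("canonical=%q wire=%q roundtrip=%q\n", canon, wire, back)
	}
}
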

vendor/golang.org/x/net/http2/hpack/encode.go generated vendored Normal file
@@ -0,0 +1,240 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"io"
)
const (
uint32Max = ^uint32(0)
initialHeaderTableSize = 4096
)
type Encoder struct {
dynTab dynamicTable
// minSize is the minimum table size set by
// SetMaxDynamicTableSize after the previous Header Table Size
// Update.
minSize uint32
// maxSizeLimit is the maximum table size this encoder
// supports. This protects the encoder from excessively large
// table sizes.
maxSizeLimit uint32
// tableSizeUpdate indicates whether "Header Table Size
// Update" is required.
tableSizeUpdate bool
w io.Writer
buf []byte
}
// NewEncoder returns a new Encoder which performs HPACK encoding.
// Encoded data is written to w.
func NewEncoder(w io.Writer) *Encoder {
e := &Encoder{
minSize: uint32Max,
maxSizeLimit: initialHeaderTableSize,
tableSizeUpdate: false,
w: w,
}
e.dynTab.table.init()
e.dynTab.setMaxSize(initialHeaderTableSize)
return e
}
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, they are written before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
e.buf = e.buf[:0]
if e.tableSizeUpdate {
e.tableSizeUpdate = false
if e.minSize < e.dynTab.maxSize {
e.buf = appendTableSize(e.buf, e.minSize)
}
e.minSize = uint32Max
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
}
idx, nameValueMatch := e.searchTable(f)
if nameValueMatch {
e.buf = appendIndexed(e.buf, idx)
} else {
indexing := e.shouldIndex(f)
if indexing {
e.dynTab.add(f)
}
if idx == 0 {
e.buf = appendNewName(e.buf, f, indexing)
} else {
e.buf = appendIndexedName(e.buf, f, idx, indexing)
}
}
n, err := e.w.Write(e.buf)
if err == nil && n != len(e.buf) {
err = io.ErrShortWrite
}
return err
}
// searchTable searches f in both the static and dynamic header tables.
// The static header table is searched first. Only when there is no
// exact match on both name and value is the dynamic header table
// searched. If there is no match, i is 0. If both name and value
// match, i is the matched index and nameValueMatch becomes true. If
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
i, nameValueMatch = staticTable.search(f)
if nameValueMatch {
return i, true
}
j, nameValueMatch := e.dynTab.table.search(f)
if nameValueMatch || (i == 0 && j != 0) {
return j + uint64(staticTable.len()), nameValueMatch
}
return i, false
}
// SetMaxDynamicTableSize changes the dynamic header table size to v.
// The actual size is bounded by the value passed to
// SetMaxDynamicTableSizeLimit.
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
if v > e.maxSizeLimit {
v = e.maxSizeLimit
}
if v < e.minSize {
e.minSize = v
}
e.tableSizeUpdate = true
e.dynTab.setMaxSize(v)
}
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, the default dynamic header table size described in the
// HPACK specification. If the current maximum
// dynamic header table size is strictly greater than v, "Header Table
// Size Update" will be done in the next WriteField call and the
// maximum dynamic header table size is truncated to v.
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
e.maxSizeLimit = v
if e.dynTab.maxSize > v {
e.tableSizeUpdate = true
e.dynTab.setMaxSize(v)
}
}
// shouldIndex reports whether f should be indexed.
func (e *Encoder) shouldIndex(f HeaderField) bool {
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
}
// appendIndexed appends index i, as encoded in "Indexed Header Field"
// representation, to dst and returns the extended buffer.
func appendIndexed(dst []byte, i uint64) []byte {
first := len(dst)
dst = appendVarInt(dst, 7, i)
dst[first] |= 0x80
return dst
}
// appendNewName appends f, as encoded in one of "Literal Header field
// - New Name" representation variants, to dst and returns the
// extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Inremental Indexing"
// representation is used.
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
dst = appendHpackString(dst, f.Name)
return appendHpackString(dst, f.Value)
}
// appendIndexedName appends f, with index i referring to an indexed
// name entry, as encoded in one of "Literal Header field - Indexed Name"
// representation variants, to dst and returns the extended buffer.
//
// If f.Sensitive is true, "Never Indexed" representation is used. If
// f.Sensitive is false and indexing is true, "Incremental Indexing"
// representation is used.
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
first := len(dst)
var n byte
if indexing {
n = 6
} else {
n = 4
}
dst = appendVarInt(dst, n, i)
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
return appendHpackString(dst, f.Value)
}
// appendTableSize appends v, as encoded in "Header Table Size Update"
// representation, to dst and returns the extended buffer.
func appendTableSize(dst []byte, v uint32) []byte {
first := len(dst)
dst = appendVarInt(dst, 5, uint64(v))
dst[first] |= 0x20
return dst
}
// appendVarInt appends i, as encoded in variable integer form using n
// bit prefix, to dst and returns the extended buffer.
//
// See
// http://http2.github.io/http2-spec/compression.html#integer.representation
func appendVarInt(dst []byte, n byte, i uint64) []byte {
k := uint64((1 << n) - 1)
if i < k {
return append(dst, byte(i))
}
dst = append(dst, byte(k))
i -= k
for ; i >= 128; i >>= 7 {
dst = append(dst, byte(0x80|(i&0x7f)))
}
return append(dst, byte(i))
}
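// Illustrative note, not part of the upstream file: encoding 1337 with
// a 5-bit prefix (RFC 7541 appendix C.1.2) walks this function as
// follows. k = 31; since 1337 >= 31, emit 31 and continue with
// i = 1306; emit 0x80|(1306&0x7f) = 154 and shift to i = 1306>>7 = 10;
// since 10 < 128, emit 10. Result: the three bytes 31, 154, 10.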
// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
//
// s is encoded in Huffman codes only when that produces a strictly
// shorter byte string.
func appendHpackString(dst []byte, s string) []byte {
huffmanLength := HuffmanEncodeLength(s)
if huffmanLength < uint64(len(s)) {
first := len(dst)
dst = appendVarInt(dst, 7, huffmanLength)
dst = AppendHuffmanString(dst, s)
dst[first] |= 0x80
} else {
dst = appendVarInt(dst, 7, uint64(len(s)))
dst = append(dst, s...)
}
return dst
}
// encodeTypeByte returns type byte. If sensitive is true, type byte
// for "Never Indexed" representation is returned. If sensitive is
// false and indexing is true, type byte for "Incremental Indexing"
// representation is returned. Otherwise, type byte for "Without
// Indexing" is returned.
func encodeTypeByte(indexing, sensitive bool) byte {
if sensitive {
return 0x10
}
if indexing {
return 0x40
}
return 0
}
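A minimal usage sketch of the encoder side, inferred from the API above rather than taken from the upstream repository: ":method: GET" sits in the HPACK static table and encodes to the single indexed byte 0x82, while a Sensitive field takes the "Never Indexed" path and stays out of the dynamic table.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Static-table hit: encodes to the single indexed byte 0x82.
	if err := enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}); err != nil {
		panic(err)
	}

	// Sensitive fields use the "Never Indexed" representation and are
	// never added to the dynamic table (see shouldIndex above).
	if err := enc.WriteField(hpack.HeaderField{Name: "cookie", Value: "secret", Sensitive: true}); err != nil {
		panic(err)
	}

	fmt.Printf("%d bytes: % x\n", buf.Len(), buf.Bytes())
}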

vendor/golang.org/x/net/http2/hpack/hpack.go generated vendored Normal file
View File

@ -0,0 +1,490 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package hpack implements HPACK, a compression format for
// efficiently representing HTTP header fields in the context of HTTP/2.
//
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
package hpack
import (
"bytes"
"errors"
"fmt"
)
// A DecodingError is something the spec defines as a decoding error.
type DecodingError struct {
Err error
}
func (de DecodingError) Error() string {
return fmt.Sprintf("decoding error: %v", de.Err)
}
// An InvalidIndexError is returned when an encoder references a table
// entry before the static table or after the end of the dynamic table.
type InvalidIndexError int
func (e InvalidIndexError) Error() string {
return fmt.Sprintf("invalid indexed representation index %d", int(e))
}
// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
Name, Value string
// Sensitive means that this header field should never be
// indexed.
Sensitive bool
}
// IsPseudo reports whether the header field is an http2 pseudo header.
// That is, it reports whether it starts with a colon.
// It is not otherwise guaranteed to be a valid pseudo header field,
// though.
func (hf HeaderField) IsPseudo() bool {
return len(hf.Name) != 0 && hf.Name[0] == ':'
}
func (hf HeaderField) String() string {
var suffix string
if hf.Sensitive {
suffix = " (sensitive)"
}
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}
// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of
// its entries. The size of an entry is the sum of its name's
// length in octets (as defined in Section 5.2), its value's
// length in octets (see Section 5.2), plus 32. The size of
// an entry is calculated using the length of the name and
// value without any Huffman encoding applied."
// This can overflow if somebody makes a large HeaderField
// Name and/or Value by hand, but we don't care: that can't
// happen on the wire, because the encoding doesn't allow it.
return uint32(len(hf.Name) + len(hf.Value) + 32)
}
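// Illustrative note, not part of the upstream file: a field
// "cookie: secret" occupies len("cookie") + len("secret") + 32 =
// 6 + 6 + 32 = 44 bytes of table space under this rule.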
// A Decoder is the decoding context for incremental processing of
// header blocks.
type Decoder struct {
dynTab dynamicTable
emit func(f HeaderField)
emitEnabled bool // whether calls to emit are enabled
maxStrLen int // 0 means unlimited
// buf is the unparsed buffer. It's only written to
// saveBuf if it was truncated in the middle of a header
// block. Because it's usually not owned, we can only
// process it under Write.
buf []byte // not owned; only valid during Write
// saveBuf is previous data passed to Write which we weren't able
// to fully parse before. Unlike buf, we own this data.
saveBuf bytes.Buffer
}
// NewDecoder returns a new decoder with the provided maximum dynamic
// table size. The emitFunc will be called for each valid field
// parsed, in the same goroutine as calls to Write, before Write returns.
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
d := &Decoder{
emit: emitFunc,
emitEnabled: true,
}
d.dynTab.table.init()
d.dynTab.allowedMaxSize = maxDynamicTableSize
d.dynTab.setMaxSize(maxDynamicTableSize)
return d
}
// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")
// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
d.maxStrLen = n
}
// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
d.emit = emitFunc
}
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
// underlying buffers, to reduce garbage.
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
d.dynTab.setMaxSize(v)
}
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
d.dynTab.allowedMaxSize = v
}
type dynamicTable struct {
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
table headerFieldTable
size uint32 // in bytes
maxSize uint32 // current maxSize
allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
func (dt *dynamicTable) setMaxSize(v uint32) {
dt.maxSize = v
dt.evict()
}
func (dt *dynamicTable) add(f HeaderField) {
dt.table.addEntry(f)
dt.size += f.Size()
dt.evict()
}
// If we're too big, evict old stuff.
func (dt *dynamicTable) evict() {
var n int
for dt.size > dt.maxSize && n < dt.table.len() {
dt.size -= dt.table.ents[n].Size()
n++
}
dt.table.evictOldest(n)
}
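// Illustrative note, not part of the upstream file: with maxSize 100
// and three 44-byte entries (size 132), one pass of this loop evicts
// exactly the oldest entry, leaving the table at 88 bytes.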
func (d *Decoder) maxTableIndex() int {
// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
// the dynamic table to 2^32 bytes, where each entry will occupy more than
// one byte. Further, the staticTable has a fixed, small length.
return d.dynTab.table.len() + staticTable.len()
}
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
// See Section 2.3.3.
if i == 0 {
return
}
if i <= uint64(staticTable.len()) {
return staticTable.ents[i-1], true
}
if i > uint64(d.maxTableIndex()) {
return
}
// In the dynamic table, newer entries have lower indices.
// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
// the reversed dynamic table.
dt := d.dynTab.table
return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}
// Decode decodes an entire block.
//
// TODO: remove this method and make it incremental later? This is
// easier for debugging now.
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
var hf []HeaderField
saveFunc := d.emit
defer func() { d.emit = saveFunc }()
d.emit = func(f HeaderField) { hf = append(hf, f) }
if _, err := d.Write(p); err != nil {
return nil, err
}
if err := d.Close(); err != nil {
return nil, err
}
return hf, nil
}
func (d *Decoder) Close() error {
if d.saveBuf.Len() > 0 {
d.saveBuf.Reset()
return DecodingError{errors.New("truncated headers")}
}
return nil
}
func (d *Decoder) Write(p []byte) (n int, err error) {
if len(p) == 0 {
// Prevent state machine CPU attacks (making us redo
// work up to the point of finding out we don't have
// enough data)
return
}
// Only copy the data if we have to. Optimistically assume
// that p will contain a complete header block.
if d.saveBuf.Len() == 0 {
d.buf = p
} else {
d.saveBuf.Write(p)
d.buf = d.saveBuf.Bytes()
d.saveBuf.Reset()
}
for len(d.buf) > 0 {
err = d.parseHeaderFieldRepr()
if err == errNeedMore {
// Extra paranoia, making sure saveBuf won't
// get too large. All the varint and string
// reading code earlier should already catch
// overlong things and return ErrStringLength,
// but keep this as a last resort.
const varIntOverhead = 8 // conservative
if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
return 0, ErrStringLength
}
d.saveBuf.Write(d.buf)
return len(p), nil
}
if err != nil {
break
}
}
return len(p), err
}
// errNeedMore is an internal sentinel error value that means the
// buffer is truncated and we need to read more data before we can
// continue parsing.
var errNeedMore = errors.New("need more data")
type indexType int
const (
indexedTrue indexType = iota
indexedFalse
indexedNever
)
func (v indexType) indexed() bool { return v == indexedTrue }
func (v indexType) sensitive() bool { return v == indexedNever }
// returns errNeedMore if there isn't enough data available.
// any other error is fatal.
// consumes d.buf iff it returns nil.
// precondition: must be called with len(d.buf) > 0
func (d *Decoder) parseHeaderFieldRepr() error {
b := d.buf[0]
switch {
case b&128 != 0:
// Indexed representation.
// High bit set?
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
return d.parseFieldIndexed()
case b&192 == 64:
// 6.2.1 Literal Header Field with Incremental Indexing
// 0b01xxxxxx: top two bits are 01
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
return d.parseFieldLiteral(6, indexedTrue)
case b&240 == 0:
// 6.2.2 Literal Header Field without Indexing
// 0b0000xxxx: top four bits are 0000
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
return d.parseFieldLiteral(4, indexedFalse)
case b&240 == 16:
// 6.2.3 Literal Header Field never Indexed
// 0b0001xxxx: top four bits are 0001
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
return d.parseFieldLiteral(4, indexedNever)
case b&224 == 32:
// 6.3 Dynamic Table Size Update
// Top three bits are '001'.
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
return d.parseDynamicTableSizeUpdate()
}
return DecodingError{errors.New("invalid encoding")}
}
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldIndexed() error {
buf := d.buf
idx, buf, err := readVarInt(7, buf)
if err != nil {
return err
}
hf, ok := d.at(idx)
if !ok {
return DecodingError{InvalidIndexError(idx)}
}
d.buf = buf
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
}
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
buf := d.buf
nameIdx, buf, err := readVarInt(n, buf)
if err != nil {
return err
}
var hf HeaderField
wantStr := d.emitEnabled || it.indexed()
if nameIdx > 0 {
ihf, ok := d.at(nameIdx)
if !ok {
return DecodingError{InvalidIndexError(nameIdx)}
}
hf.Name = ihf.Name
} else {
hf.Name, buf, err = d.readString(buf, wantStr)
if err != nil {
return err
}
}
hf.Value, buf, err = d.readString(buf, wantStr)
if err != nil {
return err
}
d.buf = buf
if it.indexed() {
d.dynTab.add(hf)
}
hf.Sensitive = it.sensitive()
return d.callEmit(hf)
}
func (d *Decoder) callEmit(hf HeaderField) error {
if d.maxStrLen != 0 {
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
return ErrStringLength
}
}
if d.emitEnabled {
d.emit(hf)
}
return nil
}
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
buf := d.buf
size, buf, err := readVarInt(5, buf)
if err != nil {
return err
}
if size > uint64(d.dynTab.allowedMaxSize) {
return DecodingError{errors.New("dynamic table size update too large")}
}
d.dynTab.setMaxSize(uint32(size))
d.buf = buf
return nil
}
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
// readVarInt reads an unsigned variable length integer off the
// beginning of p. n is the parameter as described in
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
//
// n must always be between 1 and 8.
//
// The returned remain buffer is either a smaller suffix of p, or err != nil.
// The error is errNeedMore if p doesn't contain a complete integer.
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
if n < 1 || n > 8 {
panic("bad n")
}
if len(p) == 0 {
return 0, p, errNeedMore
}
i = uint64(p[0])
if n < 8 {
i &= (1 << uint64(n)) - 1
}
if i < (1<<uint64(n))-1 {
return i, p[1:], nil
}
origP := p
p = p[1:]
var m uint64
for len(p) > 0 {
b := p[0]
p = p[1:]
i += uint64(b&127) << m
if b&128 == 0 {
return i, p, nil
}
m += 7
if m >= 63 { // TODO: proper overflow check. making this up.
return 0, origP, errVarintOverflow
}
}
return 0, origP, errNeedMore
}
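// Illustrative note, not part of the upstream file: decoding the bytes
// 31, 154, 10 with n = 5 reverses the appendVarInt example. The prefix
// byte is all ones (31), so the loop adds (154&127)<<0 = 26, sees the
// continuation bit, then adds (10&127)<<7 = 1280 and stops, returning
// 31 + 26 + 1280 = 1337.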
// readString decodes an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
if len(p) == 0 {
return "", p, errNeedMore
}
isHuff := p[0]&128 != 0
strLen, p, err := readVarInt(7, p)
if err != nil {
return "", p, err
}
if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
return "", nil, ErrStringLength
}
if uint64(len(p)) < strLen {
return "", p, errNeedMore
}
if !isHuff {
if wantStr {
s = string(p[:strLen])
}
return s, p[strLen:], nil
}
if wantStr {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset() // don't trust others
defer bufPool.Put(buf)
if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
buf.Reset()
return "", nil, err
}
s = buf.String()
buf.Reset() // be nice to GC
}
return s, p[strLen:], nil
}
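The matching decode side, again an inferred usage sketch of the API above: the single byte 0x82 is the indexed representation of static table entry 2, ":method: GET", and the emit callback collects each decoded field.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var fields []hpack.HeaderField
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fields = append(fields, f)
	})

	// 0x82 = 0x80 | 2: indexed representation of static table entry 2,
	// ":method: GET" (handled by parseFieldIndexed above).
	if _, err := dec.Write([]byte{0x82}); err != nil {
		panic(err)
	}
	if err := dec.Close(); err != nil {
		panic(err)
	}
	for _, f := range fields {
		fmt.Println(f) // header field ":method" = "GET"
	}
}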

vendor/golang.org/x/net/http2/hpack/huffman.go generated vendored Normal file
View File

@ -0,0 +1,212 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package hpack
import (
"bytes"
"errors"
"io"
"sync"
)
var bufPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
// HuffmanDecode decodes the string in v and writes the expanded
// result to w, returning the number of bytes written to w and the
// Write call's return value. At most one Write call is made.
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
if err := huffmanDecode(buf, 0, v); err != nil {
return 0, err
}
return w.Write(buf.Bytes())
}
// HuffmanDecodeToString decodes the string in v.
func HuffmanDecodeToString(v []byte) (string, error) {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
if err := huffmanDecode(buf, 0, v); err != nil {
return "", err
}
return buf.String(), nil
}
// ErrInvalidHuffman is returned for errors found decoding
// Huffman-encoded strings.
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
// huffmanDecode decodes v to buf.
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
n := rootHuffmanNode
// cur is the bit buffer that has not been fed into n.
// cbits is the number of low order bits in cur that are valid.
// sbits is the number of bits of the symbol prefix being decoded.
cur, cbits, sbits := uint(0), uint8(0), uint8(0)
for _, b := range v {
cur = cur<<8 | uint(b)
cbits += 8
sbits += 8
for cbits >= 8 {
idx := byte(cur >> (cbits - 8))
n = n.children[idx]
if n == nil {
return ErrInvalidHuffman
}
if n.children == nil {
if maxLen != 0 && buf.Len() == maxLen {
return ErrStringLength
}
buf.WriteByte(n.sym)
cbits -= n.codeLen
n = rootHuffmanNode
sbits = cbits
} else {
cbits -= 8
}
}
}
for cbits > 0 {
n = n.children[byte(cur<<(8-cbits))]
if n == nil {
return ErrInvalidHuffman
}
if n.children != nil || n.codeLen > cbits {
break
}
if maxLen != 0 && buf.Len() == maxLen {
return ErrStringLength
}
buf.WriteByte(n.sym)
cbits -= n.codeLen
n = rootHuffmanNode
sbits = cbits
}
if sbits > 7 {
// Either there was an incomplete symbol, or overlong padding.
// Both are decoding errors per RFC 7541 section 5.2.
return ErrInvalidHuffman
}
if mask := uint(1<<cbits - 1); cur&mask != mask {
// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
return ErrInvalidHuffman
}
return nil
}
type node struct {
// children is non-nil for internal nodes
children []*node
// The following are only valid if children is nil:
codeLen uint8 // number of bits that led to the output of sym
sym byte // output symbol
}
func newInternalNode() *node {
return &node{children: make([]*node, 256)}
}
var rootHuffmanNode = newInternalNode()
func init() {
if len(huffmanCodes) != 256 {
panic("unexpected size")
}
for i, code := range huffmanCodes {
addDecoderNode(byte(i), code, huffmanCodeLen[i])
}
}
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
cur := rootHuffmanNode
for codeLen > 8 {
codeLen -= 8
i := uint8(code >> codeLen)
if cur.children[i] == nil {
cur.children[i] = newInternalNode()
}
cur = cur.children[i]
}
shift := 8 - codeLen
start, end := int(uint8(code<<shift)), int(1<<shift)
for i := start; i < start+end; i++ {
cur.children[i] = &node{sym: sym, codeLen: codeLen}
}
}
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
// and returns the extended buffer.
func AppendHuffmanString(dst []byte, s string) []byte {
rembits := uint8(8)
for i := 0; i < len(s); i++ {
if rembits == 8 {
dst = append(dst, 0)
}
dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
}
if rembits < 8 {
// special EOS symbol
code := uint32(0x3fffffff)
nbits := uint8(30)
t := uint8(code >> (nbits - rembits))
dst[len(dst)-1] |= t
}
return dst
}
// HuffmanEncodeLength returns the number of bytes required to encode
// s in Huffman codes. The result is rounded up to a byte boundary.
func HuffmanEncodeLength(s string) uint64 {
n := uint64(0)
for i := 0; i < len(s); i++ {
n += uint64(huffmanCodeLen[s[i]])
}
return (n + 7) / 8
}
// appendByteToHuffmanCode appends the Huffman code for c to dst and
// returns the extended buffer and the remaining bits in the last
// element. The appending is not byte aligned and the remaining bits
// in the last element of dst are given in rembits.
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
code := huffmanCodes[c]
nbits := huffmanCodeLen[c]
for {
if rembits > nbits {
t := uint8(code << (rembits - nbits))
dst[len(dst)-1] |= t
rembits -= nbits
break
}
t := uint8(code >> (nbits - rembits))
dst[len(dst)-1] |= t
nbits -= rembits
rembits = 8
if nbits == 0 {
break
}
dst = append(dst, 0)
}
return dst, rembits
}
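A round-trip sketch of the exported Huffman helpers, using the RFC 7541 appendix C.4.1 example string: "www.example.com" Huffman-encodes to 12 octets, which is exactly the comparison appendHpackString in encode.go makes before choosing the Huffman form.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	s := "www.example.com"

	// Predicted encoded size without doing the encoding; encode.go uses
	// this to pick the shorter of the Huffman and literal forms.
	fmt.Println(hpack.HuffmanEncodeLength(s), "of", len(s), "bytes") // 12 of 15

	enc := hpack.AppendHuffmanString(nil, s)
	out, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(out == s) // true
}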

Some files were not shown because too many files have changed in this diff.