Upgrade grafana-plugin-model (#19438)

* use grafana-plugin-model dependency that uses go modules

* use grafana-plugin-model with updated hashicorp/go-plugin

* use grafana-plugin-model with re-compiled protos

* test using protoc-gen-go v1.2.0 tag

* use grafana-plugin-model with re-compiled protos

* chore: fix deprecation warning for lint

* use latest grafana-plugin-model

Fixes #19454
Authored by Marcus Efraimsson on 2019-09-30 15:16:04 +02:00, committed by GitHub
parent 44de6812be
commit 93919427f8
247 changed files with 52474 additions and 12567 deletions

go.mod (6 changed lines)

@ -32,9 +32,9 @@ require (
github.com/google/go-cmp v0.2.0 // indirect
github.com/gorilla/websocket v1.4.0
github.com/gosimple/slug v1.4.2
github.com/grafana/grafana-plugin-model v0.0.0-20180518082423-84176c64269d
github.com/grafana/grafana-plugin-model v0.0.0-20190930120109-1fc953a61fb4
github.com/hashicorp/go-hclog v0.8.0
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104
github.com/hashicorp/go-plugin v1.0.1
github.com/hashicorp/go-version v1.1.0
github.com/inconshreveable/log15 v0.0.0-20180818164646-67afb5ed74ec
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
@ -58,7 +58,7 @@ require (
github.com/russellhaering/goxmldsig v0.0.0-20180430223755-7acd5e4a6ef7 // indirect
github.com/sergi/go-diff v1.0.0 // indirect
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337
github.com/stretchr/testify v1.3.0
github.com/stretchr/testify v1.4.0
github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf
github.com/ua-parser/uap-go v0.0.0-20190826212731-daf92ba38329
github.com/uber-go/atomic v1.3.2 // indirect
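The two bumps above work together: a backend datasource plugin serves itself to Grafana over hashicorp/go-plugin's gRPC transport using the wrapper types from grafana-plugin-model. Below is a minimal, hedged sketch of that wiring; the handshake cookie values, the plugin map key, and the `DatasourcePluginImpl` wrapper are assumptions based on plugin-model usage of this era and are not part of this diff.

```go
package main

import (
	"context"

	"github.com/grafana/grafana-plugin-model/go/datasource"
	plugin "github.com/hashicorp/go-plugin"
)

// exampleDatasource is a hypothetical implementation of the generated
// DatasourcePlugin interface; Query is the only method it needs.
type exampleDatasource struct{}

func (d *exampleDatasource) Query(ctx context.Context, req *datasource.DatasourceRequest) (*datasource.DatasourceResponse, error) {
	// Return an empty result per query; a real plugin would query its backend here.
	res := &datasource.DatasourceResponse{}
	for _, q := range req.Queries {
		res.Results = append(res.Results, &datasource.QueryResult{RefId: q.RefId})
	}
	return res, nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		// Handshake values are assumptions; Grafana rejects plugins whose
		// handshake does not match its own.
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "grafana_plugin_type",
			MagicCookieValue: "datasource",
		},
		Plugins: map[string]plugin.Plugin{
			// DatasourcePluginImpl is the gRPC wrapper assumed to come from
			// the plugin-model package; the map key is illustrative.
			"example-backend-datasource": &datasource.DatasourcePluginImpl{Plugin: &exampleDatasource{}},
		},
		// go-plugin v1.0.1 still serves gRPC via DefaultGRPCServer.
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
```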

go.sum (12 changed lines)

@ -99,17 +99,19 @@ github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gosimple/slug v1.4.2 h1:jDmprx3q/9Lfk4FkGZtvzDQ9Cj9eAmsjzeQGp24PeiQ=
github.com/gosimple/slug v1.4.2/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0=
github.com/grafana/grafana-plugin-model v0.0.0-20180518082423-84176c64269d h1:J3XXvgaMHzZ+gC/pStYNc6k+LjPuhPeAevwgN0AqTeM=
github.com/grafana/grafana-plugin-model v0.0.0-20180518082423-84176c64269d/go.mod h1:26i1retIKRnMtFVuSmJe6N6D1d1ifdzOMi507YjCYm4=
github.com/grafana/grafana-plugin-model v0.0.0-20190930120109-1fc953a61fb4 h1:SPdxCL9BChFTlyi0Khv64vdCW4TMna8+sxL7+Chx+Ag=
github.com/grafana/grafana-plugin-model v0.0.0-20190930120109-1fc953a61fb4/go.mod h1:nc0XxBzjeGcrMltCDw269LoWF9S8ibhgxolCdA1R8To=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY=
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104 h1:9iQ/zrTOJqzP+kH37s6xNb6T1RysiT7fnDD3DJbspVw=
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/log15 v0.0.0-20180818164646-67afb5ed74ec h1:CGkYB1Q7DSsH/ku+to+foV4agt2F2miquaLUgF6L178=
@ -229,6 +231,8 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf h1:Z2X3Os7oRzpdJ75iPqWZc0HeJWFYNCvKsfpQwFpRNTA=
github.com/teris-io/shortid v0.0.0-20171029131806-771a37caa5cf/go.mod h1:M8agBzgqHIhgj7wEn9/0hJUZcrvt9VY+Ln+S1I5Mha0=

View File

@ -370,7 +370,7 @@ func applyCommandLineDefaultProperties(props map[string]string, file *ini.File)
func applyCommandLineProperties(props map[string]string, file *ini.File) {
for _, section := range file.Sections() {
sectionName := section.Name() + "."
if section.Name() == ini.DEFAULT_SECTION {
if section.Name() == ini.DefaultSection {
sectionName = ""
}
for _, key := range section.Keys() {
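For reference, this hunk tracks go-ini's deprecation of the `DEFAULT_SECTION` constant in favour of `DefaultSection`, which is the lint warning the "chore" commit refers to. A minimal, self-contained sketch of the same prefix-building pattern, assuming the config is loaded from an in-memory byte slice:

```go
package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

func main() {
	file, err := ini.Load([]byte("app_mode = production\n\n[server]\nhttp_port = 3000\n"))
	if err != nil {
		panic(err)
	}
	// Keys in the default section are addressed without a section prefix,
	// mirroring applyCommandLineProperties above.
	for _, section := range file.Sections() {
		prefix := section.Name() + "."
		if section.Name() == ini.DefaultSection {
			prefix = ""
		}
		for _, key := range section.Keys() {
			fmt.Printf("%s%s = %s\n", prefix, key.Name(), key.Value())
		}
	}
}
```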

View File

@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow

vendor/github.com/golang/protobuf/proto/deprecated.go (generated, vendored, new file: 63 lines)

@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2018 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import "errors"
// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}

View File

@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
return false
}
m1, m2 := e1.value, e2.value
m1 := extensionAsLegacyType(e1.value)
m2 := extensionAsLegacyType(e2.value)
if m1 == nil && m2 == nil {
// Both have only encoded form.

View File

@ -185,9 +185,25 @@ type Extension struct {
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
desc *ExtensionDesc
// value is a concrete value for the extension field. Let the type of
// desc.ExtensionType be the "API type" and the type of Extension.value
// be the "storage type". The API type and storage type are the same except:
// * For scalars (except []byte), the API type uses *T,
// while the storage type uses T.
// * For repeated fields, the API type uses []T, while the storage type
// uses *[]T.
//
// The reason for the divergence is so that the storage type more naturally
// matches what is expected of when retrieving the values through the
// protobuf reflection APIs.
//
// The value may only be populated if desc is also populated.
value interface{}
enc []byte
// enc is the raw bytes for the extension field.
enc []byte
}
// SetRawExtension is for testing only.
@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
return extensionAsLegacyType(e.value), nil
}
if extension.ExtensionType == nil {
@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
return extensionAsLegacyType(e.value), nil
}
// defaultExtensionValue returns the default value for extension.
@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: value}
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
// extensionAsLegacyType converts an value in the storage type as the API type.
// See Extension.value.
func extensionAsLegacyType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
// Represent primitive types as a pointer to the value.
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Slice:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
}
}
}
return v
}
// extensionAsStorageType converts an value in the API type as the storage type.
// See Extension.value.
func extensionAsStorageType(v interface{}) interface{} {
switch rv := reflect.ValueOf(v); rv.Kind() {
case reflect.Ptr:
// Represent slice types as the value itself.
switch rv.Type().Elem().Kind() {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
if rv.IsNil() {
v = reflect.Zero(rv.Type().Elem()).Interface()
} else {
v = rv.Elem().Interface()
}
}
case reflect.Slice:
// Represent slice types as a pointer to the value.
if rv.Type().Elem().Kind() != reflect.Uint8 {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
}
}
return v
}
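The net effect of these two helpers, for scalar extensions, is an int32-in / *int32-out round trip that leaves the public GetExtension/SetExtension contract unchanged. A runnable, scalar-only sketch of that conversion (standalone reflect code mirroring the helpers above, not the proto package itself):

```go
package main

import (
	"fmt"
	"reflect"
)

// asLegacyType mirrors the scalar branch of extensionAsLegacyType: wrap a
// stored T into the *T that callers of GetExtension receive.
func asLegacyType(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	pv := reflect.New(rv.Type())
	pv.Elem().Set(rv)
	return pv.Interface()
}

// asStorageType mirrors the scalar branch of extensionAsStorageType: unwrap
// the *T passed to SetExtension into the T stored internally.
func asStorageType(v interface{}) interface{} {
	rv := reflect.ValueOf(v)
	if rv.Kind() == reflect.Ptr && !rv.IsNil() {
		return rv.Elem().Interface()
	}
	return v
}

func main() {
	api := int32(7)               // callers pass *int32 (the "API type")
	stored := asStorageType(&api) // int32 is what ends up in Extension.value
	back := asLegacyType(stored)  // and *int32 is what comes back out
	fmt.Printf("stored as %T, returned as %T = %d\n", stored, back, *back.(*int32))
}
```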

View File

@ -341,26 +341,6 @@ type Message interface {
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true
const (
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
ProtoPackageIsVersion3 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
ProtoPackageIsVersion1 = true
)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.

View File

@ -36,13 +36,7 @@ package proto
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
return marshalMessageSet(exts, false)
}
// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
switch exts := exts.(type) {
case *XXX_InternalExtensions:
var u marshalInfo
siz := u.sizeMessageSet(exts)
b := make([]byte, 0, siz)
return u.appendMessageSet(b, exts, deterministic)
case map[int32]Extension:
// This is an old-style extension map.
// Wrap it in a new-style XXX_InternalExtensions.
ie := XXX_InternalExtensions{
p: &struct {
mu sync.Mutex
extensionMap map[int32]Extension
}{
extensionMap: exts,
},
}
var u marshalInfo
siz := u.sizeMessageSet(&ie)
b := make([]byte, 0, siz)
return u.appendMessageSet(b, &ie, deterministic)
default:
return nil, errors.New("proto: not an extension map")
}
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
var mu sync.Locker
m, mu = exts.extensionsRead()
if m != nil {
// Keep the extensions map locked until we're done marshaling to prevent
// races between marshaling and unmarshaling the lazily-{en,de}coded
// values.
mu.Lock()
defer mu.Unlock()
}
case map[int32]Extension:
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
if i > 0 && b.Len() > 1 {
b.WriteByte(',')
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}

View File

@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
if deref {
u = u.Elem()
}
return pointer{v: u}
}

View File

@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
} else {
// The interface is not of pointer type. The data word is the pointer
// to the data.
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
// The interface is not of pointer type. The data word is the pointer
// to the data.
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
if deref {
p.p = *(*unsafe.Pointer)(p.p)
}
return p
}
// valToPointer converts v to a pointer. v must be of pointer type.

View File

@ -38,7 +38,6 @@ package proto
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
log.Printf("proto: tag has too few fields: %q", s)
return
}
@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) {
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
log.Printf("proto: tag has unknown wire type: %q", s)
return
}
@ -334,9 +333,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
@ -346,17 +342,20 @@ func GetProperties(t reflect.Type) *StructProperties {
return sprop
}
type (
oneofFuncsIface interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
oneofWrappersIface interface {
XXX_OneofWrappers() []interface{}
}
)
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
@ -391,13 +390,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
var oots []interface{}
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
case oneofFuncsIface:
_, _, _, oots = m.XXX_OneofFuncs()
case oneofWrappersIface:
oots = m.XXX_OneofWrappers()
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
_, _, _, oots = om.XXX_OneofFuncs()
if len(oots) > 0 {
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
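The switch above lets the properties code accept either the legacy XXX_OneofFuncs or the XXX_OneofWrappers method that newer protoc-gen-go output (the v1.2+ regeneration mentioned in the commit message) emits. A trimmed sketch of the newer generated shape; `Msg` and its wrapper types are hypothetical, and the usual Reset/String/ProtoMessage methods are omitted:

```go
package example

// Msg is a hypothetical message with `oneof value { string name = 1; int64 id = 2; }`.
type Msg struct {
	// Types that are assignable to Value:
	//	*Msg_Name
	//	*Msg_Id
	Value isMsg_Value `protobuf_oneof:"value"`
}

type isMsg_Value interface{ isMsg_Value() }

type Msg_Name struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3,oneof"`
}

type Msg_Id struct {
	Id int64 `protobuf:"varint,2,opt,name=id,proto3,oneof"`
}

func (*Msg_Name) isMsg_Value() {}
func (*Msg_Id) isMsg_Value()   {}

// XXX_OneofWrappers replaces XXX_OneofFuncs: it only lists the wrapper types,
// which is all the table-driven marshal/unmarshal code above needs.
func (*Msg) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*Msg_Name)(nil),
		(*Msg_Id)(nil),
	}
}
```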

View File

@ -87,6 +87,7 @@ type marshalElemInfo struct {
sizer sizer
marshaler marshaler
isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
deref bool // dereference the pointer before operating on it; implies isptr
}
var (
@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
// get oneof implementers
var oneofImplementers []interface{}
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
case oneofFuncsIface:
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
case oneofWrappersIface:
oneofImplementers = m.XXX_OneofWrappers()
}
n := t.NumField()
@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
panic("tag is not an integer")
}
wt := wiretype(tags[0])
if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
t = t.Elem()
}
sizer, marshaler := typeMarshaler(t, tags, false, false)
var deref bool
if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
t = reflect.PtrTo(t)
deref = true
}
e = &marshalElemInfo{
wiretag: uint64(tag)<<3 | wt,
tagsize: SizeVarint(uint64(tag) << 3),
sizer: sizer,
marshaler: marshaler,
isptr: t.Kind() == reflect.Ptr,
deref: deref,
}
// update cache
@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
fi.field = toField(f)
fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.isPointer = true
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
}
}
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
switch encoding {
@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range m.MapKeys() {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
kaddr := toAddrPointer(&ki, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
kaddr := toAddrPointer(&ki, false, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
for _, k := range keys {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
kaddr := toAddrPointer(&ki, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
kaddr := toAddrPointer(&ki, false, false) // pointer to key
vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
b = appendVarint(b, tag)
siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
// the last time this function was called.
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
mu.Unlock()
@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err
@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, 1) // message, tag = 3 (size=1)
}
mu.Unlock()
@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
if !nerr.Merge(err) {
return b, err
@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
b = append(b, 1<<3|WireEndGroup)
if !nerr.Merge(err) {
@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
return n
@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
ei := u.getExtElemInfo(e.desc)
v := e.value
p := toAddrPointer(&v, ei.isptr)
p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
if !nerr.Merge(err) {
return b, err

View File

@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
u.computeUnmarshalInfo()
}
if u.isMessageSet {
return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
var reqMask uint64 // bitmask of required fields we've seen.
var errLater error
@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Find any types associated with oneof fields.
// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
if fn.IsValid() {
res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
for i := res.Len() - 1; i >= 0; i-- {
v := res.Index(i) // interface{}
tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
typ := tptr.Elem() // Msg_X
var oneofImplementers []interface{}
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
case oneofFuncsIface:
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
case oneofWrappersIface:
oneofImplementers = m.XXX_OneofWrappers()
}
for _, v := range oneofImplementers {
tptr := reflect.TypeOf(v) // *Msg_X
typ := tptr.Elem() // Msg_X
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
tags := strings.Split(f.Tag.Get("protobuf"), ",")
fieldNum, err := strconv.Atoi(tags[1])
if err != nil {
panic("protobuf tag field not an integer: " + tags[1])
}
var name string
for _, tag := range tags {
if strings.HasPrefix(tag, "name=") {
name = strings.TrimPrefix(tag, "name=")
break
}
}
// Find the oneof field that this struct implements.
// Might take O(n^2) to process all of the oneofs, but who cares.
for _, of := range oneofFields {
if tptr.Implements(of.ityp) {
// We have found the corresponding interface for this struct.
// That lets us know where this struct should be stored
// when we encounter it during unmarshaling.
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
f := typ.Field(0) // oneof implementers have one field
baseUnmarshal := fieldUnmarshaler(&f)
tags := strings.Split(f.Tag.Get("protobuf"), ",")
fieldNum, err := strconv.Atoi(tags[1])
if err != nil {
panic("protobuf tag field not an integer: " + tags[1])
}
var name string
for _, tag := range tags {
if strings.HasPrefix(tag, "name=") {
name = strings.TrimPrefix(tag, "name=")
break
}
}
// Find the oneof field that this struct implements.
// Might take O(n^2) to process all of the oneofs, but who cares.
for _, of := range oneofFields {
if tptr.Implements(of.ityp) {
// We have found the corresponding interface for this struct.
// That lets us know where this struct should be stored
// when we encounter it during unmarshaling.
unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
u.setTag(fieldNum, of.field, unmarshal, 0, name)
}
}
}
// Get extension ranges, if any.
fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
if fn.IsValid() {
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
panic("a message with extensions, but no extensions field in " + t.Name())
@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
// If there is an error, it returns 0,0.
func decodeVarint(b []byte) (uint64, int) {
var x, y uint64
if len(b) <= 0 {
if len(b) == 0 {
goto bad
}
x = uint64(b[0])

View File

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
package any // import "github.com/golang/protobuf/ptypes/any"
package any
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@ -118,6 +121,10 @@ type Any struct {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
return fileDescriptor_any_744b9ca530f228db, []int{0}
return fileDescriptor_b53526c13ae22eb4, []int{0}
}
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
func (dst *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(dst, src)
func (m *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(m, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
@ -172,9 +181,9 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
var fileDescriptor_any_744b9ca530f228db = []byte{
var fileDescriptor_b53526c13ae22eb4 = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
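The regenerated Any only changes documentation and descriptor naming; the helper API around it is unchanged. A minimal sketch of packing and unpacking a message through ptypes, using the Timestamp type that is also regenerated in this vendor bump:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	timestamp "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	ts, err := ptypes.TimestampProto(time.Now())
	if err != nil {
		panic(err)
	}

	// Pack: TypeUrl becomes "type.googleapis.com/google.protobuf.Timestamp".
	packed, err := ptypes.MarshalAny(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.TypeUrl)

	// Unpack back into a concrete message.
	var out timestamp.Timestamp
	if err := ptypes.UnmarshalAny(packed, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds, out.Nanos)
}
```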

View File

@ -120,17 +120,18 @@ option objc_class_prefix = "GPB";
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@ -139,6 +140,10 @@ message Any {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//

View File

@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}

View File

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
package duration // import "github.com/golang/protobuf/ptypes/duration"
package duration
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
return fileDescriptor_duration_e7d612259e3f0613, []int{0}
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
}
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Duration.Unmarshal(m, b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
}
func (dst *Duration) XXX_Merge(src proto.Message) {
xxx_messageInfo_Duration.Merge(dst, src)
func (m *Duration) XXX_Merge(src proto.Message) {
xxx_messageInfo_Duration.Merge(m, src)
}
func (m *Duration) XXX_Size() int {
return xxx_messageInfo_Duration.Size(m)
@ -138,11 +142,9 @@ func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
}
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
var fileDescriptor_duration_e7d612259e3f0613 = []byte{
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,

View File

@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
Seconds: seconds,
Nanos: nanos,
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err

View File

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
package timestamp
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@ -92,8 +96,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
return fileDescriptor_292007bbfe81227e, []int{0}
}
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
}
func (dst *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(dst, src)
func (m *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(m, src)
}
func (m *Timestamp) XXX_Size() int {
return xxx_messageInfo_Timestamp.Size(m)
@ -154,11 +160,9 @@ func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() {
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
}
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
var fileDescriptor_292007bbfe81227e = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,

View File

@ -103,7 +103,9 @@ option objc_class_prefix = "GPB";
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
// is required. A proto3 JSON serializer should always use UTC (as indicated by
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@ -114,8 +116,8 @@ option objc_class_prefix = "GPB";
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {

View File

@ -61,13 +61,13 @@ func (x RowValue_Kind) String() string {
return proto.EnumName(RowValue_Kind_name, int32(x))
}
func (RowValue_Kind) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{8, 0}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{8, 0}
}
type DatasourceRequest struct {
TimeRange *TimeRange `protobuf:"bytes,1,opt,name=timeRange" json:"timeRange,omitempty"`
Datasource *DatasourceInfo `protobuf:"bytes,2,opt,name=datasource" json:"datasource,omitempty"`
Queries []*Query `protobuf:"bytes,3,rep,name=queries" json:"queries,omitempty"`
TimeRange *TimeRange `protobuf:"bytes,1,opt,name=timeRange,proto3" json:"timeRange,omitempty"`
Datasource *DatasourceInfo `protobuf:"bytes,2,opt,name=datasource,proto3" json:"datasource,omitempty"`
Queries []*Query `protobuf:"bytes,3,rep,name=queries,proto3" json:"queries,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -77,7 +77,7 @@ func (m *DatasourceRequest) Reset() { *m = DatasourceRequest{} }
func (m *DatasourceRequest) String() string { return proto.CompactTextString(m) }
func (*DatasourceRequest) ProtoMessage() {}
func (*DatasourceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{0}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{0}
}
func (m *DatasourceRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DatasourceRequest.Unmarshal(m, b)
@ -119,10 +119,10 @@ func (m *DatasourceRequest) GetQueries() []*Query {
}
type Query struct {
RefId string `protobuf:"bytes,1,opt,name=refId" json:"refId,omitempty"`
MaxDataPoints int64 `protobuf:"varint,2,opt,name=maxDataPoints" json:"maxDataPoints,omitempty"`
IntervalMs int64 `protobuf:"varint,3,opt,name=intervalMs" json:"intervalMs,omitempty"`
ModelJson string `protobuf:"bytes,4,opt,name=modelJson" json:"modelJson,omitempty"`
RefId string `protobuf:"bytes,1,opt,name=refId,proto3" json:"refId,omitempty"`
MaxDataPoints int64 `protobuf:"varint,2,opt,name=maxDataPoints,proto3" json:"maxDataPoints,omitempty"`
IntervalMs int64 `protobuf:"varint,3,opt,name=intervalMs,proto3" json:"intervalMs,omitempty"`
ModelJson string `protobuf:"bytes,4,opt,name=modelJson,proto3" json:"modelJson,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -132,7 +132,7 @@ func (m *Query) Reset() { *m = Query{} }
func (m *Query) String() string { return proto.CompactTextString(m) }
func (*Query) ProtoMessage() {}
func (*Query) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{1}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{1}
}
func (m *Query) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Query.Unmarshal(m, b)
@ -181,10 +181,10 @@ func (m *Query) GetModelJson() string {
}
type TimeRange struct {
FromRaw string `protobuf:"bytes,1,opt,name=fromRaw" json:"fromRaw,omitempty"`
ToRaw string `protobuf:"bytes,2,opt,name=toRaw" json:"toRaw,omitempty"`
FromEpochMs int64 `protobuf:"varint,3,opt,name=fromEpochMs" json:"fromEpochMs,omitempty"`
ToEpochMs int64 `protobuf:"varint,4,opt,name=toEpochMs" json:"toEpochMs,omitempty"`
FromRaw string `protobuf:"bytes,1,opt,name=fromRaw,proto3" json:"fromRaw,omitempty"`
ToRaw string `protobuf:"bytes,2,opt,name=toRaw,proto3" json:"toRaw,omitempty"`
FromEpochMs int64 `protobuf:"varint,3,opt,name=fromEpochMs,proto3" json:"fromEpochMs,omitempty"`
ToEpochMs int64 `protobuf:"varint,4,opt,name=toEpochMs,proto3" json:"toEpochMs,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -194,7 +194,7 @@ func (m *TimeRange) Reset() { *m = TimeRange{} }
func (m *TimeRange) String() string { return proto.CompactTextString(m) }
func (*TimeRange) ProtoMessage() {}
func (*TimeRange) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{2}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{2}
}
func (m *TimeRange) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TimeRange.Unmarshal(m, b)
@ -243,7 +243,7 @@ func (m *TimeRange) GetToEpochMs() int64 {
}
type DatasourceResponse struct {
Results []*QueryResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -253,7 +253,7 @@ func (m *DatasourceResponse) Reset() { *m = DatasourceResponse{} }
func (m *DatasourceResponse) String() string { return proto.CompactTextString(m) }
func (*DatasourceResponse) ProtoMessage() {}
func (*DatasourceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{3}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{3}
}
func (m *DatasourceResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DatasourceResponse.Unmarshal(m, b)
@ -281,11 +281,11 @@ func (m *DatasourceResponse) GetResults() []*QueryResult {
}
type QueryResult struct {
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
RefId string `protobuf:"bytes,2,opt,name=refId" json:"refId,omitempty"`
MetaJson string `protobuf:"bytes,3,opt,name=metaJson" json:"metaJson,omitempty"`
Series []*TimeSeries `protobuf:"bytes,4,rep,name=series" json:"series,omitempty"`
Tables []*Table `protobuf:"bytes,5,rep,name=tables" json:"tables,omitempty"`
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
RefId string `protobuf:"bytes,2,opt,name=refId,proto3" json:"refId,omitempty"`
MetaJson string `protobuf:"bytes,3,opt,name=metaJson,proto3" json:"metaJson,omitempty"`
Series []*TimeSeries `protobuf:"bytes,4,rep,name=series,proto3" json:"series,omitempty"`
Tables []*Table `protobuf:"bytes,5,rep,name=tables,proto3" json:"tables,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -295,7 +295,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{4}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{4}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryResult.Unmarshal(m, b)
@ -351,8 +351,8 @@ func (m *QueryResult) GetTables() []*Table {
}
type Table struct {
Columns []*TableColumn `protobuf:"bytes,1,rep,name=columns" json:"columns,omitempty"`
Rows []*TableRow `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"`
Columns []*TableColumn `protobuf:"bytes,1,rep,name=columns,proto3" json:"columns,omitempty"`
Rows []*TableRow `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -362,7 +362,7 @@ func (m *Table) Reset() { *m = Table{} }
func (m *Table) String() string { return proto.CompactTextString(m) }
func (*Table) ProtoMessage() {}
func (*Table) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{5}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{5}
}
func (m *Table) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Table.Unmarshal(m, b)
@ -397,7 +397,7 @@ func (m *Table) GetRows() []*TableRow {
}
type TableColumn struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -407,7 +407,7 @@ func (m *TableColumn) Reset() { *m = TableColumn{} }
func (m *TableColumn) String() string { return proto.CompactTextString(m) }
func (*TableColumn) ProtoMessage() {}
func (*TableColumn) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{6}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{6}
}
func (m *TableColumn) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TableColumn.Unmarshal(m, b)
@ -435,7 +435,7 @@ func (m *TableColumn) GetName() string {
}
type TableRow struct {
Values []*RowValue `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
Values []*RowValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -445,7 +445,7 @@ func (m *TableRow) Reset() { *m = TableRow{} }
func (m *TableRow) String() string { return proto.CompactTextString(m) }
func (*TableRow) ProtoMessage() {}
func (*TableRow) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{7}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{7}
}
func (m *TableRow) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TableRow.Unmarshal(m, b)
@ -473,11 +473,11 @@ func (m *TableRow) GetValues() []*RowValue {
}
type RowValue struct {
Kind RowValue_Kind `protobuf:"varint,1,opt,name=kind,enum=models.RowValue_Kind" json:"kind,omitempty"`
DoubleValue float64 `protobuf:"fixed64,2,opt,name=doubleValue" json:"doubleValue,omitempty"`
Int64Value int64 `protobuf:"varint,3,opt,name=int64Value" json:"int64Value,omitempty"`
BoolValue bool `protobuf:"varint,4,opt,name=boolValue" json:"boolValue,omitempty"`
StringValue string `protobuf:"bytes,5,opt,name=stringValue" json:"stringValue,omitempty"`
Kind RowValue_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=models.RowValue_Kind" json:"kind,omitempty"`
DoubleValue float64 `protobuf:"fixed64,2,opt,name=doubleValue,proto3" json:"doubleValue,omitempty"`
Int64Value int64 `protobuf:"varint,3,opt,name=int64Value,proto3" json:"int64Value,omitempty"`
BoolValue bool `protobuf:"varint,4,opt,name=boolValue,proto3" json:"boolValue,omitempty"`
StringValue string `protobuf:"bytes,5,opt,name=stringValue,proto3" json:"stringValue,omitempty"`
BytesValue []byte `protobuf:"bytes,6,opt,name=bytesValue,proto3" json:"bytesValue,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -488,7 +488,7 @@ func (m *RowValue) Reset() { *m = RowValue{} }
func (m *RowValue) String() string { return proto.CompactTextString(m) }
func (*RowValue) ProtoMessage() {}
func (*RowValue) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{8}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{8}
}
func (m *RowValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RowValue.Unmarshal(m, b)
@ -551,13 +551,13 @@ func (m *RowValue) GetBytesValue() []byte {
}
type DatasourceInfo struct {
Id int64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
OrgId int64 `protobuf:"varint,2,opt,name=orgId" json:"orgId,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
Type string `protobuf:"bytes,4,opt,name=type" json:"type,omitempty"`
Url string `protobuf:"bytes,5,opt,name=url" json:"url,omitempty"`
JsonData string `protobuf:"bytes,6,opt,name=jsonData" json:"jsonData,omitempty"`
DecryptedSecureJsonData map[string]string `protobuf:"bytes,7,rep,name=decryptedSecureJsonData" json:"decryptedSecureJsonData,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
OrgId int64 `protobuf:"varint,2,opt,name=orgId,proto3" json:"orgId,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"`
Url string `protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"`
JsonData string `protobuf:"bytes,6,opt,name=jsonData,proto3" json:"jsonData,omitempty"`
DecryptedSecureJsonData map[string]string `protobuf:"bytes,7,rep,name=decryptedSecureJsonData,proto3" json:"decryptedSecureJsonData,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -567,7 +567,7 @@ func (m *DatasourceInfo) Reset() { *m = DatasourceInfo{} }
func (m *DatasourceInfo) String() string { return proto.CompactTextString(m) }
func (*DatasourceInfo) ProtoMessage() {}
func (*DatasourceInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{9}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{9}
}
func (m *DatasourceInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DatasourceInfo.Unmarshal(m, b)
@ -637,9 +637,9 @@ func (m *DatasourceInfo) GetDecryptedSecureJsonData() map[string]string {
}
type TimeSeries struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Tags map[string]string `protobuf:"bytes,2,rep,name=tags" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
Points []*Point `protobuf:"bytes,3,rep,name=points" json:"points,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Tags map[string]string `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -649,7 +649,7 @@ func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{10}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{10}
}
func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TimeSeries.Unmarshal(m, b)
@ -691,8 +691,8 @@ func (m *TimeSeries) GetPoints() []*Point {
}
type Point struct {
Timestamp int64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"`
Value float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -702,7 +702,7 @@ func (m *Point) Reset() { *m = Point{} }
func (m *Point) String() string { return proto.CompactTextString(m) }
func (*Point) ProtoMessage() {}
func (*Point) Descriptor() ([]byte, []int) {
return fileDescriptor_datasource_1e8c5a099546b349, []int{11}
return fileDescriptor_datasource_09b4ea28a76e017e, []int{11}
}
func (m *Point) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Point.Unmarshal(m, b)
@ -786,8 +786,7 @@ func (c *datasourcePluginClient) Query(ctx context.Context, in *DatasourceReques
return out, nil
}
// Server API for DatasourcePlugin service
// DatasourcePluginServer is the server API for DatasourcePlugin service.
type DatasourcePluginServer interface {
Query(context.Context, *DatasourceRequest) (*DatasourceResponse, error)
}
@ -827,9 +826,9 @@ var _DatasourcePlugin_serviceDesc = grpc.ServiceDesc{
Metadata: "datasource.proto",
}
func init() { proto.RegisterFile("datasource.proto", fileDescriptor_datasource_1e8c5a099546b349) }
func init() { proto.RegisterFile("datasource.proto", fileDescriptor_datasource_09b4ea28a76e017e) }
var fileDescriptor_datasource_1e8c5a099546b349 = []byte{
var fileDescriptor_datasource_09b4ea28a76e017e = []byte{
// 841 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdb, 0x8e, 0x23, 0x35,
0x10, 0xa5, 0x2f, 0xc9, 0xa4, 0x2b, 0x3b, 0xa1, 0xd7, 0xdc, 0x42, 0x34, 0x42, 0xa1, 0xb5, 0x88,
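The regenerated datasource.pb.go keeps the same message shapes (TimeSeries, Point, Table, RowValue, DatasourceInfo) and only switches the struct tags to proto3 and renames the file descriptor, so existing backend plugins should keep compiling. A minimal sketch of filling one of these types, assuming the generated package can be imported as github.com/grafana/grafana-plugin-model/go/datasource (adjust the path if the package lives elsewhere):

```go
package main

import (
	"fmt"
	"time"

	// Assumed import path, derived from the module name in go.mod.
	"github.com/grafana/grafana-plugin-model/go/datasource"
)

func main() {
	// Build a single series with one point; field names are taken
	// directly from the generated structs shown above.
	series := &datasource.TimeSeries{
		Name: "requests",
		Tags: map[string]string{"instance": "demo"},
		Points: []*datasource.Point{
			{Timestamp: time.Now().UnixNano() / int64(time.Millisecond), Value: 42},
		},
	}
	fmt.Println(series.GetName(), len(series.GetPoints()))
}
```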

View File

@ -24,16 +24,16 @@ var _ = math.Inf
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type RenderRequest struct {
Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"`
Width int32 `protobuf:"varint,2,opt,name=width" json:"width,omitempty"`
Height int32 `protobuf:"varint,3,opt,name=height" json:"height,omitempty"`
Timeout int32 `protobuf:"varint,4,opt,name=timeout" json:"timeout,omitempty"`
Timezone string `protobuf:"bytes,5,opt,name=timezone" json:"timezone,omitempty"`
Encoding string `protobuf:"bytes,6,opt,name=encoding" json:"encoding,omitempty"`
FilePath string `protobuf:"bytes,7,opt,name=filePath" json:"filePath,omitempty"`
RenderKey string `protobuf:"bytes,8,opt,name=renderKey" json:"renderKey,omitempty"`
Domain string `protobuf:"bytes,9,opt,name=domain" json:"domain,omitempty"`
Debug bool `protobuf:"varint,10,opt,name=debug" json:"debug,omitempty"`
Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"`
Width int32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"`
Height int32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
Timeout int32 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
Timezone string `protobuf:"bytes,5,opt,name=timezone,proto3" json:"timezone,omitempty"`
Encoding string `protobuf:"bytes,6,opt,name=encoding,proto3" json:"encoding,omitempty"`
FilePath string `protobuf:"bytes,7,opt,name=filePath,proto3" json:"filePath,omitempty"`
RenderKey string `protobuf:"bytes,8,opt,name=renderKey,proto3" json:"renderKey,omitempty"`
Domain string `protobuf:"bytes,9,opt,name=domain,proto3" json:"domain,omitempty"`
Debug bool `protobuf:"varint,10,opt,name=debug,proto3" json:"debug,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -43,7 +43,7 @@ func (m *RenderRequest) Reset() { *m = RenderRequest{} }
func (m *RenderRequest) String() string { return proto.CompactTextString(m) }
func (*RenderRequest) ProtoMessage() {}
func (*RenderRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_renderer_1ebd8e2bd96661e9, []int{0}
return fileDescriptor_renderer_e28a7c8dd05fc171, []int{0}
}
func (m *RenderRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenderRequest.Unmarshal(m, b)
@ -134,7 +134,7 @@ func (m *RenderRequest) GetDebug() bool {
}
type RenderResponse struct {
Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -144,7 +144,7 @@ func (m *RenderResponse) Reset() { *m = RenderResponse{} }
func (m *RenderResponse) String() string { return proto.CompactTextString(m) }
func (*RenderResponse) ProtoMessage() {}
func (*RenderResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_renderer_1ebd8e2bd96661e9, []int{1}
return fileDescriptor_renderer_e28a7c8dd05fc171, []int{1}
}
func (m *RenderResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenderResponse.Unmarshal(m, b)
@ -208,8 +208,7 @@ func (c *rendererClient) Render(ctx context.Context, in *RenderRequest, opts ...
return out, nil
}
// Server API for Renderer service
// RendererServer is the server API for Renderer service.
type RendererServer interface {
Render(context.Context, *RenderRequest) (*RenderResponse, error)
}
@ -249,9 +248,9 @@ var _Renderer_serviceDesc = grpc.ServiceDesc{
Metadata: "renderer.proto",
}
func init() { proto.RegisterFile("renderer.proto", fileDescriptor_renderer_1ebd8e2bd96661e9) }
func init() { proto.RegisterFile("renderer.proto", fileDescriptor_renderer_e28a7c8dd05fc171) }
var fileDescriptor_renderer_1ebd8e2bd96661e9 = []byte{
var fileDescriptor_renderer_e28a7c8dd05fc171 = []byte{
// 265 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xb1, 0x4e, 0xc3, 0x30,
0x10, 0x86, 0x95, 0x96, 0xa4, 0xce, 0x49, 0x54, 0xc8, 0x82, 0xea, 0x54, 0x31, 0x44, 0x1d, 0x50,

View File

@ -1 +1,2 @@
.DS_Store
.idea

View File

@ -87,6 +87,10 @@ type Client struct {
// goroutines.
clientWaitGroup sync.WaitGroup
// stderrWaitGroup is used to prevent the command's Wait() function from
// being called before we've finished reading from the stderr pipe.
stderrWaitGroup sync.WaitGroup
// processKilled is used for testing only, to flag when the process was
// forcefully killed.
processKilled bool
@ -590,6 +594,12 @@ func (c *Client) Start() (addr net.Addr, err error) {
// Create a context for when we kill
c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
// Start goroutine that logs the stderr
c.clientWaitGroup.Add(1)
c.stderrWaitGroup.Add(1)
// logStderr calls Done()
go c.logStderr(cmdStderr)
c.clientWaitGroup.Add(1)
go func() {
// ensure the context is cancelled when we're done
@ -602,6 +612,10 @@ func (c *Client) Start() (addr net.Addr, err error) {
pid := c.process.Pid
path := cmd.Path
// wait to finish reading from stderr since the stderr pipe reader
// will be closed by the subsequent call to cmd.Wait().
c.stderrWaitGroup.Wait()
// Wait for the command to end.
err := cmd.Wait()
@ -624,11 +638,6 @@ func (c *Client) Start() (addr net.Addr, err error) {
c.exited = true
}()
// Start goroutine that logs the stderr
c.clientWaitGroup.Add(1)
// logStderr calls Done()
go c.logStderr(cmdStderr)
// Start a goroutine that is going to be reading the lines
// out of stdout
linesCh := make(chan string)
@ -936,6 +945,7 @@ var stdErrBufferSize = 64 * 1024
func (c *Client) logStderr(r io.Reader) {
defer c.clientWaitGroup.Done()
defer c.stderrWaitGroup.Done()
l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
reader := bufio.NewReaderSize(r, stdErrBufferSize)
@ -973,7 +983,22 @@ func (c *Client) logStderr(r io.Reader) {
entry, err := parseJSON(line)
// If output is not JSON format, print directly to Debug
if err != nil {
l.Debug(string(line))
// Attempt to infer the desired log level from the commonly used
// string prefixes
switch line := string(line); {
case strings.HasPrefix(line, "[TRACE]"):
l.Trace(line)
case strings.HasPrefix(line, "[DEBUG]"):
l.Debug(line)
case strings.HasPrefix(line, "[INFO]"):
l.Info(line)
case strings.HasPrefix(line, "[WARN]"):
l.Warn(line)
case strings.HasPrefix(line, "[ERROR]"):
l.Error(line)
default:
l.Debug(line)
}
} else {
out := flattenKVPairs(entry.KVPairs)
@ -989,6 +1014,11 @@ func (c *Client) logStderr(r io.Reader) {
l.Warn(entry.Message, out...)
case hclog.Error:
l.Error(entry.Message, out...)
default:
// if there was no log level, it's likely this is unexpected
// json from something other than hclog, and we should output
// it verbatim.
l.Debug(string(line))
}
}
}
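A minimal sketch of what the new prefix handling means for a plugin author: plain (non-JSON) lines written to stderr can carry a bracketed level tag, and the host's logStderr loop above forwards them at that level instead of logging everything at Debug. The messages are illustrative only.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// These land at Info and Warn on the host side.
	fmt.Fprintln(os.Stderr, "[INFO] plugin started")
	fmt.Fprintln(os.Stderr, "[WARN] cache directory missing, recreating it")
	// Unprefixed, non-JSON output still falls through to Debug.
	fmt.Fprintln(os.Stderr, "plain text without a prefix")
}
```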

View File

@ -3,6 +3,7 @@ package plugin
import (
"crypto/tls"
"fmt"
"math"
"net"
"time"
@ -32,6 +33,11 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn,
credentials.NewTLS(tls)))
}
opts = append(opts,
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32)))
// Connect. Note the first parameter is unused because we use a custom
// dialer that has the state to see the address.
conn, err := grpc.Dial("unused", opts...)
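The two default call options added above lift gRPC's per-message limits (the receive side defaults to 4 MB) to math.MaxInt32 for every call made over the plugin connection. A standalone sketch of the same options on an ordinary client, with a placeholder target address:

```go
package main

import (
	"log"
	"math"

	"google.golang.org/grpc"
)

func main() {
	// "localhost:50051" is a placeholder target; the call options mirror
	// the ones appended in dialGRPCConn above.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(math.MaxInt32),
			grpc.MaxCallSendMsgSize(math.MaxInt32),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```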

View File

@ -363,14 +363,34 @@ func serverListener() (net.Listener, error) {
}
func serverListener_tcp() (net.Listener, error) {
minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32)
if err != nil {
return nil, err
envMinPort := os.Getenv("PLUGIN_MIN_PORT")
envMaxPort := os.Getenv("PLUGIN_MAX_PORT")
var minPort, maxPort int64
var err error
switch {
case len(envMinPort) == 0:
minPort = 0
default:
minPort, err = strconv.ParseInt(envMinPort, 10, 32)
if err != nil {
return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err)
}
}
maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32)
if err != nil {
return nil, err
switch {
case len(envMaxPort) == 0:
maxPort = 0
default:
maxPort, err = strconv.ParseInt(envMaxPort, 10, 32)
if err != nil {
return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err)
}
}
if minPort > maxPort {
return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
}
for port := minPort; port <= maxPort; port++ {


vendor/github.com/hashicorp/yamux/go.mod generated vendored Normal file
View File

@ -0,0 +1 @@
module github.com/hashicorp/yamux

View File

@ -3,6 +3,7 @@ package yamux
import (
"fmt"
"io"
"log"
"os"
"time"
)
@ -30,8 +31,13 @@ type Config struct {
// window size that we allow for a stream.
MaxStreamWindowSize uint32
// LogOutput is used to control the log destination
// LogOutput is used to control the log destination. Either Logger or
// LogOutput can be set, not both.
LogOutput io.Writer
// Logger is used to pass in the logger to be used. Either Logger or
// LogOutput can be set, not both.
Logger *log.Logger
}
// DefaultConfig is used to return a default configuration
@ -57,6 +63,11 @@ func VerifyConfig(config *Config) error {
if config.MaxStreamWindowSize < initialStreamWindow {
return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
}
if config.LogOutput != nil && config.Logger != nil {
return fmt.Errorf("both Logger and LogOutput may not be set, select one")
} else if config.LogOutput == nil && config.Logger == nil {
return fmt.Errorf("one of Logger or LogOutput must be set, select one")
}
return nil
}
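With the new field, callers can hand yamux an existing *log.Logger instead of a raw io.Writer, and VerifyConfig now insists on exactly one of Logger or LogOutput being set. A minimal sketch, assuming the usual github.com/hashicorp/yamux import:

```go
package main

import (
	"log"
	"net"
	"os"

	"github.com/hashicorp/yamux"
)

func dialMuxed(addr string) (*yamux.Session, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, err
	}
	cfg := yamux.DefaultConfig()
	// DefaultConfig pre-sets LogOutput; clear it, since VerifyConfig now
	// rejects having both Logger and LogOutput set.
	cfg.LogOutput = nil
	cfg.Logger = log.New(os.Stderr, "yamux: ", log.LstdFlags)
	return yamux.Client(conn, cfg)
}

func main() {
	if _, err := dialMuxed("127.0.0.1:7000"); err != nil {
		log.Println("dial failed (expected without a server):", err)
	}
}
```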

View File

@ -86,9 +86,14 @@ type sendReady struct {
// newSession is used to construct a new session
func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
logger := config.Logger
if logger == nil {
logger = log.New(config.LogOutput, "", log.LstdFlags)
}
s := &Session{
config: config,
logger: log.New(config.LogOutput, "", log.LstdFlags),
logger: logger,
conn: conn,
bufRead: bufio.NewReader(conn),
pings: make(map[uint32]chan struct{}),

View File

@ -10,9 +10,7 @@ go-sqlite3
sqlite3 driver conforming to the built-in database/sql interface
Supported Golang version:
- 1.9.x
- 1.10.x
Supported Golang version: See .travis.yml
[This package follows the official Golang Release Policy.](https://golang.org/doc/devel/release.html#policy)
@ -249,7 +247,7 @@ Required dependency
brew install sqlite3
```
For OSX there is an additional package install which is required if you whish to build the `icu` extension.
For OSX there is an additional package install which is required if you wish to build the `icu` extension.
This additional package can be installed with `homebrew`.
@ -282,7 +280,7 @@ To compile this package on Windows OS you must have the `gcc` compiler installed
3) Open a terminal for the TDM-GCC toolchain, can be found in the Windows Start menu.
4) Navigate to your project folder and run the `go build ...` command for this package.
For example the TDM-GCC Toolchain can be found [here](ttps://sourceforge.net/projects/tdm-gcc/).
For example the TDM-GCC Toolchain can be found [here](https://sourceforge.net/projects/tdm-gcc/).
## Errors
@ -458,15 +456,19 @@ For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatial
Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database?
Each connection to :memory: opens a brand new in-memory sql database, so if
Each connection to `":memory:"` opens a brand new in-memory sql database, so if
the stdlib's sql engine happens to open another connection and you've only
specified ":memory:", that connection will see a brand new database. A
workaround is to use "file::memory:?mode=memory&cache=shared". Every
connection to this string will point to the same in-memory database.
specified `":memory:"`, that connection will see a brand new database. A
workaround is to use `"file::memory:?cache=shared"` (or `"file:foobar?mode=memory&cache=shared"`). Every
connection to this string will point to the same in-memory database.
Note that if the last database connection in the pool closes, the in-memory database is deleted. Make sure the [max idle connection limit](https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns) is > 0, and the [connection lifetime](https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime) is infinite.
For more information see
* [#204](https://github.com/mattn/go-sqlite3/issues/204)
* [#511](https://github.com/mattn/go-sqlite3/issues/511)
* https://www.sqlite.org/sharedcache.html#shared_cache_and_in_memory_databases
* https://www.sqlite.org/inmemorydb.html#sharedmemdb
- Reading from database with large amount of goroutines fails on OSX.
@ -481,11 +483,11 @@ For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatial
You need to implement the feature or call the sqlite3 cli.
More infomation see [#305](https://github.com/mattn/go-sqlite3/issues/305)
More information see [#305](https://github.com/mattn/go-sqlite3/issues/305)
- Error: `database is locked`
When you get an database is locked. Please use the following options.
When you get a database is locked. Please use the following options.
Add to DSN: `cache=shared`
@ -497,7 +499,7 @@ For an example see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatial
Second please set the database connections of the SQL package to 1.
```go
db.SetMaxOpenConn(1)
db.SetMaxOpenConns(1)
```
More information see [#209](https://github.com/mattn/go-sqlite3/issues/209)
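The corrected FAQ entries above boil down to a small amount of setup code. A sketch of the shared in-memory DSN together with the pool settings the README recommends; the table and limits are the caller's choice:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// Every connection to this DSN sees the same in-memory database.
	db, err := sql.Open("sqlite3", "file::memory:?cache=shared")
	if err != nil {
		log.Fatal(err)
	}
	// Keep an idle connection alive indefinitely; the in-memory database
	// is deleted when the last connection in the pool closes.
	db.SetMaxIdleConns(1)
	db.SetConnMaxLifetime(0)
	// Per the "database is locked" advice, cap the pool at one open connection.
	db.SetMaxOpenConns(1)

	if _, err := db.Exec(`CREATE TABLE t (id INTEGER PRIMARY KEY)`); err != nil {
		log.Fatal(err)
	}
}
```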

View File

@ -368,7 +368,7 @@ func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
func callbackError(ctx *C.sqlite3_context, err error) {
cstr := C.CString(err.Error())
defer C.free(unsafe.Pointer(cstr))
C.sqlite3_result_error(ctx, cstr, -1)
C.sqlite3_result_error(ctx, cstr, C.int(-1))
}
// Test support code. Tests are not allowed to import "C", so we can't

File diff suppressed because it is too large

View File

@ -124,9 +124,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION "3.25.2"
#define SQLITE_VERSION_NUMBER 3025002
#define SQLITE_SOURCE_ID "2018-09-25 19:08:10 fb90e7189ae6d62e77ba3a308ca5d683f90bbe633cf681865365b8e92792d1c7"
#define SQLITE_VERSION "3.29.0"
#define SQLITE_VERSION_NUMBER 3029000
#define SQLITE_SOURCE_ID "2019-07-10 17:32:03 fc82b73eaac8b36950e527f12c4b5dc1e147e6f4ad2217ae43ad82882a88bfa6"
/*
** CAPI3REF: Run-Time Library Version Numbers
@ -190,6 +190,9 @@ SQLITE_API int sqlite3_libversion_number(void);
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
SQLITE_API const char *sqlite3_compileoption_get(int N);
#else
# define sqlite3_compileoption_used(X) 0
# define sqlite3_compileoption_get(X) ((void*)0)
#endif
/*
@ -824,6 +827,15 @@ struct sqlite3_io_methods {
** file space based on this hint in order to help writes to the database
** file run faster.
**
** <li>[[SQLITE_FCNTL_SIZE_LIMIT]]
** The [SQLITE_FCNTL_SIZE_LIMIT] opcode is used by in-memory VFS that
** implements [sqlite3_deserialize()] to set an upper bound on the size
** of the in-memory database. The argument is a pointer to a [sqlite3_int64].
** If the integer pointed to is negative, then it is filled in with the
** current limit. Otherwise the limit is set to the larger of the value
** of the integer pointed to and the current database size. The integer
** pointed to is set to the new limit.
**
** <li>[[SQLITE_FCNTL_CHUNK_SIZE]]
** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS
** extends and truncates the database file in chunks of a size specified
@ -1132,6 +1144,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33
#define SQLITE_FCNTL_LOCK_TIMEOUT 34
#define SQLITE_FCNTL_DATA_VERSION 35
#define SQLITE_FCNTL_SIZE_LIMIT 36
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@ -1284,8 +1297,14 @@ typedef struct sqlite3_api_routines sqlite3_api_routines;
** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS]
** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to
** test whether a file is readable and writable, or [SQLITE_ACCESS_READ]
** to test whether a file is at least readable. The file can be a
** directory.
** to test whether a file is at least readable. The SQLITE_ACCESS_READ
** flag is never actually used and is not implemented in the built-in
** VFSes of SQLite. The file is named by the second argument and can be a
** directory. The xAccess method returns [SQLITE_OK] on success or some
** non-zero error code if there is an I/O error or if the name of
** the file given in the second argument is illegal. If SQLITE_OK
** is returned, then non-zero or zero is written into *pResOut to indicate
** whether or not the file is accessible.
**
** ^SQLite will always allocate at least mxPathname+1 bytes for the
** output buffer xFullPathname. The exact size of the output buffer
@ -1973,6 +1992,17 @@ struct sqlite3_mem_methods {
** negative value for this option restores the default behaviour.
** This option is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
**
** [[SQLITE_CONFIG_MEMDB_MAXSIZE]]
** <dt>SQLITE_CONFIG_MEMDB_MAXSIZE
** <dd>The SQLITE_CONFIG_MEMDB_MAXSIZE option accepts a single parameter
** [sqlite3_int64] parameter which is the default maximum size for an in-memory
** database created using [sqlite3_deserialize()]. This default maximum
** size can be adjusted up or down for individual databases using the
** [SQLITE_FCNTL_SIZE_LIMIT] [sqlite3_file_control|file-control]. If this
** configuration setting is never used, then the default maximum is determined
** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
** compile-time option is not set, then the default maximum is 1073741824.
** </dl>
*/
#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
@ -2003,6 +2033,7 @@ struct sqlite3_mem_methods {
#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */
#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */
#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */
#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */
/*
** CAPI3REF: Database Connection Configuration Options
@ -2018,6 +2049,7 @@ struct sqlite3_mem_methods {
** is invoked.
**
** <dl>
** [[SQLITE_DBCONFIG_LOOKASIDE]]
** <dt>SQLITE_DBCONFIG_LOOKASIDE</dt>
** <dd> ^This option takes three additional arguments that determine the
** [lookaside memory allocator] configuration for the [database connection].
@ -2040,6 +2072,7 @@ struct sqlite3_mem_methods {
** memory is in use leaves the configuration unchanged and returns
** [SQLITE_BUSY].)^</dd>
**
** [[SQLITE_DBCONFIG_ENABLE_FKEY]]
** <dt>SQLITE_DBCONFIG_ENABLE_FKEY</dt>
** <dd> ^This option is used to enable or disable the enforcement of
** [foreign key constraints]. There should be two additional arguments.
@ -2050,6 +2083,7 @@ struct sqlite3_mem_methods {
** following this call. The second parameter may be a NULL pointer, in
** which case the FK enforcement setting is not reported back. </dd>
**
** [[SQLITE_DBCONFIG_ENABLE_TRIGGER]]
** <dt>SQLITE_DBCONFIG_ENABLE_TRIGGER</dt>
** <dd> ^This option is used to enable or disable [CREATE TRIGGER | triggers].
** There should be two additional arguments.
@ -2060,9 +2094,10 @@ struct sqlite3_mem_methods {
** following this call. The second parameter may be a NULL pointer, in
** which case the trigger setting is not reported back. </dd>
**
** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]]
** <dt>SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER</dt>
** <dd> ^This option is used to enable or disable the two-argument
** version of the [fts3_tokenizer()] function which is part of the
** <dd> ^This option is used to enable or disable the
** [fts3_tokenizer()] function which is part of the
** [FTS3] full-text search engine extension.
** There should be two additional arguments.
** The first argument is an integer which is 0 to disable fts3_tokenizer() or
@ -2073,6 +2108,7 @@ struct sqlite3_mem_methods {
** following this call. The second parameter may be a NULL pointer, in
** which case the new setting is not reported back. </dd>
**
** [[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION]]
** <dt>SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION</dt>
** <dd> ^This option is used to enable or disable the [sqlite3_load_extension()]
** interface independently of the [load_extension()] SQL function.
@ -2090,7 +2126,7 @@ struct sqlite3_mem_methods {
** be a NULL pointer, in which case the new setting is not reported back.
** </dd>
**
** <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
** [[SQLITE_DBCONFIG_MAINDBNAME]] <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
** <dd> ^This option is used to change the name of the "main" database
** schema. ^The sole argument is a pointer to a constant UTF8 string
** which will become the new schema name in place of "main". ^SQLite
@ -2099,6 +2135,7 @@ struct sqlite3_mem_methods {
** until after the database connection closes.
** </dd>
**
** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]]
** <dt>SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE</dt>
** <dd> Usually, when a database in wal mode is closed or detached from a
** database handle, SQLite checks if this will mean that there are now no
@ -2112,7 +2149,7 @@ struct sqlite3_mem_methods {
** have been disabled - 0 if they are not disabled, 1 if they are.
** </dd>
**
** <dt>SQLITE_DBCONFIG_ENABLE_QPSG</dt>
** [[SQLITE_DBCONFIG_ENABLE_QPSG]] <dt>SQLITE_DBCONFIG_ENABLE_QPSG</dt>
** <dd>^(The SQLITE_DBCONFIG_ENABLE_QPSG option activates or deactivates
** the [query planner stability guarantee] (QPSG). When the QPSG is active,
** a single SQL query statement will always use the same algorithm regardless
@ -2128,7 +2165,7 @@ struct sqlite3_mem_methods {
** following this call.
** </dd>
**
** <dt>SQLITE_DBCONFIG_TRIGGER_EQP</dt>
** [[SQLITE_DBCONFIG_TRIGGER_EQP]] <dt>SQLITE_DBCONFIG_TRIGGER_EQP</dt>
** <dd> By default, the output of EXPLAIN QUERY PLAN commands does not
** include output for any operations performed by trigger programs. This
** option is used to set or clear (the default) a flag that governs this
@ -2140,7 +2177,7 @@ struct sqlite3_mem_methods {
** it is not disabled, 1 if it is.
** </dd>
**
** <dt>SQLITE_DBCONFIG_RESET_DATABASE</dt>
** [[SQLITE_DBCONFIG_RESET_DATABASE]] <dt>SQLITE_DBCONFIG_RESET_DATABASE</dt>
** <dd> Set the SQLITE_DBCONFIG_RESET_DATABASE flag and then run
** [VACUUM] in order to reset a database back to an empty database
** with no schema and no content. The following process works even for
@ -2159,6 +2196,58 @@ struct sqlite3_mem_methods {
** Because resetting a database is destructive and irreversible, the
** process requires the use of this obscure API and multiple steps to help
** ensure that it does not happen by accident.
**
** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt>
** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the
** "defensive" flag for a database connection. When the defensive
** flag is enabled, language features that allow ordinary SQL to
** deliberately corrupt the database file are disabled. The disabled
** features include but are not limited to the following:
** <ul>
** <li> The [PRAGMA writable_schema=ON] statement.
** <li> The [PRAGMA journal_mode=OFF] statement.
** <li> Writes to the [sqlite_dbpage] virtual table.
** <li> Direct writes to [shadow tables].
** </ul>
** </dd>
**
** [[SQLITE_DBCONFIG_WRITABLE_SCHEMA]] <dt>SQLITE_DBCONFIG_WRITABLE_SCHEMA</dt>
** <dd>The SQLITE_DBCONFIG_WRITABLE_SCHEMA option activates or deactivates the
** "writable_schema" flag. This has the same effect and is logically equivalent
** to setting [PRAGMA writable_schema=ON] or [PRAGMA writable_schema=OFF].
** The first argument to this setting is an integer which is 0 to disable
** the writable_schema, positive to enable writable_schema, or negative to
** leave the setting unchanged. The second parameter is a pointer to an
** integer into which is written 0 or 1 to indicate whether the writable_schema
** is enabled or disabled following this call.
** </dd>
**
** [[SQLITE_DBCONFIG_LEGACY_ALTER_TABLE]]
** <dt>SQLITE_DBCONFIG_LEGACY_ALTER_TABLE</dt>
** <dd>The SQLITE_DBCONFIG_LEGACY_ALTER_TABLE option activates or deactivates
** the legacy behavior of the [ALTER TABLE RENAME] command such it
** behaves as it did prior to [version 3.24.0] (2018-06-04). See the
** "Compatibility Notice" on the [ALTER TABLE RENAME documentation] for
** additional information. This feature can also be turned on and off
** using the [PRAGMA legacy_alter_table] statement.
** </dd>
**
** [[SQLITE_DBCONFIG_DQS_DML]]
** <dt>SQLITE_DBCONFIG_DQS_DML</td>
** <dd>The SQLITE_DBCONFIG_DQS_DML option activates or deactivates
** the legacy [double-quoted string literal] misfeature for DML statement
** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The
** default value of this setting is determined by the [-DSQLITE_DQS]
** compile-time option.
** </dd>
**
** [[SQLITE_DBCONFIG_DQS_DDL]]
** <dt>SQLITE_DBCONFIG_DQS_DDL</td>
** <dd>The SQLITE_DBCONFIG_DQS option activates or deactivates
** the legacy [double-quoted string literal] misfeature for DDL statements,
** such as CREATE TABLE and CREATE INDEX. The
** default value of this setting is determined by the [-DSQLITE_DQS]
** compile-time option.
** </dd>
** </dl>
*/
@ -2172,7 +2261,12 @@ struct sqlite3_mem_methods {
#define SQLITE_DBCONFIG_ENABLE_QPSG 1007 /* int int* */
#define SQLITE_DBCONFIG_TRIGGER_EQP 1008 /* int int* */
#define SQLITE_DBCONFIG_RESET_DATABASE 1009 /* int int* */
#define SQLITE_DBCONFIG_MAX 1009 /* Largest DBCONFIG */
#define SQLITE_DBCONFIG_DEFENSIVE 1010 /* int int* */
#define SQLITE_DBCONFIG_WRITABLE_SCHEMA 1011 /* int int* */
#define SQLITE_DBCONFIG_LEGACY_ALTER_TABLE 1012 /* int int* */
#define SQLITE_DBCONFIG_DQS_DML 1013 /* int int* */
#define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */
#define SQLITE_DBCONFIG_MAX 1014 /* Largest DBCONFIG */
/*
** CAPI3REF: Enable Or Disable Extended Result Codes
@ -2329,7 +2423,7 @@ SQLITE_API int sqlite3_changes(sqlite3*);
** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
** are not counted.
**
** This the [sqlite3_total_changes(D)] interface only reports the number
** The [sqlite3_total_changes(D)] interface only reports the number
** of rows that changed due to SQL statement run against database
** connection D. Any changes by other database connections are ignored.
** To detect changes against a database file from other database
@ -2973,9 +3067,9 @@ SQLITE_API int sqlite3_set_authorizer(
** time is in units of nanoseconds, however the current implementation
** is only capable of millisecond resolution so the six least significant
** digits in the time are meaningless. Future versions of SQLite
** might provide greater resolution on the profiler callback. The
** sqlite3_profile() function is considered experimental and is
** subject to change in future versions of SQLite.
** might provide greater resolution on the profiler callback. Invoking
** either [sqlite3_trace()] or [sqlite3_trace_v2()] will cancel the
** profile callback.
*/
SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*,
void(*xTrace)(void*,const char*), void*);
@ -3389,6 +3483,8 @@ SQLITE_API int sqlite3_open_v2(
** is not a database file pathname pointer that SQLite passed into the xOpen
** VFS method, then the behavior of this routine is undefined and probably
** undesirable.
**
** See the [URI filename] documentation for additional information.
*/
SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
@ -3610,9 +3706,24 @@ SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
** on this hint by avoiding the use of [lookaside memory] so as not to
** deplete the limited store of lookaside memory. Future versions of
** SQLite may act on this hint differently.
**
** [[SQLITE_PREPARE_NORMALIZE]] <dt>SQLITE_PREPARE_NORMALIZE</dt>
** <dd>The SQLITE_PREPARE_NORMALIZE flag is a no-op. This flag used
** to be required for any prepared statement that wanted to use the
** [sqlite3_normalized_sql()] interface. However, the
** [sqlite3_normalized_sql()] interface is now available to all
** prepared statements, regardless of whether or not they use this
** flag.
**
** [[SQLITE_PREPARE_NO_VTAB]] <dt>SQLITE_PREPARE_NO_VTAB</dt>
** <dd>The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler
** to return an error (error code SQLITE_ERROR) if the statement uses
** any virtual tables.
** </dl>
*/
#define SQLITE_PREPARE_PERSISTENT 0x01
#define SQLITE_PREPARE_NORMALIZE 0x02
#define SQLITE_PREPARE_NO_VTAB 0x04
/*
** CAPI3REF: Compiling An SQL Statement
@ -3770,6 +3881,11 @@ SQLITE_API int sqlite3_prepare16_v3(
** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8
** string containing the SQL text of prepared statement P with
** [bound parameters] expanded.
** ^The sqlite3_normalized_sql(P) interface returns a pointer to a UTF-8
** string containing the normalized SQL text of prepared statement P. The
** semantics used to normalize a SQL statement are unspecified and subject
** to change. At a minimum, literal values will be replaced with suitable
** placeholders.
**
** ^(For example, if a prepared statement is created using the SQL
** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345
@ -3785,14 +3901,16 @@ SQLITE_API int sqlite3_prepare16_v3(
** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time
** option causes sqlite3_expanded_sql() to always return NULL.
**
** ^The string returned by sqlite3_sql(P) is managed by SQLite and is
** automatically freed when the prepared statement is finalized.
** ^The strings returned by sqlite3_sql(P) and sqlite3_normalized_sql(P)
** are managed by SQLite and are automatically freed when the prepared
** statement is finalized.
** ^The string returned by sqlite3_expanded_sql(P), on the other hand,
** is obtained from [sqlite3_malloc()] and must be free by the application
** by passing it to [sqlite3_free()].
*/
SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If An SQL Statement Writes The Database
@ -3830,6 +3948,18 @@ SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
*/
SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Query The EXPLAIN Setting For A Prepared Statement
** METHOD: sqlite3_stmt
**
** ^The sqlite3_stmt_isexplain(S) interface returns 1 if the
** prepared statement S is an EXPLAIN statement, or 2 if the
** statement S is an EXPLAIN QUERY PLAN.
** ^The sqlite3_stmt_isexplain(S) interface returns 0 if S is
** an ordinary statement or a NULL pointer.
*/
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
@ -3969,7 +4099,9 @@ typedef struct sqlite3_context sqlite3_context;
** ^The fifth argument to the BLOB and string binding interfaces
** is a destructor used to dispose of the BLOB or
** string after SQLite has finished with it. ^The destructor is called
** to dispose of the BLOB or string even if the call to bind API fails.
** to dispose of the BLOB or string even if the call to the bind API fails,
** except the destructor is not called if the third parameter is a NULL
** pointer or the fourth parameter is negative.
** ^If the fifth argument is
** the special value [SQLITE_STATIC], then SQLite assumes that the
** information is in static, unmanaged space and does not need to be freed.
@ -4886,6 +5018,8 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
** <tr><td><b>sqlite3_value_nochange&nbsp;&nbsp;</b>
** <td>&rarr;&nbsp;&nbsp;<td>True if the column is unchanged in an UPDATE
** against a virtual table.
** <tr><td><b>sqlite3_value_frombind&nbsp;&nbsp;</b>
** <td>&rarr;&nbsp;&nbsp;<td>True if value originated from a [bound parameter]
** </table></blockquote>
**
** <b>Details:</b>
@ -4947,6 +5081,11 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
** than within an [xUpdate] method call for an UPDATE statement, then
** the return value is arbitrary and meaningless.
**
** ^The sqlite3_value_frombind(X) interface returns non-zero if the
** value X originated from one of the [sqlite3_bind_int|sqlite3_bind()]
** interfaces. ^If X comes from an SQL literal value, or a table column,
** and expression, then sqlite3_value_frombind(X) returns zero.
**
** Please pay particular attention to the fact that the pointer returned
** from [sqlite3_value_blob()], [sqlite3_value_text()], or
** [sqlite3_value_text16()] can be invalidated by a subsequent call to
@ -4992,6 +5131,7 @@ SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
SQLITE_API int sqlite3_value_type(sqlite3_value*);
SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
SQLITE_API int sqlite3_value_nochange(sqlite3_value*);
SQLITE_API int sqlite3_value_frombind(sqlite3_value*);
/*
** CAPI3REF: Finding The Subtype Of SQL Values
@ -5727,7 +5867,7 @@ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
** associated with database N of connection D. ^The main database file
** has the name "main". If there is no attached database N on the database
** connection D, or if database N is a temporary or in-memory database, then
** a NULL pointer is returned.
** this function will return either a NULL pointer or an empty string.
**
** ^The filename returned by this function is the output of the
** xFullPathname method of the [VFS]. ^In other words, the filename
@ -6282,6 +6422,9 @@ struct sqlite3_module {
int (*xSavepoint)(sqlite3_vtab *pVTab, int);
int (*xRelease)(sqlite3_vtab *pVTab, int);
int (*xRollbackTo)(sqlite3_vtab *pVTab, int);
/* The methods above are in versions 1 and 2 of the sqlite_module object.
** Those below are for version 3 and greater. */
int (*xShadowName)(const char*);
};
/*
@ -7204,6 +7347,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
#define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17
#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD 19
@ -7214,7 +7358,8 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_SORTER_MMAP 24
#define SQLITE_TESTCTRL_IMPOSTER 25
#define SQLITE_TESTCTRL_PARSER_COVERAGE 26
#define SQLITE_TESTCTRL_LAST 26 /* Largest TESTCTRL */
#define SQLITE_TESTCTRL_RESULT_INTREAL 27
#define SQLITE_TESTCTRL_LAST 27 /* Largest TESTCTRL */
/*
** CAPI3REF: SQL Keyword Checking
@ -8616,6 +8761,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
** can use to customize and optimize their behavior.
**
** <dl>
** [[SQLITE_VTAB_CONSTRAINT_SUPPORT]]
** <dt>SQLITE_VTAB_CONSTRAINT_SUPPORT
** <dd>Calls of the form
** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
@ -9385,7 +9531,7 @@ struct sqlite3_rtree_query_info {
sqlite3_int64 iRowid; /* Rowid for current entry */
sqlite3_rtree_dbl rParentScore; /* Score of parent node */
int eParentWithin; /* Visibility of parent node */
int eWithin; /* OUT: Visiblity */
int eWithin; /* OUT: Visibility */
sqlite3_rtree_dbl rScore; /* OUT: Write the score here */
/* The following fields are only available in 3.8.11 and later */
sqlite3_value **apSqlParam; /* Original SQL values of parameters */
@ -9881,12 +10027,38 @@ SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession);
** consecutively. There is no chance that the iterator will visit a change
** the applies to table X, then one for table Y, and then later on visit
** another change for table X.
**
** The behavior of sqlite3changeset_start_v2() and its streaming equivalent
** may be modified by passing a combination of
** [SQLITE_CHANGESETSTART_INVERT | supported flags] as the 4th parameter.
**
** Note that the sqlite3changeset_start_v2() API is still <b>experimental</b>
** and therefore subject to change.
*/
SQLITE_API int sqlite3changeset_start(
sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
int nChangeset, /* Size of changeset blob in bytes */
void *pChangeset /* Pointer to blob containing changeset */
);
SQLITE_API int sqlite3changeset_start_v2(
sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
int nChangeset, /* Size of changeset blob in bytes */
void *pChangeset, /* Pointer to blob containing changeset */
int flags /* SESSION_CHANGESETSTART_* flags */
);
/*
** CAPI3REF: Flags for sqlite3changeset_start_v2
**
** The following flags may passed via the 4th parameter to
** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]:
**
** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
** Invert the changeset while iterating through it. This is equivalent to
** inverting a changeset using sqlite3changeset_invert() before applying it.
** It is an error to specify this flag with a patchset.
*/
#define SQLITE_CHANGESETSTART_INVERT 0x0002
/*
@ -9930,7 +10102,7 @@ SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
** sqlite3changeset_next() is called on the iterator or until the
** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
** set to the number of columns in the table affected by the change. If
** pbIncorrect is not NULL, then *pbIndirect is set to true (1) if the change
** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
** is an indirect change, or false (0) otherwise. See the documentation for
** [sqlite3session_indirect()] for a description of direct and indirect
** changes. Finally, if pOp is not NULL, then *pOp is set to one of
@ -10541,7 +10713,7 @@ SQLITE_API int sqlite3changeset_apply_v2(
),
void *pCtx, /* First argument passed to xConflict */
void **ppRebase, int *pnRebase, /* OUT: Rebase data */
int flags /* Combination of SESSION_APPLY_* flags */
int flags /* SESSION_CHANGESETAPPLY_* flags */
);
/*
@ -10559,8 +10731,14 @@ SQLITE_API int sqlite3changeset_apply_v2(
** causes the sessions module to omit this savepoint. In this case, if the
** caller has an open transaction or savepoint when apply_v2() is called,
** it may revert the partially applied changeset by rolling it back.
**
** <dt>SQLITE_CHANGESETAPPLY_INVERT <dd>
** Invert the changeset before applying it. This is equivalent to inverting
** a changeset using sqlite3changeset_invert() before applying it. It is
** an error to specify this flag with a patchset.
*/
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
/*
** CAPI3REF: Constants Passed To The Conflict Handler
@ -10791,7 +10969,7 @@ SQLITE_API int sqlite3rebaser_configure(
** in size. This function allocates and populates a buffer with a copy
** of the changeset rebased rebased according to the configuration of the
** rebaser object passed as the first argument. If successful, (*ppOut)
** is set to point to the new buffer containing the rebased changset and
** is set to point to the new buffer containing the rebased changeset and
** (*pnOut) to its size in bytes and SQLITE_OK returned. It is the
** responsibility of the caller to eventually free the new buffer using
** sqlite3_free(). Otherwise, if an error occurs, (*ppOut) and (*pnOut)
@ -10954,6 +11132,12 @@ SQLITE_API int sqlite3changeset_start_strm(
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn
);
SQLITE_API int sqlite3changeset_start_v2_strm(
sqlite3_changeset_iter **pp,
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn,
int flags
);
SQLITE_API int sqlite3session_changeset_strm(
sqlite3_session *pSession,
int (*xOutput)(void *pOut, const void *pData, int nData),
@ -10980,6 +11164,45 @@ SQLITE_API int sqlite3rebaser_rebase_strm(
void *pOut
);
/*
** CAPI3REF: Configure global parameters
**
** The sqlite3session_config() interface is used to make global configuration
** changes to the sessions module in order to tune it to the specific needs
** of the application.
**
** The sqlite3session_config() interface is not threadsafe. If it is invoked
** while any other thread is inside any other sessions method then the
** results are undefined. Furthermore, if it is invoked after any sessions
** related objects have been created, the results are also undefined.
**
** The first argument to the sqlite3session_config() function must be one
** of the SQLITE_SESSION_CONFIG_XXX constants defined below. The
** interpretation of the (void*) value passed as the second parameter and
** the effect of calling this function depends on the value of the first
** parameter.
**
** <dl>
** <dt>SQLITE_SESSION_CONFIG_STRMSIZE<dd>
** By default, the sessions module streaming interfaces attempt to input
** and output data in approximately 1 KiB chunks. This operand may be used
** to set and query the value of this configuration setting. The pointer
** passed as the second argument must point to a value of type (int).
** If this value is greater than 0, it is used as the new streaming data
** chunk size for both input and output. Before returning, the (int) value
** pointed to by pArg is set to the final value of the streaming interface
** chunk size.
** </dl>
**
** This function returns SQLITE_OK if successful, or an SQLite error code
** otherwise.
*/
SQLITE_API int sqlite3session_config(int op, void *pArg);
/*
** CAPI3REF: Values for sqlite3session_config().
*/
#define SQLITE_SESSION_CONFIG_STRMSIZE 1
/*
** Make sure we can call this stuff from C++.
@ -11113,12 +11336,8 @@ struct Fts5PhraseIter {
**
** Usually, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
** first token of the phrase. The exception is if the table was created
** with the offsets=0 option specified. In this case *piOff is always
** set to -1.
**
** Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM)
** if an error occurs.
** first token of the phrase. Returns SQLITE_OK if successful, or an error
** code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@ -11159,7 +11378,7 @@ struct Fts5PhraseIter {
** Save the pointer passed as the second argument as the extension functions
** "auxiliary data". The pointer may then be retrieved by the current or any
** future invocation of the same fts5 extension function made as part of
** of the same MATCH query using the xGetAuxdata() API.
** the same MATCH query using the xGetAuxdata() API.
**
** Each extension function is allocated a single auxiliary data slot for
** each FTS query (MATCH expression). If the extension function is invoked
@ -11174,7 +11393,7 @@ struct Fts5PhraseIter {
** The xDelete callback, if one is specified, is also invoked on the
** auxiliary data pointer after the FTS5 query has finished.
**
** If an error (e.g. an OOM condition) occurs within this function, an
** If an error (e.g. an OOM condition) occurs within this function,
** the auxiliary data is set to NULL and an error code returned. If the
** xDelete parameter was not NULL, it is invoked on the auxiliary data
** pointer before returning.
@ -11407,11 +11626,11 @@ struct Fts5ExtensionApi {
** the tokenizer substitutes "first" for "1st" and the query works
** as expected.
**
** <li> By adding multiple synonyms for a single term to the FTS index.
** In this case, when tokenizing query text, the tokenizer may
** provide multiple synonyms for a single term within the document.
** FTS5 then queries the index for each synonym individually. For
** example, faced with the query:
** <li> By querying the index for all synonyms of each query term
** separately. In this case, when tokenizing query text, the
** tokenizer may provide multiple synonyms for a single term
** within the document. FTS5 then queries the index for each
** synonym individually. For example, faced with the query:
**
** <codeblock>
** ... MATCH 'first place'</codeblock>
@ -11435,7 +11654,7 @@ struct Fts5ExtensionApi {
** "place".
**
** This way, even if the tokenizer does not provide synonyms
** when tokenizing query text (it should not - to do would be
** when tokenizing query text (it should not - to do so would be
** inefficient), it doesn't matter if the user queries for
** 'first + place' or '1st + place', as there are entries in the
** FTS index corresponding to both forms of the first token.

View File

@ -683,7 +683,7 @@ func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool
ai.stepArgConverters = append(ai.stepArgConverters, conv)
}
if step.IsVariadic() {
conv, err := callbackArg(t.In(start + stepNArgs).Elem())
conv, err := callbackArg(step.In(start + stepNArgs).Elem())
if err != nil {
return err
}
@ -740,6 +740,8 @@ func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool
// AutoCommit return which currently auto commit or not.
func (c *SQLiteConn) AutoCommit() bool {
c.mu.Lock()
defer c.mu.Unlock()
return int(C.sqlite3_get_autocommit(c.db)) != 0
}
@ -1340,6 +1342,9 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
mutex|C.SQLITE_OPEN_READWRITE|C.SQLITE_OPEN_CREATE,
nil)
if rv != 0 {
if db != nil {
C.sqlite3_close_v2(db)
}
return nil, Error{Code: ErrNo(rv)}
}
if db == nil {
@ -1376,7 +1381,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
// - Activate User Authentication
// Check if the user wants to activate User Authentication.
// If so then first create a temporary AuthConn to the database
// This is possible because we are already succesfully authenticated.
// This is possible because we are already successfully authenticated.
//
// - Check if `sqlite_user`` table exists
// YES => Add the provided user from DSN as Admin User and
@ -1387,7 +1392,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
// Create connection to SQLite
conn := &SQLiteConn{db: db, loc: loc, txlock: txlock}
// Password Cipher has to be registerd before authentication
// Password Cipher has to be registered before authentication
if len(authCrypt) > 0 {
switch strings.ToUpper(authCrypt) {
case "SHA1":
@ -1674,7 +1679,7 @@ func (c *SQLiteConn) prepare(ctx context.Context, query string) (driver.Stmt, er
defer C.free(unsafe.Pointer(pquery))
var s *C.sqlite3_stmt
var tail *C.char
rv := C._sqlite3_prepare_v2_internal(c.db, pquery, -1, &s, &tail)
rv := C._sqlite3_prepare_v2_internal(c.db, pquery, C.int(-1), &s, &tail)
if rv != C.SQLITE_OK {
return nil, c.lastError()
}
@ -1718,7 +1723,7 @@ func (c *SQLiteConn) GetFilename(schemaName string) string {
// GetLimit returns the current value of a run-time limit.
// See: sqlite3_limit, http://www.sqlite.org/c3ref/limit.html
func (c *SQLiteConn) GetLimit(id int) int {
return int(C._sqlite3_limit(c.db, C.int(id), -1))
return int(C._sqlite3_limit(c.db, C.int(id), C.int(-1)))
}
// SetLimit changes the value of a run-time limits.
@ -2024,16 +2029,11 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
case C.SQLITE_BLOB:
p := C.sqlite3_column_blob(rc.s.s, C.int(i))
if p == nil {
dest[i] = nil
dest[i] = []byte{}
continue
}
n := int(C.sqlite3_column_bytes(rc.s.s, C.int(i)))
switch dest[i].(type) {
default:
slice := make([]byte, n)
copy(slice[:], (*[1 << 30]byte)(p)[0:n])
dest[i] = slice
}
n := C.sqlite3_column_bytes(rc.s.s, C.int(i))
dest[i] = C.GoBytes(p, n)
case C.SQLITE_NULL:
dest[i] = nil
case C.SQLITE_TEXT:
@ -2062,9 +2062,8 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
}
dest[i] = t
default:
dest[i] = []byte(s)
dest[i] = s
}
}
}
return nil

View File

@ -13,7 +13,7 @@ import (
// This file provides several different implementations for the
// default embedded sqlite_crypt function.
// This function is uses a ceasar-cypher by default
// This function is uses a caesar-cypher by default
// and is used within the UserAuthentication module to encode
// the password.
//
@ -40,7 +40,7 @@ import (
// password X, sqlite_crypt(X,NULL) is run. A new random salt is selected
// when the second argument is NULL.
//
// The built-in version of of sqlite_crypt() uses a simple Ceasar-cypher
// The built-in version of of sqlite_crypt() uses a simple Caesar-cypher
// which prevents passwords from being revealed by searching the raw database
// for ASCII text, but is otherwise trivally broken. For better password
// security, the database should be encrypted using the SQLite Encryption

View File

@ -10,7 +10,6 @@ package sqlite3
import (
"database/sql/driver"
"errors"
"context"
)
@ -18,7 +17,8 @@ import (
// Ping implement Pinger.
func (c *SQLiteConn) Ping(ctx context.Context) error {
if c.db == nil {
return errors.New("Connection was closed")
// must be ErrBadConn for sql to close the database
return driver.ErrBadConn
}
return nil
}

View File

@ -311,12 +311,18 @@ struct sqlite3_api_routines {
int (*str_errcode)(sqlite3_str*);
int (*str_length)(sqlite3_str*);
char *(*str_value)(sqlite3_str*);
/* Version 3.25.0 and later */
int (*create_window_function)(sqlite3*,const char*,int,int,void*,
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*),
void (*xValue)(sqlite3_context*),
void (*xInv)(sqlite3_context*,int,sqlite3_value**),
void(*xDestroy)(void*));
/* Version 3.26.0 and later */
const char *(*normalized_sql)(sqlite3_stmt*);
/* Version 3.28.0 and later */
int (*stmt_isexplain)(sqlite3_stmt*);
int (*value_frombind)(sqlite3_value*);
};
/*
@ -604,6 +610,11 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_str_value sqlite3_api->str_value
/* Version 3.25.0 and later */
#define sqlite3_create_window_function sqlite3_api->create_window_function
/* Version 3.26.0 and later */
#define sqlite3_normalized_sql sqlite3_api->normalized_sql
/* Version 3.28.0 and later */
#define sqlite3_stmt_isexplain sqlite3_api->isexplain
#define sqlite3_value_frombind sqlite3_api->frombind
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)

vendor/github.com/smartystreets/assertions/Makefile generated vendored Normal file
View File

@ -0,0 +1,11 @@
#!/usr/bin/make -f
test:
go test -timeout=1s -short ./...
compile:
go build ./...
build: test compile
.PHONY: test compile build

vendor/github.com/smartystreets/assertions/go.mod generated vendored Normal file
View File

@ -0,0 +1,3 @@
module github.com/smartystreets/assertions
go 1.12

View File

@ -78,7 +78,7 @@ func Convey(items ...interface{}) {
}
}
// SkipConvey is analagous to Convey except that the scope is not executed
// SkipConvey is analogous to Convey except that the scope is not executed
// (which means that child scopes defined within this scope are not run either).
// The reporter will be notified that this step was skipped.
func SkipConvey(items ...interface{}) {
@ -125,7 +125,7 @@ func So(actual interface{}, assert assertion, expected ...interface{}) {
mustGetCurrentContext().So(actual, assert, expected...)
}
// SkipSo is analagous to So except that the assertion that would have been passed
// SkipSo is analogous to So except that the assertion that would have been passed
// to So is not executed and the reporter is notified that the assertion was skipped.
func SkipSo(stuff ...interface{}) {
mustGetCurrentContext().SkipSo()
@ -202,7 +202,7 @@ func SuppressConsoleStatistics() {
reporting.SuppressConsoleStatistics()
}
// ConsoleStatistics may be called at any time to print assertion statistics.
// PrintConsoleStatistics may be called at any time to print assertion statistics.
// Generally, the best place to do this would be in a TestMain function,
// after all tests have been run. Something like this:
//
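The corrected comment above points at a TestMain pattern; a hedged sketch of what that usually looks like (the test package name is illustrative, the convey functions are the ones shown in this diff):
package mypackage_test
import (
	"os"
	"testing"
	"github.com/smartystreets/goconvey/convey"
)
func TestMain(m *testing.M) {
	// Suppress per-test output, run the suite, then print one summary.
	convey.SuppressConsoleStatistics()
	result := m.Run()
	convey.PrintConsoleStatistics()
	os.Exit(result)
}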

View File

@ -71,7 +71,7 @@ var consoleStatistics = NewStatisticsReporter(NewPrinter(NewConsole()))
func SuppressConsoleStatistics() { consoleStatistics.Suppress() }
func PrintConsoleStatistics() { consoleStatistics.PrintSummary() }
// QuiteMode disables all console output symbols. This is only meant to be used
// QuietMode disables all console output symbols. This is only meant to be used
// for tests that are internal to goconvey where the output is distracting or
// otherwise not needed in the test output.
func QuietMode() {

View File

@ -113,6 +113,17 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
return Error(t, err, append([]interface{}{msg}, args...)...)
}
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
// Exactlyf asserts that two objects are equal in value and type.
//
// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
@ -157,6 +168,31 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool
return FileExists(t, path, append([]interface{}{msg}, args...)...)
}
// Greaterf asserts that the first element is greater than the second
//
// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
@ -289,6 +325,14 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() does not accept.
//
@ -300,6 +344,31 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
return Len(t, object, length, append([]interface{}{msg}, args...)...)
}
// Lessf asserts that the first element is less than the second
//
// assert.Lessf(t, 1, 2, "error message %s", "formatted")
// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// LessOrEqualf asserts that the first element is less than or equal to the second
//
// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
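As an illustrative aside (not part of the vendored file): the formatted variants render their msg/args pair with fmt.Sprintf and attach it to the failure output, for example:
package mypackage_test
import (
	"testing"
	"github.com/stretchr/testify/assert"
)
func TestFormattedVariants(t *testing.T) {
	got, want := 3, 2
	// On failure the message reads like "queue depth for worker-1" next to
	// the usual assertion diagnostics; the names here are illustrative.
	assert.GreaterOrEqualf(t, got, want, "queue depth for %s", "worker-1")
	assert.Lessf(t, 1.5, 2.5, "latency for %s out of range", "worker-1")
}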
// Nilf asserts that the specified object is nil.
//
// assert.Nilf(t, err, "error message %s", "formatted")
@ -444,6 +513,19 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in
return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
}
// Samef asserts that two pointers reference the same object.
//
// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//

View File

@ -215,6 +215,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
return Errorf(a.t, err, msg, args...)
}
// Eventually asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
}
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
}
// Exactly asserts that two objects are equal in value and type.
//
// a.Exactly(int32(123), int64(123))
@ -303,6 +325,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b
return FileExistsf(a.t, path, msg, args...)
}
// Greater asserts that the first element is greater than the second
//
// a.Greater(2, 1)
// a.Greater(float64(2), float64(1))
// a.Greater("b", "a")
func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Greater(a.t, e1, e2, msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
// a.GreaterOrEqual(2, 1)
// a.GreaterOrEqual(2, 2)
// a.GreaterOrEqual("b", "a")
// a.GreaterOrEqual("b", "b")
func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
}
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return GreaterOrEqualf(a.t, e1, e2, msg, args...)
}
// Greaterf asserts that the first element is greater than the second
//
// a.Greaterf(2, 1, "error message %s", "formatted")
// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Greaterf(a.t, e1, e2, msg, args...)
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
@ -567,6 +639,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
return JSONEqf(a.t, expected, actual, msg, args...)
}
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return YAMLEq(a.t, expected, actual, msgAndArgs...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return YAMLEqf(a.t, expected, actual, msg, args...)
}
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
@ -589,6 +677,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in
return Lenf(a.t, object, length, msg, args...)
}
// Less asserts that the first element is less than the second
//
// a.Less(1, 2)
// a.Less(float64(1), float64(2))
// a.Less("a", "b")
func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Less(a.t, e1, e2, msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
//
// a.LessOrEqual(1, 2)
// a.LessOrEqual(2, 2)
// a.LessOrEqual("a", "b")
// a.LessOrEqual("b", "b")
func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return LessOrEqual(a.t, e1, e2, msgAndArgs...)
}
// LessOrEqualf asserts that the first element is less than or equal to the second
//
// a.LessOrEqualf(1, 2, "error message %s", "formatted")
// a.LessOrEqualf(2, 2, "error message %s", "formatted")
// a.LessOrEqualf("a", "b", "error message %s", "formatted")
// a.LessOrEqualf("b", "b", "error message %s", "formatted")
func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return LessOrEqualf(a.t, e1, e2, msg, args...)
}
// Lessf asserts that the first element is less than the second
//
// a.Lessf(1, 2, "error message %s", "formatted")
// a.Lessf(float64(1, "error message %s", "formatted"), float64(2))
// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Lessf(a.t, e1, e2, msg, args...)
}
// Nil asserts that the specified object is nil.
//
// a.Nil(err)
@ -877,6 +1015,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args .
return Regexpf(a.t, rx, str, msg, args...)
}
// Same asserts that two pointers reference the same object.
//
// a.Same(ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Same(a.t, expected, actual, msgAndArgs...)
}
// Samef asserts that two pointers reference the same object.
//
// a.Samef(ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Samef(a.t, expected, actual, msg, args...)
}
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//

View File

@ -0,0 +1,309 @@
package assert
import (
"fmt"
"reflect"
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) {
switch kind {
case reflect.Int:
{
intobj1 := obj1.(int)
intobj2 := obj2.(int)
if intobj1 > intobj2 {
return -1, true
}
if intobj1 == intobj2 {
return 0, true
}
if intobj1 < intobj2 {
return 1, true
}
}
case reflect.Int8:
{
int8obj1 := obj1.(int8)
int8obj2 := obj2.(int8)
if int8obj1 > int8obj2 {
return -1, true
}
if int8obj1 == int8obj2 {
return 0, true
}
if int8obj1 < int8obj2 {
return 1, true
}
}
case reflect.Int16:
{
int16obj1 := obj1.(int16)
int16obj2 := obj2.(int16)
if int16obj1 > int16obj2 {
return -1, true
}
if int16obj1 == int16obj2 {
return 0, true
}
if int16obj1 < int16obj2 {
return 1, true
}
}
case reflect.Int32:
{
int32obj1 := obj1.(int32)
int32obj2 := obj2.(int32)
if int32obj1 > int32obj2 {
return -1, true
}
if int32obj1 == int32obj2 {
return 0, true
}
if int32obj1 < int32obj2 {
return 1, true
}
}
case reflect.Int64:
{
int64obj1 := obj1.(int64)
int64obj2 := obj2.(int64)
if int64obj1 > int64obj2 {
return -1, true
}
if int64obj1 == int64obj2 {
return 0, true
}
if int64obj1 < int64obj2 {
return 1, true
}
}
case reflect.Uint:
{
uintobj1 := obj1.(uint)
uintobj2 := obj2.(uint)
if uintobj1 > uintobj2 {
return -1, true
}
if uintobj1 == uintobj2 {
return 0, true
}
if uintobj1 < uintobj2 {
return 1, true
}
}
case reflect.Uint8:
{
uint8obj1 := obj1.(uint8)
uint8obj2 := obj2.(uint8)
if uint8obj1 > uint8obj2 {
return -1, true
}
if uint8obj1 == uint8obj2 {
return 0, true
}
if uint8obj1 < uint8obj2 {
return 1, true
}
}
case reflect.Uint16:
{
uint16obj1 := obj1.(uint16)
uint16obj2 := obj2.(uint16)
if uint16obj1 > uint16obj2 {
return -1, true
}
if uint16obj1 == uint16obj2 {
return 0, true
}
if uint16obj1 < uint16obj2 {
return 1, true
}
}
case reflect.Uint32:
{
uint32obj1 := obj1.(uint32)
uint32obj2 := obj2.(uint32)
if uint32obj1 > uint32obj2 {
return -1, true
}
if uint32obj1 == uint32obj2 {
return 0, true
}
if uint32obj1 < uint32obj2 {
return 1, true
}
}
case reflect.Uint64:
{
uint64obj1 := obj1.(uint64)
uint64obj2 := obj2.(uint64)
if uint64obj1 > uint64obj2 {
return -1, true
}
if uint64obj1 == uint64obj2 {
return 0, true
}
if uint64obj1 < uint64obj2 {
return 1, true
}
}
case reflect.Float32:
{
float32obj1 := obj1.(float32)
float32obj2 := obj2.(float32)
if float32obj1 > float32obj2 {
return -1, true
}
if float32obj1 == float32obj2 {
return 0, true
}
if float32obj1 < float32obj2 {
return 1, true
}
}
case reflect.Float64:
{
float64obj1 := obj1.(float64)
float64obj2 := obj2.(float64)
if float64obj1 > float64obj2 {
return -1, true
}
if float64obj1 == float64obj2 {
return 0, true
}
if float64obj1 < float64obj2 {
return 1, true
}
}
case reflect.String:
{
stringobj1 := obj1.(string)
stringobj2 := obj2.(string)
if stringobj1 > stringobj2 {
return -1, true
}
if stringobj1 == stringobj2 {
return 0, true
}
if stringobj1 < stringobj2 {
return 1, true
}
}
}
return 0, false
}
// Greater asserts that the first element is greater than the second
//
// assert.Greater(t, 2, 1)
// assert.Greater(t, float64(2), float64(1))
// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
res, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if res != -1 {
return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...)
}
return true
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
// assert.GreaterOrEqual(t, 2, 1)
// assert.GreaterOrEqual(t, 2, 2)
// assert.GreaterOrEqual(t, "b", "a")
// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
res, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if res != -1 && res != 0 {
return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...)
}
return true
}
// Less asserts that the first element is less than the second
//
// assert.Less(t, 1, 2)
// assert.Less(t, float64(1), float64(2))
// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
res, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if res != 1 {
return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...)
}
return true
}
// LessOrEqual asserts that the first element is less than or equal to the second
//
// assert.LessOrEqual(t, 1, 2)
// assert.LessOrEqual(t, 2, 2)
// assert.LessOrEqual(t, "a", "b")
// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
e1Kind := reflect.ValueOf(e1).Kind()
e2Kind := reflect.ValueOf(e2).Kind()
if e1Kind != e2Kind {
return Fail(t, "Elements should be the same type", msgAndArgs...)
}
res, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
}
if res != 1 && res != 0 {
return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
}
return true
}
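A short, hedged usage sketch for the ordering assertions defined above (test and variable names are illustrative):
package mypackage_test
import (
	"testing"
	"github.com/stretchr/testify/assert"
)
func TestOrdering(t *testing.T) {
	// Both operands must share the same kind: comparing int with int64
	// fails with "Elements should be the same type".
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, "b", "b")
	assert.Less(t, 1.5, 2.5)
	assert.LessOrEqual(t, int64(2), int64(2))
}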

View File

@ -18,6 +18,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
yaml "gopkg.in/yaml.v2"
)
//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
@ -350,6 +351,37 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
}
// Same asserts that two pointers reference the same object.
//
// assert.Same(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual)
if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr {
return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...)
}
expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual)
if expectedType != actualType {
return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v",
expectedType, actualType), msgAndArgs...)
}
if expected != actual {
return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
}
return true
}
// formatUnequalValues takes two values of arbitrary types and returns string
// representations appropriate to be presented to the user.
//
@ -479,14 +511,14 @@ func isEmpty(object interface{}) bool {
// collection types are empty when they have no element
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
// pointers are empty if nil or if the value they point to is empty
// pointers are empty if nil or if the value they point to is empty
case reflect.Ptr:
if objValue.IsNil() {
return true
}
deref := objValue.Elem().Interface()
return isEmpty(deref)
// for all other types, compare against the zero value
// for all other types, compare against the zero value
default:
zero := reflect.Zero(objValue.Type())
return reflect.DeepEqual(object, zero.Interface())
@ -629,7 +661,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
func includeElement(list interface{}, element interface{}) (ok, found bool) {
listValue := reflect.ValueOf(list)
elementValue := reflect.ValueOf(element)
listKind := reflect.TypeOf(list).Kind()
defer func() {
if e := recover(); e != nil {
ok = false
@ -637,11 +669,12 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) {
}
}()
if reflect.TypeOf(list).Kind() == reflect.String {
if listKind == reflect.String {
elementValue := reflect.ValueOf(element)
return true, strings.Contains(listValue.String(), elementValue.String())
}
if reflect.TypeOf(list).Kind() == reflect.Map {
if listKind == reflect.Map {
mapKeys := listValue.MapKeys()
for i := 0; i < len(mapKeys); i++ {
if ObjectsAreEqual(mapKeys[i].Interface(), element) {
@ -1337,6 +1370,24 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
}
// YAMLEq asserts that two YAML strings are equivalent.
func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
var expectedYAMLAsInterface, actualYAMLAsInterface interface{}
if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
}
if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
}
return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...)
}
func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
t := reflect.TypeOf(v)
k := t.Kind()
@ -1371,8 +1422,8 @@ func diff(expected interface{}, actual interface{}) string {
e = spewConfig.Sdump(expected)
a = spewConfig.Sdump(actual)
} else {
e = expected.(string)
a = actual.(string)
e = reflect.ValueOf(expected).String()
a = reflect.ValueOf(actual).String()
}
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
@ -1414,3 +1465,34 @@ var spewConfig = spew.ConfigState{
type tHelper interface {
Helper()
}
// Eventually asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
timer := time.NewTimer(waitFor)
ticker := time.NewTicker(tick)
checkPassed := make(chan bool)
defer timer.Stop()
defer ticker.Stop()
defer close(checkPassed)
for {
select {
case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...)
case result := <-checkPassed:
if result {
return true
}
case <-ticker.C:
go func() {
checkPassed <- condition()
}()
}
}
}
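The three assertions introduced in this file, Same, YAMLEq and Eventually, can be exercised as in the following sketch (all values and names are illustrative):
package mypackage_test
import (
	"sync/atomic"
	"testing"
	"time"
	"github.com/stretchr/testify/assert"
)
func TestNewAssertions(t *testing.T) {
	// Same requires pointer identity, not just deep equality.
	type payload struct{ n int }
	p := &payload{n: 1}
	assert.Same(t, p, p)
	assert.Equal(t, &payload{n: 1}, p) // deep-equal passes; Same would not
	// YAMLEq compares the unmarshalled documents, so key order and
	// flow vs. block style are irrelevant.
	assert.YAMLEq(t, "name: grafana\nports:\n  - 3000\n", "ports: [3000]\nname: grafana\n")
	// Eventually polls the condition every tick until waitFor expires.
	var done int32
	go func() {
		time.Sleep(20 * time.Millisecond)
		atomic.StoreInt32(&done, 1)
	}()
	assert.Eventually(t, func() bool { return atomic.LoadInt32(&done) == 1 },
		time.Second, 10*time.Millisecond)
}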

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
{{.Comment}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
if h, ok := t.(tHelper); ok { h.Helper() }
if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
t.FailNow()
}
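This template generates the require-flavoured wrappers: mark the frame as a helper, delegate to the assert implementation, and call t.FailNow if it reported a failure. A hedged sketch of the practical difference from the assert package (error value and names are illustrative):
package mypackage_test
import (
	"errors"
	"testing"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
func TestRequireStopsEarly(t *testing.T) {
	err := errors.New("boom")
	assert.Error(t, err)  // records the check and lets the test continue
	require.Error(t, err) // same check, but calls t.FailNow on failure
	// Anything after a failed require.* call never runs, which is why the
	// generated wrappers call t.Helper first and t.FailNow last.
	require.NoError(t, nil)
}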

View File

@ -216,6 +216,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
Errorf(a.t, err, msg, args...)
}
// Eventually asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
}
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
}
// Exactly asserts that two objects are equal in value and type.
//
// a.Exactly(int32(123), int64(123))
@ -304,6 +326,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
FileExistsf(a.t, path, msg, args...)
}
// Greater asserts that the first element is greater than the second
//
// a.Greater(2, 1)
// a.Greater(float64(2), float64(1))
// a.Greater("b", "a")
func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Greater(a.t, e1, e2, msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
// a.GreaterOrEqual(2, 1)
// a.GreaterOrEqual(2, 2)
// a.GreaterOrEqual("b", "a")
// a.GreaterOrEqual("b", "b")
func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
}
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
GreaterOrEqualf(a.t, e1, e2, msg, args...)
}
// Greaterf asserts that the first element is greater than the second
//
// a.Greaterf(2, 1, "error message %s", "formatted")
// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Greaterf(a.t, e1, e2, msg, args...)
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
@ -568,6 +640,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ..
JSONEqf(a.t, expected, actual, msg, args...)
}
// YAMLEq asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
YAMLEq(a.t, expected, actual, msgAndArgs...)
}
// YAMLEqf asserts that two YAML strings are equivalent.
func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
YAMLEqf(a.t, expected, actual, msg, args...)
}
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
@ -590,6 +678,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in
Lenf(a.t, object, length, msg, args...)
}
// Less asserts that the first element is less than the second
//
// a.Less(1, 2)
// a.Less(float64(1), float64(2))
// a.Less("a", "b")
func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Less(a.t, e1, e2, msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
//
// a.LessOrEqual(1, 2)
// a.LessOrEqual(2, 2)
// a.LessOrEqual("a", "b")
// a.LessOrEqual("b", "b")
func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
LessOrEqual(a.t, e1, e2, msgAndArgs...)
}
// LessOrEqualf asserts that the first element is less than or equal to the second
//
// a.LessOrEqualf(1, 2, "error message %s", "formatted")
// a.LessOrEqualf(2, 2, "error message %s", "formatted")
// a.LessOrEqualf("a", "b", "error message %s", "formatted")
// a.LessOrEqualf("b", "b", "error message %s", "formatted")
func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
LessOrEqualf(a.t, e1, e2, msg, args...)
}
// Lessf asserts that the first element is less than the second
//
// a.Lessf(1, 2, "error message %s", "formatted")
// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Lessf(a.t, e1, e2, msg, args...)
}
// Nil asserts that the specified object is nil.
//
// a.Nil(err)
@ -878,6 +1016,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args .
Regexpf(a.t, rx, str, msg, args...)
}
// Same asserts that two pointers reference the same object.
//
// a.Same(ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Same(a.t, expected, actual, msgAndArgs...)
}
// Samef asserts that two pointers reference the same object.
//
// a.Samef(ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Samef(a.t, expected, actual, msg, args...)
}
// Subset asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
//

View File

@ -1 +0,0 @@
TestSaveFile

View File

@ -1 +0,0 @@
TestSaveFileS

View File

@ -1 +0,0 @@
TestSaveFile

View File

@ -1 +0,0 @@
TestSaveFileS

View File

@ -1 +0,0 @@
TestSaveFile

View File

@ -1 +0,0 @@
TestSaveFileS

View File

@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// In Go 1.13, the ed25519 package was promoted to the standard library as
// crypto/ed25519, and this package became a wrapper for the standard library one.
//
// +build !go1.13
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//

73
vendor/golang.org/x/crypto/ed25519/ed25519_go113.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.13
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519
import (
"crypto/ed25519"
"io"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return ed25519.Verify(publicKey, message, sig)
}
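A brief usage sketch for the wrapper above (the message bytes are illustrative): on Go 1.13+ every call forwards to crypto/ed25519, while older toolchains keep using the pre-existing pure-Go implementation in this package.
package main
import (
	"crypto/rand"
	"fmt"
	"log"
	"golang.org/x/crypto/ed25519"
)
func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)
	// Verify reports whether sig is a valid signature of msg by pub.
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}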

View File

@ -273,7 +273,20 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if testHookOnConn != nil {
testHookOnConn()
}
// The TLSNextProto interface predates contexts, so
// the net/http package passes down its per-connection
// base context via an exported but unadvertised
// method on the Handler. This is for internal
// net/http<=>http2 use only.
var ctx context.Context
type baseContexter interface {
BaseContext() context.Context
}
if bc, ok := h.(baseContexter); ok {
ctx = bc.BaseContext()
}
conf.ServeConn(c, &ServeConnOpts{
Context: ctx,
Handler: h,
BaseConfig: hs,
})
@ -284,6 +297,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
// ServeConnOpts are options for the Server.ServeConn method.
type ServeConnOpts struct {
// Context is the base context to use.
// If nil, context.Background is used.
Context context.Context
// BaseConfig optionally sets the base configuration
// for values. If nil, defaults are used.
BaseConfig *http.Server
@ -294,6 +311,13 @@ type ServeConnOpts struct {
Handler http.Handler
}
func (o *ServeConnOpts) context() context.Context {
if o.Context != nil {
return o.Context
}
return context.Background()
}
func (o *ServeConnOpts) baseConfig() *http.Server {
if o != nil && o.BaseConfig != nil {
return o.BaseConfig
@ -439,7 +463,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
ctx, cancel = context.WithCancel(context.Background())
ctx, cancel = context.WithCancel(opts.context())
ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
if hs := opts.baseConfig(); hs != nil {
ctx = context.WithValue(ctx, http.ServerContextKey, hs)
@ -2307,7 +2331,16 @@ type chunkWriter struct{ rws *responseWriterState }
func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
func (rws *responseWriterState) hasNonemptyTrailers() bool {
for _, trailer := range rws.trailers {
if _, ok := rws.handlerHeader[trailer]; ok {
return true
}
}
return false
}
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
@ -2407,7 +2440,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
rws.promoteUndeclaredTrailers()
}
endStream := rws.handlerDone && !rws.hasTrailers()
// only send trailers if they have actually been defined by the
// server handler.
hasNonemptyTrailers := rws.hasNonemptyTrailers()
endStream := rws.handlerDone && !hasNonemptyTrailers
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
@ -2416,7 +2452,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
}
}
if rws.handlerDone && rws.hasTrailers() {
if rws.handlerDone && hasNonemptyTrailers {
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
h: rws.handlerHeader,
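Taken together, ServeConnOpts.Context and the BaseContext hand-off above let a per-server base context become the parent of every stream's request context. A hedged sketch of supplying it directly to ServeConn (the context key and handler are illustrative, and a plaintext client would need HTTP/2 prior knowledge to talk to this listener):
package main
import (
	"context"
	"log"
	"net"
	"net/http"
	"golang.org/x/net/http2"
)
type listenerKey struct{}
func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	h2 := &http2.Server{}
	base := context.WithValue(context.Background(), listenerKey{}, ln.Addr().String())
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The value placed on ServeConnOpts.Context is visible here because
		// serverConnBaseContext now derives the connection context from it.
		w.Write([]byte(r.Context().Value(listenerKey{}).(string)))
	})
	for {
		c, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go h2.ServeConn(c, &http2.ServeConnOpts{
			Context: base,
			Handler: handler,
		})
	}
}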

View File

@ -28,6 +28,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"golang.org/x/net/http/httpguts"
@ -199,6 +200,7 @@ type ClientConn struct {
t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
// readLoop goroutine fields:
@ -440,7 +442,8 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
return nil, err
}
traceGotConn(req, cc)
reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
traceGotConn(req, cc, reused)
res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
if err != nil && retry <= 6 {
if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
@ -989,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
@ -2559,15 +2562,15 @@ func traceGetConn(req *http.Request, hostPort string) {
trace.GetConn(hostPort)
}
func traceGotConn(req *http.Request, cc *ClientConn) {
func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
trace := httptrace.ContextClientTrace(req.Context())
if trace == nil || trace.GotConn == nil {
return
}
ci := httptrace.GotConnInfo{Conn: cc.tconn}
ci.Reused = reused
cc.mu.Lock()
ci.Reused = cc.nextStreamID > 1
ci.WasIdle = len(cc.streams) == 0 && ci.Reused
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
ci.IdleTime = time.Now().Sub(cc.lastActive)
}
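With the change above, GotConnInfo.Reused reflects an atomic per-ClientConn flag rather than the stream counter. Callers observe it through net/http/httptrace; a hedged sketch (the URL is illustrative):
package main
import (
	"fmt"
	"log"
	"net/http"
	"net/http/httptrace"
)
func main() {
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			// Reused is true for every request after the first one served
			// by the same HTTP/2 ClientConn.
			fmt.Printf("reused=%v wasIdle=%v\n", info.Reused, info.WasIdle)
		},
	}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}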

View File

@ -4,14 +4,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna // import "golang.org/x/net/idna"
@ -297,7 +299,7 @@ func (e runeError) Error() string {
}
// process implements the algorithm described in section 4 of UTS #46,
// see http://www.unicode.org/reports/tr46.
// see https://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
var err error
var isBidi bool

682
vendor/golang.org/x/net/idna/idna9.0.0.go generated vendored Normal file
View File

@ -0,0 +1,682 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
// deal with the transition from IDNA2003.
//
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
// differences between these two standards.
package idna // import "golang.org/x/net/idna"
import (
"fmt"
"strings"
"unicode/utf8"
"golang.org/x/text/secure/bidirule"
"golang.org/x/text/unicode/norm"
)
// NOTE: Unlike common practice in Go APIs, the functions will return a
// sanitized domain name in case of errors. Browsers sometimes use a partially
// evaluated string as lookup.
// TODO: the current error handling is, in my opinion, the least opinionated.
// Other strategies are also viable, though:
// Option 1) Return an empty string in case of error, but allow the user to
// specify explicitly which errors to ignore.
// Option 2) Return the partially evaluated string if it is itself a valid
// string, otherwise return the empty string in case of error.
// Option 3) Option 1 and 2.
// Option 4) Always return an empty string for now and implement Option 1 as
// needed, and document that the return string may not be empty in case of
// error in the future.
// I think Option 1 is best, but it is quite opinionated.
// ToASCII is a wrapper for Punycode.ToASCII.
func ToASCII(s string) (string, error) {
return Punycode.process(s, true)
}
// ToUnicode is a wrapper for Punycode.ToUnicode.
func ToUnicode(s string) (string, error) {
return Punycode.process(s, false)
}
// An Option configures a Profile at creation time.
type Option func(*options)
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
// compatibility. It is used by most browsers when resolving domain names. This
// option is only meaningful if combined with MapForLookup.
func Transitional(transitional bool) Option {
return func(o *options) { o.transitional = true }
}
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
// are longer than allowed by the RFC.
func VerifyDNSLength(verify bool) Option {
return func(o *options) { o.verifyDNSLength = verify }
}
// RemoveLeadingDots removes leading label separators. Leading runes that map to
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
//
// This is the behavior suggested by the UTS #46 and is adopted by some
// browsers.
func RemoveLeadingDots(remove bool) Option {
return func(o *options) { o.removeLeadingDots = remove }
}
// ValidateLabels sets whether to check the mandatory label validation criteria
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
// of hyphens ('-'), normalization, validity of runes, and the context rules.
func ValidateLabels(enable bool) Option {
return func(o *options) {
// Don't override existing mappings, but set one that at least checks
// normalization if it is not set.
if o.mapping == nil && enable {
o.mapping = normalize
}
o.trie = trie
o.validateLabels = enable
o.fromPuny = validateFromPunycode
}
}
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
//
// This option is useful, for instance, for browsers that allow characters
// outside this range, for example a '_' (U+005F LOW LINE). See
// http://www.rfc-editor.org/std/std3.txt for more details. This option
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
func StrictDomainName(use bool) Option {
return func(o *options) {
o.trie = trie
o.useSTD3Rules = use
o.fromPuny = validateFromPunycode
}
}
// NOTE: the following options pull in tables. The tables should not be linked
// in as long as the options are not used.
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
// that relies on proper validation of labels should include this rule.
func BidiRule() Option {
return func(o *options) { o.bidirule = bidirule.ValidString }
}
// ValidateForRegistration sets validation options to verify that a given IDN is
// properly formatted for registration as defined by Section 4 of RFC 5891.
func ValidateForRegistration() Option {
return func(o *options) {
o.mapping = validateRegistration
StrictDomainName(true)(o)
ValidateLabels(true)(o)
VerifyDNSLength(true)(o)
BidiRule()(o)
}
}
// MapForLookup sets validation and mapping options such that a given IDN is
// transformed for domain name lookup according to the requirements set out in
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
// to add this check.
//
// The mappings include normalization and mapping case, width and other
// compatibility mappings.
func MapForLookup() Option {
return func(o *options) {
o.mapping = validateAndMap
StrictDomainName(true)(o)
ValidateLabels(true)(o)
RemoveLeadingDots(true)(o)
}
}
type options struct {
transitional bool
useSTD3Rules bool
validateLabels bool
verifyDNSLength bool
removeLeadingDots bool
trie *idnaTrie
// fromPuny calls validation rules when converting A-labels to U-labels.
fromPuny func(p *Profile, s string) error
// mapping implements a validation and mapping step as defined in RFC 5895
// or UTS 46, tailored to, for example, domain registration or lookup.
mapping func(p *Profile, s string) (string, error)
// bidirule, if specified, checks whether s conforms to the Bidi Rule
// defined in RFC 5893.
bidirule func(s string) bool
}
// A Profile defines the configuration of an IDNA mapper.
type Profile struct {
options
}
func apply(o *options, opts []Option) {
for _, f := range opts {
f(o)
}
}
// New creates a new Profile.
//
// With no options, the returned Profile is the most permissive and equals the
// Punycode Profile. Options can be passed to further restrict the Profile. The
// MapForLookup and ValidateForRegistration options set a collection of options,
// for lookup and registration purposes respectively, which can be tailored by
// adding more fine-grained options, where later options override earlier
// options.
func New(o ...Option) *Profile {
p := &Profile{}
apply(&p.options, o)
return p
}
// ToASCII converts a domain or domain label to its ASCII form. For example,
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// ToASCII("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToASCII(s string) (string, error) {
return p.process(s, true)
}
// ToUnicode converts a domain or domain label to its Unicode form. For example,
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
// ToUnicode("golang") is "golang". If an error is encountered it will return
// an error and a (partially) processed result.
func (p *Profile) ToUnicode(s string) (string, error) {
pp := *p
pp.transitional = false
return pp.process(s, false)
}
// String reports a string with a description of the profile for debugging
// purposes. The string format may change with different versions.
func (p *Profile) String() string {
s := ""
if p.transitional {
s = "Transitional"
} else {
s = "NonTransitional"
}
if p.useSTD3Rules {
s += ":UseSTD3Rules"
}
if p.validateLabels {
s += ":ValidateLabels"
}
if p.verifyDNSLength {
s += ":VerifyDNSLength"
}
return s
}
var (
// Punycode is a Profile that does raw punycode processing with a minimum
// of validation.
Punycode *Profile = punycode
// Lookup is the recommended profile for looking up domain names, according
// to Section 5 of RFC 5891. The exact configuration of this profile may
// change over time.
Lookup *Profile = lookup
// Display is the recommended profile for displaying domain names.
// The configuration of this profile may change over time.
Display *Profile = display
// Registration is the recommended profile for checking whether a given
// IDN is valid for registration, according to Section 4 of RFC 5891.
Registration *Profile = registration
punycode = &Profile{}
lookup = &Profile{options{
transitional: true,
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
display = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
removeLeadingDots: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateAndMap,
bidirule: bidirule.ValidString,
}}
registration = &Profile{options{
useSTD3Rules: true,
validateLabels: true,
verifyDNSLength: true,
trie: trie,
fromPuny: validateFromPunycode,
mapping: validateRegistration,
bidirule: bidirule.ValidString,
}}
// TODO: profiles
// Register: recommended for approving domain names: don't do any mappings
// but rather reject on invalid input. Bundle or block deviation characters.
)
type labelError struct{ label, code_ string }
func (e labelError) code() string { return e.code_ }
func (e labelError) Error() string {
return fmt.Sprintf("idna: invalid label %q", e.label)
}
type runeError rune
func (e runeError) code() string { return "P1" }
func (e runeError) Error() string {
return fmt.Sprintf("idna: disallowed rune %U", e)
}
// process implements the algorithm described in section 4 of UTS #46,
// see https://www.unicode.org/reports/tr46.
func (p *Profile) process(s string, toASCII bool) (string, error) {
var err error
if p.mapping != nil {
s, err = p.mapping(p, s)
}
// Remove leading empty labels.
if p.removeLeadingDots {
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
}
}
// It seems like we should only create this error on ToASCII, but the
// UTS 46 conformance tests suggests we should always check this.
if err == nil && p.verifyDNSLength && s == "" {
err = &labelError{s, "A4"}
}
labels := labelIter{orig: s}
for ; !labels.done(); labels.next() {
label := labels.label()
if label == "" {
// Empty labels are not okay. The label iterator skips the last
// label if it is empty.
if err == nil && p.verifyDNSLength {
err = &labelError{s, "A4"}
}
continue
}
if strings.HasPrefix(label, acePrefix) {
u, err2 := decode(label[len(acePrefix):])
if err2 != nil {
if err == nil {
err = err2
}
// Spec says keep the old label.
continue
}
labels.set(u)
if err == nil && p.validateLabels {
err = p.fromPuny(p, u)
}
if err == nil {
// This should be called on NonTransitional, according to the
// spec, but that currently does not have any effect. Use the
// original profile to preserve options.
err = p.validateLabel(u)
}
} else if err == nil {
err = p.validateLabel(label)
}
}
if toASCII {
for labels.reset(); !labels.done(); labels.next() {
label := labels.label()
if !ascii(label) {
a, err2 := encode(acePrefix, label)
if err == nil {
err = err2
}
label = a
labels.set(a)
}
n := len(label)
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
err = &labelError{label, "A4"}
}
}
}
s = labels.result()
if toASCII && p.verifyDNSLength && err == nil {
// Compute the length of the domain name minus the root label and its dot.
n := len(s)
if n > 0 && s[n-1] == '.' {
n--
}
if len(s) < 1 || n > 253 {
err = &labelError{s, "A4"}
}
}
return s, err
}
func normalize(p *Profile, s string) (string, error) {
return norm.NFC.String(s), nil
}
func validateRegistration(p *Profile, s string) (string, error) {
if !norm.NFC.IsNormalString(s) {
return s, &labelError{s, "V1"}
}
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
// TODO: handle the NV8 defined in the Unicode idna data set to allow
// for strict conformance to IDNA2008.
case valid, deviation:
case disallowed, mapped, unknown, ignored:
r, _ := utf8.DecodeRuneInString(s[i:])
return s, runeError(r)
}
i += sz
}
return s, nil
}
func validateAndMap(p *Profile, s string) (string, error) {
var (
err error
b []byte
k int
)
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
start := i
i += sz
// Copy bytes not copied so far.
switch p.simplify(info(v).category()) {
case valid:
continue
case disallowed:
if err == nil {
r, _ := utf8.DecodeRuneInString(s[start:])
err = runeError(r)
}
continue
case mapped, deviation:
b = append(b, s[k:start]...)
b = info(v).appendMapping(b, s[start:i])
case ignored:
b = append(b, s[k:start]...)
// drop the rune
case unknown:
b = append(b, s[k:start]...)
b = append(b, "\ufffd"...)
}
k = i
}
if k == 0 {
// No changes so far.
s = norm.NFC.String(s)
} else {
b = append(b, s[k:]...)
if norm.NFC.QuickSpan(b) != len(b) {
b = norm.NFC.Bytes(b)
}
// TODO: the punycode converters require strings as input.
s = string(b)
}
return s, err
}
// A labelIter allows iterating over domain name labels.
type labelIter struct {
orig string
slice []string
curStart int
curEnd int
i int
}
func (l *labelIter) reset() {
l.curStart = 0
l.curEnd = 0
l.i = 0
}
func (l *labelIter) done() bool {
return l.curStart >= len(l.orig)
}
func (l *labelIter) result() string {
if l.slice != nil {
return strings.Join(l.slice, ".")
}
return l.orig
}
func (l *labelIter) label() string {
if l.slice != nil {
return l.slice[l.i]
}
p := strings.IndexByte(l.orig[l.curStart:], '.')
l.curEnd = l.curStart + p
if p == -1 {
l.curEnd = len(l.orig)
}
return l.orig[l.curStart:l.curEnd]
}
// next sets the value to the next label. It skips the last label if it is empty.
func (l *labelIter) next() {
l.i++
if l.slice != nil {
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
l.curStart = len(l.orig)
}
} else {
l.curStart = l.curEnd + 1
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
l.curStart = len(l.orig)
}
}
}
func (l *labelIter) set(s string) {
if l.slice == nil {
l.slice = strings.Split(l.orig, ".")
}
l.slice[l.i] = s
}
// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"
func (p *Profile) simplify(cat category) category {
switch cat {
case disallowedSTD3Mapped:
if p.useSTD3Rules {
cat = disallowed
} else {
cat = mapped
}
case disallowedSTD3Valid:
if p.useSTD3Rules {
cat = disallowed
} else {
cat = valid
}
case deviation:
if !p.transitional {
cat = valid
}
case validNV8, validXV8:
// TODO: handle V2008
cat = valid
}
return cat
}
func validateFromPunycode(p *Profile, s string) error {
if !norm.NFC.IsNormalString(s) {
return &labelError{s, "V1"}
}
for i := 0; i < len(s); {
v, sz := trie.lookupString(s[i:])
if c := p.simplify(info(v).category()); c != valid && c != deviation {
return &labelError{s, "V6"}
}
i += sz
}
return nil
}
const (
zwnj = "\u200c"
zwj = "\u200d"
)
type joinState int8
const (
stateStart joinState = iota
stateVirama
stateBefore
stateBeforeVirama
stateAfter
stateFAIL
)
var joinStates = [][numJoinTypes]joinState{
stateStart: {
joiningL: stateBefore,
joiningD: stateBefore,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateVirama,
},
stateVirama: {
joiningL: stateBefore,
joiningD: stateBefore,
},
stateBefore: {
joiningL: stateBefore,
joiningD: stateBefore,
joiningT: stateBefore,
joinZWNJ: stateAfter,
joinZWJ: stateFAIL,
joinVirama: stateBeforeVirama,
},
stateBeforeVirama: {
joiningL: stateBefore,
joiningD: stateBefore,
joiningT: stateBefore,
},
stateAfter: {
joiningL: stateFAIL,
joiningD: stateBefore,
joiningT: stateAfter,
joiningR: stateStart,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateAfter, // no-op as we can't accept joiners here
},
stateFAIL: {
0: stateFAIL,
joiningL: stateFAIL,
joiningD: stateFAIL,
joiningT: stateFAIL,
joiningR: stateFAIL,
joinZWNJ: stateFAIL,
joinZWJ: stateFAIL,
joinVirama: stateFAIL,
},
}
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
// already implicitly satisfied by the overall implementation.
func (p *Profile) validateLabel(s string) error {
if s == "" {
if p.verifyDNSLength {
return &labelError{s, "A4"}
}
return nil
}
if p.bidirule != nil && !p.bidirule(s) {
return &labelError{s, "B"}
}
if !p.validateLabels {
return nil
}
trie := p.trie // p.validateLabels is only set if trie is set.
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
return &labelError{s, "V2"}
}
if s[0] == '-' || s[len(s)-1] == '-' {
return &labelError{s, "V3"}
}
// TODO: merge the use of this in the trie.
v, sz := trie.lookupString(s)
x := info(v)
if x.isModifier() {
return &labelError{s, "V5"}
}
// Quickly return in the absence of zero-width (non) joiners.
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
return nil
}
st := stateStart
for i := 0; ; {
jt := x.joinType()
if s[i:i+sz] == zwj {
jt = joinZWJ
} else if s[i:i+sz] == zwnj {
jt = joinZWNJ
}
st = joinStates[st][jt]
if x.isViramaModifier() {
st = joinStates[st][joinVirama]
}
if i += sz; i == len(s) {
break
}
v, sz = trie.lookupString(s[i:])
x = info(v)
}
if st == stateFAIL || st == stateAfter {
return &labelError{s, "C"}
}
return nil
}
func ascii(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] >= utf8.RuneSelf {
return false
}
}
return true
}
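The helpers above back the exported IDNA profiles; a minimal usage sketch (not part of this diff) showing how golang.org/x/net/idna is typically driven, with an illustrative domain:
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Lookup goes through the mapping path (validateAndMap above).
	a, err := idna.Lookup.ToASCII("bücher.example")
	fmt.Println(a, err) // xn--bcher-kva.example <nil>

	// Registration applies the stricter validateRegistration checks.
	u, err := idna.Registration.ToUnicode("xn--bcher-kva.example")
	fmt.Println(u, err)
}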


@ -1,11 +1,13 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// +build go1.10,!go1.13
package idna
// UnicodeVersion is the Unicode version from which the tables in this package are derived.
const UnicodeVersion = "10.0.0"
var mappings string = "" + // Size: 8176 bytes
var mappings string = "" + // Size: 8175 bytes
"\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x0514\x0512" +
"\x0534\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
"\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
@ -4554,4 +4556,4 @@ var idnaSparseValues = [1915]valueRange{
{value: 0x0040, lo: 0xb0, hi: 0xbf},
}
// Total table size 42115 bytes (41KiB); checksum: F4A1FA4E
// Total table size 42114 bytes (41KiB); checksum: 355A58A4

vendor/golang.org/x/net/idna/tables11.0.0.go (4653 lines; generated, vendored, normal file): diff suppressed because it is too large

vendor/golang.org/x/net/idna/tables9.0.0.go (4486 lines; generated, vendored, normal file): diff suppressed because it is too large


@ -91,9 +91,13 @@ func onesCount64(x uint64) int {
const m0 = 0x5555555555555555 // 01010101 ...
const m1 = 0x3333333333333333 // 00110011 ...
const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
const m3 = 0x00ff00ff00ff00ff // etc.
const m4 = 0x0000ffff0000ffff
// Unused in this function, but definitions preserved for
// documentation purposes:
//
// const m3 = 0x00ff00ff00ff00ff // etc.
// const m4 = 0x0000ffff0000ffff
//
// Implementation: Parallel summing of adjacent bits.
// See "Hacker's Delight", Chap. 5: Counting Bits.
// The following pattern shows the general approach:
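A runnable sketch of the parallel-summing pattern the comment describes (m3 and m4 would only be needed if the wider sums were masked instead of folded, which is why they can be kept purely for documentation):
package main

import "fmt"

// onesCount64Sketch counts set bits by summing adjacent fields of
// doubling width: 2-bit sums, then 4-bit, then 8-bit, then folding.
func onesCount64Sketch(x uint64) int {
	const m0 = 0x5555555555555555 // 01010101 ...
	const m1 = 0x3333333333333333 // 00110011 ...
	const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
	x = x>>1&m0 + x&m0  // each 2-bit field holds the count of its 2 bits
	x = x>>2&m1 + x&m1  // each 4-bit field holds the count of its 4 bits
	x = (x>>4 + x) & m2 // each byte holds the count of its 8 bits
	x += x >> 8         // fold byte counts; the total fits in 7 bits,
	x += x >> 16        // so no further masking (m3, m4) is required
	x += x >> 32
	return int(x) & 127
}

func main() {
	fmt.Println(onesCount64Sketch(0xF0F0)) // 8
}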

vendor/golang.org/x/sys/unix/asm_linux_riscv64.s (54 lines; generated, vendored, normal file)

@ -0,0 +1,54 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build riscv64,!gccgo
#include "textflag.h"
//
// System calls for linux/riscv64.
//
// Where available, just jump to package syscall's implementation of
// these functions.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
CALL runtime·entersyscall(SB)
MOV a1+8(FP), A0
MOV a2+16(FP), A1
MOV a3+24(FP), A2
MOV $0, A3
MOV $0, A4
MOV $0, A5
MOV $0, A6
MOV trap+0(FP), A7 // syscall entry
ECALL
MOV A0, r1+32(FP) // r1
MOV A1, r2+40(FP) // r2
CALL runtime·exitsyscall(SB)
RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOV a1+8(FP), A0
MOV a2+16(FP), A1
MOV a3+24(FP), A2
MOV ZERO, A3
MOV ZERO, A4
MOV ZERO, A5
MOV trap+0(FP), A7 // syscall entry
ECALL
MOV A0, r1+32(FP)
MOV A1, r2+40(FP)
RET
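These stubs are the assembly half of unix.Syscall and friends; a minimal sketch of the Go side (assuming linux/riscv64, where SYS_GETPID is defined):
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// unix.Syscall lands in the ·Syscall trampoline above, which jumps to
	// package syscall's implementation.
	pid, _, errno := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	if errno != 0 {
		fmt.Println("getpid failed:", errno)
		return
	}
	fmt.Println("pid:", pid)
}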

vendor/golang.org/x/sys/unix/asm_openbsd_arm64.s (29 lines; generated, vendored, normal file)

@ -0,0 +1,29 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
//
// System call support for arm64, OpenBSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)


@ -2,16 +2,101 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix
import "syscall"
import "unsafe"
// readInt returns the size-bytes unsigned integer in native byte order at offset off.
func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
if len(b) < int(off+size) {
return 0, false
}
if isBigEndian {
return readIntBE(b[off:], size), true
}
return readIntLE(b[off:], size), true
}
func readIntBE(b []byte, size uintptr) uint64 {
switch size {
case 1:
return uint64(b[0])
case 2:
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[1]) | uint64(b[0])<<8
case 4:
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
case 8:
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
default:
panic("syscall: readInt with unsupported size")
}
}
func readIntLE(b []byte, size uintptr) uint64 {
switch size {
case 1:
return uint64(b[0])
case 2:
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8
case 4:
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
case 8:
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
default:
panic("syscall: readInt with unsupported size")
}
}
// ParseDirent parses up to max directory entries in buf,
// appending the names to names. It returns the number of
// bytes consumed from buf, the number of entries added
// to names, and the new names slice.
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
return syscall.ParseDirent(buf, max, names)
origlen := len(buf)
count = 0
for max != 0 && len(buf) > 0 {
reclen, ok := direntReclen(buf)
if !ok || reclen > uint64(len(buf)) {
return origlen, count, names
}
rec := buf[:reclen]
buf = buf[reclen:]
ino, ok := direntIno(rec)
if !ok {
break
}
if ino == 0 { // File absent in directory.
continue
}
const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
namlen, ok := direntNamlen(rec)
if !ok || namoff+namlen > uint64(len(rec)) {
break
}
name := rec[namoff : namoff+namlen]
for i, c := range name {
if c == 0 {
name = name[:i]
break
}
}
// Check for useless names before allocating a string.
if string(name) == "." || string(name) == ".." {
continue
}
max--
count++
names = append(names, string(name))
}
return origlen - len(buf), count, names
}
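ParseDirent now walks raw dirent records itself instead of delegating to package syscall; a minimal usage sketch (the directory path is illustrative) pairing it with ReadDirent:
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open(".", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.ReadDirent(fd, buf)
		if err != nil || n <= 0 {
			break
		}
		// max = -1 means no limit; names accumulates across calls.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}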


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le
// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64
package unix


@ -105,25 +105,25 @@ dragonfly_amd64)
freebsd_386)
mkerrors="$mkerrors -m32"
mksyscall="go run mksyscall.go -l32"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_amd64)
mkerrors="$mkerrors -m64"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_arm)
mkerrors="$mkerrors"
mksyscall="go run mksyscall.go -l32 -arm"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
freebsd_arm64)
mkerrors="$mkerrors -m64"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_386)
@ -146,24 +146,39 @@ netbsd_arm)
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
netbsd_arm64)
mkerrors="$mkerrors -m64"
mksyscall="go run mksyscall.go -netbsd"
mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="go run mksyscall.go -l32 -openbsd"
mksysctl="./mksysctl_openbsd.pl"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="go run mksyscall.go -openbsd"
mksysctl="./mksysctl_openbsd.pl"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
mkerrors="$mkerrors"
mksyscall="go run mksyscall.go -l32 -openbsd -arm"
mksysctl="./mksysctl_openbsd.pl"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_arm64)
mkerrors="$mkerrors -m64"
mksyscall="go run mksyscall.go -openbsd"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.


@ -182,6 +182,8 @@ struct ltchars {
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/xattr.h>
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/errqueue.h>
#include <linux/if.h>
#include <linux/if_alg.h>
@ -197,6 +199,7 @@ struct ltchars {
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/keyctl.h>
#include <linux/loop.h>
#include <linux/magic.h>
#include <linux/memfd.h>
#include <linux/module.h>
@ -222,6 +225,7 @@ struct ltchars {
#include <linux/hdreg.h>
#include <linux/rtc.h>
#include <linux/if_xdp.h>
#include <linux/cryptouser.h>
#include <mtd/ubi-user.h>
#include <net/route.h>
@ -432,7 +436,9 @@ ccflags="$@"
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
$2 ~ /^LO_(KEY|NAME)_SIZE$/ ||
$2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ ||
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|MCAST|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
$2 ~ /^TP_STATUS_/ ||
$2 ~ /^FALLOC_/ ||
$2 == "ICMPV6_FILTER" ||
@ -465,7 +471,7 @@ ccflags="$@"
$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ ||
$2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
$2 ~ /^CLONE_[A-Z_]+/ ||
$2 !~ /^(BPF_TIMEVAL)$/ &&
$2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ &&
$2 ~ /^(BPF|DLT)_/ ||
$2 ~ /^(CLOCK|TIMER)_/ ||
$2 ~ /^CAN_/ ||
@ -499,6 +505,7 @@ ccflags="$@"
$2 ~ /^NFN/ ||
$2 ~ /^XDP_/ ||
$2 ~ /^(HDIO|WIN|SMART)_/ ||
$2 ~ /^CRYPTO_/ ||
$2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ ||
$2 ~/^PPPIOC/ ||


@ -42,9 +42,16 @@ func main() {
log.Fatal(err)
}
if goos == "aix" {
// Replace type of Atim, Mtim and Ctim by Timespec in Stat_t
// to avoid having both StTimespec and Timespec.
sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`)
b = sttimespec.ReplaceAll(b, []byte("Timespec"))
}
// Intentionally export __val fields in Fsid and Sigset_t
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`)
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}"))
valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`)
b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}"))
// Intentionally export __fds_bits field in FdSet
fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`)
@ -96,6 +103,15 @@ func main() {
cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`)
b = cgoCommandRegex.ReplaceAll(b, []byte(replacement))
// Rename Stat_t time fields
if goos == "freebsd" && goarch == "386" {
// Hide Stat_t.[AMCB]tim_ext fields
renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`)
b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_"))
}
renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`)
b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}"))
// gofmt
b, err = format.Source(b)
if err != nil {


@ -214,6 +214,11 @@ func main() {
}
if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" {
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
cExtern += "#define c_select select\n"
}
// Imports of system calls from libc
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
cIn := strings.Join(cIn, ", ")
@ -328,7 +333,13 @@ func main() {
} else {
call += ""
}
call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist)
} else {
call += fmt.Sprintf("C.%s(%s)", sysname, arglist)
}
// Assign return values.
body := ""


@ -282,6 +282,11 @@ func main() {
if !onlyCommon {
// GCCGO Prototype Generation
// Imports of system calls from libc
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
cExtern += "#define c_select select\n"
}
cExtern += fmt.Sprintf("%s %s", cRettype, sysname)
cIn := strings.Join(cIn, ", ")
cExtern += fmt.Sprintf("(%s);\n", cIn)
@ -490,7 +495,14 @@ func main() {
// GCCGO function generation
argsgccgolist := strings.Join(argsgccgo, ", ")
callgccgo := fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
var callgccgo string
if sysname == "select" {
// select is a keyword of Go. Its name is
// changed to c_select.
callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist)
} else {
callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist)
}
textgccgo += callProto
textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo)
textgccgo += "\te1 = syscall.GetErrno()\n"

vendor/golang.org/x/sys/unix/mksysctl_openbsd.go (355 lines; generated, vendored, normal file)

@ -0,0 +1,355 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
//
// Build a MIB with each entry being an array containing the level, type and
// a hash that will contain additional entries if the current entry is a node.
// We then walk this MIB and create a flattened sysctl name to OID hash.
package main
import (
"bufio"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
)
var (
goos, goarch string
)
// cmdLine returns this program's command-line arguments.
func cmdLine() string {
return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ")
}
// buildTags returns build tags.
func buildTags() string {
return fmt.Sprintf("%s,%s", goarch, goos)
}
// reMatch performs a regular expression match and stores the submatch slice in the value pointed to by m.
func reMatch(re *regexp.Regexp, str string, m *[]string) bool {
*m = re.FindStringSubmatch(str)
if *m != nil {
return true
}
return false
}
type nodeElement struct {
n int
t string
pE *map[string]nodeElement
}
var (
debugEnabled bool
mib map[string]nodeElement
node *map[string]nodeElement
nodeMap map[string]string
sysCtl []string
)
var (
ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`)
ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`)
ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`)
netInetRE = regexp.MustCompile(`^netinet/`)
netInet6RE = regexp.MustCompile(`^netinet6/`)
netRE = regexp.MustCompile(`^net/`)
bracesRE = regexp.MustCompile(`{.*}`)
ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`)
fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`)
)
func debug(s string) {
if debugEnabled {
fmt.Fprintln(os.Stderr, s)
}
}
// Walk the MIB and build a sysctl name to OID mapping.
func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) {
lNode := pNode // local copy of pointer to node
var keys []string
for k := range *lNode {
keys = append(keys, k)
}
sort.Strings(keys)
for _, key := range keys {
nodename := name
if name != "" {
nodename += "."
}
nodename += key
nodeoid := append(oid, (*pNode)[key].n)
if (*pNode)[key].t == `CTLTYPE_NODE` {
if _, ok := nodeMap[nodename]; ok {
lNode = &mib
ctlName := nodeMap[nodename]
for _, part := range strings.Split(ctlName, ".") {
lNode = ((*lNode)[part]).pE
}
} else {
lNode = (*pNode)[key].pE
}
buildSysctl(lNode, nodename, nodeoid)
} else if (*pNode)[key].t != "" {
oidStr := []string{}
for j := range nodeoid {
oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j]))
}
text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n"
sysCtl = append(sysCtl, text)
}
}
}
func main() {
// Get the OS (using GOOS_TARGET if it exists)
goos = os.Getenv("GOOS_TARGET")
if goos == "" {
goos = os.Getenv("GOOS")
}
// Get the architecture (using GOARCH_TARGET if it exists)
goarch = os.Getenv("GOARCH_TARGET")
if goarch == "" {
goarch = os.Getenv("GOARCH")
}
// Check if GOOS and GOARCH environment variables are defined
if goarch == "" || goos == "" {
fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n")
os.Exit(1)
}
mib = make(map[string]nodeElement)
headers := [...]string{
`sys/sysctl.h`,
`sys/socket.h`,
`sys/tty.h`,
`sys/malloc.h`,
`sys/mount.h`,
`sys/namei.h`,
`sys/sem.h`,
`sys/shm.h`,
`sys/vmmeter.h`,
`uvm/uvmexp.h`,
`uvm/uvm_param.h`,
`uvm/uvm_swap_encrypt.h`,
`ddb/db_var.h`,
`net/if.h`,
`net/if_pfsync.h`,
`net/pipex.h`,
`netinet/in.h`,
`netinet/icmp_var.h`,
`netinet/igmp_var.h`,
`netinet/ip_ah.h`,
`netinet/ip_carp.h`,
`netinet/ip_divert.h`,
`netinet/ip_esp.h`,
`netinet/ip_ether.h`,
`netinet/ip_gre.h`,
`netinet/ip_ipcomp.h`,
`netinet/ip_ipip.h`,
`netinet/pim_var.h`,
`netinet/tcp_var.h`,
`netinet/udp_var.h`,
`netinet6/in6.h`,
`netinet6/ip6_divert.h`,
`netinet6/pim6_var.h`,
`netinet/icmp6.h`,
`netmpls/mpls.h`,
}
ctls := [...]string{
`kern`,
`vm`,
`fs`,
`net`,
//debug /* Special handling required */
`hw`,
//machdep /* Arch specific */
`user`,
`ddb`,
//vfs /* Special handling required */
`fs.posix`,
`kern.forkstat`,
`kern.intrcnt`,
`kern.malloc`,
`kern.nchstats`,
`kern.seminfo`,
`kern.shminfo`,
`kern.timecounter`,
`kern.tty`,
`kern.watchdog`,
`net.bpf`,
`net.ifq`,
`net.inet`,
`net.inet.ah`,
`net.inet.carp`,
`net.inet.divert`,
`net.inet.esp`,
`net.inet.etherip`,
`net.inet.gre`,
`net.inet.icmp`,
`net.inet.igmp`,
`net.inet.ip`,
`net.inet.ip.ifq`,
`net.inet.ipcomp`,
`net.inet.ipip`,
`net.inet.mobileip`,
`net.inet.pfsync`,
`net.inet.pim`,
`net.inet.tcp`,
`net.inet.udp`,
`net.inet6`,
`net.inet6.divert`,
`net.inet6.ip6`,
`net.inet6.icmp6`,
`net.inet6.pim6`,
`net.inet6.tcp6`,
`net.inet6.udp6`,
`net.mpls`,
`net.mpls.ifq`,
`net.key`,
`net.pflow`,
`net.pfsync`,
`net.pipex`,
`net.rt`,
`vm.swapencrypt`,
//vfsgenctl /* Special handling required */
}
// Node name "fixups"
ctlMap := map[string]string{
"ipproto": "net.inet",
"net.inet.ipproto": "net.inet",
"net.inet6.ipv6proto": "net.inet6",
"net.inet6.ipv6": "net.inet6.ip6",
"net.inet.icmpv6": "net.inet6.icmp6",
"net.inet6.divert6": "net.inet6.divert",
"net.inet6.tcp6": "net.inet.tcp",
"net.inet6.udp6": "net.inet.udp",
"mpls": "net.mpls",
"swpenc": "vm.swapencrypt",
}
// Node mappings
nodeMap = map[string]string{
"net.inet.ip.ifq": "net.ifq",
"net.inet.pfsync": "net.pfsync",
"net.mpls.ifq": "net.ifq",
}
mCtls := make(map[string]bool)
for _, ctl := range ctls {
mCtls[ctl] = true
}
for _, header := range headers {
debug("Processing " + header)
file, err := os.Open(filepath.Join("/usr/include", header))
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
s := bufio.NewScanner(file)
for s.Scan() {
var sub []string
if reMatch(ctlNames1RE, s.Text(), &sub) ||
reMatch(ctlNames2RE, s.Text(), &sub) ||
reMatch(ctlNames3RE, s.Text(), &sub) {
if sub[1] == `CTL_NAMES` {
// Top level.
node = &mib
} else {
// Node.
nodename := strings.ToLower(sub[2])
ctlName := ""
if reMatch(netInetRE, header, &sub) {
ctlName = "net.inet." + nodename
} else if reMatch(netInet6RE, header, &sub) {
ctlName = "net.inet6." + nodename
} else if reMatch(netRE, header, &sub) {
ctlName = "net." + nodename
} else {
ctlName = nodename
ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`)
}
if val, ok := ctlMap[ctlName]; ok {
ctlName = val
}
if _, ok := mCtls[ctlName]; !ok {
debug("Ignoring " + ctlName + "...")
continue
}
// Walk down from the top of the MIB.
node = &mib
for _, part := range strings.Split(ctlName, ".") {
if _, ok := (*node)[part]; !ok {
debug("Missing node " + part)
(*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}}
}
node = (*node)[part].pE
}
}
// Populate current node with entries.
i := -1
for !strings.HasPrefix(s.Text(), "}") {
s.Scan()
if reMatch(bracesRE, s.Text(), &sub) {
i++
}
if !reMatch(ctlTypeRE, s.Text(), &sub) {
continue
}
(*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}}
}
}
}
err = s.Err()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
file.Close()
}
buildSysctl(&mib, "", []int{})
sort.Strings(sysCtl)
text := strings.Join(sysCtl, "")
fmt.Printf(srcTemplate, cmdLine(), buildTags(), text)
}
const srcTemplate = `// %s
// Code generated by the command above; DO NOT EDIT.
// +build %s
package unix
type mibentry struct {
ctlname string
ctloid []_C_int
}
var sysctlMib = []mibentry {
%s
}
`
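The table emitted from this template (the zsysctl_openbsd_*.go files) is what backs the string-named sysctl helpers; a minimal OpenBSD-only usage sketch:
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// On OpenBSD, the name is resolved through the generated sysctlMib
	// table (via nametomib) and then passed to the sysctl(2) syscall.
	ostype, err := unix.Sysctl("kern.ostype")
	if err != nil {
		panic(err)
	}
	fmt.Println(ostype)
}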


@ -1,265 +0,0 @@
#!/usr/bin/env perl
# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
#
# Parse the header files for OpenBSD and generate a Go usable sysctl MIB.
#
# Build a MIB with each entry being an array containing the level, type and
# a hash that will contain additional entries if the current entry is a node.
# We then walk this MIB and create a flattened sysctl name to OID hash.
#
use strict;
if($ENV{'GOARCH'} eq "" || $ENV{'GOOS'} eq "") {
print STDERR "GOARCH or GOOS not defined in environment\n";
exit 1;
}
my $debug = 0;
my %ctls = ();
my @headers = qw (
sys/sysctl.h
sys/socket.h
sys/tty.h
sys/malloc.h
sys/mount.h
sys/namei.h
sys/sem.h
sys/shm.h
sys/vmmeter.h
uvm/uvmexp.h
uvm/uvm_param.h
uvm/uvm_swap_encrypt.h
ddb/db_var.h
net/if.h
net/if_pfsync.h
net/pipex.h
netinet/in.h
netinet/icmp_var.h
netinet/igmp_var.h
netinet/ip_ah.h
netinet/ip_carp.h
netinet/ip_divert.h
netinet/ip_esp.h
netinet/ip_ether.h
netinet/ip_gre.h
netinet/ip_ipcomp.h
netinet/ip_ipip.h
netinet/pim_var.h
netinet/tcp_var.h
netinet/udp_var.h
netinet6/in6.h
netinet6/ip6_divert.h
netinet6/pim6_var.h
netinet/icmp6.h
netmpls/mpls.h
);
my @ctls = qw (
kern
vm
fs
net
#debug # Special handling required
hw
#machdep # Arch specific
user
ddb
#vfs # Special handling required
fs.posix
kern.forkstat
kern.intrcnt
kern.malloc
kern.nchstats
kern.seminfo
kern.shminfo
kern.timecounter
kern.tty
kern.watchdog
net.bpf
net.ifq
net.inet
net.inet.ah
net.inet.carp
net.inet.divert
net.inet.esp
net.inet.etherip
net.inet.gre
net.inet.icmp
net.inet.igmp
net.inet.ip
net.inet.ip.ifq
net.inet.ipcomp
net.inet.ipip
net.inet.mobileip
net.inet.pfsync
net.inet.pim
net.inet.tcp
net.inet.udp
net.inet6
net.inet6.divert
net.inet6.ip6
net.inet6.icmp6
net.inet6.pim6
net.inet6.tcp6
net.inet6.udp6
net.mpls
net.mpls.ifq
net.key
net.pflow
net.pfsync
net.pipex
net.rt
vm.swapencrypt
#vfsgenctl # Special handling required
);
# Node name "fixups"
my %ctl_map = (
"ipproto" => "net.inet",
"net.inet.ipproto" => "net.inet",
"net.inet6.ipv6proto" => "net.inet6",
"net.inet6.ipv6" => "net.inet6.ip6",
"net.inet.icmpv6" => "net.inet6.icmp6",
"net.inet6.divert6" => "net.inet6.divert",
"net.inet6.tcp6" => "net.inet.tcp",
"net.inet6.udp6" => "net.inet.udp",
"mpls" => "net.mpls",
"swpenc" => "vm.swapencrypt"
);
# Node mappings
my %node_map = (
"net.inet.ip.ifq" => "net.ifq",
"net.inet.pfsync" => "net.pfsync",
"net.mpls.ifq" => "net.ifq"
);
my $ctlname;
my %mib = ();
my %sysctl = ();
my $node;
sub debug() {
print STDERR "$_[0]\n" if $debug;
}
# Walk the MIB and build a sysctl name to OID mapping.
sub build_sysctl() {
my ($node, $name, $oid) = @_;
my %node = %{$node};
my @oid = @{$oid};
foreach my $key (sort keys %node) {
my @node = @{$node{$key}};
my $nodename = $name.($name ne '' ? '.' : '').$key;
my @nodeoid = (@oid, $node[0]);
if ($node[1] eq 'CTLTYPE_NODE') {
if (exists $node_map{$nodename}) {
$node = \%mib;
$ctlname = $node_map{$nodename};
foreach my $part (split /\./, $ctlname) {
$node = \%{@{$$node{$part}}[2]};
}
} else {
$node = $node[2];
}
&build_sysctl($node, $nodename, \@nodeoid);
} elsif ($node[1] ne '') {
$sysctl{$nodename} = \@nodeoid;
}
}
}
foreach my $ctl (@ctls) {
$ctls{$ctl} = $ctl;
}
# Build MIB
foreach my $header (@headers) {
&debug("Processing $header...");
open HEADER, "/usr/include/$header" ||
print STDERR "Failed to open $header\n";
while (<HEADER>) {
if ($_ =~ /^#define\s+(CTL_NAMES)\s+{/ ||
$_ =~ /^#define\s+(CTL_(.*)_NAMES)\s+{/ ||
$_ =~ /^#define\s+((.*)CTL_NAMES)\s+{/) {
if ($1 eq 'CTL_NAMES') {
# Top level.
$node = \%mib;
} else {
# Node.
my $nodename = lc($2);
if ($header =~ /^netinet\//) {
$ctlname = "net.inet.$nodename";
} elsif ($header =~ /^netinet6\//) {
$ctlname = "net.inet6.$nodename";
} elsif ($header =~ /^net\//) {
$ctlname = "net.$nodename";
} else {
$ctlname = "$nodename";
$ctlname =~ s/^(fs|net|kern)_/$1\./;
}
if (exists $ctl_map{$ctlname}) {
$ctlname = $ctl_map{$ctlname};
}
if (not exists $ctls{$ctlname}) {
&debug("Ignoring $ctlname...");
next;
}
# Walk down from the top of the MIB.
$node = \%mib;
foreach my $part (split /\./, $ctlname) {
if (not exists $$node{$part}) {
&debug("Missing node $part");
$$node{$part} = [ 0, '', {} ];
}
$node = \%{@{$$node{$part}}[2]};
}
}
# Populate current node with entries.
my $i = -1;
while (defined($_) && $_ !~ /^}/) {
$_ = <HEADER>;
$i++ if $_ =~ /{.*}/;
next if $_ !~ /{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}/;
$$node{$1} = [ $i, $2, {} ];
}
}
}
close HEADER;
}
&build_sysctl(\%mib, "", []);
print <<EOF;
// mksysctl_openbsd.pl
// Code generated by the command above; DO NOT EDIT.
// +build $ENV{'GOARCH'},$ENV{'GOOS'}
package unix;
type mibentry struct {
ctlname string
ctloid []_C_int
}
var sysctlMib = []mibentry {
EOF
foreach my $name (sort keys %sysctl) {
my @oid = @{$sysctl{$name}};
print "\t{ \"$name\", []_C_int{ ", join(', ', @oid), " } }, \n";
}
print <<EOF;
}
EOF


@ -139,7 +139,7 @@ func main() {
text += format(name, num, proto)
}
case "freebsd":
if t.Match(`^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$`) {
if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) {
num, proto := t.sub[1], t.sub[2]
name := fmt.Sprintf("SYS_%s", t.sub[3])
text += format(name, num, proto)


@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build openbsd
// +build 386 amd64 arm
package unix
import (

vendor/golang.org/x/sys/unix/readdirent_getdents.go (12 lines; generated, vendored, normal file)

@ -0,0 +1,12 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix dragonfly freebsd linux netbsd openbsd
package unix
// ReadDirent reads directory entries from fd and writes them into buf.
func ReadDirent(fd int, buf []byte) (n int, err error) {
return Getdents(fd, buf)
}


@ -0,0 +1,19 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package unix
import "unsafe"
// ReadDirent reads directory entries from fd and writes them into buf.
func ReadDirent(fd int, buf []byte) (n int, err error) {
// Final argument is (basep *uintptr) and the syscall doesn't take nil.
// 64 bits should be enough. (32 bits isn't even on 386). Since the
// actual system call is getdirentries64, 64 is a good guess.
// TODO(rsc): Can we use a single global basep for all calls?
var base = (*uintptr)(unsafe.Pointer(new(uint64)))
return Getdirentries(fd, buf, base)
}


@ -18,10 +18,13 @@ func cmsgAlignOf(salen int) int {
salign := SizeofPtr
switch runtime.GOOS {
case "darwin", "dragonfly", "solaris":
// NOTE: It seems like 64-bit Darwin, DragonFly BSD and
// Solaris kernels still require 32-bit aligned access to
// network subsystem.
case "aix":
// There is no alignment on AIX.
salign = 1
case "darwin", "dragonfly", "solaris", "illumos":
// NOTE: It seems like 64-bit Darwin, DragonFly BSD,
// illumos, and Solaris kernels still require 32-bit
// aligned access to network subsystem.
if SizeofPtr == 8 {
salign = 4
}
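The different salign values only change how far a sockaddr length gets rounded up; a small sketch of that rounding arithmetic (roundUp is a stand-in for the computation cmsgAlignOf performs):
package main

import "fmt"

// roundUp rounds salen up to the next multiple of salign (a power of two),
// mirroring the alignment computation described above.
func roundUp(salen, salign int) int {
	return (salen + salign - 1) &^ (salign - 1)
}

func main() {
	fmt.Println(roundUp(13, 8)) // 16: default word alignment on 64-bit
	fmt.Println(roundUp(13, 4)) // 16: darwin/dragonfly/solaris/illumos on 64-bit
	fmt.Println(roundUp(13, 1)) // 13: aix, which needs no alignment
}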


@ -50,5 +50,4 @@ func BytePtrFromString(s string) (*byte, error) {
}
// Single-word zero for use when we need a valid pointer to 0 bytes.
// See mkunix.pl.
var _zero uintptr


@ -280,8 +280,24 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return -1, ENOSYS
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
reclen, ok := direntReclen(buf)
if !ok {
return 0, false
}
return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
}
//sys getdirent(fd int, buf []byte) (n int, err error)
func ReadDirent(fd int, buf []byte) (n int, err error) {
func Getdents(fd int, buf []byte) (n int, err error) {
return getdirent(fd, buf)
}
@ -444,8 +460,6 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb Times(tms *Tms) (ticks uintptr, err error)
//sysnb Umask(mask int) (oldmask int)
//sysnb Uname(buf *Utsname) (err error)
//TODO umount
// //sys Unmount(target string, flags int) (err error) = umount
//sys Unlink(path string) (err error)
//sys Unlinkat(dirfd int, path string, flags int) (err error)
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
@ -456,8 +470,8 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sys Dup2(oldfd int, newfd int) (err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat
//sys fstat(fd int, stat *Stat_t) (err error)
//sys fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = fstatat
//sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int)
@ -466,18 +480,17 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb Getuid() (uid int)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Listen(s int, n int) (err error)
//sys Lstat(path string, stat *Stat_t) (err error)
//sys lstat(path string, stat *Stat_t) (err error)
//sys Pause() (err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64
//TODO Select
// //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)
//sys stat(path string, statptr *Stat_t) (err error)
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys Truncate(path string, length int64) (err error)
@ -493,8 +506,10 @@ func IoctlGetTermios(fd int, req uint) (*Termios, error) {
//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
// In order to use the msghdr structure with Control and Controllen, nrecvmsg and nsendmsg must be used.
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = nrecvmsg
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = nsendmsg
//sys munmap(addr uintptr, length uintptr) (err error)
@ -547,3 +562,12 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
//sys Utime(path string, buf *Utimbuf) (err error)
//sys Getsystemcfg(label int) (n uint64)
//sys umount(target string) (err error)
func Unmount(target string, flags int) (err error) {
if flags != 0 {
// AIX doesn't have any flags for umount.
return ENOSYS
}
return umount(target)
}


@ -32,3 +32,19 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
func Fstat(fd int, stat *Stat_t) error {
return fstat(fd, stat)
}
func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error {
return fstatat(dirfd, path, stat, flags)
}
func Lstat(path string, stat *Stat_t) error {
return lstat(path, stat)
}
func Stat(path string, statptr *Stat_t) error {
return stat(path, statptr)
}


@ -32,3 +32,50 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
// In order to have only the Timespec structure, the type of Stat_t's
// fields Atim, Mtim and Ctim is changed from StTimespec to Timespec
// during ztypes generation.
// On ppc64, Timespec.Nsec is an int64 while StTimespec.Nsec is an
// int32; since AIX on ppc64 is big-endian, the 32-bit value is read into
// the upper half of the 64-bit field and has to be shifted back down.
func fixStatTimFields(stat *Stat_t) {
stat.Atim.Nsec >>= 32
stat.Mtim.Nsec >>= 32
stat.Ctim.Nsec >>= 32
}
func Fstat(fd int, stat *Stat_t) error {
err := fstat(fd, stat)
if err != nil {
return err
}
fixStatTimFields(stat)
return nil
}
func Fstatat(dirfd int, path string, stat *Stat_t, flags int) error {
err := fstatat(dirfd, path, stat, flags)
if err != nil {
return err
}
fixStatTimFields(stat)
return nil
}
func Lstat(path string, stat *Stat_t) error {
err := lstat(path, stat)
if err != nil {
return err
}
fixStatTimFields(stat)
return nil
}
func Stat(path string, statptr *Stat_t) error {
err := stat(path, statptr)
if err != nil {
return err
}
fixStatTimFields(statptr)
return nil
}


@ -63,15 +63,6 @@ func Setgroups(gids []int) (err error) {
return setgroups(len(a), &a[0])
}
func ReadDirent(fd int, buf []byte) (n int, err error) {
// Final argument is (basep *uintptr) and the syscall doesn't take nil.
// 64 bits should be enough. (32 bits isn't even on 386). Since the
// actual system call is getdirentries64, 64 is a good guess.
// TODO(rsc): Can we use a single global basep for all calls?
var base = (*uintptr)(unsafe.Pointer(new(uint64)))
return Getdirentries(fd, buf, base)
}
// Wait status is 7 bits at bottom, either 0 (exited),
// 0x7F (stopped), or a signal number that caused an exit.
// The 0x80 bit is whether there was a core dump.
@ -86,6 +77,7 @@ const (
shift = 8
exited = 0
killed = 9
stopped = 0x7F
)
@ -112,6 +104,8 @@ func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
func (w WaitStatus) Stopped() bool { return w&mask == stopped && syscall.Signal(w>>shift) != SIGSTOP }
func (w WaitStatus) Killed() bool { return w&mask == killed && syscall.Signal(w>>shift) != SIGKILL }
func (w WaitStatus) Continued() bool { return w&mask == stopped && syscall.Signal(w>>shift) == SIGSTOP }
func (w WaitStatus) StopSignal() syscall.Signal {


@ -77,6 +77,18 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) }
func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) }


@ -57,6 +57,22 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
namlen, ok := direntNamlen(buf)
if !ok {
return 0, false
}
return (16 + namlen + 1 + 7) &^ 7, true
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
//sysnb pipe() (r int, w int, err error)
func Pipe(p []int) (err error) {
@ -269,6 +285,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Fstatfs(fd int, stat *Statfs_t) (err error)
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sys Getdents(fd int, buf []byte) (n int, err error)
//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
//sys Getdtablesize() (size int)
//sysnb Getegid() (egid int)


@ -82,6 +82,18 @@ func nametomib(name string) (mib []_C_int, err error) {
return buf[0 : n/siz], nil
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
func Pipe(p []int) (err error) {
return Pipe2(p, 0)
}
@ -362,7 +374,21 @@ func Getdents(fd int, buf []byte) (n int, err error) {
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
if supportsABI(_ino64First) {
return getdirentries_freebsd12(fd, buf, basep)
if basep == nil || unsafe.Sizeof(*basep) == 8 {
return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep)))
}
// The freebsd12 syscall needs a 64-bit base. On 32-bit machines
// we can't just use the basep passed in. See #32498.
var base uint64 = uint64(*basep)
n, err = getdirentries_freebsd12(fd, buf, &base)
*basep = uintptr(base)
if base>>32 != 0 {
// We can't stuff the base back into a uintptr, so any
// future calls would be suspect. Generate an error.
// EIO is allowed by getdirentries.
err = EIO
}
return
}
// The old syscall entries are smaller than the new. Use 1/4 of the original
@ -404,22 +430,22 @@ func roundup(x, y int) int {
func (s *Stat_t) convertFrom(old *stat_freebsd11_t) {
*s = Stat_t{
Dev: uint64(old.Dev),
Ino: uint64(old.Ino),
Nlink: uint64(old.Nlink),
Mode: old.Mode,
Uid: old.Uid,
Gid: old.Gid,
Rdev: uint64(old.Rdev),
Atim: old.Atim,
Mtim: old.Mtim,
Ctim: old.Ctim,
Birthtim: old.Birthtim,
Size: old.Size,
Blocks: old.Blocks,
Blksize: old.Blksize,
Flags: old.Flags,
Gen: uint64(old.Gen),
Dev: uint64(old.Dev),
Ino: uint64(old.Ino),
Nlink: uint64(old.Nlink),
Mode: old.Mode,
Uid: old.Uid,
Gid: old.Gid,
Rdev: uint64(old.Rdev),
Atim: old.Atim,
Mtim: old.Mtim,
Ctim: old.Ctim,
Btim: old.Btim,
Size: old.Size,
Blocks: old.Blocks,
Blksize: old.Blksize,
Flags: old.Flags,
Gen: uint64(old.Gen),
}
}
@ -507,6 +533,70 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return sendfile(outfd, infd, offset, count)
}
//sys ptrace(request int, pid int, addr uintptr, data int) (err error)
func PtraceAttach(pid int) (err error) {
return ptrace(PTRACE_ATTACH, pid, 0, 0)
}
func PtraceCont(pid int, signal int) (err error) {
return ptrace(PTRACE_CONT, pid, 1, signal)
}
func PtraceDetach(pid int) (err error) {
return ptrace(PTRACE_DETACH, pid, 1, 0)
}
func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) {
return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0)
}
func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0)
}
func PtraceGetRegs(pid int, regsout *Reg) (err error) {
return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0)
}
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint(countin)}
err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}
func PtraceLwpEvents(pid int, enable int) (err error) {
return ptrace(PTRACE_LWPEVENTS, pid, 0, enable)
}
func PtraceLwpInfo(pid int, info uintptr) (err error) {
return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{})))
}
func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
return PtraceIO(PIOD_READ_D, pid, addr, out, SizeofLong)
}
func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
return PtraceIO(PIOD_READ_I, pid, addr, out, SizeofLong)
}
func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
return PtraceIO(PIOD_WRITE_D, pid, addr, data, SizeofLong)
}
func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
return PtraceIO(PIOD_WRITE_I, pid, addr, data, SizeofLong)
}
func PtraceSetRegs(pid int, regs *Reg) (err error) {
return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0)
}
func PtraceSingleStep(pid int) (err error) {
return ptrace(PTRACE_SINGLESTEP, pid, 1, 0)
}
/*
* Exposed directly
*/
@ -555,7 +645,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error)
//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error)
//sys Getdtablesize() (size int)
//sysnb Getegid() (egid int)
//sysnb Geteuid() (uid int)
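The new FreeBSD ptrace wrappers added above (PtraceAttach, PtraceGetRegs, PtraceDetach, and friends) compose in the usual attach/wait/inspect/detach pattern; a minimal sketch, with the target pid taken from the command line and sufficient privileges assumed:
package main

import (
	"fmt"
	"os"
	"strconv"

	"golang.org/x/sys/unix"
)

func dumpRegs(pid int) error {
	if err := unix.PtraceAttach(pid); err != nil {
		return err
	}
	defer unix.PtraceDetach(pid)

	// Wait for the target to stop before touching its registers.
	var ws unix.WaitStatus
	if _, err := unix.Wait4(pid, &ws, 0, nil); err != nil {
		return err
	}

	var regs unix.Reg
	if err := unix.PtraceGetRegs(pid, &regs); err != nil {
		return err
	}
	fmt.Printf("%+v\n", regs)
	return nil
}

func main() {
	pid, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "usage: dumpregs <pid>")
		os.Exit(1)
	}
	if err := dumpRegs(pid); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}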


@ -13,7 +13,6 @@ package unix
import (
"encoding/binary"
"net"
"runtime"
"syscall"
"unsafe"
@ -109,6 +108,12 @@ func IoctlGetInt(fd int, req uint) (int, error) {
return value, err
}
func IoctlGetUint32(fd int, req uint) (uint32, error) {
var value uint32
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
return value, err
}
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
@ -759,7 +764,7 @@ const px_proto_oe = 0
type SockaddrPPPoE struct {
SID uint16
Remote net.HardwareAddr
Remote []byte
Dev string
raw RawSockaddrPPPoX
}
@ -910,7 +915,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
}
sa := &SockaddrPPPoE{
SID: binary.BigEndian.Uint16(pp[6:8]),
Remote: net.HardwareAddr(pp[8:14]),
Remote: pp[8:14],
}
for i := 14; i < 14+IFNAMSIZ; i++ {
if pp[i] == 0 {
@ -1408,8 +1413,20 @@ func Reboot(cmd int) (err error) {
return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
}
func ReadDirent(fd int, buf []byte) (n int, err error) {
return Getdents(fd, buf)
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
reclen, ok := direntReclen(buf)
if !ok {
return 0, false
}
return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
}
//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
@ -1444,6 +1461,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Acct(path string) (err error)
//sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error)
//sys Adjtimex(buf *Timex) (state int, err error)
//sys Capget(hdr *CapUserHeader, data *CapUserData) (err error)
//sys Capset(hdr *CapUserHeader, data *CapUserData) (err error)
//sys Chdir(path string) (err error)
//sys Chroot(path string) (err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
@ -1531,9 +1550,13 @@ func Setgid(uid int) (err error) {
return EOPNOTSUPP
}
func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) {
return signalfd(fd, sigmask, _C__NSIG/8, flags)
}
//sys Setpriority(which int, who int, prio int) (err error)
//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
//sys Signalfd(fd int, mask *Sigset_t, flags int) = SYS_SIGNALFD4
//sys signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) = SYS_SIGNALFD4
//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error)
//sys Sync()
//sys Syncfs(fd int) (err error)
@ -1662,6 +1685,82 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
return EACCES
}
//sys nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) = SYS_NAME_TO_HANDLE_AT
//sys openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) = SYS_OPEN_BY_HANDLE_AT
// fileHandle is the argument to nameToHandleAt and openByHandleAt. We
// originally tried to generate it via unix/linux/types.go with "type
// fileHandle C.struct_file_handle" but that generated empty structs
// for mips64 and mips64le. Instead, hard code it for now (it's the
// same everywhere else) until the mips64 generator issue is fixed.
type fileHandle struct {
Bytes uint32
Type int32
}
// FileHandle represents the C struct file_handle used by
// name_to_handle_at (see NameToHandleAt) and open_by_handle_at (see
// OpenByHandleAt).
type FileHandle struct {
*fileHandle
}
// NewFileHandle constructs a FileHandle.
func NewFileHandle(handleType int32, handle []byte) FileHandle {
const hdrSize = unsafe.Sizeof(fileHandle{})
buf := make([]byte, hdrSize+uintptr(len(handle)))
copy(buf[hdrSize:], handle)
fh := (*fileHandle)(unsafe.Pointer(&buf[0]))
fh.Type = handleType
fh.Bytes = uint32(len(handle))
return FileHandle{fh}
}
func (fh *FileHandle) Size() int { return int(fh.fileHandle.Bytes) }
func (fh *FileHandle) Type() int32 { return fh.fileHandle.Type }
func (fh *FileHandle) Bytes() []byte {
n := fh.Size()
if n == 0 {
return nil
}
return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n]
}
// NameToHandleAt wraps the name_to_handle_at system call; it obtains
// a handle for a path name.
func NameToHandleAt(dirfd int, path string, flags int) (handle FileHandle, mountID int, err error) {
var mid _C_int
// Try first with a small buffer, assuming the handle will
// only be 32 bytes.
size := uint32(32 + unsafe.Sizeof(fileHandle{}))
didResize := false
for {
buf := make([]byte, size)
fh := (*fileHandle)(unsafe.Pointer(&buf[0]))
fh.Bytes = size - uint32(unsafe.Sizeof(fileHandle{}))
err = nameToHandleAt(dirfd, path, fh, &mid, flags)
if err == EOVERFLOW {
if didResize {
// We shouldn't need to resize more than once
return
}
didResize = true
size = fh.Bytes + uint32(unsafe.Sizeof(fileHandle{}))
continue
}
if err != nil {
return
}
return FileHandle{fh}, int(mid), nil
}
}
// OpenByHandleAt wraps the open_by_handle_at system call; it opens a
// file via a handle as previously returned by NameToHandleAt.
func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err error) {
return openByHandleAt(mountFD, handle.fileHandle, flags)
}
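A minimal usage sketch for NameToHandleAt/OpenByHandleAt (the path and mount point are illustrative; open_by_handle_at normally needs CAP_DAC_READ_SEARCH):
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	handle, mountID, err := unix.NameToHandleAt(unix.AT_FDCWD, "/etc/hostname", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("mount id:", mountID, "handle size:", handle.Size())

	// Any descriptor on the same mount works as the mount fd.
	mountFD, err := unix.Open("/etc", unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(mountFD)

	fd, err := unix.OpenByHandleAt(mountFD, handle, unix.O_RDONLY)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)
	fmt.Println("reopened fd:", fd)
}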
/*
* Unimplemented
*/
@ -1669,8 +1768,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
// Alarm
// ArchPrctl
// Brk
// Capget
// Capset
// ClockNanosleep
// ClockSettime
// Clone


@ -272,3 +272,16 @@ func SyncFileRange(fd int, off int64, n int64, flags int) error {
// order of their arguments.
return armSyncFileRange(fd, flags, off, n)
}
//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
cmdlineLen := len(cmdline)
if cmdlineLen > 0 {
// Account for the additional NULL byte added by
// BytePtrFromString in kexecFileLoad. The kexec_file_load
// syscall expects a NULL-terminated string.
cmdlineLen++
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}


@ -94,6 +94,18 @@ func nametomib(name string) (mib []_C_int, err error) {
return mib, nil
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
func SysctlClockinfo(name string) (*Clockinfo, error) {
mib, err := sysctlmib(name)
if err != nil {
@ -120,9 +132,30 @@ func Pipe(p []int) (err error) {
return
}
//sys getdents(fd int, buf []byte) (n int, err error)
//sys Getdents(fd int, buf []byte) (n int, err error)
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
return getdents(fd, buf)
n, err = Getdents(fd, buf)
if err != nil || basep == nil {
return
}
var off int64
off, err = Seek(fd, 0, 1 /* SEEK_CUR */)
if err != nil {
*basep = ^uintptr(0)
return
}
*basep = uintptr(off)
if unsafe.Sizeof(*basep) == 8 {
return
}
if off>>32 != 0 {
// We can't stuff the offset back into a uintptr, so any
// future calls would be suspect. Generate an error.
// EIO is allowed by getdirentries.
err = EIO
}
return
}
const ImplementsGetwd = true


@ -43,6 +43,18 @@ func nametomib(name string) (mib []_C_int, err error) {
return nil, EINVAL
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Fileno), unsafe.Sizeof(Dirent{}.Fileno))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
}
func SysctlClockinfo(name string) (*Clockinfo, error) {
mib, err := sysctlmib(name)
if err != nil {
@ -89,9 +101,30 @@ func Pipe(p []int) (err error) {
return
}
//sys getdents(fd int, buf []byte) (n int, err error)
//sys Getdents(fd int, buf []byte) (n int, err error)
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
return getdents(fd, buf)
n, err = Getdents(fd, buf)
if err != nil || basep == nil {
return
}
var off int64
off, err = Seek(fd, 0, 1 /* SEEK_CUR */)
if err != nil {
*basep = ^uintptr(0)
return
}
*basep = uintptr(off)
if unsafe.Sizeof(*basep) == 8 {
return
}
if off>>32 != 0 {
// We can't stuff the offset back into a uintptr, so any
// future calls would be suspect. Generate an error.
// EIO was allowed by getdirentries.
err = EIO
}
return
}
const ImplementsGetwd = true

vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go (37 lines; generated, vendored, normal file)

@ -0,0 +1,37 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build arm64,openbsd
package unix
func setTimespec(sec, nsec int64) Timespec {
return Timespec{Sec: sec, Nsec: nsec}
}
func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: usec}
}
func SetKevent(k *Kevent_t, fd, mode, flags int) {
k.Ident = uint64(fd)
k.Filter = int16(mode)
k.Flags = uint16(flags)
}
func (iov *Iovec) SetLen(length int) {
iov.Len = uint64(length)
}
func (msghdr *Msghdr) SetControllen(length int) {
msghdr.Controllen = uint32(length)
}
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}
// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
// of openbsd/amd64 the syscall is called sysctl instead of __sysctl.
const SYS___SYSCTL = SYS_SYSCTL


@ -35,6 +35,22 @@ type SockaddrDatalink struct {
raw RawSockaddrDatalink
}
func direntIno(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
}
func direntReclen(buf []byte) (uint64, bool) {
return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
}
func direntNamlen(buf []byte) (uint64, bool) {
reclen, ok := direntReclen(buf)
if !ok {
return 0, false
}
return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true
}
//sysnb pipe(p *[2]_C_int) (n int, err error)
func Pipe(p []int) (err error) {
@ -189,6 +205,7 @@ func Setgroups(gids []int) (err error) {
return setgroups(len(a), &a[0])
}
// ReadDirent reads directory entries from fd and writes them into buf.
func ReadDirent(fd int, buf []byte) (n int, err error) {
// Final argument is (basep *uintptr) and the syscall doesn't take nil.
// TODO(rsc): Can we use a single global basep for all calls?


@ -87,8 +87,6 @@ type Mode_t C.mode_t
type Timespec C.struct_timespec
type StTimespec C.struct_st_timespec
type Timeval C.struct_timeval
type Timeval32 C.struct_timeval32
@ -133,6 +131,8 @@ type RawSockaddrInet6 C.struct_sockaddr_in6
type RawSockaddrUnix C.struct_sockaddr_un
type RawSockaddrDatalink C.struct_sockaddr_dl
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
@ -156,17 +156,18 @@ type Linger C.struct_linger
type Msghdr C.struct_msghdr
const (
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
SizeofMsghdr = C.sizeof_struct_msghdr
SizeofCmsghdr = C.sizeof_struct_cmsghdr
SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
)
// Routing and interface messages


@ -243,11 +243,55 @@ const (
// Ptrace requests
const (
PTRACE_TRACEME = C.PT_TRACE_ME
PTRACE_CONT = C.PT_CONTINUE
PTRACE_KILL = C.PT_KILL
PTRACE_ATTACH = C.PT_ATTACH
PTRACE_CONT = C.PT_CONTINUE
PTRACE_DETACH = C.PT_DETACH
PTRACE_GETFPREGS = C.PT_GETFPREGS
PTRACE_GETFSBASE = C.PT_GETFSBASE
PTRACE_GETLWPLIST = C.PT_GETLWPLIST
PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS
PTRACE_GETREGS = C.PT_GETREGS
PTRACE_GETXSTATE = C.PT_GETXSTATE
PTRACE_IO = C.PT_IO
PTRACE_KILL = C.PT_KILL
PTRACE_LWPEVENTS = C.PT_LWP_EVENTS
PTRACE_LWPINFO = C.PT_LWPINFO
PTRACE_SETFPREGS = C.PT_SETFPREGS
PTRACE_SETREGS = C.PT_SETREGS
PTRACE_SINGLESTEP = C.PT_STEP
PTRACE_TRACEME = C.PT_TRACE_ME
)
const (
PIOD_READ_D = C.PIOD_READ_D
PIOD_WRITE_D = C.PIOD_WRITE_D
PIOD_READ_I = C.PIOD_READ_I
PIOD_WRITE_I = C.PIOD_WRITE_I
)
const (
PL_FLAG_BORN = C.PL_FLAG_BORN
PL_FLAG_EXITED = C.PL_FLAG_EXITED
PL_FLAG_SI = C.PL_FLAG_SI
)
const (
TRAP_BRKPT = C.TRAP_BRKPT
TRAP_TRACE = C.TRAP_TRACE
)
type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo
type __Siginfo C.struct___siginfo
type Sigset_t C.sigset_t
type Reg C.struct_reg
type FpReg C.struct_fpreg
type PtraceIoDesc C.struct_ptrace_io_desc
// Events (kqueue, kevent)
type Kevent_t C.struct_kevent_freebsd11

Some files were not shown because too many files have changed in this diff.