mirror of
https://github.com/mattermost/mattermost.git
synced 2025-02-25 18:55:24 -06:00
Moving to glide
This commit is contained in:
168
vendor/gopkg.in/asn1-ber.v1/ber_test.go
generated
vendored
Normal file
168
vendor/gopkg.in/asn1-ber.v1/ber_test.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEncodeDecodeInteger(t *testing.T) {
|
||||
for _, v := range []int64{0, 10, 128, 1024, math.MaxInt64, -1, -100, -128, -1024, math.MinInt64} {
|
||||
enc := encodeInteger(v)
|
||||
dec, err := parseInt64(enc)
|
||||
if err != nil {
|
||||
t.Fatalf("Error decoding %d : %s", v, err)
|
||||
}
|
||||
if v != dec {
|
||||
t.Error("TestEncodeDecodeInteger failed for %d (got %d)", v, dec)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestBoolean(t *testing.T) {
|
||||
var value bool = true
|
||||
|
||||
packet := NewBoolean(ClassUniversal, TypePrimitive, TagBoolean, value, "first Packet, True")
|
||||
|
||||
newBoolean, ok := packet.Value.(bool)
|
||||
if !ok || newBoolean != value {
|
||||
t.Error("error during creating packet")
|
||||
}
|
||||
|
||||
encodedPacket := packet.Bytes()
|
||||
|
||||
newPacket := DecodePacket(encodedPacket)
|
||||
|
||||
newBoolean, ok = newPacket.Value.(bool)
|
||||
if !ok || newBoolean != value {
|
||||
t.Error("error during decoding packet")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestInteger(t *testing.T) {
|
||||
var value int64 = 10
|
||||
|
||||
packet := NewInteger(ClassUniversal, TypePrimitive, TagInteger, value, "Integer, 10")
|
||||
|
||||
{
|
||||
newInteger, ok := packet.Value.(int64)
|
||||
if !ok || newInteger != value {
|
||||
t.Error("error creating packet")
|
||||
}
|
||||
}
|
||||
|
||||
encodedPacket := packet.Bytes()
|
||||
|
||||
newPacket := DecodePacket(encodedPacket)
|
||||
|
||||
{
|
||||
newInteger, ok := newPacket.Value.(int64)
|
||||
if !ok || int64(newInteger) != value {
|
||||
t.Error("error decoding packet")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
var value string = "Hic sunt dracones"
|
||||
|
||||
packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, value, "String")
|
||||
|
||||
newValue, ok := packet.Value.(string)
|
||||
if !ok || newValue != value {
|
||||
t.Error("error during creating packet")
|
||||
}
|
||||
|
||||
encodedPacket := packet.Bytes()
|
||||
|
||||
newPacket := DecodePacket(encodedPacket)
|
||||
|
||||
newValue, ok = newPacket.Value.(string)
|
||||
if !ok || newValue != value {
|
||||
t.Error("error during decoding packet")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSequenceAndAppendChild(t *testing.T) {
|
||||
|
||||
values := []string{
|
||||
"HIC SVNT LEONES",
|
||||
"Iñtërnâtiônàlizætiøn",
|
||||
"Terra Incognita",
|
||||
}
|
||||
|
||||
sequence := NewSequence("a sequence")
|
||||
for _, s := range values {
|
||||
sequence.AppendChild(NewString(ClassUniversal, TypePrimitive, TagOctetString, s, "String"))
|
||||
}
|
||||
|
||||
if len(sequence.Children) != len(values) {
|
||||
t.Errorf("wrong length for children array should be %d, got %d", len(values), len(sequence.Children))
|
||||
}
|
||||
|
||||
encodedSequence := sequence.Bytes()
|
||||
|
||||
decodedSequence := DecodePacket(encodedSequence)
|
||||
if len(decodedSequence.Children) != len(values) {
|
||||
t.Errorf("wrong length for children array should be %d => %d", len(values), len(decodedSequence.Children))
|
||||
}
|
||||
|
||||
for i, s := range values {
|
||||
if decodedSequence.Children[i].Value.(string) != s {
|
||||
t.Errorf("expected %d to be %q, got %q", i, s, decodedSequence.Children[i].Value.(string))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestReadPacket serializes a string packet into an io.ReadWriter and
// reads it back with ReadPacket, then compares the ByteValue fields.
func TestReadPacket(t *testing.T) {
	packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, "Ad impossibilia nemo tenetur", "string")
	// Declared as the io.ReadWriter interface, backed by a bytes.Buffer.
	var buffer io.ReadWriter
	buffer = new(bytes.Buffer)

	buffer.Write(packet.Bytes())

	newPacket, err := ReadPacket(buffer)
	if err != nil {
		t.Error("error during ReadPacket", err)
	}
	// NOTE(review): ByteValue is cleared immediately before the comparison
	// below, so the Equal check compares nil against packet.ByteValue.
	// This looks like it weakens the assertion — confirm whether the
	// comparison was meant to run before this reset.
	newPacket.ByteValue = nil
	if !bytes.Equal(newPacket.ByteValue, packet.ByteValue) {
		t.Error("packets should be the same")
	}
}
|
||||
|
||||
func TestBinaryInteger(t *testing.T) {
|
||||
// data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.7
|
||||
var data = []struct {
|
||||
v int64
|
||||
e []byte
|
||||
}{
|
||||
{v: 0, e: []byte{0x02, 0x01, 0x00}},
|
||||
{v: 127, e: []byte{0x02, 0x01, 0x7F}},
|
||||
{v: 128, e: []byte{0x02, 0x02, 0x00, 0x80}},
|
||||
{v: 256, e: []byte{0x02, 0x02, 0x01, 0x00}},
|
||||
{v: -128, e: []byte{0x02, 0x01, 0x80}},
|
||||
{v: -129, e: []byte{0x02, 0x02, 0xFF, 0x7F}},
|
||||
{v: math.MaxInt64, e: []byte{0x02, 0x08, 0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}},
|
||||
{v: math.MinInt64, e: []byte{0x02, 0x08, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
|
||||
}
|
||||
|
||||
for _, d := range data {
|
||||
if b := NewInteger(ClassUniversal, TypePrimitive, TagInteger, int64(d.v), "").Bytes(); !bytes.Equal(d.e, b) {
|
||||
t.Errorf("Wrong binary generated for %d : got % X, expected % X", d.v, b, d.e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryOctetString(t *testing.T) {
|
||||
// data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.10
|
||||
|
||||
if !bytes.Equal([]byte{0x04, 0x08, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, NewString(ClassUniversal, TypePrimitive, TagOctetString, "\x01\x23\x45\x67\x89\xab\xcd\xef", "").Bytes()) {
|
||||
t.Error("wrong binary generated")
|
||||
}
|
||||
}
|
||||
135
vendor/gopkg.in/asn1-ber.v1/header_test.go
generated
vendored
Normal file
135
vendor/gopkg.in/asn1-ber.v1/header_test.go
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestReadHeader exercises readHeader against a table of raw byte inputs,
// covering empty input, short-form and long-form lengths, valid and
// invalid indefinite lengths. For each case it checks the returned
// identifier, length, bytes-read count, and error message.
func TestReadHeader(t *testing.T) {
	testcases := map[string]struct {
		Data               []byte
		ExpectedIdentifier Identifier
		ExpectedLength     int
		ExpectedBytesRead  int
		ExpectedError      string
	}{
		"empty": {
			Data:               []byte{},
			ExpectedIdentifier: Identifier{},
			ExpectedLength:     0,
			ExpectedBytesRead:  0,
			ExpectedError:      io.ErrUnexpectedEOF.Error(),
		},

		"valid short form": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString),
				127,
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagCharacterString,
			},
			ExpectedLength:    127,
			ExpectedBytesRead: 2,
			ExpectedError:     "",
		},

		"valid long form": {
			Data: []byte{
				// 2-byte encoding of tag
				byte(ClassUniversal) | byte(TypePrimitive) | byte(HighTag),
				byte(TagCharacterString),

				// 2-byte encoding of length
				LengthLongFormBitmask | 1,
				127,
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagCharacterString,
			},
			ExpectedLength:    127,
			ExpectedBytesRead: 4,
			ExpectedError:     "",
		},

		// Indefinite length is only legal on constructed types.
		"valid indefinite length": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString),
				LengthLongFormBitmask,
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagCharacterString,
			},
			ExpectedLength:    LengthIndefinite,
			ExpectedBytesRead: 2,
			ExpectedError:     "",
		},

		"invalid indefinite length": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString),
				LengthLongFormBitmask,
			},
			ExpectedIdentifier: Identifier{},
			ExpectedLength:     0,
			ExpectedBytesRead:  2,
			ExpectedError:      "indefinite length used with primitive type",
		},
	}

	for k, tc := range testcases {
		reader := bytes.NewBuffer(tc.Data)
		identifier, length, read, err := readHeader(reader)

		// Error cases with a matching message fall through to the field
		// checks below (the expected fields are zero values for them);
		// only an unexpected missing error skips them via continue.
		if err != nil {
			if tc.ExpectedError == "" {
				t.Errorf("%s: unexpected error: %v", k, err)
			} else if err.Error() != tc.ExpectedError {
				t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
			}
		} else if tc.ExpectedError != "" {
			t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
			continue
		}

		if read != tc.ExpectedBytesRead {
			t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
		}

		if identifier.ClassType != tc.ExpectedIdentifier.ClassType {
			t.Errorf("%s: expected class type %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.ClassType,
				ClassMap[tc.ExpectedIdentifier.ClassType],
				identifier.ClassType,
				ClassMap[identifier.ClassType],
			)
		}
		if identifier.TagType != tc.ExpectedIdentifier.TagType {
			t.Errorf("%s: expected tag type %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.TagType,
				TypeMap[tc.ExpectedIdentifier.TagType],
				identifier.TagType,
				TypeMap[identifier.TagType],
			)
		}
		if identifier.Tag != tc.ExpectedIdentifier.Tag {
			t.Errorf("%s: expected tag %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.Tag,
				tagMap[tc.ExpectedIdentifier.Tag],
				identifier.Tag,
				tagMap[identifier.Tag],
			)
		}

		if length != tc.ExpectedLength {
			t.Errorf("%s: expected length %d, got %d", k, tc.ExpectedLength, length)
		}
	}
}
|
||||
344
vendor/gopkg.in/asn1-ber.v1/identifier_test.go
generated
vendored
Normal file
344
vendor/gopkg.in/asn1-ber.v1/identifier_test.go
generated
vendored
Normal file
@@ -0,0 +1,344 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestReadIdentifier exercises readIdentifier against a table of raw
// identifier-octet inputs: single-byte (low-tag-number) identifiers for
// every class, and multi-byte high-tag-number identifiers including
// truncated, invalid, overflowing, and maximum-value encodings. For each
// case it checks the parsed identifier, bytes-read count, and error.
func TestReadIdentifier(t *testing.T) {
	testcases := map[string]struct {
		Data []byte

		ExpectedIdentifier Identifier
		ExpectedBytesRead  int
		ExpectedError      string
	}{
		"empty": {
			Data:              []byte{},
			ExpectedBytesRead: 0,
			ExpectedError:     io.ErrUnexpectedEOF.Error(),
		},

		// Single-byte identifiers: class | type | tag packed into one octet.
		"universal primitive eoc": {
			Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagEOC,
			},
			ExpectedBytesRead: 1,
		},
		"universal primitive character string": {
			Data: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypePrimitive,
				Tag:       TagCharacterString,
			},
			ExpectedBytesRead: 1,
		},

		"universal constructed bit string": {
			Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagBitString,
			},
			ExpectedBytesRead: 1,
		},
		"universal constructed character string": {
			Data: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagCharacterString,
			},
			ExpectedBytesRead: 1,
		},

		"application constructed object descriptor": {
			Data: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassApplication,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 1,
		},
		"context constructed object descriptor": {
			Data: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassContext,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 1,
		},
		"private constructed object descriptor": {
			Data: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
			ExpectedIdentifier: Identifier{
				ClassType: ClassPrivate,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 1,
		},

		// Multi-byte (high-tag-number) identifiers.
		"high-tag-number tag missing bytes": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag)},
			ExpectedError:     io.ErrUnexpectedEOF.Error(),
			ExpectedBytesRead: 1,
		},
		"high-tag-number tag invalid first byte": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), 0x0},
			ExpectedError:     "invalid first high-tag-number tag byte",
			ExpectedBytesRead: 2,
		},
		"high-tag-number tag invalid first byte with continue bit": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask)},
			ExpectedError:     "invalid first high-tag-number tag byte",
			ExpectedBytesRead: 2,
		},
		"high-tag-number tag continuation missing bytes": {
			Data:              []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag), byte(HighTagContinueBitmask | 0x1)},
			ExpectedError:     io.ErrUnexpectedEOF.Error(),
			ExpectedBytesRead: 2,
		},
		// Ten 7-bit groups exceed what fits in the tag's integer type.
		"high-tag-number tag overflow": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(HighTagContinueBitmask | 0x1),
				byte(0x1),
			},
			ExpectedError:     "high-tag-number tag overflow",
			ExpectedBytesRead: 11,
		},
		// Nine 7-bit groups of all ones: the largest representable tag.
		"max high-tag-number tag": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(0x7f),
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
			},
			ExpectedBytesRead: 10,
		},
		"high-tag-number encoding of low-tag value": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(TagObjectDescriptor),
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       TagObjectDescriptor,
			},
			ExpectedBytesRead: 2,
		},
		"max high-tag-number tag ignores extra data": {
			Data: []byte{
				byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(HighTagContinueBitmask | 0x7f),
				byte(0x7f),
				byte(0x01), // extra data, shouldn't be read
				byte(0x02), // extra data, shouldn't be read
				byte(0x03), // extra data, shouldn't be read
			},
			ExpectedIdentifier: Identifier{
				ClassType: ClassUniversal,
				TagType:   TypeConstructed,
				Tag:       Tag(0x7FFFFFFFFFFFFFFF), // 01111111...(63)...11111b
			},
			ExpectedBytesRead: 10,
		},
	}

	for k, tc := range testcases {
		reader := bytes.NewBuffer(tc.Data)
		identifier, read, err := readIdentifier(reader)

		// Error cases with a matching message fall through to the field
		// checks below (their expected fields are zero values); only an
		// unexpected missing error skips them via continue.
		if err != nil {
			if tc.ExpectedError == "" {
				t.Errorf("%s: unexpected error: %v", k, err)
			} else if err.Error() != tc.ExpectedError {
				t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
			}
		} else if tc.ExpectedError != "" {
			t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
			continue
		}

		if read != tc.ExpectedBytesRead {
			t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
		}

		if identifier.ClassType != tc.ExpectedIdentifier.ClassType {
			t.Errorf("%s: expected class type %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.ClassType,
				ClassMap[tc.ExpectedIdentifier.ClassType],
				identifier.ClassType,
				ClassMap[identifier.ClassType],
			)
		}
		if identifier.TagType != tc.ExpectedIdentifier.TagType {
			t.Errorf("%s: expected tag type %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.TagType,
				TypeMap[tc.ExpectedIdentifier.TagType],
				identifier.TagType,
				TypeMap[identifier.TagType],
			)
		}
		if identifier.Tag != tc.ExpectedIdentifier.Tag {
			t.Errorf("%s: expected tag %d (%s), got %d (%s)", k,
				tc.ExpectedIdentifier.Tag,
				tagMap[tc.ExpectedIdentifier.Tag],
				identifier.Tag,
				tagMap[identifier.Tag],
			)
		}
	}
}
|
||||
|
||||
func TestEncodeIdentifier(t *testing.T) {
|
||||
testcases := map[string]struct {
|
||||
Identifier Identifier
|
||||
ExpectedBytes []byte
|
||||
}{
|
||||
"universal primitive eoc": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassUniversal,
|
||||
TagType: TypePrimitive,
|
||||
Tag: TagEOC,
|
||||
},
|
||||
ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagEOC)},
|
||||
},
|
||||
"universal primitive character string": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassUniversal,
|
||||
TagType: TypePrimitive,
|
||||
Tag: TagCharacterString,
|
||||
},
|
||||
ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypePrimitive) | byte(TagCharacterString)},
|
||||
},
|
||||
|
||||
"universal constructed bit string": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassUniversal,
|
||||
TagType: TypeConstructed,
|
||||
Tag: TagBitString,
|
||||
},
|
||||
ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBitString)},
|
||||
},
|
||||
"universal constructed character string": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassUniversal,
|
||||
TagType: TypeConstructed,
|
||||
Tag: TagCharacterString,
|
||||
},
|
||||
ExpectedBytes: []byte{byte(ClassUniversal) | byte(TypeConstructed) | byte(TagCharacterString)},
|
||||
},
|
||||
|
||||
"application constructed object descriptor": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassApplication,
|
||||
TagType: TypeConstructed,
|
||||
Tag: TagObjectDescriptor,
|
||||
},
|
||||
ExpectedBytes: []byte{byte(ClassApplication) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
|
||||
},
|
||||
"context constructed object descriptor": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassContext,
|
||||
TagType: TypeConstructed,
|
||||
Tag: TagObjectDescriptor,
|
||||
},
|
||||
ExpectedBytes: []byte{byte(ClassContext) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
|
||||
},
|
||||
"private constructed object descriptor": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassPrivate,
|
||||
TagType: TypeConstructed,
|
||||
Tag: TagObjectDescriptor,
|
||||
},
|
||||
ExpectedBytes: []byte{byte(ClassPrivate) | byte(TypeConstructed) | byte(TagObjectDescriptor)},
|
||||
},
|
||||
|
||||
"max low-tag-number tag": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassUniversal,
|
||||
TagType: TypeConstructed,
|
||||
Tag: TagBMPString,
|
||||
},
|
||||
ExpectedBytes: []byte{
|
||||
byte(ClassUniversal) | byte(TypeConstructed) | byte(TagBMPString),
|
||||
},
|
||||
},
|
||||
|
||||
"min high-tag-number tag": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassUniversal,
|
||||
TagType: TypeConstructed,
|
||||
Tag: TagBMPString + 1,
|
||||
},
|
||||
ExpectedBytes: []byte{
|
||||
byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
|
||||
byte(TagBMPString + 1),
|
||||
},
|
||||
},
|
||||
|
||||
"max high-tag-number tag": {
|
||||
Identifier: Identifier{
|
||||
ClassType: ClassUniversal,
|
||||
TagType: TypeConstructed,
|
||||
Tag: Tag(math.MaxInt64),
|
||||
},
|
||||
ExpectedBytes: []byte{
|
||||
byte(ClassUniversal) | byte(TypeConstructed) | byte(HighTag),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(HighTagContinueBitmask | 0x7f),
|
||||
byte(0x7f),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for k, tc := range testcases {
|
||||
b := encodeIdentifier(tc.Identifier)
|
||||
if bytes.Compare(tc.ExpectedBytes, b) != 0 {
|
||||
t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedBytes, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
158
vendor/gopkg.in/asn1-ber.v1/length_test.go
generated
vendored
Normal file
158
vendor/gopkg.in/asn1-ber.v1/length_test.go
generated
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestReadLength exercises readLength against a table of raw length-octet
// inputs: the indefinite form, short-definite forms up to 127, and
// long-definite forms including truncated, overflowing, and maximum
// encodings. For each case it checks the parsed length, bytes-read count,
// and error message.
func TestReadLength(t *testing.T) {
	testcases := map[string]struct {
		Data []byte

		ExpectedLength    int
		ExpectedBytesRead int
		ExpectedError     string
	}{
		"empty": {
			Data:              []byte{},
			ExpectedBytesRead: 0,
			ExpectedError:     io.ErrUnexpectedEOF.Error(),
		},
		"invalid first byte": {
			Data:              []byte{0xFF},
			ExpectedBytesRead: 1,
			ExpectedError:     "invalid length byte 0xff",
		},

		"indefinite form": {
			Data:              []byte{LengthLongFormBitmask},
			ExpectedLength:    LengthIndefinite,
			ExpectedBytesRead: 1,
		},

		"short-definite-form zero length": {
			Data:              []byte{0},
			ExpectedLength:    0,
			ExpectedBytesRead: 1,
		},
		"short-definite-form length 1": {
			Data:              []byte{1},
			ExpectedLength:    1,
			ExpectedBytesRead: 1,
		},
		"short-definite-form max length": {
			Data:              []byte{127},
			ExpectedLength:    127,
			ExpectedBytesRead: 1,
		},

		"long-definite-form missing bytes": {
			Data:              []byte{LengthLongFormBitmask | 1},
			ExpectedBytesRead: 1,
			ExpectedError:     io.ErrUnexpectedEOF.Error(),
		},
		// Nine length octets exceed what fits in an int64.
		"long-definite-form overflow": {
			Data:              []byte{LengthLongFormBitmask | 9},
			ExpectedBytesRead: 1,
			ExpectedError:     "long-form length overflow",
		},
		"long-definite-form zero length": {
			Data:              []byte{LengthLongFormBitmask | 1, 0x0},
			ExpectedLength:    0,
			ExpectedBytesRead: 2,
		},
		"long-definite-form length 127": {
			Data:              []byte{LengthLongFormBitmask | 1, 127},
			ExpectedLength:    127,
			ExpectedBytesRead: 2,
		},
		// Eight content octets encoding math.MaxInt64.
		"long-definite-form max length": {
			Data: []byte{
				LengthLongFormBitmask | 8,
				0x7F,
				0xFF,
				0xFF,
				0xFF,
				0xFF,
				0xFF,
				0xFF,
				0xFF,
			},
			ExpectedLength:    math.MaxInt64,
			ExpectedBytesRead: 9,
		},
	}

	for k, tc := range testcases {
		reader := bytes.NewBuffer(tc.Data)
		length, read, err := readLength(reader)

		// Error cases with a matching message fall through to the checks
		// below (their expected fields are zero values); only an
		// unexpected missing error skips them via continue.
		if err != nil {
			if tc.ExpectedError == "" {
				t.Errorf("%s: unexpected error: %v", k, err)
			} else if err.Error() != tc.ExpectedError {
				t.Errorf("%s: expected error %v, got %v", k, tc.ExpectedError, err)
			}
		} else if tc.ExpectedError != "" {
			t.Errorf("%s: expected error %v, got none", k, tc.ExpectedError)
			continue
		}

		if read != tc.ExpectedBytesRead {
			t.Errorf("%s: expected read %d, got %d", k, tc.ExpectedBytesRead, read)
		}

		if length != tc.ExpectedLength {
			t.Errorf("%s: expected length %d, got %d", k, tc.ExpectedLength, length)
		}
	}
}
|
||||
|
||||
func TestEncodeLength(t *testing.T) {
|
||||
testcases := map[string]struct {
|
||||
Length int
|
||||
ExpectedBytes []byte
|
||||
}{
|
||||
"0": {
|
||||
Length: 0,
|
||||
ExpectedBytes: []byte{0},
|
||||
},
|
||||
"1": {
|
||||
Length: 1,
|
||||
ExpectedBytes: []byte{1},
|
||||
},
|
||||
|
||||
"max short-form length": {
|
||||
Length: 127,
|
||||
ExpectedBytes: []byte{127},
|
||||
},
|
||||
"min long-form length": {
|
||||
Length: 128,
|
||||
ExpectedBytes: []byte{LengthLongFormBitmask | 1, 128},
|
||||
},
|
||||
|
||||
"max long-form length": {
|
||||
Length: math.MaxInt64,
|
||||
ExpectedBytes: []byte{
|
||||
LengthLongFormBitmask | 8,
|
||||
0x7F,
|
||||
0xFF,
|
||||
0xFF,
|
||||
0xFF,
|
||||
0xFF,
|
||||
0xFF,
|
||||
0xFF,
|
||||
0xFF,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for k, tc := range testcases {
|
||||
b := encodeLength(tc.Length)
|
||||
if bytes.Compare(tc.ExpectedBytes, b) != 0 {
|
||||
t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedBytes, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
182
vendor/gopkg.in/asn1-ber.v1/suite_test.go
generated
vendored
Normal file
182
vendor/gopkg.in/asn1-ber.v1/suite_test.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
||||
package ber
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// errEOF is the truncated-input error message; several cases below expect it.
var errEOF = io.ErrUnexpectedEOF.Error()

// Tests from http://www.strozhevsky.com/free_docs/free_asn1_testsuite_descr.pdf
// Source files and descriptions at http://www.strozhevsky.com/free_docs/TEST_SUITE.zip
var testcases = []struct {
	// File contains the path to the BER-encoded file
	File string
	// Error indicates whether a decoding error is expected
	Error string
	// AbnormalEncoding indicates whether a normalized re-encoding is expected to differ from the original source
	AbnormalEncoding bool
	// IndefiniteEncoding indicates the source file used indefinite-length encoding, so the re-encoding is expected to differ (since the length is known)
	IndefiniteEncoding bool
}{
	// Common blocks
	{File: "tests/tc1.ber", Error: "high-tag-number tag overflow"},
	{File: "tests/tc2.ber", Error: errEOF},
	{File: "tests/tc3.ber", Error: errEOF},
	{File: "tests/tc4.ber", Error: "invalid length byte 0xff"},
	{File: "tests/tc5.ber", Error: "", AbnormalEncoding: true},
	// Real numbers (some expected failures are disabled until support is added)
	{File: "tests/tc6.ber", Error: ""},  // Error: "REAL value +0 must be encoded with zero-length value block"},
	{File: "tests/tc7.ber", Error: ""},  // Error: "REAL value -0 must be encoded as a special value"},
	{File: "tests/tc8.ber", Error: ""},
	{File: "tests/tc9.ber", Error: ""},  // Error: "Bits 6 and 5 of information octet for REAL are equal to 11"
	{File: "tests/tc10.ber", Error: ""},
	{File: "tests/tc11.ber", Error: ""}, // Error: "Incorrect NR form"
	{File: "tests/tc12.ber", Error: ""}, // Error: "Encoding of "special value" not from ASN.1 standard"
	{File: "tests/tc13.ber", Error: errEOF},
	{File: "tests/tc14.ber", Error: errEOF},
	{File: "tests/tc15.ber", Error: ""}, // Error: "Too big value of exponent"
	{File: "tests/tc16.ber", Error: ""}, // Error: "Too big value of mantissa"
	{File: "tests/tc17.ber", Error: ""}, // Error: "Too big values for exponent and mantissa + using of "scaling factor" value"
	// Integers
	{File: "tests/tc18.ber", Error: ""},
	{File: "tests/tc19.ber", Error: errEOF},
	{File: "tests/tc20.ber", Error: ""},
	// Object identifiers
	{File: "tests/tc21.ber", Error: ""},
	{File: "tests/tc22.ber", Error: ""},
	{File: "tests/tc23.ber", Error: errEOF},
	{File: "tests/tc24.ber", Error: ""},
	// Booleans
	{File: "tests/tc25.ber", Error: ""},
	{File: "tests/tc26.ber", Error: ""},
	{File: "tests/tc27.ber", Error: errEOF},
	{File: "tests/tc28.ber", Error: ""},
	{File: "tests/tc29.ber", Error: ""},
	// Null
	{File: "tests/tc30.ber", Error: ""},
	{File: "tests/tc31.ber", Error: errEOF},
	{File: "tests/tc32.ber", Error: ""},
	// Bitstring (some expected failures are disabled until support is added)
	{File: "tests/tc33.ber", Error: ""}, // Error: "Too big value for "unused bits""
	{File: "tests/tc34.ber", Error: errEOF},
	{File: "tests/tc35.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of different from BIT STRING types as internal types for constructive encoding"
	{File: "tests/tc36.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of "unused bits" in internal BIT STRINGs with constructive form of encoding"
	{File: "tests/tc37.ber", Error: ""},
	{File: "tests/tc38.ber", Error: "", IndefiniteEncoding: true},
	{File: "tests/tc39.ber", Error: ""},
	{File: "tests/tc40.ber", Error: ""},
	// Octet string (some expected failures are disabled until support is added)
	{File: "tests/tc41.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of different from OCTET STRING types as internal types for constructive encoding"
	{File: "tests/tc42.ber", Error: errEOF},
	{File: "tests/tc43.ber", Error: errEOF},
	{File: "tests/tc44.ber", Error: ""},
	{File: "tests/tc45.ber", Error: ""},
	// Bitstring
	{File: "tests/tc46.ber", Error: "indefinite length used with primitive type"},
	{File: "tests/tc47.ber", Error: "eoc child not allowed with definite length"},
	{File: "tests/tc48.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of more than 7 "unused bits" in BIT STRING with constrictive encoding form"
}
|
||||
|
||||
// TestSuiteDecodePacket runs every entry of the shared testcases table
// through DecodePacketErr: it checks the expected decode error (or its
// absence), then verifies that re-encoding either reproduces the source
// bytes exactly or — for abnormal/indefinite encodings — differs, and
// finally confirms a second decode/encode round trip is stable.
func TestSuiteDecodePacket(t *testing.T) {
	// Debug = true
	for _, tc := range testcases {
		file := tc.File

		dataIn, err := ioutil.ReadFile(file)
		if err != nil {
			t.Errorf("%s: %v", file, err)
			continue
		}

		// fmt.Printf("%s: decode %d\n", file, len(dataIn))
		packet, err := DecodePacketErr(dataIn)
		if err != nil {
			if tc.Error == "" {
				t.Errorf("%s: unexpected error during DecodePacket: %v", file, err)
			} else if tc.Error != err.Error() {
				t.Errorf("%s: expected error %q during DecodePacket, got %q", file, tc.Error, err)
			}
			continue
		}
		if tc.Error != "" {
			t.Errorf("%s: expected error %q, got none", file, tc.Error)
			continue
		}

		dataOut := packet.Bytes()
		if tc.AbnormalEncoding || tc.IndefiniteEncoding {
			// Abnormal encodings and encodings that used indefinite length should re-encode differently
			if bytes.Equal(dataOut, dataIn) {
				t.Errorf("%s: data should have been re-encoded differently", file)
			}
		} else if !bytes.Equal(dataOut, dataIn) {
			// Make sure the serialized data matches the source
			t.Errorf("%s: data should be the same", file)
		}

		packet, err = DecodePacketErr(dataOut)
		if err != nil {
			t.Errorf("%s: unexpected error: %v", file, err)
			continue
		}

		// Make sure the re-serialized data matches our original serialization
		dataOut2 := packet.Bytes()
		if !bytes.Equal(dataOut, dataOut2) {
			t.Errorf("%s: data should be the same", file)
		}
	}
}
|
||||
|
||||
func TestSuiteReadPacket(t *testing.T) {
|
||||
for _, tc := range testcases {
|
||||
file := tc.File
|
||||
|
||||
dataIn, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
t.Errorf("%s: %v", file, err)
|
||||
continue
|
||||
}
|
||||
|
||||
buffer := bytes.NewBuffer(dataIn)
|
||||
packet, err := ReadPacket(buffer)
|
||||
if err != nil {
|
||||
if tc.Error == "" {
|
||||
t.Errorf("%s: unexpected error during ReadPacket: %v", file, err)
|
||||
} else if tc.Error != err.Error() {
|
||||
t.Errorf("%s: expected error %q during ReadPacket, got %q", file, tc.Error, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if tc.Error != "" {
|
||||
t.Errorf("%s: expected error %q, got none", file, tc.Error)
|
||||
continue
|
||||
}
|
||||
|
||||
dataOut := packet.Bytes()
|
||||
if tc.AbnormalEncoding || tc.IndefiniteEncoding {
|
||||
// Abnormal encodings and encodings that used indefinite length should re-encode differently
|
||||
if bytes.Equal(dataOut, dataIn) {
|
||||
t.Errorf("%s: data should have been re-encoded differently", file)
|
||||
}
|
||||
} else if !bytes.Equal(dataOut, dataIn) {
|
||||
// Make sure the serialized data matches the source
|
||||
t.Errorf("%s: data should be the same", file)
|
||||
}
|
||||
|
||||
packet, err = DecodePacketErr(dataOut)
|
||||
if err != nil {
|
||||
t.Errorf("%s: unexpected error: %v", file, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Make sure the re-serialized data matches our original serialization
|
||||
dataOut2 := packet.Bytes()
|
||||
if !bytes.Equal(dataOut, dataOut2) {
|
||||
t.Errorf("%s: data should be the same", file)
|
||||
}
|
||||
}
|
||||
}
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc1.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc1.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>@
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc10.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc10.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<07><04><><EFBFBD><EFBFBD>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc11.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc11.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
015625
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc12.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc12.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
I
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc13.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc13.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc14.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc14.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc15.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc15.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<0C> <><7F><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc16.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc16.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<0C><>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc17.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc17.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<14> <09><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc18.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc18.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<03><>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc19.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc19.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc2.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc2.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc20.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc20.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc21.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc21.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<06><>Q<EFBFBD><51>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc22.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc22.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<10><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><0F>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc23.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc23.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<><7F><EFBFBD><EFBFBD><EFBFBD>
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc24.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc24.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<15>`<60>H<EFBFBD><48>O <02><><EFBFBD>J<EFBFBD><4A><EFBFBD>c<EFBFBD><63>/
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc25.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc25.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc26.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc26.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc27.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc27.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc28.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc28.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<01>
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc29.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc29.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc3.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc3.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc30.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc30.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc31.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc31.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc32.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc32.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc33.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc33.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc34.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc34.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc35.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc35.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc36.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc36.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc37.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc37.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc38.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc38.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc39.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc39.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc4.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc4.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc40.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc40.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc41.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc41.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc42.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc42.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc43.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc43.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
$
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc44.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc44.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc45.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc45.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc46.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc46.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc47.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc47.ber
generated
vendored
Normal file
Binary file not shown.
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc48.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc48.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc5.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc5.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>@
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc6.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc6.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
+0.E-5
|
||||
1
vendor/gopkg.in/asn1-ber.v1/tests/tc7.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc7.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
-0.E-5
|
||||
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc8.ber
generated
vendored
Normal file
BIN
vendor/gopkg.in/asn1-ber.v1/tests/tc8.ber
generated
vendored
Normal file
Binary file not shown.
1
vendor/gopkg.in/asn1-ber.v1/tests/tc9.ber
generated
vendored
Normal file
1
vendor/gopkg.in/asn1-ber.v1/tests/tc9.ber
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<03><>
|
||||
42
vendor/gopkg.in/fsnotify.v1/example_test.go
generated
vendored
Normal file
42
vendor/gopkg.in/fsnotify.v1/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !plan9
|
||||
|
||||
package fsnotify_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
func ExampleNewWatcher() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case event := <-watcher.Events:
|
||||
log.Println("event:", event)
|
||||
if event.Op&fsnotify.Write == fsnotify.Write {
|
||||
log.Println("modified file:", event.Name)
|
||||
}
|
||||
case err := <-watcher.Errors:
|
||||
log.Println("error:", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
err = watcher.Add("/tmp/foo")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
<-done
|
||||
}
|
||||
229
vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
generated
vendored
Normal file
229
vendor/gopkg.in/fsnotify.v1/inotify_poller_test.go
generated
vendored
Normal file
@@ -0,0 +1,229 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type testFd [2]int
|
||||
|
||||
func makeTestFd(t *testing.T) testFd {
|
||||
var tfd testFd
|
||||
errno := unix.Pipe(tfd[:])
|
||||
if errno != nil {
|
||||
t.Fatalf("Failed to create pipe: %v", errno)
|
||||
}
|
||||
return tfd
|
||||
}
|
||||
|
||||
func (tfd testFd) fd() int {
|
||||
return tfd[0]
|
||||
}
|
||||
|
||||
func (tfd testFd) closeWrite(t *testing.T) {
|
||||
errno := unix.Close(tfd[1])
|
||||
if errno != nil {
|
||||
t.Fatalf("Failed to close write end of pipe: %v", errno)
|
||||
}
|
||||
}
|
||||
|
||||
func (tfd testFd) put(t *testing.T) {
|
||||
buf := make([]byte, 10)
|
||||
_, errno := unix.Write(tfd[1], buf)
|
||||
if errno != nil {
|
||||
t.Fatalf("Failed to write to pipe: %v", errno)
|
||||
}
|
||||
}
|
||||
|
||||
func (tfd testFd) get(t *testing.T) {
|
||||
buf := make([]byte, 10)
|
||||
_, errno := unix.Read(tfd[0], buf)
|
||||
if errno != nil {
|
||||
t.Fatalf("Failed to read from pipe: %v", errno)
|
||||
}
|
||||
}
|
||||
|
||||
func (tfd testFd) close() {
|
||||
unix.Close(tfd[1])
|
||||
unix.Close(tfd[0])
|
||||
}
|
||||
|
||||
func makePoller(t *testing.T) (testFd, *fdPoller) {
|
||||
tfd := makeTestFd(t)
|
||||
poller, err := newFdPoller(tfd.fd())
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create poller: %v", err)
|
||||
}
|
||||
return tfd, poller
|
||||
}
|
||||
|
||||
func TestPollerWithBadFd(t *testing.T) {
|
||||
_, err := newFdPoller(-1)
|
||||
if err != unix.EBADF {
|
||||
t.Fatalf("Expected EBADF, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPollerWithData(t *testing.T) {
|
||||
tfd, poller := makePoller(t)
|
||||
defer tfd.close()
|
||||
defer poller.close()
|
||||
|
||||
tfd.put(t)
|
||||
ok, err := poller.wait()
|
||||
if err != nil {
|
||||
t.Fatalf("poller failed: %v", err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatalf("expected poller to return true")
|
||||
}
|
||||
tfd.get(t)
|
||||
}
|
||||
|
||||
func TestPollerWithWakeup(t *testing.T) {
|
||||
tfd, poller := makePoller(t)
|
||||
defer tfd.close()
|
||||
defer poller.close()
|
||||
|
||||
err := poller.wake()
|
||||
if err != nil {
|
||||
t.Fatalf("wake failed: %v", err)
|
||||
}
|
||||
ok, err := poller.wait()
|
||||
if err != nil {
|
||||
t.Fatalf("poller failed: %v", err)
|
||||
}
|
||||
if ok {
|
||||
t.Fatalf("expected poller to return false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPollerWithClose(t *testing.T) {
|
||||
tfd, poller := makePoller(t)
|
||||
defer tfd.close()
|
||||
defer poller.close()
|
||||
|
||||
tfd.closeWrite(t)
|
||||
ok, err := poller.wait()
|
||||
if err != nil {
|
||||
t.Fatalf("poller failed: %v", err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatalf("expected poller to return true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPollerWithWakeupAndData(t *testing.T) {
|
||||
tfd, poller := makePoller(t)
|
||||
defer tfd.close()
|
||||
defer poller.close()
|
||||
|
||||
tfd.put(t)
|
||||
err := poller.wake()
|
||||
if err != nil {
|
||||
t.Fatalf("wake failed: %v", err)
|
||||
}
|
||||
|
||||
// both data and wakeup
|
||||
ok, err := poller.wait()
|
||||
if err != nil {
|
||||
t.Fatalf("poller failed: %v", err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatalf("expected poller to return true")
|
||||
}
|
||||
|
||||
// data is still in the buffer, wakeup is cleared
|
||||
ok, err = poller.wait()
|
||||
if err != nil {
|
||||
t.Fatalf("poller failed: %v", err)
|
||||
}
|
||||
if !ok {
|
||||
t.Fatalf("expected poller to return true")
|
||||
}
|
||||
|
||||
tfd.get(t)
|
||||
// data is gone, only wakeup now
|
||||
err = poller.wake()
|
||||
if err != nil {
|
||||
t.Fatalf("wake failed: %v", err)
|
||||
}
|
||||
ok, err = poller.wait()
|
||||
if err != nil {
|
||||
t.Fatalf("poller failed: %v", err)
|
||||
}
|
||||
if ok {
|
||||
t.Fatalf("expected poller to return false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPollerConcurrent(t *testing.T) {
|
||||
tfd, poller := makePoller(t)
|
||||
defer tfd.close()
|
||||
defer poller.close()
|
||||
|
||||
oks := make(chan bool)
|
||||
live := make(chan bool)
|
||||
defer close(live)
|
||||
go func() {
|
||||
defer close(oks)
|
||||
for {
|
||||
ok, err := poller.wait()
|
||||
if err != nil {
|
||||
t.Fatalf("poller failed: %v", err)
|
||||
}
|
||||
oks <- ok
|
||||
if !<-live {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Try a write
|
||||
select {
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
case <-oks:
|
||||
t.Fatalf("poller did not wait")
|
||||
}
|
||||
tfd.put(t)
|
||||
if !<-oks {
|
||||
t.Fatalf("expected true")
|
||||
}
|
||||
tfd.get(t)
|
||||
live <- true
|
||||
|
||||
// Try a wakeup
|
||||
select {
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
case <-oks:
|
||||
t.Fatalf("poller did not wait")
|
||||
}
|
||||
err := poller.wake()
|
||||
if err != nil {
|
||||
t.Fatalf("wake failed: %v", err)
|
||||
}
|
||||
if <-oks {
|
||||
t.Fatalf("expected false")
|
||||
}
|
||||
live <- true
|
||||
|
||||
// Try a close
|
||||
select {
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
case <-oks:
|
||||
t.Fatalf("poller did not wait")
|
||||
}
|
||||
tfd.closeWrite(t)
|
||||
if !<-oks {
|
||||
t.Fatalf("expected true")
|
||||
}
|
||||
tfd.get(t)
|
||||
}
|
||||
344
vendor/gopkg.in/fsnotify.v1/inotify_test.go
generated
vendored
Normal file
344
vendor/gopkg.in/fsnotify.v1/inotify_test.go
generated
vendored
Normal file
@@ -0,0 +1,344 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func TestInotifyCloseRightAway(t *testing.T) {
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher")
|
||||
}
|
||||
|
||||
// Close immediately; it won't even reach the first unix.Read.
|
||||
w.Close()
|
||||
|
||||
// Wait for the close to complete.
|
||||
<-time.After(50 * time.Millisecond)
|
||||
isWatcherReallyClosed(t, w)
|
||||
}
|
||||
|
||||
func TestInotifyCloseSlightlyLater(t *testing.T) {
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher")
|
||||
}
|
||||
|
||||
// Wait until readEvents has reached unix.Read, and Close.
|
||||
<-time.After(50 * time.Millisecond)
|
||||
w.Close()
|
||||
|
||||
// Wait for the close to complete.
|
||||
<-time.After(50 * time.Millisecond)
|
||||
isWatcherReallyClosed(t, w)
|
||||
}
|
||||
|
||||
func TestInotifyCloseSlightlyLaterWithWatch(t *testing.T) {
|
||||
testDir := tempMkdir(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher")
|
||||
}
|
||||
w.Add(testDir)
|
||||
|
||||
// Wait until readEvents has reached unix.Read, and Close.
|
||||
<-time.After(50 * time.Millisecond)
|
||||
w.Close()
|
||||
|
||||
// Wait for the close to complete.
|
||||
<-time.After(50 * time.Millisecond)
|
||||
isWatcherReallyClosed(t, w)
|
||||
}
|
||||
|
||||
func TestInotifyCloseAfterRead(t *testing.T) {
|
||||
testDir := tempMkdir(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher")
|
||||
}
|
||||
|
||||
err = w.Add(testDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add .")
|
||||
}
|
||||
|
||||
// Generate an event.
|
||||
os.Create(filepath.Join(testDir, "somethingSOMETHINGsomethingSOMETHING"))
|
||||
|
||||
// Wait for readEvents to read the event, then close the watcher.
|
||||
<-time.After(50 * time.Millisecond)
|
||||
w.Close()
|
||||
|
||||
// Wait for the close to complete.
|
||||
<-time.After(50 * time.Millisecond)
|
||||
isWatcherReallyClosed(t, w)
|
||||
}
|
||||
|
||||
func isWatcherReallyClosed(t *testing.T, w *Watcher) {
|
||||
select {
|
||||
case err, ok := <-w.Errors:
|
||||
if ok {
|
||||
t.Fatalf("w.Errors is not closed; readEvents is still alive after closing (error: %v)", err)
|
||||
}
|
||||
default:
|
||||
t.Fatalf("w.Errors would have blocked; readEvents is still alive!")
|
||||
}
|
||||
|
||||
select {
|
||||
case _, ok := <-w.Events:
|
||||
if ok {
|
||||
t.Fatalf("w.Events is not closed; readEvents is still alive after closing")
|
||||
}
|
||||
default:
|
||||
t.Fatalf("w.Events would have blocked; readEvents is still alive!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInotifyCloseCreate(t *testing.T) {
|
||||
testDir := tempMkdir(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher: %v", err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
err = w.Add(testDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add testDir: %v", err)
|
||||
}
|
||||
h, err := os.Create(filepath.Join(testDir, "testfile"))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create file in testdir: %v", err)
|
||||
}
|
||||
h.Close()
|
||||
select {
|
||||
case _ = <-w.Events:
|
||||
case err := <-w.Errors:
|
||||
t.Fatalf("Error from watcher: %v", err)
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
t.Fatalf("Took too long to wait for event")
|
||||
}
|
||||
|
||||
// At this point, we've received one event, so the goroutine is ready.
|
||||
// It's also blocking on unix.Read.
|
||||
// Now we try to swap the file descriptor under its nose.
|
||||
w.Close()
|
||||
w, err = NewWatcher()
|
||||
defer w.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create second watcher: %v", err)
|
||||
}
|
||||
|
||||
<-time.After(50 * time.Millisecond)
|
||||
err = w.Add(testDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Error adding testDir again: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInotifyStress(t *testing.T) {
|
||||
testDir := tempMkdir(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
testFile := filepath.Join(testDir, "testfile")
|
||||
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher: %v", err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
killchan := make(chan struct{})
|
||||
defer close(killchan)
|
||||
|
||||
err = w.Add(testDir)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add testDir: %v", err)
|
||||
}
|
||||
|
||||
proc, err := os.FindProcess(os.Getpid())
|
||||
if err != nil {
|
||||
t.Fatalf("Error finding process: %v", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(5 * time.Millisecond):
|
||||
err := proc.Signal(unix.SIGUSR1)
|
||||
if err != nil {
|
||||
t.Fatalf("Signal failed: %v", err)
|
||||
}
|
||||
case <-killchan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(11 * time.Millisecond):
|
||||
err := w.poller.wake()
|
||||
if err != nil {
|
||||
t.Fatalf("Wake failed: %v", err)
|
||||
}
|
||||
case <-killchan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-killchan:
|
||||
return
|
||||
default:
|
||||
handle, err := os.Create(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Create failed: %v", err)
|
||||
}
|
||||
handle.Close()
|
||||
time.Sleep(time.Millisecond)
|
||||
err = os.Remove(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Remove failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
creates := 0
|
||||
removes := 0
|
||||
after := time.After(5 * time.Second)
|
||||
for {
|
||||
select {
|
||||
case <-after:
|
||||
if creates-removes > 1 || creates-removes < -1 {
|
||||
t.Fatalf("Creates and removes should not be off by more than one: %d creates, %d removes", creates, removes)
|
||||
}
|
||||
if creates < 50 {
|
||||
t.Fatalf("Expected at least 50 creates, got %d", creates)
|
||||
}
|
||||
return
|
||||
case err := <-w.Errors:
|
||||
t.Fatalf("Got an error from watcher: %v", err)
|
||||
case evt := <-w.Events:
|
||||
if evt.Name != testFile {
|
||||
t.Fatalf("Got an event for an unknown file: %s", evt.Name)
|
||||
}
|
||||
if evt.Op == Create {
|
||||
creates++
|
||||
}
|
||||
if evt.Op == Remove {
|
||||
removes++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInotifyRemoveTwice(t *testing.T) {
|
||||
testDir := tempMkdir(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
testFile := filepath.Join(testDir, "testfile")
|
||||
|
||||
handle, err := os.Create(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Create failed: %v", err)
|
||||
}
|
||||
handle.Close()
|
||||
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher: %v", err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
err = w.Add(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add testFile: %v", err)
|
||||
}
|
||||
|
||||
err = os.Remove(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to remove testFile: %v", err)
|
||||
}
|
||||
|
||||
err = w.Remove(testFile)
|
||||
if err == nil {
|
||||
t.Fatalf("no error on removing invalid file")
|
||||
}
|
||||
s1 := fmt.Sprintf("%s", err)
|
||||
|
||||
err = w.Remove(testFile)
|
||||
if err == nil {
|
||||
t.Fatalf("no error on removing invalid file")
|
||||
}
|
||||
s2 := fmt.Sprintf("%s", err)
|
||||
|
||||
if s1 != s2 {
|
||||
t.Fatalf("receive different error - %s / %s", s1, s2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInotifyInnerMapLength(t *testing.T) {
|
||||
testDir := tempMkdir(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
testFile := filepath.Join(testDir, "testfile")
|
||||
|
||||
handle, err := os.Create(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Create failed: %v", err)
|
||||
}
|
||||
handle.Close()
|
||||
|
||||
w, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create watcher: %v", err)
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
err = w.Add(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add testFile: %v", err)
|
||||
}
|
||||
go func() {
|
||||
for err := range w.Errors {
|
||||
t.Fatalf("error received: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = os.Remove(testFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to remove testFile: %v", err)
|
||||
}
|
||||
_ = <-w.Events // consume Remove event
|
||||
<-time.After(50 * time.Millisecond) // wait IN_IGNORE propagated
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if len(w.watches) != 0 {
|
||||
t.Fatalf("Expected watches len is 0, but got: %d, %v", len(w.watches), w.watches)
|
||||
}
|
||||
if len(w.paths) != 0 {
|
||||
t.Fatalf("Expected paths len is 0, but got: %d, %v", len(w.paths), w.paths)
|
||||
}
|
||||
}
|
||||
147
vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
generated
vendored
Normal file
147
vendor/gopkg.in/fsnotify.v1/integration_darwin_test.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// testExchangedataForWatcher tests the watcher with the exchangedata operation on OS X.
|
||||
//
|
||||
// This is widely used for atomic saves on OS X, e.g. TextMate and in Apple's NSDocument.
|
||||
//
|
||||
// See https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html
|
||||
// Also see: https://github.com/textmate/textmate/blob/cd016be29489eba5f3c09b7b70b06da134dda550/Frameworks/io/src/swap_file_data.cc#L20
|
||||
func testExchangedataForWatcher(t *testing.T, watchDir bool) {
|
||||
// Create directory to watch
|
||||
testDir1 := tempMkdir(t)
|
||||
|
||||
// For the intermediate file
|
||||
testDir2 := tempMkdir(t)
|
||||
|
||||
defer os.RemoveAll(testDir1)
|
||||
defer os.RemoveAll(testDir2)
|
||||
|
||||
resolvedFilename := "TestFsnotifyEvents.file"
|
||||
|
||||
// TextMate does:
|
||||
//
|
||||
// 1. exchangedata (intermediate, resolved)
|
||||
// 2. unlink intermediate
|
||||
//
|
||||
// Let's try to simulate that:
|
||||
resolved := filepath.Join(testDir1, resolvedFilename)
|
||||
intermediate := filepath.Join(testDir2, resolvedFilename+"~")
|
||||
|
||||
// Make sure we create the file before we start watching
|
||||
createAndSyncFile(t, resolved)
|
||||
|
||||
watcher := newWatcher(t)
|
||||
|
||||
// Test both variants in isolation
|
||||
if watchDir {
|
||||
addWatch(t, watcher, testDir1)
|
||||
} else {
|
||||
addWatch(t, watcher, resolved)
|
||||
}
|
||||
|
||||
// Receive errors on the error channel on a separate goroutine
|
||||
go func() {
|
||||
for err := range watcher.Errors {
|
||||
t.Fatalf("error received: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Receive events on the event channel on a separate goroutine
|
||||
eventstream := watcher.Events
|
||||
var removeReceived counter
|
||||
var createReceived counter
|
||||
|
||||
done := make(chan bool)
|
||||
|
||||
go func() {
|
||||
for event := range eventstream {
|
||||
// Only count relevant events
|
||||
if event.Name == filepath.Clean(resolved) {
|
||||
if event.Op&Remove == Remove {
|
||||
removeReceived.increment()
|
||||
}
|
||||
if event.Op&Create == Create {
|
||||
createReceived.increment()
|
||||
}
|
||||
}
|
||||
t.Logf("event received: %s", event)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Repeat to make sure the watched file/directory "survives" the REMOVE/CREATE loop.
|
||||
for i := 1; i <= 3; i++ {
|
||||
// The intermediate file is created in a folder outside the watcher
|
||||
createAndSyncFile(t, intermediate)
|
||||
|
||||
// 1. Swap
|
||||
if err := unix.Exchangedata(intermediate, resolved, 0); err != nil {
|
||||
t.Fatalf("[%d] exchangedata failed: %s", i, err)
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// 2. Delete the intermediate file
|
||||
err := os.Remove(intermediate)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("[%d] remove %s failed: %s", i, intermediate, err)
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
}
|
||||
|
||||
// We expect this event to be received almost immediately, but let's wait 500 ms to be sure
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// The events will be (CHMOD + REMOVE + CREATE) X 2. Let's focus on the last two:
|
||||
if removeReceived.value() < 3 {
|
||||
t.Fatal("fsnotify remove events have not been received after 500 ms")
|
||||
}
|
||||
|
||||
if createReceived.value() < 3 {
|
||||
t.Fatal("fsnotify create events have not been received after 500 ms")
|
||||
}
|
||||
|
||||
watcher.Close()
|
||||
t.Log("waiting for the event channel to become closed...")
|
||||
select {
|
||||
case <-done:
|
||||
t.Log("event channel closed")
|
||||
case <-time.After(2 * time.Second):
|
||||
t.Fatal("event stream was not closed after 2 seconds")
|
||||
}
|
||||
}
|
||||
|
||||
// TestExchangedataInWatchedDir tests the exchangedata operation on a file
// inside a watched directory (true presumably selects watching the dir —
// see testExchangedataForWatcher).
func TestExchangedataInWatchedDir(t *testing.T) {
	testExchangedataForWatcher(t, true)
}
|
||||
|
||||
// TestExchangedataInWatchedFile tests the exchangedata operation on the
// watched file itself. (Original comment named the wrong function.)
func TestExchangedataInWatchedFile(t *testing.T) {
	testExchangedataForWatcher(t, false)
}
|
||||
|
||||
func createAndSyncFile(t *testing.T, filepath string) {
|
||||
f1, err := os.OpenFile(filepath, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
t.Fatalf("creating %s failed: %s", filepath, err)
|
||||
}
|
||||
f1.Sync()
|
||||
f1.Close()
|
||||
}
|
||||
1237
vendor/gopkg.in/fsnotify.v1/integration_test.go
generated
vendored
Normal file
1237
vendor/gopkg.in/fsnotify.v1/integration_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
65
vendor/gopkg.in/throttled/throttled.v1/common_test.go
generated
vendored
Normal file
65
vendor/gopkg.in/throttled/throttled.v1/common_test.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package throttled
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/PuerkitoBio/boom/commands"
|
||||
)
|
||||
|
||||
// stats is a test HTTP handler that counts served (ok) and denied (dropped)
// requests and records the timestamp of each successful one. The embedded
// mutex guards all fields for concurrent use by the load generators.
type stats struct {
	sync.Mutex
	ok      int         // requests answered with 200
	dropped int         // requests rejected via DeniedHTTP
	ts      []time.Time // time of each successful request

	// body, if non-nil, is invoked at the start of every successful
	// request (e.g. to simulate per-request work).
	body func()
}
|
||||
|
||||
func (s *stats) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if s.body != nil {
|
||||
s.body()
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.ts = append(s.ts, time.Now())
|
||||
s.ok++
|
||||
w.WriteHeader(200)
|
||||
}
|
||||
|
||||
func (s *stats) DeniedHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.dropped++
|
||||
w.WriteHeader(deniedStatus)
|
||||
}
|
||||
|
||||
func (s *stats) Stats() (int, int, []time.Time) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.ok, s.dropped, s.ts
|
||||
}
|
||||
|
||||
// runTest serves h on an httptest server and runs each boom load generator
// against it on its own path (/0, /1, ...), returning the collected reports.
//
// NOTE(review): mu is held for the entire bo.Run() call, so multiple booms
// execute one after another rather than concurrently — possibly deliberate
// so generators don't compete for bandwidth; confirm before relying on
// truly concurrent load here.
func runTest(h http.Handler, b ...commands.Boom) []*commands.Report {
	srv := httptest.NewServer(h)
	defer srv.Close()

	var rpts []*commands.Report
	var wg sync.WaitGroup
	var mu sync.Mutex
	wg.Add(len(b))
	for i, bo := range b {
		// Each generator targets a distinct path.
		bo.Req.Url = srv.URL + fmt.Sprintf("/%d", i)
		go func(bo commands.Boom) { // bo passed by value to avoid loop-var capture
			mu.Lock()
			defer mu.Unlock()
			rpts = append(rpts, bo.Run())
			wg.Done()
		}(bo)
	}
	wg.Wait()
	return rpts
}
|
||||
65
vendor/gopkg.in/throttled/throttled.v1/delayer_test.go
generated
vendored
Normal file
65
vendor/gopkg.in/throttled/throttled.v1/delayer_test.go
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package throttled
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestDelayer verifies that each Delayer constructor (PerSec, PerMin,
// PerHour, PerDay, D) yields the expected delay between requests.
func TestDelayer(t *testing.T) {
	cases := []struct {
		in  Delayer
		out time.Duration
	}{
		0:  {PerSec(1), time.Second},
		1:  {PerSec(2), 500 * time.Millisecond},
		2:  {PerSec(4), 250 * time.Millisecond},
		3:  {PerSec(5), 200 * time.Millisecond},
		4:  {PerSec(10), 100 * time.Millisecond},
		5:  {PerSec(100), 10 * time.Millisecond},
		6:  {PerSec(3), 333333333 * time.Nanosecond}, // 1s/3, truncated
		7:  {PerMin(1), time.Minute},
		8:  {PerMin(2), 30 * time.Second},
		9:  {PerMin(4), 15 * time.Second},
		10: {PerMin(5), 12 * time.Second},
		11: {PerMin(10), 6 * time.Second},
		12: {PerMin(60), time.Second},
		13: {PerHour(1), time.Hour},
		14: {PerHour(2), 30 * time.Minute},
		15: {PerHour(4), 15 * time.Minute},
		16: {PerHour(60), time.Minute},
		17: {PerHour(120), 30 * time.Second},
		18: {D(time.Second), time.Second},
		19: {D(5 * time.Minute), 5 * time.Minute},
		20: {PerSec(200), 5 * time.Millisecond},
		21: {PerDay(24), time.Hour},
	}
	for i, c := range cases {
		got := c.in.Delay()
		if got != c.out {
			t.Errorf("%d: expected %s, got %s", i, c.out, got)
		}
	}
}
|
||||
|
||||
// TestQuota verifies that each Quota constructor reports the expected
// request count and time window.
func TestQuota(t *testing.T) {
	cases := []struct {
		q    Quota
		reqs int
		win  time.Duration
	}{
		0: {PerSec(10), 10, time.Second},
		1: {PerMin(30), 30, time.Minute},
		2: {PerHour(124), 124, time.Hour},
		3: {PerDay(1), 1, 24 * time.Hour},
		4: {Q{148, 17 * time.Second}, 148, 17 * time.Second},
	}
	for i, c := range cases {
		r, w := c.q.Quota()
		if r != c.reqs {
			t.Errorf("%d: expected %d requests, got %d", i, c.reqs, r)
		}
		if w != c.win {
			t.Errorf("%d: expected %s window, got %s", i, c.win, w)
		}
	}
}
|
||||
2
vendor/gopkg.in/throttled/throttled.v1/doc.go
generated
vendored
2
vendor/gopkg.in/throttled/throttled.v1/doc.go
generated
vendored
@@ -74,4 +74,4 @@
|
||||
// The BSD 3-clause license. Copyright (c) 2014 Martin Angers and Contributors.
|
||||
// http://opensource.org/licenses/BSD-3-Clause
|
||||
//
|
||||
package throttled
|
||||
package throttled // import "gopkg.in/throttled/throttled.v1"
|
||||
|
||||
12
vendor/gopkg.in/throttled/throttled.v1/examples/README.md
generated
vendored
Normal file
12
vendor/gopkg.in/throttled/throttled.v1/examples/README.md
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# Examples
|
||||
|
||||
This directory contains examples for all the throttlers implemented by the throttled package, as well as an example of a custom limiter.
|
||||
|
||||
* custom/ : implements a custom limiter that allows requests to path /a on even seconds, and to path /b on odd seconds.
|
||||
* interval-many/ : implements a common interval throttler to control two different handlers, one for path /a and another for path /b, so that requests to any one of the handlers go through at the specified interval.
|
||||
* interval-vary/ : implements an interval throttler that varies by path, so that requests to each different path goes through at the specified interval.
|
||||
* interval/ : implements an interval throttler so that any request goes through at the specified interval, regardless of path or any other criteria.
|
||||
* memstats/ : implements a memory-usage throttler that limits access based on current memory statistics.
|
||||
* rate-limit/ : implements a rate-limiter throttler that varies by path, so that the number of requests allowed are counted based on the requested path.
|
||||
|
||||
Each example app supports a number of command-line flags. Run the example with the -h flag to display usage and defaults.
|
||||
90
vendor/gopkg.in/throttled/throttled.v1/examples/custom/main.go
generated
vendored
Normal file
90
vendor/gopkg.in/throttled/throttled.v1/examples/custom/main.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/throttled/throttled.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
|
||||
output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
|
||||
)
|
||||
|
||||
// customLimiter is a custom throttled limiter: it allows requests to the
// /a path on even seconds only, and requests to the /b path on odd
// seconds only.
//
// Yes this is absurd. A more realistic case could be to allow requests to
// some contest page only during a limited time window.
type customLimiter struct {
}
|
||||
|
||||
// Start implements the limiter interface; this limiter needs no setup.
func (c *customLimiter) Start() {
	// No-op
}
|
||||
|
||||
func (c *customLimiter) Limit(w http.ResponseWriter, r *http.Request) (<-chan bool, error) {
|
||||
s := time.Now().Second()
|
||||
ch := make(chan bool, 1)
|
||||
ok := (r.URL.Path == "/a" && s%2 == 0) || (r.URL.Path == "/b" && s%2 != 0)
|
||||
ch <- ok
|
||||
if *output == "v" {
|
||||
log.Printf("Custom Limiter: Path=%s, Second=%d; ok? %v", r.URL.Path, s, ok)
|
||||
}
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var h http.Handler
|
||||
var ok, ko int
|
||||
var mu sync.Mutex
|
||||
|
||||
// Keep the start time to print since-time
|
||||
start := time.Now()
|
||||
// Create the custom throttler using our custom limiter
|
||||
t := throttled.Custom(&customLimiter{})
|
||||
// Set its denied handler
|
||||
t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ko" {
|
||||
log.Printf("KO: %s", time.Since(start))
|
||||
}
|
||||
throttled.DefaultDeniedHandler.ServeHTTP(w, r)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ko++
|
||||
})
|
||||
// Throttle the OK handler
|
||||
rand.Seed(time.Now().Unix())
|
||||
h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ok" {
|
||||
log.Printf("ok: %s", time.Since(start))
|
||||
}
|
||||
if *delayRes > 0 {
|
||||
wait := time.Duration(rand.Intn(int(*delayRes)))
|
||||
time.Sleep(wait)
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ok++
|
||||
}))
|
||||
|
||||
// Print stats once in a while
|
||||
go func() {
|
||||
for _ = range time.Tick(10 * time.Second) {
|
||||
mu.Lock()
|
||||
log.Printf("ok: %d, ko: %d", ok, ko)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
fmt.Println("server listening on port 9000")
|
||||
http.ListenAndServe(":9000", h)
|
||||
}
|
||||
79
vendor/gopkg.in/throttled/throttled.v1/examples/interval-many/main.go
generated
vendored
Normal file
79
vendor/gopkg.in/throttled/throttled.v1/examples/interval-many/main.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/throttled/throttled.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
delay = flag.Duration("delay", 200*time.Millisecond, "delay between calls")
|
||||
bursts = flag.Int("bursts", 10, "number of bursts allowed")
|
||||
delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
|
||||
output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var ok, ko int
|
||||
var mu sync.Mutex
|
||||
|
||||
// Keep start time to log since-time
|
||||
start := time.Now()
|
||||
|
||||
// Create the interval throttle
|
||||
t := throttled.Interval(throttled.D(*delay), *bursts, nil, 0)
|
||||
// Set its denied handler
|
||||
t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ko" {
|
||||
log.Printf("%s: KO: %s", r.URL.Path, time.Since(start))
|
||||
}
|
||||
throttled.DefaultDeniedHandler.ServeHTTP(w, r)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ko++
|
||||
})
|
||||
// Create OK handlers
|
||||
rand.Seed(time.Now().Unix())
|
||||
makeHandler := func(ix int) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ok" {
|
||||
log.Printf("handler %d: %s: ok: %s", ix, r.URL.Path, time.Since(start))
|
||||
}
|
||||
if *delayRes > 0 {
|
||||
wait := time.Duration(rand.Intn(int(*delayRes)))
|
||||
time.Sleep(wait)
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ok++
|
||||
})
|
||||
}
|
||||
// Throttle them using the same interval throttler
|
||||
h1 := t.Throttle(makeHandler(1))
|
||||
h2 := t.Throttle(makeHandler(2))
|
||||
|
||||
// Handle two paths
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/a", h1)
|
||||
mux.Handle("/b", h2)
|
||||
|
||||
// Print stats once in a while
|
||||
go func() {
|
||||
for _ = range time.Tick(10 * time.Second) {
|
||||
mu.Lock()
|
||||
log.Printf("ok: %d, ko: %d", ok, ko)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
fmt.Println("server listening on port 9000")
|
||||
http.ListenAndServe(":9000", mux)
|
||||
}
|
||||
74
vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/main.go
generated
vendored
Normal file
74
vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/main.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/throttled/throttled.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
delay = flag.Duration("delay", 200*time.Millisecond, "delay between calls")
|
||||
bursts = flag.Int("bursts", 10, "number of bursts allowed")
|
||||
maxkeys = flag.Int("max-keys", 1000, "maximum number of keys")
|
||||
delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
|
||||
output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var h http.Handler
|
||||
var ok, ko int
|
||||
var mu sync.Mutex
|
||||
|
||||
// Keep the start time to print since-time
|
||||
start := time.Now()
|
||||
|
||||
// Create the interval throttler
|
||||
t := throttled.Interval(throttled.D(*delay), *bursts, &throttled.VaryBy{
|
||||
Path: true,
|
||||
}, *maxkeys)
|
||||
// Set the denied handler
|
||||
t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ko" {
|
||||
log.Printf("KO: %s", time.Since(start))
|
||||
}
|
||||
throttled.DefaultDeniedHandler.ServeHTTP(w, r)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ko++
|
||||
})
|
||||
|
||||
// Throttle the OK handler
|
||||
rand.Seed(time.Now().Unix())
|
||||
h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ok" {
|
||||
log.Printf("%s: ok: %s", r.URL.Path, time.Since(start))
|
||||
}
|
||||
if *delayRes > 0 {
|
||||
wait := time.Duration(rand.Intn(int(*delayRes)))
|
||||
time.Sleep(wait)
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ok++
|
||||
}))
|
||||
|
||||
// Print stats once in a while
|
||||
go func() {
|
||||
for _ = range time.Tick(10 * time.Second) {
|
||||
mu.Lock()
|
||||
log.Printf("ok: %d, ko: %d", ok, ko)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
fmt.Println("server listening on port 9000")
|
||||
http.ListenAndServe(":9000", h)
|
||||
}
|
||||
4
vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/siege-urls
generated
vendored
Normal file
4
vendor/gopkg.in/throttled/throttled.v1/examples/interval-vary/siege-urls
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
http://localhost:9000/a
|
||||
http://localhost:9000/b
|
||||
http://localhost:9000/c
|
||||
|
||||
69
vendor/gopkg.in/throttled/throttled.v1/examples/interval/main.go
generated
vendored
Normal file
69
vendor/gopkg.in/throttled/throttled.v1/examples/interval/main.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/throttled/throttled.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
delay = flag.Duration("delay", 200*time.Millisecond, "delay between calls")
|
||||
bursts = flag.Int("bursts", 10, "number of bursts allowed")
|
||||
delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
|
||||
output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var h http.Handler
|
||||
var ok, ko int
|
||||
var mu sync.Mutex
|
||||
|
||||
// Keep the start time to print since-time
|
||||
start := time.Now()
|
||||
// Create the interval throttler
|
||||
t := throttled.Interval(throttled.D(*delay), *bursts, nil, 0)
|
||||
// Set its denied handler
|
||||
t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ko" {
|
||||
log.Printf("KO: %s", time.Since(start))
|
||||
}
|
||||
throttled.DefaultDeniedHandler.ServeHTTP(w, r)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ko++
|
||||
})
|
||||
// Throttle the OK handler
|
||||
rand.Seed(time.Now().Unix())
|
||||
h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ok" {
|
||||
log.Printf("ok: %s", time.Since(start))
|
||||
}
|
||||
if *delayRes > 0 {
|
||||
wait := time.Duration(rand.Intn(int(*delayRes)))
|
||||
time.Sleep(wait)
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ok++
|
||||
}))
|
||||
|
||||
// Print stats once in a while
|
||||
go func() {
|
||||
for _ = range time.Tick(10 * time.Second) {
|
||||
mu.Lock()
|
||||
log.Printf("ok: %d, ko: %d", ok, ko)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
fmt.Println("server listening on port 9000")
|
||||
http.ListenAndServe(":9000", h)
|
||||
}
|
||||
97
vendor/gopkg.in/throttled/throttled.v1/examples/memstats/main.go
generated
vendored
Normal file
97
vendor/gopkg.in/throttled/throttled.v1/examples/memstats/main.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gopkg.in/throttled/throttled.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
numgc = flag.Int("gc", 0, "number of GC runs")
|
||||
mallocs = flag.Int("mallocs", 0, "number of mallocs")
|
||||
total = flag.Int("total", 0, "total number of bytes allocated")
|
||||
allocs = flag.Int("allocs", 0, "number of bytes allocated")
|
||||
refrate = flag.Duration("refresh", 0, "refresh rate of the memory stats")
|
||||
delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
|
||||
output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var h http.Handler
|
||||
var ok, ko int
|
||||
var mu sync.Mutex
|
||||
|
||||
// Keep the start time to print since-time
|
||||
start := time.Now()
|
||||
// Create the thresholds struct
|
||||
thresh := throttled.MemThresholds(&runtime.MemStats{
|
||||
NumGC: uint32(*numgc),
|
||||
Mallocs: uint64(*mallocs),
|
||||
TotalAlloc: uint64(*total),
|
||||
Alloc: uint64(*allocs),
|
||||
})
|
||||
if *output != "q" {
|
||||
log.Printf("thresholds: NumGC: %d, Mallocs: %d, Alloc: %dKb, Total: %dKb", thresh.NumGC, thresh.Mallocs, thresh.Alloc/1024, thresh.TotalAlloc/1024)
|
||||
}
|
||||
// Create the MemStats throttler
|
||||
t := throttled.MemStats(thresh, *refrate)
|
||||
// Set its denied handler
|
||||
t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ko" {
|
||||
log.Printf("KO: %s", time.Since(start))
|
||||
}
|
||||
throttled.DefaultDeniedHandler.ServeHTTP(w, r)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ko++
|
||||
})
|
||||
|
||||
// Throttle the OK handler
|
||||
rand.Seed(time.Now().Unix())
|
||||
h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ok" {
|
||||
log.Printf("ok: %s", time.Since(start))
|
||||
}
|
||||
if *delayRes > 0 {
|
||||
wait := time.Duration(rand.Intn(int(*delayRes)))
|
||||
time.Sleep(wait)
|
||||
}
|
||||
// Read the whole file in memory, to actually use 64Kb (instead of streaming to w)
|
||||
b, err := ioutil.ReadFile("test-file")
|
||||
if err != nil {
|
||||
throttled.Error(w, r, err)
|
||||
return
|
||||
}
|
||||
_, err = w.Write(b)
|
||||
if err != nil {
|
||||
throttled.Error(w, r, err)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ok++
|
||||
}))
|
||||
|
||||
// Print stats once in a while
|
||||
go func() {
|
||||
var mem runtime.MemStats
|
||||
for _ = range time.Tick(10 * time.Second) {
|
||||
mu.Lock()
|
||||
runtime.ReadMemStats(&mem)
|
||||
log.Printf("ok: %d, ko: %d", ok, ko)
|
||||
log.Printf("TotalAllocs: %d Kb, Allocs: %d Kb, Mallocs: %d, NumGC: %d", mem.TotalAlloc/1024, mem.Alloc/1024, mem.Mallocs, mem.NumGC)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
fmt.Println("server listening on port 9000")
|
||||
http.ListenAndServe(":9000", h)
|
||||
}
|
||||
BIN
vendor/gopkg.in/throttled/throttled.v1/examples/memstats/test-file
generated
vendored
Normal file
BIN
vendor/gopkg.in/throttled/throttled.v1/examples/memstats/test-file
generated
vendored
Normal file
Binary file not shown.
101
vendor/gopkg.in/throttled/throttled.v1/examples/rate-limit/main.go
generated
vendored
Normal file
101
vendor/gopkg.in/throttled/throttled.v1/examples/rate-limit/main.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"gopkg.in/throttled/throttled.v1"
|
||||
"gopkg.in/throttled/throttled.v1/store"
|
||||
)
|
||||
|
||||
var (
|
||||
requests = flag.Int("requests", 10, "number of requests allowed in the time window")
|
||||
window = flag.Duration("window", time.Minute, "time window for the limit of requests")
|
||||
storeType = flag.String("store", "mem", "store to use, one of `mem` or `redis` (on default localhost port)")
|
||||
delayRes = flag.Duration("delay-response", 0, "delay the response by a random duration between 0 and this value")
|
||||
output = flag.String("output", "v", "type of output, one of `v`erbose, `q`uiet, `ok`-only, `ko`-only")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var h http.Handler
|
||||
var ok, ko int
|
||||
var mu sync.Mutex
|
||||
var st throttled.Store
|
||||
|
||||
// Keep the start time to print since-time
|
||||
start := time.Now()
|
||||
// Create the rate-limit store
|
||||
switch *storeType {
|
||||
case "mem":
|
||||
st = store.NewMemStore(0)
|
||||
case "redis":
|
||||
st = store.NewRedisStore(setupRedis(), "throttled:", 0)
|
||||
default:
|
||||
log.Fatalf("unsupported store: %s", *storeType)
|
||||
}
|
||||
// Create the rate-limit throttler, varying on path
|
||||
t := throttled.RateLimit(throttled.Q{Requests: *requests, Window: *window}, &throttled.VaryBy{
|
||||
Path: true,
|
||||
}, st)
|
||||
|
||||
// Set its denied handler
|
||||
t.DeniedHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ko" {
|
||||
log.Printf("KO: %s", time.Since(start))
|
||||
}
|
||||
throttled.DefaultDeniedHandler.ServeHTTP(w, r)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ko++
|
||||
})
|
||||
|
||||
// Throttle the OK handler
|
||||
rand.Seed(time.Now().Unix())
|
||||
h = t.Throttle(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if *output == "v" || *output == "ok" {
|
||||
log.Printf("ok: %s", time.Since(start))
|
||||
}
|
||||
if *delayRes > 0 {
|
||||
wait := time.Duration(rand.Intn(int(*delayRes)))
|
||||
time.Sleep(wait)
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
ok++
|
||||
}))
|
||||
|
||||
// Print stats once in a while
|
||||
go func() {
|
||||
for _ = range time.Tick(10 * time.Second) {
|
||||
mu.Lock()
|
||||
log.Printf("ok: %d, ko: %d", ok, ko)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
fmt.Println("server listening on port 9000")
|
||||
http.ListenAndServe(":9000", h)
|
||||
}
|
||||
|
||||
func setupRedis() *redis.Pool {
|
||||
pool := &redis.Pool{
|
||||
MaxIdle: 3,
|
||||
IdleTimeout: 30 * time.Second,
|
||||
Dial: func() (redis.Conn, error) {
|
||||
return redis.Dial("tcp", ":6379")
|
||||
},
|
||||
TestOnBorrow: func(c redis.Conn, t time.Time) error {
|
||||
_, err := c.Do("PING")
|
||||
return err
|
||||
},
|
||||
}
|
||||
return pool
|
||||
}
|
||||
114
vendor/gopkg.in/throttled/throttled.v1/interval_test.go
generated
vendored
Normal file
114
vendor/gopkg.in/throttled/throttled.v1/interval_test.go
generated
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
package throttled
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/PuerkitoBio/boom/commands"
|
||||
)
|
||||
|
||||
// TestInterval runs boom load tests against an Interval throttler and
// checks that the observed success RPS stays near the configured rate.
// Skipped under -short because it generates real HTTP load.
func TestInterval(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	cases := []struct {
		n      int // total requests to send
		c      int // client concurrency
		rps    int // configured requests per second
		bursts int // allowed burst size
	}{
		0: {60, 10, 20, 100},
		1: {300, 20, 100, 100},
		2: {10, 10, 1, 10},
		3: {1000, 100, 1000, 100},
	}
	for i, c := range cases {
		// Setup the stats handler
		st := &stats{}
		// Create the throttler
		th := Interval(PerSec(c.rps), c.bursts, nil, 0)
		th.DeniedHandler = http.HandlerFunc(st.DeniedHTTP)
		b := commands.Boom{
			Req:    &commands.ReqOpts{},
			N:      c.n,
			C:      c.c,
			Output: "quiet",
		}
		// Run the test
		rpts := runTest(th.Throttle(st), b)
		// Assert results
		for _, rpt := range rpts {
			assertRPS(t, i, c.rps, rpt)
		}
		assertStats(t, i, st, rpts)
	}
}
|
||||
|
||||
// TestIntervalVary runs several boom generators (one per URL path) against
// a shared Interval throttler. Skipped under -short.
//
// NOTE(review): despite the name, the throttler is created with a nil
// VaryBy (Interval(..., nil, 0)), so every URL shares one interval —
// confirm whether &VaryBy{Path: true} with c.urls keys was intended.
func TestIntervalVary(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	cases := []struct {
		n      int // requests per generator
		c      int // client concurrency per generator
		urls   int // number of distinct URL paths / generators
		rps    int // configured requests per second
		bursts int // allowed burst size
	}{
		0: {60, 10, 3, 20, 100},
		1: {300, 20, 3, 100, 100},
		2: {10, 10, 3, 1, 10},
		3: {500, 10, 2, 1000, 100},
	}
	for i, c := range cases {
		// Setup the stats handler
		st := &stats{}
		// Create the throttler
		th := Interval(PerSec(c.rps), c.bursts, nil, 0)
		th.DeniedHandler = http.HandlerFunc(st.DeniedHTTP)
		var booms []commands.Boom
		for j := 0; j < c.urls; j++ {
			booms = append(booms, commands.Boom{
				Req:    &commands.ReqOpts{},
				N:      c.n,
				C:      c.c,
				Output: "quiet",
			})
		}
		// Run the test
		rpts := runTest(th.Throttle(st), booms...)
		// Assert results
		for _, rpt := range rpts {
			assertRPS(t, i, c.rps, rpt)
		}
		assertStats(t, i, st, rpts)
	}
}
|
||||
|
||||
func assertRPS(t *testing.T, ix int, exp int, rpt *commands.Report) {
|
||||
wigglef := 0.2 * float64(exp)
|
||||
if rpt.SuccessRPS < float64(exp)-wigglef || rpt.SuccessRPS > float64(exp)+wigglef {
|
||||
t.Errorf("%d: expected RPS to be around %d, got %f", ix, exp, rpt.SuccessRPS)
|
||||
}
|
||||
}
|
||||
|
||||
func assertStats(t *testing.T, ix int, st *stats, rpts []*commands.Report) {
|
||||
ok, ko, _ := st.Stats()
|
||||
var twos, fives, max int
|
||||
for _, rpt := range rpts {
|
||||
twos += rpt.StatusCodeDist[200]
|
||||
fives += rpt.StatusCodeDist[deniedStatus]
|
||||
if len(rpt.StatusCodeDist) > max {
|
||||
max = len(rpt.StatusCodeDist)
|
||||
}
|
||||
}
|
||||
if ok != twos {
|
||||
t.Errorf("%d: expected %d status 200, got %d", ix, twos, ok)
|
||||
}
|
||||
if ko != fives {
|
||||
t.Errorf("%d: expected %d status 429, got %d", ix, fives, ok)
|
||||
}
|
||||
if max > 2 {
|
||||
t.Errorf("%d: expected at most 2 different status codes, got %d", ix, max)
|
||||
}
|
||||
}
|
||||
64
vendor/gopkg.in/throttled/throttled.v1/memstats_test.go
generated
vendored
Normal file
64
vendor/gopkg.in/throttled/throttled.v1/memstats_test.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
package throttled
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/PuerkitoBio/boom/commands"
|
||||
)
|
||||
|
||||
// TestMemStats load-tests a MemStats throttler configured with several
// GC/allocation thresholds, then verifies both the request stats and that
// the process memory actually crossed the thresholds. Skipped under -short.
func TestMemStats(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	cases := []struct {
		n     int           // total requests
		c     int           // client concurrency
		gc    uint32        // NumGC threshold
		total uint64        // TotalAlloc threshold
		rate  time.Duration // stats refresh rate (0 = read every time)
	}{
		0: {1000, 10, 3, 0, 0},
		1: {200, 10, 0, 600000, 0},
		2: {500, 10, 2, 555555, 10 * time.Millisecond},
	}
	for i, c := range cases {
		// Setup the stats handler
		st := &stats{}
		// Create the throttler
		limit := MemThresholds(&runtime.MemStats{NumGC: c.gc, TotalAlloc: c.total})
		th := MemStats(limit, c.rate)
		th.DeniedHandler = http.HandlerFunc(st.DeniedHTTP)
		// Run the test
		b := commands.Boom{
			Req:    &commands.ReqOpts{},
			N:      c.n,
			C:      c.c,
			Output: "quiet",
		}
		rpts := runTest(th.Throttle(st), b)
		// Assert results
		assertStats(t, i, st, rpts)
		assertMem(t, i, limit)
	}
}
|
||||
|
||||
func assertMem(t *testing.T, ix int, limit *runtime.MemStats) {
|
||||
var mem runtime.MemStats
|
||||
runtime.ReadMemStats(&mem)
|
||||
if mem.NumGC < limit.NumGC {
|
||||
t.Errorf("%d: expected gc to be at least %d, got %d", ix, limit.NumGC, mem.NumGC)
|
||||
}
|
||||
if mem.TotalAlloc < limit.TotalAlloc {
|
||||
t.Errorf("%d: expected total alloc to be at least %dKb, got %dKb", ix, limit.TotalAlloc/1024, mem.TotalAlloc/1024)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkReadMemStats measures the per-call cost of runtime.ReadMemStats.
func BenchmarkReadMemStats(b *testing.B) {
	var snapshot runtime.MemStats
	for n := 0; n < b.N; n++ {
		runtime.ReadMemStats(&snapshot)
	}
}
|
||||
38
vendor/gopkg.in/throttled/throttled.v1/misc/pre-commit
generated
vendored
Executable file
38
vendor/gopkg.in/throttled/throttled.v1/misc/pre-commit
generated
vendored
Executable file
@@ -0,0 +1,38 @@
|
||||
#!/bin/sh
# Copyright 2012 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

# git gofmt pre-commit hook
#
# To use, store as .git/hooks/pre-commit inside your repository and make sure
# it has execute permissions.
#
# This script does not handle file names that contain spaces.

# golint is purely informational, it doesn't fail with exit code != 0 if it finds something,
# because it may find a lot of false positives. Just print out its result for information.
echo "lint result (informational only):"
echo
golint .

# go vet returns 1 if an error was found. Exit the hook with this exit code.
go vet ./...
vetres=$?

# Check for gofmt problems and report if any.
# Only staged (Added/Copied/Modified) .go files are considered.
gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$')
[ -z "$gofiles" ] && echo "EXIT $vetres" && exit $vetres

unformatted=$(gofmt -l $gofiles)
[ -z "$unformatted" ] && echo "EXIT $vetres" && exit $vetres

# Some files are not gofmt'd. Print message and fail.

echo >&2 "Go files must be formatted with gofmt. Please run:"
for fn in $unformatted; do
	echo >&2 "  gofmt -w $PWD/$fn"
done

echo "EXIT 1"
exit 1
||||
101
vendor/gopkg.in/throttled/throttled.v1/rate_test.go
generated
vendored
Normal file
101
vendor/gopkg.in/throttled/throttled.v1/rate_test.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
package throttled
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const deniedStatus = 429
|
||||
|
||||
// mapStore is a simple in-memory store for tests; it is NOT safe for
// concurrent access.
type mapStore struct {
	cnt map[string]int       // per-key request counts
	ts  map[string]time.Time // per-key window start times
}
|
||||
|
||||
func newMapStore() *mapStore {
|
||||
return &mapStore{
|
||||
make(map[string]int),
|
||||
make(map[string]time.Time),
|
||||
}
|
||||
}
|
||||
func (ms *mapStore) Incr(key string, window time.Duration) (int, int, error) {
|
||||
if _, ok := ms.cnt[key]; !ok {
|
||||
return 0, 0, ErrNoSuchKey
|
||||
}
|
||||
ms.cnt[key]++
|
||||
ts := ms.ts[key]
|
||||
return ms.cnt[key], RemainingSeconds(ts, window), nil
|
||||
}
|
||||
func (ms *mapStore) Reset(key string, win time.Duration) error {
|
||||
ms.cnt[key] = 1
|
||||
ms.ts[key] = time.Now().UTC()
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestRateLimit(t *testing.T) {
|
||||
quota := Q{5, 5 * time.Second}
|
||||
cases := []struct {
|
||||
limit, remain, reset, status int
|
||||
}{
|
||||
0: {5, 4, 5, 200},
|
||||
1: {5, 3, 4, 200},
|
||||
2: {5, 2, 4, 200},
|
||||
3: {5, 1, 3, 200},
|
||||
4: {5, 0, 3, 200},
|
||||
5: {5, 0, 2, deniedStatus},
|
||||
}
|
||||
// Limit the requests to 2 per second
|
||||
th := Interval(PerSec(2), 0, nil, 0)
|
||||
// Rate limit
|
||||
rl := RateLimit(quota, nil, newMapStore())
|
||||
// Create the stats
|
||||
st := &stats{}
|
||||
// Create the handler
|
||||
h := th.Throttle(rl.Throttle(st))
|
||||
|
||||
// Start the server
|
||||
srv := httptest.NewServer(h)
|
||||
defer srv.Close()
|
||||
for i, c := range cases {
|
||||
callRateLimited(t, i, c.limit, c.remain, c.reset, c.status, srv.URL)
|
||||
}
|
||||
// Wait 3 seconds and call again, should start a new window
|
||||
time.Sleep(3 * time.Second)
|
||||
callRateLimited(t, len(cases), 5, 4, 5, 200, srv.URL)
|
||||
}
|
||||
|
||||
func callRateLimited(t *testing.T, i, limit, remain, reset, status int, url string) {
|
||||
res, err := http.Get(url)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
// Assert status code
|
||||
if status != res.StatusCode {
|
||||
t.Errorf("%d: expected status %d, got %d", i, status, res.StatusCode)
|
||||
}
|
||||
// Assert headers
|
||||
if v := res.Header.Get("X-RateLimit-Limit"); v != strconv.Itoa(limit) {
|
||||
t.Errorf("%d: expected limit header to be %d, got %s", i, limit, v)
|
||||
}
|
||||
if v := res.Header.Get("X-RateLimit-Remaining"); v != strconv.Itoa(remain) {
|
||||
t.Errorf("%d: expected remain header to be %d, got %s", i, remain, v)
|
||||
}
|
||||
// Allow 1 second wiggle room
|
||||
v := res.Header.Get("X-RateLimit-Reset")
|
||||
vi, _ := strconv.Atoi(v)
|
||||
if vi < reset-1 || vi > reset+1 {
|
||||
t.Errorf("%d: expected reset header to be close to %d, got %d", i, reset, vi)
|
||||
}
|
||||
if status == deniedStatus {
|
||||
v := res.Header.Get("Retry-After")
|
||||
vi, _ := strconv.Atoi(v)
|
||||
if vi < reset-1 || vi > reset+1 {
|
||||
t.Errorf("%d: expected retry after header to be close to %d, got %d", i, reset, vi)
|
||||
}
|
||||
}
|
||||
}
|
||||
2
vendor/gopkg.in/throttled/throttled.v1/store/doc.go
generated
vendored
2
vendor/gopkg.in/throttled/throttled.v1/store/doc.go
generated
vendored
@@ -1,2 +1,2 @@
|
||||
// Package store offers a memory-based and a Redis-based throttled.Store implementation.
|
||||
package store
|
||||
package store // import "gopkg.in/throttled/throttled.v1/store"
|
||||
|
||||
43
vendor/gopkg.in/throttled/throttled.v1/store/mem_test.go
generated
vendored
Normal file
43
vendor/gopkg.in/throttled/throttled.v1/store/mem_test.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestMemStore(t *testing.T) {
|
||||
st := NewMemStore(0)
|
||||
win := time.Second
|
||||
|
||||
// Reset stores a key with count of 1, current timestamp
|
||||
err := st.Reset("k", time.Second)
|
||||
if err != nil {
|
||||
t.Errorf("expected reset to return nil, got %s", err)
|
||||
}
|
||||
cnt, sec1, _ := st.Incr("k", win)
|
||||
if cnt != 2 {
|
||||
t.Errorf("expected reset+incr to set count to 2, got %d", cnt)
|
||||
}
|
||||
|
||||
// Incr increments the key, keeps same timestamp
|
||||
cnt, sec2, err := st.Incr("k", win)
|
||||
if err != nil {
|
||||
t.Errorf("expected 2nd incr to return nil error, got %s", err)
|
||||
}
|
||||
if cnt != 3 {
|
||||
t.Errorf("expected 2nd incr to return 3, got %d", cnt)
|
||||
}
|
||||
if sec1 != sec2 {
|
||||
t.Errorf("expected 2nd incr to return %d secs, got %d", sec1, sec2)
|
||||
}
|
||||
|
||||
// Reset on existing key brings it back to 1, new timestamp
|
||||
err = st.Reset("k", win)
|
||||
if err != nil {
|
||||
t.Errorf("expected reset on existing key to return nil, got %s", err)
|
||||
}
|
||||
cnt, _, _ = st.Incr("k", win)
|
||||
if cnt != 2 {
|
||||
t.Errorf("expected last reset+incr to return 2, got %d", cnt)
|
||||
}
|
||||
}
|
||||
66
vendor/gopkg.in/throttled/throttled.v1/store/redis_test.go
generated
vendored
Normal file
66
vendor/gopkg.in/throttled/throttled.v1/store/redis_test.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
)
|
||||
|
||||
func getPool() *redis.Pool {
|
||||
pool := &redis.Pool{
|
||||
MaxIdle: 3,
|
||||
IdleTimeout: 30 * time.Second,
|
||||
Dial: func() (redis.Conn, error) {
|
||||
return redis.Dial("tcp", ":6379")
|
||||
},
|
||||
TestOnBorrow: func(c redis.Conn, t time.Time) error {
|
||||
_, err := c.Do("PING")
|
||||
return err
|
||||
},
|
||||
}
|
||||
return pool
|
||||
}
|
||||
|
||||
func TestRedisStore(t *testing.T) {
|
||||
pool := getPool()
|
||||
c := pool.Get()
|
||||
if _, err := redis.String(c.Do("PING")); err != nil {
|
||||
c.Close()
|
||||
t.Skip("redis server not available on localhost port 6379")
|
||||
}
|
||||
st := NewRedisStore(pool, "throttled:", 1)
|
||||
win := 2 * time.Second
|
||||
|
||||
// Incr increments the key, even if it does not exist
|
||||
cnt, secs, err := st.Incr("k", win)
|
||||
if err != nil {
|
||||
t.Errorf("expected initial incr to return nil error, got %s", err)
|
||||
}
|
||||
if cnt != 1 {
|
||||
t.Errorf("expected initial incr to return 1, got %d", cnt)
|
||||
}
|
||||
if secs != int(win.Seconds()) {
|
||||
t.Errorf("expected initial incr to return %d secs, got %d", int(win.Seconds()), secs)
|
||||
}
|
||||
|
||||
// Waiting a second diminishes the remaining seconds
|
||||
time.Sleep(time.Second)
|
||||
_, sec2, _ := st.Incr("k", win)
|
||||
if sec2 != secs-1 {
|
||||
t.Errorf("expected 2nd incr after a 1s sleep to return %d secs, got %d", secs-1, sec2)
|
||||
}
|
||||
|
||||
// Waiting a second so the key expires, Incr should set back to 1, initial secs
|
||||
time.Sleep(1100 * time.Millisecond)
|
||||
cnt, sec3, err := st.Incr("k", win)
|
||||
if err != nil {
|
||||
t.Errorf("expected last incr to return nil error, got %s", err)
|
||||
}
|
||||
if cnt != 1 {
|
||||
t.Errorf("expected last incr to return 1, got %d", cnt)
|
||||
}
|
||||
if sec3 != int(win.Seconds()) {
|
||||
t.Errorf("expected last incr to return %d secs, got %d", int(win.Seconds()), sec3)
|
||||
}
|
||||
}
|
||||
56
vendor/gopkg.in/throttled/throttled.v1/varyby_test.go
generated
vendored
Normal file
56
vendor/gopkg.in/throttled/throttled.v1/varyby_test.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
package throttled
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestVaryBy(t *testing.T) {
|
||||
u, err := url.Parse("http://localhost/test/path?q=s")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ck := &http.Cookie{Name: "ssn", Value: "test"}
|
||||
cases := []struct {
|
||||
vb *VaryBy
|
||||
r *http.Request
|
||||
k string
|
||||
}{
|
||||
0: {nil, &http.Request{}, ""},
|
||||
1: {&VaryBy{RemoteAddr: true}, &http.Request{RemoteAddr: "::"}, "::\n"},
|
||||
2: {
|
||||
&VaryBy{Method: true, Path: true},
|
||||
&http.Request{Method: "POST", URL: u},
|
||||
"post\n/test/path\n",
|
||||
},
|
||||
3: {
|
||||
&VaryBy{Headers: []string{"Content-length"}},
|
||||
&http.Request{Header: http.Header{"Content-Type": []string{"text/plain"}, "Content-Length": []string{"123"}}},
|
||||
"123\n",
|
||||
},
|
||||
4: {
|
||||
&VaryBy{Separator: ",", Method: true, Headers: []string{"Content-length"}, Params: []string{"q", "user"}},
|
||||
&http.Request{Method: "GET", Header: http.Header{"Content-Type": []string{"text/plain"}, "Content-Length": []string{"123"}}, Form: url.Values{"q": []string{"s"}, "pwd": []string{"secret"}, "user": []string{"test"}}},
|
||||
"get,123,s,test,",
|
||||
},
|
||||
5: {
|
||||
&VaryBy{Cookies: []string{"ssn"}},
|
||||
&http.Request{Header: http.Header{"Cookie": []string{ck.String()}}},
|
||||
"test\n",
|
||||
},
|
||||
6: {
|
||||
&VaryBy{Cookies: []string{"ssn"}, RemoteAddr: true, Custom: func(r *http.Request) string {
|
||||
return "blah"
|
||||
}},
|
||||
&http.Request{Header: http.Header{"Cookie": []string{ck.String()}}},
|
||||
"blah",
|
||||
},
|
||||
}
|
||||
for i, c := range cases {
|
||||
got := c.vb.Key(c.r)
|
||||
if got != c.k {
|
||||
t.Errorf("%d: expected '%s' (%d), got '%s' (%d)", i, c.k, len(c.k), got, len(got))
|
||||
}
|
||||
}
|
||||
}
|
||||
988
vendor/gopkg.in/yaml.v2/decode_test.go
generated
vendored
Normal file
988
vendor/gopkg.in/yaml.v2/decode_test.go
generated
vendored
Normal file
@@ -0,0 +1,988 @@
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
. "gopkg.in/check.v1"
|
||||
"gopkg.in/yaml.v2"
|
||||
"math"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var unmarshalIntTest = 123
|
||||
|
||||
var unmarshalTests = []struct {
|
||||
data string
|
||||
value interface{}
|
||||
}{
|
||||
{
|
||||
"",
|
||||
&struct{}{},
|
||||
}, {
|
||||
"{}", &struct{}{},
|
||||
}, {
|
||||
"v: hi",
|
||||
map[string]string{"v": "hi"},
|
||||
}, {
|
||||
"v: hi", map[string]interface{}{"v": "hi"},
|
||||
}, {
|
||||
"v: true",
|
||||
map[string]string{"v": "true"},
|
||||
}, {
|
||||
"v: true",
|
||||
map[string]interface{}{"v": true},
|
||||
}, {
|
||||
"v: 10",
|
||||
map[string]interface{}{"v": 10},
|
||||
}, {
|
||||
"v: 0b10",
|
||||
map[string]interface{}{"v": 2},
|
||||
}, {
|
||||
"v: 0xA",
|
||||
map[string]interface{}{"v": 10},
|
||||
}, {
|
||||
"v: 4294967296",
|
||||
map[string]int64{"v": 4294967296},
|
||||
}, {
|
||||
"v: 0.1",
|
||||
map[string]interface{}{"v": 0.1},
|
||||
}, {
|
||||
"v: .1",
|
||||
map[string]interface{}{"v": 0.1},
|
||||
}, {
|
||||
"v: .Inf",
|
||||
map[string]interface{}{"v": math.Inf(+1)},
|
||||
}, {
|
||||
"v: -.Inf",
|
||||
map[string]interface{}{"v": math.Inf(-1)},
|
||||
}, {
|
||||
"v: -10",
|
||||
map[string]interface{}{"v": -10},
|
||||
}, {
|
||||
"v: -.1",
|
||||
map[string]interface{}{"v": -0.1},
|
||||
},
|
||||
|
||||
// Simple values.
|
||||
{
|
||||
"123",
|
||||
&unmarshalIntTest,
|
||||
},
|
||||
|
||||
// Floats from spec
|
||||
{
|
||||
"canonical: 6.8523e+5",
|
||||
map[string]interface{}{"canonical": 6.8523e+5},
|
||||
}, {
|
||||
"expo: 685.230_15e+03",
|
||||
map[string]interface{}{"expo": 685.23015e+03},
|
||||
}, {
|
||||
"fixed: 685_230.15",
|
||||
map[string]interface{}{"fixed": 685230.15},
|
||||
}, {
|
||||
"neginf: -.inf",
|
||||
map[string]interface{}{"neginf": math.Inf(-1)},
|
||||
}, {
|
||||
"fixed: 685_230.15",
|
||||
map[string]float64{"fixed": 685230.15},
|
||||
},
|
||||
//{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported
|
||||
//{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails.
|
||||
|
||||
// Bools from spec
|
||||
{
|
||||
"canonical: y",
|
||||
map[string]interface{}{"canonical": true},
|
||||
}, {
|
||||
"answer: NO",
|
||||
map[string]interface{}{"answer": false},
|
||||
}, {
|
||||
"logical: True",
|
||||
map[string]interface{}{"logical": true},
|
||||
}, {
|
||||
"option: on",
|
||||
map[string]interface{}{"option": true},
|
||||
}, {
|
||||
"option: on",
|
||||
map[string]bool{"option": true},
|
||||
},
|
||||
// Ints from spec
|
||||
{
|
||||
"canonical: 685230",
|
||||
map[string]interface{}{"canonical": 685230},
|
||||
}, {
|
||||
"decimal: +685_230",
|
||||
map[string]interface{}{"decimal": 685230},
|
||||
}, {
|
||||
"octal: 02472256",
|
||||
map[string]interface{}{"octal": 685230},
|
||||
}, {
|
||||
"hexa: 0x_0A_74_AE",
|
||||
map[string]interface{}{"hexa": 685230},
|
||||
}, {
|
||||
"bin: 0b1010_0111_0100_1010_1110",
|
||||
map[string]interface{}{"bin": 685230},
|
||||
}, {
|
||||
"bin: -0b101010",
|
||||
map[string]interface{}{"bin": -42},
|
||||
}, {
|
||||
"decimal: +685_230",
|
||||
map[string]int{"decimal": 685230},
|
||||
},
|
||||
|
||||
//{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported
|
||||
|
||||
// Nulls from spec
|
||||
{
|
||||
"empty:",
|
||||
map[string]interface{}{"empty": nil},
|
||||
}, {
|
||||
"canonical: ~",
|
||||
map[string]interface{}{"canonical": nil},
|
||||
}, {
|
||||
"english: null",
|
||||
map[string]interface{}{"english": nil},
|
||||
}, {
|
||||
"~: null key",
|
||||
map[interface{}]string{nil: "null key"},
|
||||
}, {
|
||||
"empty:",
|
||||
map[string]*bool{"empty": nil},
|
||||
},
|
||||
|
||||
// Flow sequence
|
||||
{
|
||||
"seq: [A,B]",
|
||||
map[string]interface{}{"seq": []interface{}{"A", "B"}},
|
||||
}, {
|
||||
"seq: [A,B,C,]",
|
||||
map[string][]string{"seq": []string{"A", "B", "C"}},
|
||||
}, {
|
||||
"seq: [A,1,C]",
|
||||
map[string][]string{"seq": []string{"A", "1", "C"}},
|
||||
}, {
|
||||
"seq: [A,1,C]",
|
||||
map[string][]int{"seq": []int{1}},
|
||||
}, {
|
||||
"seq: [A,1,C]",
|
||||
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
|
||||
},
|
||||
// Block sequence
|
||||
{
|
||||
"seq:\n - A\n - B",
|
||||
map[string]interface{}{"seq": []interface{}{"A", "B"}},
|
||||
}, {
|
||||
"seq:\n - A\n - B\n - C",
|
||||
map[string][]string{"seq": []string{"A", "B", "C"}},
|
||||
}, {
|
||||
"seq:\n - A\n - 1\n - C",
|
||||
map[string][]string{"seq": []string{"A", "1", "C"}},
|
||||
}, {
|
||||
"seq:\n - A\n - 1\n - C",
|
||||
map[string][]int{"seq": []int{1}},
|
||||
}, {
|
||||
"seq:\n - A\n - 1\n - C",
|
||||
map[string]interface{}{"seq": []interface{}{"A", 1, "C"}},
|
||||
},
|
||||
|
||||
// Literal block scalar
|
||||
{
|
||||
"scalar: | # Comment\n\n literal\n\n \ttext\n\n",
|
||||
map[string]string{"scalar": "\nliteral\n\n\ttext\n"},
|
||||
},
|
||||
|
||||
// Folded block scalar
|
||||
{
|
||||
"scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n",
|
||||
map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"},
|
||||
},
|
||||
|
||||
// Map inside interface with no type hints.
|
||||
{
|
||||
"a: {b: c}",
|
||||
map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
|
||||
},
|
||||
|
||||
// Structs and type conversions.
|
||||
{
|
||||
"hello: world",
|
||||
&struct{ Hello string }{"world"},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A struct{ B string } }{struct{ B string }{"c"}},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A *struct{ B string } }{&struct{ B string }{"c"}},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A map[string]string }{map[string]string{"b": "c"}},
|
||||
}, {
|
||||
"a: {b: c}",
|
||||
&struct{ A *map[string]string }{&map[string]string{"b": "c"}},
|
||||
}, {
|
||||
"a:",
|
||||
&struct{ A map[string]string }{},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct{ A int }{1},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct{ A float64 }{1},
|
||||
}, {
|
||||
"a: 1.0",
|
||||
&struct{ A int }{1},
|
||||
}, {
|
||||
"a: 1.0",
|
||||
&struct{ A uint }{1},
|
||||
}, {
|
||||
"a: [1, 2]",
|
||||
&struct{ A []int }{[]int{1, 2}},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct{ B int }{0},
|
||||
}, {
|
||||
"a: 1",
|
||||
&struct {
|
||||
B int "a"
|
||||
}{1},
|
||||
}, {
|
||||
"a: y",
|
||||
&struct{ A bool }{true},
|
||||
},
|
||||
|
||||
// Some cross type conversions
|
||||
{
|
||||
"v: 42",
|
||||
map[string]uint{"v": 42},
|
||||
}, {
|
||||
"v: -42",
|
||||
map[string]uint{},
|
||||
}, {
|
||||
"v: 4294967296",
|
||||
map[string]uint64{"v": 4294967296},
|
||||
}, {
|
||||
"v: -4294967296",
|
||||
map[string]uint64{},
|
||||
},
|
||||
|
||||
// int
|
||||
{
|
||||
"int_max: 2147483647",
|
||||
map[string]int{"int_max": math.MaxInt32},
|
||||
},
|
||||
{
|
||||
"int_min: -2147483648",
|
||||
map[string]int{"int_min": math.MinInt32},
|
||||
},
|
||||
{
|
||||
"int_overflow: 9223372036854775808", // math.MaxInt64 + 1
|
||||
map[string]int{},
|
||||
},
|
||||
|
||||
// int64
|
||||
{
|
||||
"int64_max: 9223372036854775807",
|
||||
map[string]int64{"int64_max": math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111",
|
||||
map[string]int64{"int64_max_base2": math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"int64_min: -9223372036854775808",
|
||||
map[string]int64{"int64_min": math.MinInt64},
|
||||
},
|
||||
{
|
||||
"int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111",
|
||||
map[string]int64{"int64_neg_base2": -math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"int64_overflow: 9223372036854775808", // math.MaxInt64 + 1
|
||||
map[string]int64{},
|
||||
},
|
||||
|
||||
// uint
|
||||
{
|
||||
"uint_min: 0",
|
||||
map[string]uint{"uint_min": 0},
|
||||
},
|
||||
{
|
||||
"uint_max: 4294967295",
|
||||
map[string]uint{"uint_max": math.MaxUint32},
|
||||
},
|
||||
{
|
||||
"uint_underflow: -1",
|
||||
map[string]uint{},
|
||||
},
|
||||
|
||||
// uint64
|
||||
{
|
||||
"uint64_min: 0",
|
||||
map[string]uint{"uint64_min": 0},
|
||||
},
|
||||
{
|
||||
"uint64_max: 18446744073709551615",
|
||||
map[string]uint64{"uint64_max": math.MaxUint64},
|
||||
},
|
||||
{
|
||||
"uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111",
|
||||
map[string]uint64{"uint64_max_base2": math.MaxUint64},
|
||||
},
|
||||
{
|
||||
"uint64_maxint64: 9223372036854775807",
|
||||
map[string]uint64{"uint64_maxint64": math.MaxInt64},
|
||||
},
|
||||
{
|
||||
"uint64_underflow: -1",
|
||||
map[string]uint64{},
|
||||
},
|
||||
|
||||
// float32
|
||||
{
|
||||
"float32_max: 3.40282346638528859811704183484516925440e+38",
|
||||
map[string]float32{"float32_max": math.MaxFloat32},
|
||||
},
|
||||
{
|
||||
"float32_nonzero: 1.401298464324817070923729583289916131280e-45",
|
||||
map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32},
|
||||
},
|
||||
{
|
||||
"float32_maxuint64: 18446744073709551615",
|
||||
map[string]float32{"float32_maxuint64": float32(math.MaxUint64)},
|
||||
},
|
||||
{
|
||||
"float32_maxuint64+1: 18446744073709551616",
|
||||
map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)},
|
||||
},
|
||||
|
||||
// float64
|
||||
{
|
||||
"float64_max: 1.797693134862315708145274237317043567981e+308",
|
||||
map[string]float64{"float64_max": math.MaxFloat64},
|
||||
},
|
||||
{
|
||||
"float64_nonzero: 4.940656458412465441765687928682213723651e-324",
|
||||
map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64},
|
||||
},
|
||||
{
|
||||
"float64_maxuint64: 18446744073709551615",
|
||||
map[string]float64{"float64_maxuint64": float64(math.MaxUint64)},
|
||||
},
|
||||
{
|
||||
"float64_maxuint64+1: 18446744073709551616",
|
||||
map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)},
|
||||
},
|
||||
|
||||
// Overflow cases.
|
||||
{
|
||||
"v: 4294967297",
|
||||
map[string]int32{},
|
||||
}, {
|
||||
"v: 128",
|
||||
map[string]int8{},
|
||||
},
|
||||
|
||||
// Quoted values.
|
||||
{
|
||||
"'1': '\"2\"'",
|
||||
map[interface{}]interface{}{"1": "\"2\""},
|
||||
}, {
|
||||
"v:\n- A\n- 'B\n\n C'\n",
|
||||
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||
},
|
||||
|
||||
// Explicit tags.
|
||||
{
|
||||
"v: !!float '1.1'",
|
||||
map[string]interface{}{"v": 1.1},
|
||||
}, {
|
||||
"v: !!null ''",
|
||||
map[string]interface{}{"v": nil},
|
||||
}, {
|
||||
"%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'",
|
||||
map[string]interface{}{"v": 1},
|
||||
},
|
||||
|
||||
// Anchors and aliases.
|
||||
{
|
||||
"a: &x 1\nb: &y 2\nc: *x\nd: *y\n",
|
||||
&struct{ A, B, C, D int }{1, 2, 1, 2},
|
||||
}, {
|
||||
"a: &a {c: 1}\nb: *a",
|
||||
&struct {
|
||||
A, B struct {
|
||||
C int
|
||||
}
|
||||
}{struct{ C int }{1}, struct{ C int }{1}},
|
||||
}, {
|
||||
"a: &a [1, 2]\nb: *a",
|
||||
&struct{ B []int }{[]int{1, 2}},
|
||||
}, {
|
||||
"b: *a\na: &a {c: 1}",
|
||||
&struct {
|
||||
A, B struct {
|
||||
C int
|
||||
}
|
||||
}{struct{ C int }{1}, struct{ C int }{1}},
|
||||
},
|
||||
|
||||
// Bug #1133337
|
||||
{
|
||||
"foo: ''",
|
||||
map[string]*string{"foo": new(string)},
|
||||
}, {
|
||||
"foo: null",
|
||||
map[string]string{"foo": ""},
|
||||
}, {
|
||||
"foo: null",
|
||||
map[string]interface{}{"foo": nil},
|
||||
},
|
||||
|
||||
// Ignored field
|
||||
{
|
||||
"a: 1\nb: 2\n",
|
||||
&struct {
|
||||
A int
|
||||
B int "-"
|
||||
}{1, 0},
|
||||
},
|
||||
|
||||
// Bug #1191981
|
||||
{
|
||||
"" +
|
||||
"%YAML 1.1\n" +
|
||||
"--- !!str\n" +
|
||||
`"Generic line break (no glyph)\n\` + "\n" +
|
||||
` Generic line break (glyphed)\n\` + "\n" +
|
||||
` Line separator\u2028\` + "\n" +
|
||||
` Paragraph separator\u2029"` + "\n",
|
||||
"" +
|
||||
"Generic line break (no glyph)\n" +
|
||||
"Generic line break (glyphed)\n" +
|
||||
"Line separator\u2028Paragraph separator\u2029",
|
||||
},
|
||||
|
||||
// Struct inlining
|
||||
{
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
&struct {
|
||||
A int
|
||||
C inlineB `yaml:",inline"`
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
},
|
||||
|
||||
// Map inlining
|
||||
{
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
&struct {
|
||||
A int
|
||||
C map[string]int `yaml:",inline"`
|
||||
}{1, map[string]int{"b": 2, "c": 3}},
|
||||
},
|
||||
|
||||
// bug 1243827
|
||||
{
|
||||
"a: -b_c",
|
||||
map[string]interface{}{"a": "-b_c"},
|
||||
},
|
||||
{
|
||||
"a: +b_c",
|
||||
map[string]interface{}{"a": "+b_c"},
|
||||
},
|
||||
{
|
||||
"a: 50cent_of_dollar",
|
||||
map[string]interface{}{"a": "50cent_of_dollar"},
|
||||
},
|
||||
|
||||
// Duration
|
||||
{
|
||||
"a: 3s",
|
||||
map[string]time.Duration{"a": 3 * time.Second},
|
||||
},
|
||||
|
||||
// Issue #24.
|
||||
{
|
||||
"a: <foo>",
|
||||
map[string]string{"a": "<foo>"},
|
||||
},
|
||||
|
||||
// Base 60 floats are obsolete and unsupported.
|
||||
{
|
||||
"a: 1:1\n",
|
||||
map[string]string{"a": "1:1"},
|
||||
},
|
||||
|
||||
// Binary data.
|
||||
{
|
||||
"a: !!binary gIGC\n",
|
||||
map[string]string{"a": "\x80\x81\x82"},
|
||||
}, {
|
||||
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||
}, {
|
||||
"a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n",
|
||||
map[string]string{"a": strings.Repeat("\x00", 52)},
|
||||
},
|
||||
|
||||
// Ordered maps.
|
||||
{
|
||||
"{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}",
|
||||
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
|
||||
},
|
||||
|
||||
// Issue #39.
|
||||
{
|
||||
"a:\n b:\n c: d\n",
|
||||
map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}},
|
||||
},
|
||||
|
||||
// Custom map type.
|
||||
{
|
||||
"a: {b: c}",
|
||||
M{"a": M{"b": "c"}},
|
||||
},
|
||||
|
||||
// Support encoding.TextUnmarshaler.
|
||||
{
|
||||
"a: 1.2.3.4\n",
|
||||
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
|
||||
},
|
||||
{
|
||||
"a: 2015-02-24T18:19:39Z\n",
|
||||
map[string]time.Time{"a": time.Unix(1424801979, 0)},
|
||||
},
|
||||
|
||||
// Encode empty lists as zero-length slices.
|
||||
{
|
||||
"a: []",
|
||||
&struct{ A []int }{[]int{}},
|
||||
},
|
||||
|
||||
// UTF-16-LE
|
||||
{
|
||||
"\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00",
|
||||
M{"ñoño": "very yes"},
|
||||
},
|
||||
// UTF-16-LE with surrogate.
|
||||
{
|
||||
"\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00",
|
||||
M{"ñoño": "very yes 🟔"},
|
||||
},
|
||||
|
||||
// UTF-16-BE
|
||||
{
|
||||
"\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n",
|
||||
M{"ñoño": "very yes"},
|
||||
},
|
||||
// UTF-16-BE with surrogate.
|
||||
{
|
||||
"\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n",
|
||||
M{"ñoño": "very yes 🟔"},
|
||||
},
|
||||
}
|
||||
|
||||
type M map[interface{}]interface{}
|
||||
|
||||
type inlineB struct {
|
||||
B int
|
||||
inlineC `yaml:",inline"`
|
||||
}
|
||||
|
||||
type inlineC struct {
|
||||
C int
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshal(c *C) {
|
||||
for _, item := range unmarshalTests {
|
||||
t := reflect.ValueOf(item.value).Type()
|
||||
var value interface{}
|
||||
switch t.Kind() {
|
||||
case reflect.Map:
|
||||
value = reflect.MakeMap(t).Interface()
|
||||
case reflect.String:
|
||||
value = reflect.New(t).Interface()
|
||||
case reflect.Ptr:
|
||||
value = reflect.New(t.Elem()).Interface()
|
||||
default:
|
||||
c.Fatalf("missing case for %s", t)
|
||||
}
|
||||
err := yaml.Unmarshal([]byte(item.data), value)
|
||||
if _, ok := err.(*yaml.TypeError); !ok {
|
||||
c.Assert(err, IsNil)
|
||||
}
|
||||
if t.Kind() == reflect.String {
|
||||
c.Assert(*value.(*string), Equals, item.value)
|
||||
} else {
|
||||
c.Assert(value, DeepEquals, item.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalNaN(c *C) {
|
||||
value := map[string]interface{}{}
|
||||
err := yaml.Unmarshal([]byte("notanum: .NaN"), &value)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true)
|
||||
}
|
||||
|
||||
var unmarshalErrorTests = []struct {
|
||||
data, error string
|
||||
}{
|
||||
{"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"},
|
||||
{"v: [A,", "yaml: line 1: did not find expected node content"},
|
||||
{"v:\n- [A,", "yaml: line 2: did not find expected node content"},
|
||||
{"a: *b\n", "yaml: unknown anchor 'b' referenced"},
|
||||
{"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"},
|
||||
{"value: -", "yaml: block sequence entries are not allowed in this context"},
|
||||
{"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"},
|
||||
{"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`},
|
||||
{"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`},
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalErrors(c *C) {
|
||||
for _, item := range unmarshalErrorTests {
|
||||
var value interface{}
|
||||
err := yaml.Unmarshal([]byte(item.data), &value)
|
||||
c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value))
|
||||
}
|
||||
}
|
||||
|
||||
var unmarshalerTests = []struct {
|
||||
data, tag string
|
||||
value interface{}
|
||||
}{
|
||||
{"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}},
|
||||
{"_: [1,A]", "!!seq", []interface{}{1, "A"}},
|
||||
{"_: 10", "!!int", 10},
|
||||
{"_: null", "!!null", nil},
|
||||
{`_: BAR!`, "!!str", "BAR!"},
|
||||
{`_: "BAR!"`, "!!str", "BAR!"},
|
||||
{"_: !!foo 'BAR!'", "!!foo", "BAR!"},
|
||||
}
|
||||
|
||||
var unmarshalerResult = map[int]error{}
|
||||
|
||||
type unmarshalerType struct {
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error {
|
||||
if err := unmarshal(&o.value); err != nil {
|
||||
return err
|
||||
}
|
||||
if i, ok := o.value.(int); ok {
|
||||
if result, ok := unmarshalerResult[i]; ok {
|
||||
return result
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type unmarshalerPointer struct {
|
||||
Field *unmarshalerType "_"
|
||||
}
|
||||
|
||||
type unmarshalerValue struct {
|
||||
Field unmarshalerType "_"
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerPointerField(c *C) {
|
||||
for _, item := range unmarshalerTests {
|
||||
obj := &unmarshalerPointer{}
|
||||
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
if item.value == nil {
|
||||
c.Assert(obj.Field, IsNil)
|
||||
} else {
|
||||
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerValueField(c *C) {
|
||||
for _, item := range unmarshalerTests {
|
||||
obj := &unmarshalerValue{}
|
||||
err := yaml.Unmarshal([]byte(item.data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value))
|
||||
c.Assert(obj.Field.value, DeepEquals, item.value)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerWholeDocument(c *C) {
|
||||
obj := &unmarshalerType{}
|
||||
err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj)
|
||||
c.Assert(err, IsNil)
|
||||
value, ok := obj.value.(map[interface{}]interface{})
|
||||
c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value))
|
||||
c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value)
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerTypeError(c *C) {
|
||||
unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}}
|
||||
unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}}
|
||||
defer func() {
|
||||
delete(unmarshalerResult, 2)
|
||||
delete(unmarshalerResult, 4)
|
||||
}()
|
||||
|
||||
type T struct {
|
||||
Before int
|
||||
After int
|
||||
M map[string]*unmarshalerType
|
||||
}
|
||||
var v T
|
||||
data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}`
|
||||
err := yaml.Unmarshal([]byte(data), &v)
|
||||
c.Assert(err, ErrorMatches, ""+
|
||||
"yaml: unmarshal errors:\n"+
|
||||
" line 1: cannot unmarshal !!str `A` into int\n"+
|
||||
" foo\n"+
|
||||
" bar\n"+
|
||||
" line 1: cannot unmarshal !!str `B` into int")
|
||||
c.Assert(v.M["abc"], NotNil)
|
||||
c.Assert(v.M["def"], IsNil)
|
||||
c.Assert(v.M["ghi"], NotNil)
|
||||
c.Assert(v.M["jkl"], IsNil)
|
||||
|
||||
c.Assert(v.M["abc"].value, Equals, 1)
|
||||
c.Assert(v.M["ghi"].value, Equals, 3)
|
||||
}
|
||||
|
||||
type proxyTypeError struct{}
|
||||
|
||||
func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
var a int32
|
||||
var b int64
|
||||
if err := unmarshal(&s); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if s == "a" {
|
||||
if err := unmarshal(&b); err == nil {
|
||||
panic("should have failed")
|
||||
}
|
||||
return unmarshal(&a)
|
||||
}
|
||||
if err := unmarshal(&a); err == nil {
|
||||
panic("should have failed")
|
||||
}
|
||||
return unmarshal(&b)
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerTypeErrorProxying(c *C) {
|
||||
type T struct {
|
||||
Before int
|
||||
After int
|
||||
M map[string]*proxyTypeError
|
||||
}
|
||||
var v T
|
||||
data := `{before: A, m: {abc: a, def: b}, after: B}`
|
||||
err := yaml.Unmarshal([]byte(data), &v)
|
||||
c.Assert(err, ErrorMatches, ""+
|
||||
"yaml: unmarshal errors:\n"+
|
||||
" line 1: cannot unmarshal !!str `A` into int\n"+
|
||||
" line 1: cannot unmarshal !!str `a` into int32\n"+
|
||||
" line 1: cannot unmarshal !!str `b` into int64\n"+
|
||||
" line 1: cannot unmarshal !!str `B` into int")
|
||||
}
|
||||
|
||||
type failingUnmarshaler struct{}
|
||||
|
||||
var failingErr = errors.New("failingErr")
|
||||
|
||||
func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
return failingErr
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerError(c *C) {
|
||||
err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{})
|
||||
c.Assert(err, Equals, failingErr)
|
||||
}
|
||||
|
||||
type sliceUnmarshaler []int
|
||||
|
||||
func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var slice []int
|
||||
err := unmarshal(&slice)
|
||||
if err == nil {
|
||||
*su = slice
|
||||
return nil
|
||||
}
|
||||
|
||||
var intVal int
|
||||
err = unmarshal(&intVal)
|
||||
if err == nil {
|
||||
*su = []int{intVal}
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalerRetry(c *C) {
|
||||
var su sliceUnmarshaler
|
||||
err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3}))
|
||||
|
||||
err = yaml.Unmarshal([]byte("1"), &su)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1}))
|
||||
}
|
||||
|
||||
// From http://yaml.org/type/merge.html
|
||||
var mergeTests = `
|
||||
anchors:
|
||||
list:
|
||||
- &CENTER { "x": 1, "y": 2 }
|
||||
- &LEFT { "x": 0, "y": 2 }
|
||||
- &BIG { "r": 10 }
|
||||
- &SMALL { "r": 1 }
|
||||
|
||||
# All the following maps are equal:
|
||||
|
||||
plain:
|
||||
# Explicit keys
|
||||
"x": 1
|
||||
"y": 2
|
||||
"r": 10
|
||||
label: center/big
|
||||
|
||||
mergeOne:
|
||||
# Merge one map
|
||||
<< : *CENTER
|
||||
"r": 10
|
||||
label: center/big
|
||||
|
||||
mergeMultiple:
|
||||
# Merge multiple maps
|
||||
<< : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
override:
|
||||
# Override
|
||||
<< : [ *BIG, *LEFT, *SMALL ]
|
||||
"x": 1
|
||||
label: center/big
|
||||
|
||||
shortTag:
|
||||
# Explicit short merge tag
|
||||
!!merge "<<" : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
longTag:
|
||||
# Explicit merge long tag
|
||||
!<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
|
||||
label: center/big
|
||||
|
||||
inlineMap:
|
||||
# Inlined map
|
||||
<< : {"x": 1, "y": 2, "r": 10}
|
||||
label: center/big
|
||||
|
||||
inlineSequenceMap:
|
||||
# Inlined map in sequence
|
||||
<< : [ *CENTER, {"r": 10} ]
|
||||
label: center/big
|
||||
`
|
||||
|
||||
func (s *S) TestMerge(c *C) {
|
||||
var want = map[interface{}]interface{}{
|
||||
"x": 1,
|
||||
"y": 2,
|
||||
"r": 10,
|
||||
"label": "center/big",
|
||||
}
|
||||
|
||||
var m map[interface{}]interface{}
|
||||
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||
c.Assert(err, IsNil)
|
||||
for name, test := range m {
|
||||
if name == "anchors" {
|
||||
continue
|
||||
}
|
||||
c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMergeStruct(c *C) {
|
||||
type Data struct {
|
||||
X, Y, R int
|
||||
Label string
|
||||
}
|
||||
want := Data{1, 2, 10, "center/big"}
|
||||
|
||||
var m map[string]Data
|
||||
err := yaml.Unmarshal([]byte(mergeTests), &m)
|
||||
c.Assert(err, IsNil)
|
||||
for name, test := range m {
|
||||
if name == "anchors" {
|
||||
continue
|
||||
}
|
||||
c.Assert(test, Equals, want, Commentf("test %q failed", name))
|
||||
}
|
||||
}
|
||||
|
||||
var unmarshalNullTests = []func() interface{}{
|
||||
func() interface{} { var v interface{}; v = "v"; return &v },
|
||||
func() interface{} { var s = "s"; return &s },
|
||||
func() interface{} { var s = "s"; sptr := &s; return &sptr },
|
||||
func() interface{} { var i = 1; return &i },
|
||||
func() interface{} { var i = 1; iptr := &i; return &iptr },
|
||||
func() interface{} { m := map[string]int{"s": 1}; return &m },
|
||||
func() interface{} { m := map[string]int{"s": 1}; return m },
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalNull(c *C) {
|
||||
for _, test := range unmarshalNullTests {
|
||||
item := test()
|
||||
zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
|
||||
err := yaml.Unmarshal([]byte("null"), item)
|
||||
c.Assert(err, IsNil)
|
||||
if reflect.TypeOf(item).Kind() == reflect.Map {
|
||||
c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
|
||||
} else {
|
||||
c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestUnmarshalSliceOnPreset(c *C) {
|
||||
// Issue #48.
|
||||
v := struct{ A []int }{[]int{1}}
|
||||
yaml.Unmarshal([]byte("a: [2]"), &v)
|
||||
c.Assert(v.A, DeepEquals, []int{2})
|
||||
}
|
||||
|
||||
//var data []byte
|
||||
//func init() {
|
||||
// var err error
|
||||
// data, err = ioutil.ReadFile("/tmp/file.yaml")
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//func (s *S) BenchmarkUnmarshal(c *C) {
|
||||
// var err error
|
||||
// for i := 0; i < c.N; i++ {
|
||||
// var v map[string]interface{}
|
||||
// err = yaml.Unmarshal(data, &v)
|
||||
// }
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//func (s *S) BenchmarkMarshal(c *C) {
|
||||
// var v map[string]interface{}
|
||||
// yaml.Unmarshal(data, &v)
|
||||
// c.ResetTimer()
|
||||
// for i := 0; i < c.N; i++ {
|
||||
// yaml.Marshal(&v)
|
||||
// }
|
||||
//}
|
||||
501
vendor/gopkg.in/yaml.v2/encode_test.go
generated
vendored
Normal file
501
vendor/gopkg.in/yaml.v2/encode_test.go
generated
vendored
Normal file
@@ -0,0 +1,501 @@
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
"gopkg.in/yaml.v2"
|
||||
"net"
|
||||
"os"
|
||||
)
|
||||
|
||||
var marshalIntTest = 123
|
||||
|
||||
var marshalTests = []struct {
|
||||
value interface{}
|
||||
data string
|
||||
}{
|
||||
{
|
||||
nil,
|
||||
"null\n",
|
||||
}, {
|
||||
&struct{}{},
|
||||
"{}\n",
|
||||
}, {
|
||||
map[string]string{"v": "hi"},
|
||||
"v: hi\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": "hi"},
|
||||
"v: hi\n",
|
||||
}, {
|
||||
map[string]string{"v": "true"},
|
||||
"v: \"true\"\n",
|
||||
}, {
|
||||
map[string]string{"v": "false"},
|
||||
"v: \"false\"\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": true},
|
||||
"v: true\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": false},
|
||||
"v: false\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": 10},
|
||||
"v: 10\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": -10},
|
||||
"v: -10\n",
|
||||
}, {
|
||||
map[string]uint{"v": 42},
|
||||
"v: 42\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": int64(4294967296)},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]int64{"v": int64(4294967296)},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]uint64{"v": 4294967296},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": "10"},
|
||||
"v: \"10\"\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": 0.1},
|
||||
"v: 0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": float64(0.1)},
|
||||
"v: 0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": -0.1},
|
||||
"v: -0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.Inf(+1)},
|
||||
"v: .inf\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.Inf(-1)},
|
||||
"v: -.inf\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.NaN()},
|
||||
"v: .nan\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": nil},
|
||||
"v: null\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": ""},
|
||||
"v: \"\"\n",
|
||||
}, {
|
||||
map[string][]string{"v": []string{"A", "B"}},
|
||||
"v:\n- A\n- B\n",
|
||||
}, {
|
||||
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||
"v:\n- A\n- |-\n B\n C\n",
|
||||
}, {
|
||||
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
|
||||
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
|
||||
}, {
|
||||
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
map[string]interface{}{"a": "-"},
|
||||
"a: '-'\n",
|
||||
},
|
||||
|
||||
// Simple values.
|
||||
{
|
||||
&marshalIntTest,
|
||||
"123\n",
|
||||
},
|
||||
|
||||
// Structures
|
||||
{
|
||||
&struct{ Hello string }{"world"},
|
||||
"hello: world\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct {
|
||||
B string
|
||||
}
|
||||
}{struct{ B string }{"c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct {
|
||||
B string
|
||||
}
|
||||
}{&struct{ B string }{"c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct {
|
||||
B string
|
||||
}
|
||||
}{},
|
||||
"a: null\n",
|
||||
}, {
|
||||
&struct{ A int }{1},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct{ A []int }{[]int{1, 2}},
|
||||
"a:\n- 1\n- 2\n",
|
||||
}, {
|
||||
&struct {
|
||||
B int "a"
|
||||
}{1},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct{ A bool }{true},
|
||||
"a: true\n",
|
||||
},
|
||||
|
||||
// Conditional flag
|
||||
{
|
||||
&struct {
|
||||
A int "a,omitempty"
|
||||
B int "b,omitempty"
|
||||
}{1, 0},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct {
|
||||
A int "a,omitempty"
|
||||
B int "b,omitempty"
|
||||
}{0, 0},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{&struct{ X, y int }{1, 2}},
|
||||
"a: {x: 1}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{nil},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{&struct{ X, y int }{}},
|
||||
"a: {x: 0}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct{ X, y int } "a,omitempty,flow"
|
||||
}{struct{ X, y int }{1, 2}},
|
||||
"a: {x: 1}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct{ X, y int } "a,omitempty,flow"
|
||||
}{struct{ X, y int }{0, 1}},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A float64 "a,omitempty"
|
||||
B float64 "b,omitempty"
|
||||
}{1, 0},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Flow flag
|
||||
{
|
||||
&struct {
|
||||
A []int "a,flow"
|
||||
}{[]int{1, 2}},
|
||||
"a: [1, 2]\n",
|
||||
}, {
|
||||
&struct {
|
||||
A map[string]string "a,flow"
|
||||
}{map[string]string{"b": "c", "d": "e"}},
|
||||
"a: {b: c, d: e}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct {
|
||||
B, D string
|
||||
} "a,flow"
|
||||
}{struct{ B, D string }{"c", "e"}},
|
||||
"a: {b: c, d: e}\n",
|
||||
},
|
||||
|
||||
// Unexported field
|
||||
{
|
||||
&struct {
|
||||
u int
|
||||
A int
|
||||
}{0, 1},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Ignored field
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
B int "-"
|
||||
}{1, 2},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Struct inlining
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
C inlineB `yaml:",inline"`
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
},
|
||||
|
||||
// Map inlining
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
C map[string]int `yaml:",inline"`
|
||||
}{1, map[string]int{"b": 2, "c": 3}},
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
},
|
||||
|
||||
// Duration
|
||||
{
|
||||
map[string]time.Duration{"a": 3 * time.Second},
|
||||
"a: 3s\n",
|
||||
},
|
||||
|
||||
// Issue #24: bug in map merging logic.
|
||||
{
|
||||
map[string]string{"a": "<foo>"},
|
||||
"a: <foo>\n",
|
||||
},
|
||||
|
||||
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
|
||||
// with old YAML 1.1 parsers.
|
||||
{
|
||||
map[string]string{"a": "1:1"},
|
||||
"a: \"1:1\"\n",
|
||||
},
|
||||
|
||||
// Binary data.
|
||||
{
|
||||
map[string]string{"a": "\x00"},
|
||||
"a: \"\\0\"\n",
|
||||
}, {
|
||||
map[string]string{"a": "\x80\x81\x82"},
|
||||
"a: !!binary gIGC\n",
|
||||
}, {
|
||||
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||
},
|
||||
|
||||
// Ordered maps.
|
||||
{
|
||||
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
|
||||
"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
|
||||
},
|
||||
|
||||
// Encode unicode as utf-8 rather than in escaped form.
|
||||
{
|
||||
map[string]string{"a": "你好"},
|
||||
"a: 你好\n",
|
||||
},
|
||||
|
||||
// Support encoding.TextMarshaler.
|
||||
{
|
||||
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
|
||||
"a: 1.2.3.4\n",
|
||||
},
|
||||
{
|
||||
map[string]time.Time{"a": time.Unix(1424801979, 0)},
|
||||
"a: 2015-02-24T18:19:39Z\n",
|
||||
},
|
||||
|
||||
// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
|
||||
{
|
||||
map[string]string{"a": "b: c"},
|
||||
"a: 'b: c'\n",
|
||||
},
|
||||
|
||||
// Containing hash mark ('#') in string should be quoted
|
||||
{
|
||||
map[string]string{"a": "Hello #comment"},
|
||||
"a: 'Hello #comment'\n",
|
||||
},
|
||||
{
|
||||
map[string]string{"a": "你好 #comment"},
|
||||
"a: '你好 #comment'\n",
|
||||
},
|
||||
}
|
||||
|
||||
func (s *S) TestMarshal(c *C) {
|
||||
defer os.Setenv("TZ", os.Getenv("TZ"))
|
||||
os.Setenv("TZ", "UTC")
|
||||
for _, item := range marshalTests {
|
||||
data, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, item.data)
|
||||
}
|
||||
}
|
||||
|
||||
var marshalErrorTests = []struct {
|
||||
value interface{}
|
||||
error string
|
||||
panic string
|
||||
}{{
|
||||
value: &struct {
|
||||
B int
|
||||
inlineB ",inline"
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
|
||||
}, {
|
||||
value: &struct {
|
||||
A int
|
||||
B map[string]int ",inline"
|
||||
}{1, map[string]int{"a": 2}},
|
||||
panic: `Can't have key "a" in inlined map; conflicts with struct field`,
|
||||
}}
|
||||
|
||||
func (s *S) TestMarshalErrors(c *C) {
|
||||
for _, item := range marshalErrorTests {
|
||||
if item.panic != "" {
|
||||
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
|
||||
} else {
|
||||
_, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, ErrorMatches, item.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalTypeCache(c *C) {
|
||||
var data []byte
|
||||
var err error
|
||||
func() {
|
||||
type T struct{ A int }
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
func() {
|
||||
type T struct{ B int }
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
c.Assert(string(data), Equals, "b: 0\n")
|
||||
}
|
||||
|
||||
var marshalerTests = []struct {
|
||||
data string
|
||||
value interface{}
|
||||
}{
|
||||
{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
|
||||
{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
|
||||
{"_: 10\n", 10},
|
||||
{"_: null\n", nil},
|
||||
{"_: BAR!\n", "BAR!"},
|
||||
}
|
||||
|
||||
type marshalerType struct {
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (o marshalerType) MarshalText() ([]byte, error) {
|
||||
panic("MarshalText called on type with MarshalYAML")
|
||||
}
|
||||
|
||||
func (o marshalerType) MarshalYAML() (interface{}, error) {
|
||||
return o.value, nil
|
||||
}
|
||||
|
||||
type marshalerValue struct {
|
||||
Field marshalerType "_"
|
||||
}
|
||||
|
||||
func (s *S) TestMarshaler(c *C) {
|
||||
for _, item := range marshalerTests {
|
||||
obj := &marshalerValue{}
|
||||
obj.Field.value = item.value
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, string(item.data))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalerWholeDocument(c *C) {
|
||||
obj := &marshalerType{}
|
||||
obj.value = map[string]string{"hello": "world!"}
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, "hello: world!\n")
|
||||
}
|
||||
|
||||
type failingMarshaler struct{}
|
||||
|
||||
func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
|
||||
return nil, failingErr
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalerError(c *C) {
|
||||
_, err := yaml.Marshal(&failingMarshaler{})
|
||||
c.Assert(err, Equals, failingErr)
|
||||
}
|
||||
|
||||
func (s *S) TestSortedOutput(c *C) {
|
||||
order := []interface{}{
|
||||
false,
|
||||
true,
|
||||
1,
|
||||
uint(1),
|
||||
1.0,
|
||||
1.1,
|
||||
1.2,
|
||||
2,
|
||||
uint(2),
|
||||
2.0,
|
||||
2.1,
|
||||
"",
|
||||
".1",
|
||||
".2",
|
||||
".a",
|
||||
"1",
|
||||
"2",
|
||||
"a!10",
|
||||
"a/2",
|
||||
"a/10",
|
||||
"a~10",
|
||||
"ab/1",
|
||||
"b/1",
|
||||
"b/01",
|
||||
"b/2",
|
||||
"b/02",
|
||||
"b/3",
|
||||
"b/03",
|
||||
"b1",
|
||||
"b01",
|
||||
"b3",
|
||||
"c2.10",
|
||||
"c10.2",
|
||||
"d1",
|
||||
"d12",
|
||||
"d12a",
|
||||
}
|
||||
m := make(map[interface{}]int)
|
||||
for _, k := range order {
|
||||
m[k] = 1
|
||||
}
|
||||
data, err := yaml.Marshal(m)
|
||||
c.Assert(err, IsNil)
|
||||
out := "\n" + string(data)
|
||||
last := 0
|
||||
for i, k := range order {
|
||||
repr := fmt.Sprint(k)
|
||||
if s, ok := k.(string); ok {
|
||||
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
|
||||
repr = `"` + repr + `"`
|
||||
}
|
||||
}
|
||||
index := strings.Index(out, "\n"+repr+":")
|
||||
if index == -1 {
|
||||
c.Fatalf("%#v is not in the output: %#v", k, out)
|
||||
}
|
||||
if index < last {
|
||||
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
|
||||
}
|
||||
last = index
|
||||
}
|
||||
}
|
||||
12
vendor/gopkg.in/yaml.v2/suite_test.go
generated
vendored
Normal file
12
vendor/gopkg.in/yaml.v2/suite_test.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
. "gopkg.in/check.v1"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type S struct{}
|
||||
|
||||
var _ = Suite(&S{})
|
||||
Reference in New Issue
Block a user