tech(build): move dependencies to vendor folder

bergquist
2016-09-14 16:14:27 +02:00
parent 4e567b5f02
commit 6253476c73
2760 changed files with 8 additions and 401 deletions
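For context, this relies on the Go toolchain's vendor/ resolution (Go 1.6+): imports keep their gopkg.in/... paths and are satisfied from the repository's vendor directory ahead of GOPATH, so no import statements need to change. A minimal, illustrative sketch assuming that layout (the main package below is not part of this commit):

package main

// With a vendor/ folder at the repository root, these imports resolve to
// vendor/gopkg.in/asn1-ber.v1 and vendor/gopkg.in/bufio.v1 (Go 1.6+).
import (
	"fmt"

	ber "gopkg.in/asn1-ber.v1"
	bufio "gopkg.in/bufio.v1"
)

func main() {
	// Encode a string as a BER OCTET STRING and buffer the resulting bytes.
	p := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting")
	buf := bufio.NewBuffer(p.Bytes())
	fmt.Printf("encoded %d BER bytes\n", buf.Len())
}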

12
vendor/gopkg.in/asn1-ber.v1/.travis.yml generated vendored Normal file

@@ -0,0 +1,12 @@
language: go
go:
- 1.2
- 1.3
- tip
install:
- go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v
- go list -f '{{range .TestImports}}{{.}} {{end}}' ./... | xargs go get -v
- go get code.google.com/p/go.tools/cmd/cover
- go build -v ./...
script:
- go test -v -cover ./...

27
vendor/gopkg.in/asn1-ber.v1/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

18
vendor/gopkg.in/asn1-ber.v1/README.md generated vendored Normal file

@@ -0,0 +1,18 @@
[![GoDoc](https://godoc.org/gopkg.in/asn1-ber.v1?status.svg)](https://godoc.org/gopkg.in/asn1-ber.v1) [![Build Status](https://travis-ci.org/go-asn1-ber/asn1-ber.svg)](https://travis-ci.org/go-asn1-ber/asn1-ber)
ASN1 BER Encoding / Decoding Library for the Go programming language.
---------------------------------------------------------------------
Required libraries:
None
Working:
Very basic encoding / decoding needed for LDAP protocol
Tests Implemented:
A few
TODO:
Fix all encoding / decoding to conform to ASN1 BER spec
Implement Tests / Benchmarks
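A hedged usage sketch against the API in ber.go below (all identifiers come from that file; the values are illustrative, not part of this commit):

package main

import (
	"fmt"

	ber "gopkg.in/asn1-ber.v1"
)

func main() {
	// Build a SEQUENCE with one OCTET STRING child, encode it,
	// then decode the bytes back into a packet tree.
	seq := ber.NewSequence("example sequence")
	seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, "hello", "greeting"))

	decoded := ber.DecodePacket(seq.Bytes())
	fmt.Printf("decoded %d child packet(s)\n", len(decoded.Children))
	ber.PrintPacket(decoded) // dumps class, type, tag and length per node
}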

528
vendor/gopkg.in/asn1-ber.v1/ber.go generated vendored Normal file

@@ -0,0 +1,528 @@
package ber
import (
"bytes"
"fmt"
"io"
"os"
"reflect"
)
type Packet struct {
ClassType Class
TagType Type
Tag Tag
Value interface{}
ByteValue []byte
Data *bytes.Buffer
Children []*Packet
Description string
}
type Tag uint8
const (
TagEOC Tag = 0x00
TagBoolean Tag = 0x01
TagInteger Tag = 0x02
TagBitString Tag = 0x03
TagOctetString Tag = 0x04
TagNULL Tag = 0x05
TagObjectIdentifier Tag = 0x06
TagObjectDescriptor Tag = 0x07
TagExternal Tag = 0x08
TagRealFloat Tag = 0x09
TagEnumerated Tag = 0x0a
TagEmbeddedPDV Tag = 0x0b
TagUTF8String Tag = 0x0c
TagRelativeOID Tag = 0x0d
TagSequence Tag = 0x10
TagSet Tag = 0x11
TagNumericString Tag = 0x12
TagPrintableString Tag = 0x13
TagT61String Tag = 0x14
TagVideotexString Tag = 0x15
TagIA5String Tag = 0x16
TagUTCTime Tag = 0x17
TagGeneralizedTime Tag = 0x18
TagGraphicString Tag = 0x19
TagVisibleString Tag = 0x1a
TagGeneralString Tag = 0x1b
TagUniversalString Tag = 0x1c
TagCharacterString Tag = 0x1d
TagBMPString Tag = 0x1e
TagBitmask Tag = 0x1f // xxx11111b
)
var tagMap = map[Tag]string{
TagEOC: "EOC (End-of-Content)",
TagBoolean: "Boolean",
TagInteger: "Integer",
TagBitString: "Bit String",
TagOctetString: "Octet String",
TagNULL: "NULL",
TagObjectIdentifier: "Object Identifier",
TagObjectDescriptor: "Object Descriptor",
TagExternal: "External",
TagRealFloat: "Real (float)",
TagEnumerated: "Enumerated",
TagEmbeddedPDV: "Embedded PDV",
TagUTF8String: "UTF8 String",
TagRelativeOID: "Relative-OID",
TagSequence: "Sequence and Sequence of",
TagSet: "Set and Set OF",
TagNumericString: "Numeric String",
TagPrintableString: "Printable String",
TagT61String: "T61 String",
TagVideotexString: "Videotex String",
TagIA5String: "IA5 String",
TagUTCTime: "UTC Time",
TagGeneralizedTime: "Generalized Time",
TagGraphicString: "Graphic String",
TagVisibleString: "Visible String",
TagGeneralString: "General String",
TagUniversalString: "Universal String",
TagCharacterString: "Character String",
TagBMPString: "BMP String",
}
type Class uint8
const (
ClassUniversal Class = 0 // 00xxxxxxb
ClassApplication Class = 64 // 01xxxxxxb
ClassContext Class = 128 // 10xxxxxxb
ClassPrivate Class = 192 // 11xxxxxxb
ClassBitmask Class = 192 // 11xxxxxxb
)
var ClassMap = map[Class]string{
ClassUniversal: "Universal",
ClassApplication: "Application",
ClassContext: "Context",
ClassPrivate: "Private",
}
type Type uint8
const (
TypePrimitive Type = 0 // xx0xxxxxb
TypeConstructed Type = 32 // xx1xxxxxb
TypeBitmask Type = 32 // xx1xxxxxb
)
var TypeMap = map[Type]string{
TypePrimitive: "Primitive",
TypeConstructed: "Constructed",
}
var Debug bool = false
func PrintBytes(out io.Writer, buf []byte, indent string) {
data_lines := make([]string, (len(buf)/30)+1)
num_lines := make([]string, (len(buf)/30)+1)
for i, b := range buf {
data_lines[i/30] += fmt.Sprintf("%02x ", b)
num_lines[i/30] += fmt.Sprintf("%02d ", (i+1)%100)
}
for i := 0; i < len(data_lines); i++ {
out.Write([]byte(indent + data_lines[i] + "\n"))
out.Write([]byte(indent + num_lines[i] + "\n\n"))
}
}
func PrintPacket(p *Packet) {
printPacket(os.Stdout, p, 0, false)
}
func printPacket(out io.Writer, p *Packet, indent int, printBytes bool) {
indent_str := ""
for len(indent_str) != indent {
indent_str += " "
}
class_str := ClassMap[p.ClassType]
tagtype_str := TypeMap[p.TagType]
tag_str := fmt.Sprintf("0x%02X", p.Tag)
if p.ClassType == ClassUniversal {
tag_str = tagMap[p.Tag]
}
value := fmt.Sprint(p.Value)
description := ""
if p.Description != "" {
description = p.Description + ": "
}
fmt.Fprintf(out, "%s%s(%s, %s, %s) Len=%d %q\n", indent_str, description, class_str, tagtype_str, tag_str, p.Data.Len(), value)
if printBytes {
PrintBytes(out, p.Bytes(), indent_str)
}
for _, child := range p.Children {
printPacket(out, child, indent+1, printBytes)
}
}
func resizeBuffer(in []byte, new_size int) (out []byte) {
out = make([]byte, new_size)
copy(out, in)
return
}
func ReadPacket(reader io.Reader) (*Packet, error) {
var header [2]byte
buf := header[:]
_, err := io.ReadFull(reader, buf)
if err != nil {
return nil, err
}
idx := 2
var datalen int
l := buf[1]
if l&0x80 == 0 {
// The length is encoded in the bottom 7 bits.
datalen = int(l & 0x7f)
if Debug {
fmt.Printf("Read: datalen = %d len(buf) = %d\n ", l, len(buf))
for _, b := range buf {
fmt.Printf("%02X ", b)
}
fmt.Printf("\n")
}
} else {
// Bottom 7 bits give the number of length bytes to follow.
numBytes := int(l & 0x7f)
if numBytes == 0 {
return nil, fmt.Errorf("invalid length found")
}
idx += numBytes
buf = resizeBuffer(buf, 2+numBytes)
_, err := io.ReadFull(reader, buf[2:])
if err != nil {
return nil, err
}
datalen = 0
for i := 0; i < numBytes; i++ {
b := buf[2+i]
datalen <<= 8
datalen |= int(b)
}
if Debug {
fmt.Printf("Read: datalen = %d numbytes=%d len(buf) = %d\n ", datalen, numBytes, len(buf))
for _, b := range buf {
fmt.Printf("%02X ", b)
}
fmt.Printf("\n")
}
}
buf = resizeBuffer(buf, idx+datalen)
_, err = io.ReadFull(reader, buf[idx:])
if err != nil {
return nil, err
}
if Debug {
fmt.Printf("Read: len( buf ) = %d idx=%d datalen=%d idx+datalen=%d\n ", len(buf), idx, datalen, idx+datalen)
for _, b := range buf {
fmt.Printf("%02X ", b)
}
}
p, _ := decodePacket(buf)
return p, nil
}
func DecodeString(data []byte) string {
return string(data)
}
func parseInt64(bytes []byte) (ret int64, err error) {
if len(bytes) > 8 {
// We'll overflow an int64 in this case.
err = fmt.Errorf("integer too large")
return
}
for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
ret <<= 8
ret |= int64(bytes[bytesRead])
}
// Shift up and down in order to sign extend the result.
ret <<= 64 - uint8(len(bytes))*8
ret >>= 64 - uint8(len(bytes))*8
return
}
func encodeInteger(i int64) []byte {
n := int64Length(i)
out := make([]byte, n)
var j int
for ; n > 0; n-- {
out[j] = (byte(i >> uint((n-1)*8)))
j++
}
return out
}
func int64Length(i int64) (numBytes int) {
numBytes = 1
for i > 127 {
numBytes++
i >>= 8
}
for i < -128 {
numBytes++
i >>= 8
}
return
}
func DecodePacket(data []byte) *Packet {
p, _ := decodePacket(data)
return p
}
func decodePacket(data []byte) (*Packet, []byte) {
if Debug {
fmt.Printf("decodePacket: enter %d\n", len(data))
}
p := new(Packet)
p.ClassType = Class(data[0]) & ClassBitmask
p.TagType = Type(data[0]) & TypeBitmask
p.Tag = Tag(data[0]) & TagBitmask
var datalen int
l := data[1]
datapos := 2
if l&0x80 == 0 {
// The length is encoded in the bottom 7 bits.
datalen = int(l & 0x7f)
} else {
// Bottom 7 bits give the number of length bytes to follow.
numBytes := int(l & 0x7f)
if numBytes == 0 {
return nil, nil
}
datapos += numBytes
datalen = 0
for i := 0; i < numBytes; i++ {
b := data[2+i]
datalen <<= 8
datalen |= int(b)
}
}
p.Data = new(bytes.Buffer)
p.Children = make([]*Packet, 0, 2)
p.Value = nil
value_data := data[datapos : datapos+datalen]
if p.TagType == TypeConstructed {
for len(value_data) != 0 {
var child *Packet
child, value_data = decodePacket(value_data)
p.AppendChild(child)
}
} else if p.ClassType == ClassUniversal {
p.Data.Write(data[datapos : datapos+datalen])
p.ByteValue = value_data
switch p.Tag {
case TagEOC:
case TagBoolean:
val, _ := parseInt64(value_data)
p.Value = val != 0
case TagInteger:
p.Value, _ = parseInt64(value_data)
case TagBitString:
case TagOctetString:
// the actual string encoding is not known here
// (e.g. for LDAP, value_data is already a UTF-8-encoded
// string). Return the data without further processing.
p.Value = DecodeString(value_data)
case TagNULL:
case TagObjectIdentifier:
case TagObjectDescriptor:
case TagExternal:
case TagRealFloat:
case TagEnumerated:
p.Value, _ = parseInt64(value_data)
case TagEmbeddedPDV:
case TagUTF8String:
case TagRelativeOID:
case TagSequence:
case TagSet:
case TagNumericString:
case TagPrintableString:
p.Value = DecodeString(value_data)
case TagT61String:
case TagVideotexString:
case TagIA5String:
case TagUTCTime:
case TagGeneralizedTime:
case TagGraphicString:
case TagVisibleString:
case TagGeneralString:
case TagUniversalString:
case TagCharacterString:
case TagBMPString:
}
} else {
p.Data.Write(data[datapos : datapos+datalen])
}
return p, data[datapos+datalen:]
}
func (p *Packet) Bytes() []byte {
var out bytes.Buffer
out.Write([]byte{byte(p.ClassType) | byte(p.TagType) | byte(p.Tag)})
packet_length := encodeInteger(int64(p.Data.Len()))
if p.Data.Len() > 127 || len(packet_length) > 1 {
out.Write([]byte{byte(len(packet_length) | 128)})
out.Write(packet_length)
} else {
out.Write(packet_length)
}
out.Write(p.Data.Bytes())
return out.Bytes()
}
func (p *Packet) AppendChild(child *Packet) {
p.Data.Write(child.Bytes())
p.Children = append(p.Children, child)
}
func Encode(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
p := new(Packet)
p.ClassType = ClassType
p.TagType = TagType
p.Tag = Tag
p.Data = new(bytes.Buffer)
p.Children = make([]*Packet, 0, 2)
p.Value = Value
p.Description = Description
if Value != nil {
v := reflect.ValueOf(Value)
if ClassType == ClassUniversal {
switch Tag {
case TagOctetString:
sv, ok := v.Interface().(string)
if ok {
p.Data.Write([]byte(sv))
}
}
}
}
return p
}
func NewSequence(Description string) *Packet {
return Encode(ClassUniversal, TypeConstructed, TagSequence, nil, Description)
}
func NewBoolean(ClassType Class, TagType Type, Tag Tag, Value bool, Description string) *Packet {
intValue := int64(0)
if Value {
intValue = 1
}
p := Encode(ClassType, TagType, Tag, nil, Description)
p.Value = Value
p.Data.Write(encodeInteger(intValue))
return p
}
func NewInteger(ClassType Class, TagType Type, Tag Tag, Value interface{}, Description string) *Packet {
p := Encode(ClassType, TagType, Tag, nil, Description)
p.Value = Value
switch v := Value.(type) {
case int:
p.Data.Write(encodeInteger(int64(v)))
case uint:
p.Data.Write(encodeInteger(int64(v)))
case int64:
p.Data.Write(encodeInteger(v))
case uint64:
// TODO : check range or add encodeUInt...
p.Data.Write(encodeInteger(int64(v)))
case int32:
p.Data.Write(encodeInteger(int64(v)))
case uint32:
p.Data.Write(encodeInteger(int64(v)))
case int16:
p.Data.Write(encodeInteger(int64(v)))
case uint16:
p.Data.Write(encodeInteger(int64(v)))
case int8:
p.Data.Write(encodeInteger(int64(v)))
case uint8:
p.Data.Write(encodeInteger(int64(v)))
default:
// TODO : add support for big.Int ?
panic(fmt.Sprintf("Invalid type %T, expected {u|}int{64|32|16|8}", v))
}
return p
}
func NewString(ClassType Class, TagType Type, Tag Tag, Value, Description string) *Packet {
p := Encode(ClassType, TagType, Tag, nil, Description)
p.Value = Value
p.Data.Write([]byte(Value))
return p
}
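The length handling in ReadPacket and decodePacket above follows BER definite-length rules: if the high bit of the first length byte is clear, its low 7 bits are the content length (short form); if it is set, the low 7 bits say how many subsequent bytes carry the length (long form). A small illustrative check of the long form, using only functions from this file (the 300-byte value is arbitrary):

package main

import (
	"fmt"
	"strings"

	ber "gopkg.in/asn1-ber.v1"
)

func main() {
	// A 300-byte string forces the long form: the length bytes are
	// 0x82 (high bit set, 2 length bytes follow), 0x01, 0x2C (= 300).
	p := ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, strings.Repeat("a", 300), "")
	b := p.Bytes()
	fmt.Printf("tag=%02X length bytes=% X total=%d\n", b[0], b[1:4], len(b))
}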

158
vendor/gopkg.in/asn1-ber.v1/ber_test.go generated vendored Normal file

@@ -0,0 +1,158 @@
package ber
import (
"bytes"
"io"
"testing"
)
func TestEncodeDecodeInteger(t *testing.T) {
for _, v := range []int64{0, 10, 128, 1024, -1, -100, -128, -1024} {
enc := encodeInteger(v)
dec, err := parseInt64(enc)
if err != nil {
t.Fatalf("Error decoding %d : %s", v, err)
}
if v != dec {
t.Error("TestEncodeDecodeInteger failed for %d (got %d)", v, dec)
}
}
}
func TestBoolean(t *testing.T) {
var value bool = true
packet := NewBoolean(ClassUniversal, TypePrimitive, TagBoolean, value, "first Packet, True")
newBoolean, ok := packet.Value.(bool)
if !ok || newBoolean != value {
t.Error("error during creating packet")
}
encodedPacket := packet.Bytes()
newPacket := DecodePacket(encodedPacket)
newBoolean, ok = newPacket.Value.(bool)
if !ok || newBoolean != value {
t.Error("error during decoding packet")
}
}
func TestInteger(t *testing.T) {
var value int64 = 10
packet := NewInteger(ClassUniversal, TypePrimitive, TagInteger, value, "Integer, 10")
{
newInteger, ok := packet.Value.(int64)
if !ok || newInteger != value {
t.Error("error creating packet")
}
}
encodedPacket := packet.Bytes()
newPacket := DecodePacket(encodedPacket)
{
newInteger, ok := newPacket.Value.(int64)
if !ok || int64(newInteger) != value {
t.Error("error decoding packet")
}
}
}
func TestString(t *testing.T) {
var value string = "Hic sunt dracones"
packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, value, "String")
newValue, ok := packet.Value.(string)
if !ok || newValue != value {
t.Error("error during creating packet")
}
encodedPacket := packet.Bytes()
newPacket := DecodePacket(encodedPacket)
newValue, ok = newPacket.Value.(string)
if !ok || newValue != value {
t.Error("error during decoding packet")
}
}
func TestSequenceAndAppendChild(t *testing.T) {
p1 := NewString(ClassUniversal, TypePrimitive, TagOctetString, "HIC SVNT LEONES", "String")
p2 := NewString(ClassUniversal, TypePrimitive, TagOctetString, "HIC SVNT DRACONES", "String")
p3 := NewString(ClassUniversal, TypePrimitive, TagOctetString, "Terra Incognita", "String")
sequence := NewSequence("a sequence")
sequence.AppendChild(p1)
sequence.AppendChild(p2)
sequence.AppendChild(p3)
if len(sequence.Children) != 3 {
t.Error("wrong length for children array should be three =>", len(sequence.Children))
}
encodedSequence := sequence.Bytes()
decodedSequence := DecodePacket(encodedSequence)
if len(decodedSequence.Children) != 3 {
t.Error("wrong length for children array should be three =>", len(decodedSequence.Children))
}
}
func TestReadPacket(t *testing.T) {
packet := NewString(ClassUniversal, TypePrimitive, TagOctetString, "Ad impossibilia nemo tenetur", "string")
var buffer io.ReadWriter
buffer = new(bytes.Buffer)
buffer.Write(packet.Bytes())
newPacket, err := ReadPacket(buffer)
if err != nil {
t.Error("error during ReadPacket", err)
}
newPacket.ByteValue = nil
if !bytes.Equal(newPacket.ByteValue, packet.ByteValue) {
t.Error("packets should be the same")
}
}
func TestBinaryInteger(t *testing.T) {
// data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.7
var data = []struct {
v int64
e []byte
}{
{v: 0, e: []byte{0x02, 0x01, 0x00}},
{v: 127, e: []byte{0x02, 0x01, 0x7F}},
{v: 128, e: []byte{0x02, 0x02, 0x00, 0x80}},
{v: 256, e: []byte{0x02, 0x02, 0x01, 0x00}},
{v: -128, e: []byte{0x02, 0x01, 0x80}},
{v: -129, e: []byte{0x02, 0x02, 0xFF, 0x7F}},
}
for _, d := range data {
if b := NewInteger(ClassUniversal, TypePrimitive, TagInteger, int64(d.v), "").Bytes(); !bytes.Equal(d.e, b) {
t.Errorf("Wrong binary generated for %d : got % X, expected % X", d.v, b, d.e)
}
}
}
func TestBinaryOctetString(t *testing.T) {
// data src : http://luca.ntop.org/Teaching/Appunti/asn1.html 5.10
if !bytes.Equal([]byte{0x04, 0x08, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, NewString(ClassUniversal, TypePrimitive, TagOctetString, "\x01\x23\x45\x67\x89\xab\xcd\xef", "").Bytes()) {
t.Error("wrong binary generated")
}
}

11
vendor/gopkg.in/bufio.v1/.travis.yml generated vendored Normal file

@@ -0,0 +1,11 @@
language: go
go:
- 1.0
- 1.1
- 1.2
- tip
install:
- go get launchpad.net/gocheck
- go get gopkg.in/bufio.v1

27
vendor/gopkg.in/bufio.v1/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2013 The bufio Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

2
vendor/gopkg.in/bufio.v1/Makefile generated vendored Normal file

@@ -0,0 +1,2 @@
all:
	go test gopkg.in/bufio.v1

4
vendor/gopkg.in/bufio.v1/README.md generated vendored Normal file

@@ -0,0 +1,4 @@
bufio
=====
This is a fork of the http://golang.org/pkg/bufio/ package. It adds a `ReadN` method that allows reading the next `n` bytes from the internal buffer without allocating an intermediate buffer. This method works just like the [Buffer.Next](http://golang.org/pkg/bytes/#Buffer.Next) method, but has a slightly different signature.
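A hedged sketch of ReadN as defined in bufio.go below (the input string and read size are illustrative):

package main

import (
	"fmt"
	"strings"

	bufio "gopkg.in/bufio.v1"
)

func main() {
	r := bufio.NewReader(strings.NewReader("hello world"))

	// ReadN returns a slice into the Reader's internal buffer;
	// it is only valid until the next read call.
	head, err := r.ReadN(5)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("first %d bytes: %q\n", len(head), head)
}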

413
vendor/gopkg.in/bufio.v1/buffer.go generated vendored Normal file

@@ -0,0 +1,413 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
// Simple byte buffer for marshaling data.
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
lastRead readOp // last read operation, so that Unread* can work correctly.
}
// The readOp constants describe the last action performed on
// the buffer, so that UnreadRune and UnreadByte can
// check for invalid usage.
type readOp int
const (
opInvalid readOp = iota // Non-read operation.
opReadRune // Read rune.
opRead // Any other read operation.
)
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("bytes.Buffer: too large")
// Bytes returns a slice of the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
// returned slice, the contents of the buffer will change provided there
// are no intervening method calls on the Buffer.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
return "<nil>"
}
return string(b.buf[b.off:])
}
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Truncate discards all but the first n unread bytes from the buffer.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
b.lastRead = opInvalid
switch {
case n < 0 || n > b.Len():
panic("bytes.Buffer: truncation out of range")
case n == 0:
// Reuse buffer space.
b.off = 0
}
b.buf = b.buf[0 : b.off+n]
}
// Reset resets the buffer so it has no content.
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) grow(n int) int {
m := b.Len()
// If buffer is empty, reset to recover space.
if m == 0 && b.off != 0 {
b.Truncate(0)
}
if len(b.buf)+n > cap(b.buf) {
var buf []byte
if b.buf == nil && n <= len(b.bootstrap) {
buf = b.bootstrap[0:]
} else if m+n <= cap(b.buf)/2 {
// We can slide things down instead of allocating a new
// slice. We only need m+n <= cap(b.buf) to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
copy(b.buf[:], b.buf[b.off:])
buf = b.buf[:m]
} else {
// not enough space anywhere
buf = makeSlice(2*cap(b.buf) + n)
copy(buf, b.buf[b.off:])
}
b.buf = buf
b.off = 0
}
b.buf = b.buf[0 : b.off+m+n]
return b.off + m
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) Grow(n int) {
if n < 0 {
panic("bytes.Buffer.Grow: negative count")
}
m := b.grow(n)
b.buf = b.buf[0:m]
}
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lastRead = opInvalid
m := b.grow(len(p))
return copy(b.buf[m:], p), nil
}
// WriteString appends the contents of s to the buffer, growing the buffer as
// needed. The return value n is the length of s; err is always nil. If the
// buffer becomes too large, WriteString will panic with ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
b.lastRead = opInvalid
m := b.grow(len(s))
return copy(b.buf[m:], s), nil
}
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
// If buffer is empty, reset to recover space.
if b.off >= len(b.buf) {
b.Truncate(0)
}
for {
if free := cap(b.buf) - len(b.buf); free < MinRead {
// not enough space at end
newBuf := b.buf
if b.off+free < MinRead {
// not enough space using beginning of buffer;
// double buffer capacity
newBuf = makeSlice(2*cap(b.buf) + MinRead)
}
copy(newBuf, b.buf[b.off:])
b.buf = newBuf[:len(b.buf)-b.off]
b.off = 0
}
m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
b.buf = b.buf[0 : len(b.buf)+m]
n += int64(m)
if e == io.EOF {
break
}
if e != nil {
return n, e
}
}
return n, nil // err is EOF, so return nil explicitly
}
// makeSlice allocates a slice of size n. If the allocation fails, it panics
// with ErrTooLarge.
func makeSlice(n int) []byte {
// If the make fails, give a known error.
defer func() {
if recover() != nil {
panic(ErrTooLarge)
}
}()
return make([]byte, n)
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
b.lastRead = opInvalid
if b.off < len(b.buf) {
nBytes := b.Len()
m, e := w.Write(b.buf[b.off:])
if m > nBytes {
panic("bytes.Buffer.WriteTo: invalid Write count")
}
b.off += m
n = int64(m)
if e != nil {
return n, e
}
// all bytes should have been written, by definition of
// Write method in io.Writer
if m != nBytes {
return n, io.ErrShortWrite
}
}
// Buffer is now empty; reset.
b.Truncate(0)
return
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match bufio.Writer's
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
b.lastRead = opInvalid
m := b.grow(1)
b.buf[m] = c
return nil
}
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
// buffer, returning its length and an error, which is always nil but is
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
if r < utf8.RuneSelf {
b.WriteByte(byte(r))
return 1, nil
}
n = utf8.EncodeRune(b.runeBytes[0:], r)
b.Write(b.runeBytes[0:n])
return n, nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
if len(p) == 0 {
return
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
if n > 0 {
b.lastRead = opRead
}
return
}
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by Read.
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
b.lastRead = opInvalid
m := b.Len()
if n > m {
n = m
}
data := b.buf[b.off : b.off+n]
b.off += n
if n > 0 {
b.lastRead = opRead
}
return data
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (c byte, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
return 0, io.EOF
}
c = b.buf[b.off]
b.off++
b.lastRead = opRead
return c, nil
}
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
return 0, 0, io.EOF
}
b.lastRead = opReadRune
c := b.buf[b.off]
if c < utf8.RuneSelf {
b.off++
return rune(c), 1, nil
}
r, n := utf8.DecodeRune(b.buf[b.off:])
b.off += n
return r, n, nil
}
// UnreadRune unreads the last rune returned by ReadRune.
// If the most recent read or write operation on the buffer was
// not a ReadRune, UnreadRune returns an error. (In this regard
// it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Buffer) UnreadRune() error {
if b.lastRead != opReadRune {
return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
}
b.lastRead = opInvalid
if b.off > 0 {
_, n := utf8.DecodeLastRune(b.buf[0:b.off])
b.off -= n
}
return nil
}
// UnreadByte unreads the last byte returned by the most recent
// read operation. If write has happened since the last read, UnreadByte
// returns an error.
func (b *Buffer) UnreadByte() error {
if b.lastRead != opReadRune && b.lastRead != opRead {
return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
}
b.lastRead = opInvalid
if b.off > 0 {
b.off--
}
return nil
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
slice, err := b.readSlice(delim)
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
return
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
i := bytes.IndexByte(b.buf[b.off:], delim)
end := b.off + i + 1
if i < 0 {
end = len(b.buf)
err = io.EOF
}
line = b.buf[b.off:end]
b.off = end
b.lastRead = opRead
return line, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
slice, err := b.readSlice(delim)
return string(slice), err
}
// NewBuffer creates and initializes a new Buffer using buf as its initial
// contents. It is intended to prepare a Buffer to read existing data. It
// can also be used to size the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new Buffer using string s as its
// initial contents. It is intended to prepare a buffer to read an existing
// string.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
return &Buffer{buf: []byte(s)}
}
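A hedged sketch of the Buffer API above (NewBufferString, Next, and String all come from this file; the input is illustrative):

package main

import (
	"fmt"

	bufio "gopkg.in/bufio.v1"
)

func main() {
	// Next returns the next n bytes without copying and advances the
	// read offset; String shows what remains unread.
	buf := bufio.NewBufferString("hello world")
	head := buf.Next(5)
	fmt.Printf("%s |%s\n", head, buf.String()) // prints "hello | world"
}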

527
vendor/gopkg.in/bufio.v1/buffer_test.go generated vendored Normal file

@@ -0,0 +1,527 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
import (
"bytes"
"io"
"math/rand"
"runtime"
"testing"
"unicode/utf8"
)
const N = 10000 // make this bigger for a larger (and slower) test
var data string // test data for write tests
var testBytes []byte // test data; same as data but as a slice.
func init() {
testBytes = make([]byte, N)
for i := 0; i < N; i++ {
testBytes[i] = 'a' + byte(i%26)
}
data = string(testBytes)
}
// Verify that contents of buf match the string s.
func check(t *testing.T, testname string, buf *Buffer, s string) {
bytes := buf.Bytes()
str := buf.String()
if buf.Len() != len(bytes) {
t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
}
if buf.Len() != len(str) {
t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
}
if buf.Len() != len(s) {
t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
}
if string(bytes) != s {
t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
}
}
// Fill buf through n writes of string fus.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.WriteString(fus)
if m != len(fus) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += fus
check(t, testname+" (fill 4)", buf, s)
}
return s
}
// Fill buf through n writes of byte slice fub.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.Write(fub)
if m != len(fub) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += string(fub)
check(t, testname+" (fill 4)", buf, s)
}
return s
}
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(testBytes)
check(t, "NewBuffer", buf, data)
}
func TestNewBufferString(t *testing.T) {
buf := NewBufferString(data)
check(t, "NewBufferString", buf, data)
}
// Empty buf through repeated reads into fub.
// The initial contents of buf corresponds to the string s.
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
check(t, testname+" (empty 1)", buf, s)
for {
n, err := buf.Read(fub)
if n == 0 {
break
}
if err != nil {
t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
}
s = s[n:]
check(t, testname+" (empty 3)", buf, s)
}
check(t, testname+" (empty 4)", buf, "")
}
func TestBasicOperations(t *testing.T) {
var buf Buffer
for i := 0; i < 5; i++ {
check(t, "TestBasicOperations (1)", &buf, "")
buf.Reset()
check(t, "TestBasicOperations (2)", &buf, "")
buf.Truncate(0)
check(t, "TestBasicOperations (3)", &buf, "")
n, err := buf.Write([]byte(data[0:1]))
if n != 1 {
t.Errorf("wrote 1 byte, but n == %d", n)
}
if err != nil {
t.Errorf("err should always be nil, but err == %s", err)
}
check(t, "TestBasicOperations (4)", &buf, "a")
buf.WriteByte(data[1])
check(t, "TestBasicOperations (5)", &buf, "ab")
n, err = buf.Write([]byte(data[2:26]))
if n != 24 {
t.Errorf("wrote 25 bytes, but n == %d", n)
}
check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
buf.Truncate(26)
check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
buf.Truncate(20)
check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
buf.WriteByte(data[1])
c, err := buf.ReadByte()
if err != nil {
t.Error("ReadByte unexpected eof")
}
if c != data[1] {
t.Errorf("ReadByte wrong value c=%v", c)
}
c, err = buf.ReadByte()
if err == nil {
t.Error("ReadByte unexpected not eof")
}
}
}
func TestLargeStringWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
}
check(t, "TestLargeStringWrites (3)", &buf, "")
}
func TestLargeByteWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
}
check(t, "TestLargeByteWrites (3)", &buf, "")
}
func TestLargeStringReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
}
check(t, "TestLargeStringReads (3)", &buf, "")
}
func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
}
check(t, "TestLargeByteReads (3)", &buf, "")
}
func TestMixedReadsAndWrites(t *testing.T) {
var buf Buffer
s := ""
for i := 0; i < 50; i++ {
wlen := rand.Intn(len(data))
if i%2 == 0 {
s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen])
} else {
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
}
rlen := rand.Intn(len(data))
fub := make([]byte, rlen)
n, _ := buf.Read(fub)
s = s[n:]
}
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
}
func TestNil(t *testing.T) {
var b *Buffer
if b.String() != "<nil>" {
t.Errorf("expected <nil>; got %q", b.String())
}
}
func TestReadFrom(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
b.ReadFrom(&buf)
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
}
}
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
buf.WriteTo(&b)
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
}
}
func TestRuneIO(t *testing.T) {
const NRune = 1000
// Build a test slice while we write the data
b := make([]byte, utf8.UTFMax*NRune)
var buf Buffer
n := 0
for r := rune(0); r < NRune; r++ {
size := utf8.EncodeRune(b[n:], r)
nbytes, err := buf.WriteRune(r)
if err != nil {
t.Fatalf("WriteRune(%U) error: %s", r, err)
}
if nbytes != size {
t.Fatalf("WriteRune(%U) expected %d, got %d", r, size, nbytes)
}
n += size
}
b = b[0:n]
// Check the resulting bytes
if !bytes.Equal(buf.Bytes(), b) {
t.Fatalf("incorrect result from WriteRune: %q not %q", buf.Bytes(), b)
}
p := make([]byte, utf8.UTFMax)
// Read it back with ReadRune
for r := rune(0); r < NRune; r++ {
size := utf8.EncodeRune(p, r)
nr, nbytes, err := buf.ReadRune()
if nr != r || nbytes != size || err != nil {
t.Fatalf("ReadRune(%U) got %U,%d not %U,%d (err=%s)", r, nr, nbytes, r, size, err)
}
}
// Check that UnreadRune works
buf.Reset()
buf.Write(b)
for r := rune(0); r < NRune; r++ {
r1, size, _ := buf.ReadRune()
if err := buf.UnreadRune(); err != nil {
t.Fatalf("UnreadRune(%U) got error %q", r, err)
}
r2, nbytes, err := buf.ReadRune()
if r1 != r2 || r1 != r || nbytes != size || err != nil {
t.Fatalf("ReadRune(%U) after UnreadRune got %U,%d not %U,%d (err=%s)", r, r2, nbytes, r, size, err)
}
}
}
func TestNext(t *testing.T) {
b := []byte{0, 1, 2, 3, 4}
tmp := make([]byte, 5)
for i := 0; i <= 5; i++ {
for j := i; j <= 5; j++ {
for k := 0; k <= 6; k++ {
// 0 <= i <= j <= 5; 0 <= k <= 6
// Check that if we start with a buffer
// of length j at offset i and ask for
// Next(k), we get the right bytes.
buf := NewBuffer(b[0:j])
n, _ := buf.Read(tmp[0:i])
if n != i {
t.Fatalf("Read %d returned %d", i, n)
}
bb := buf.Next(k)
want := k
if want > j-i {
want = j - i
}
if len(bb) != want {
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
}
for l, v := range bb {
if v != byte(l+i) {
t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
}
}
}
}
}
}
var readBytesTests = []struct {
buffer string
delim byte
expected []string
err error
}{
{"", 0, []string{""}, io.EOF},
{"a\x00", 0, []string{"a\x00"}, nil},
{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
{"hello\x01world", 1, []string{"hello\x01"}, nil},
{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
}
func TestReadBytes(t *testing.T) {
for _, test := range readBytesTests {
buf := NewBufferString(test.buffer)
var err error
for _, expected := range test.expected {
var bytes []byte
bytes, err = buf.ReadBytes(test.delim)
if string(bytes) != expected {
t.Errorf("expected %q, got %q", expected, bytes)
}
if err != nil {
break
}
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
}
}
func TestReadString(t *testing.T) {
for _, test := range readBytesTests {
buf := NewBufferString(test.buffer)
var err error
for _, expected := range test.expected {
var s string
s, err = buf.ReadString(test.delim)
if s != expected {
t.Errorf("expected %q, got %q", expected, s)
}
if err != nil {
break
}
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
}
}
func BenchmarkReadString(b *testing.B) {
const n = 32 << 10
data := make([]byte, n)
data[n-1] = 'x'
b.SetBytes(int64(n))
for i := 0; i < b.N; i++ {
buf := NewBuffer(data)
_, err := buf.ReadString('x')
if err != nil {
b.Fatal(err)
}
}
}
func TestGrow(t *testing.T) {
x := []byte{'x'}
y := []byte{'y'}
tmp := make([]byte, 72)
for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
xBytes := bytes.Repeat(x, startLen)
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
buf := NewBuffer(xBytes)
// If we read, this affects buf.off, which is good to test.
readBytes, _ := buf.Read(tmp)
buf.Grow(growLen)
yBytes := bytes.Repeat(y, growLen)
// Check no allocation occurs in write, as long as we're single-threaded.
var m1, m2 runtime.MemStats
runtime.ReadMemStats(&m1)
buf.Write(yBytes)
runtime.ReadMemStats(&m2)
if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
t.Errorf("allocation occurred during write")
}
// Check that buffer has correct data.
if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
t.Errorf("bad initial data at %d %d", startLen, growLen)
}
if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
t.Errorf("bad written data at %d %d", startLen, growLen)
}
}
}
}
// Was a bug: used to give EOF reading empty slice at EOF.
func TestReadEmptyAtEOF(t *testing.T) {
b := new(Buffer)
slice := make([]byte, 0)
n, err := b.Read(slice)
if err != nil {
t.Errorf("read error: %v", err)
}
if n != 0 {
t.Errorf("wrong count; got %d want 0", n)
}
}
func TestBufferUnreadByte(t *testing.T) {
b := new(Buffer)
b.WriteString("abcdefghijklmnopqrstuvwxyz")
_, err := b.ReadBytes('m')
if err != nil {
t.Fatalf("ReadBytes: %v", err)
}
err = b.UnreadByte()
if err != nil {
t.Fatalf("UnreadByte: %v", err)
}
c, err := b.ReadByte()
if err != nil {
t.Fatalf("ReadByte: %v", err)
}
if c != 'm' {
t.Errorf("ReadByte = %q; want %q", c, 'm')
}
}
// Tests that we occasionally compact. Issue 5154.
func TestBufferGrowth(t *testing.T) {
var b Buffer
buf := make([]byte, 1024)
b.Write(buf[0:1])
var cap0 int
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
if i == 0 {
cap0 = b.Cap()
}
}
cap1 := b.Cap()
// (*Buffer).grow allows for 2x capacity slop before sliding,
// so set our error threshold at 3x.
if cap1 > cap0*3 {
t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
}
}
// From Issue 5154.
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf[0:1])
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
}
}
}
// Check that we don't compact too often. From Issue 5154.
func BenchmarkBufferFullSmallReads(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf)
for b.Len()+20 < b.Cap() {
b.Write(buf[:10])
}
for i := 0; i < 5<<10; i++ {
b.Read(buf[:1])
b.Write(buf[:1])
}
}
}

728
vendor/gopkg.in/bufio.v1/bufio.go generated vendored Normal file

@@ -0,0 +1,728 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
// object, creating another object (Reader or Writer) that also implements
// the interface but provides buffering and some help for textual I/O.
package bufio
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
const (
defaultBufSize = 4096
)
var (
ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
ErrBufferFull = errors.New("bufio: buffer full")
ErrNegativeCount = errors.New("bufio: negative count")
)
// Buffered input.
// Reader implements buffering for an io.Reader object.
type Reader struct {
buf []byte
rd io.Reader
r, w int
err error
lastByte int
lastRuneSize int
}
const minReadBufferSize = 16
const maxConsecutiveEmptyReads = 100
// NewReaderSize returns a new Reader whose buffer has at least the specified
// size. If the argument io.Reader is already a Reader with large enough
// size, it returns the underlying Reader.
func NewReaderSize(rd io.Reader, size int) *Reader {
// Is it already a Reader?
b, ok := rd.(*Reader)
if ok && len(b.buf) >= size {
return b
}
if size < minReadBufferSize {
size = minReadBufferSize
}
r := new(Reader)
r.reset(make([]byte, size), rd)
return r
}
// NewReader returns a new Reader whose buffer has the default size.
func NewReader(rd io.Reader) *Reader {
return NewReaderSize(rd, defaultBufSize)
}
// Reset discards any buffered data, resets all state, and switches
// the buffered reader to read from r.
func (b *Reader) Reset(r io.Reader) {
b.reset(b.buf, r)
}
func (b *Reader) reset(buf []byte, r io.Reader) {
*b = Reader{
buf: buf,
rd: r,
lastByte: -1,
lastRuneSize: -1,
}
}
var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
// fill reads a new chunk into the buffer.
func (b *Reader) fill() {
// Slide existing data to beginning.
if b.r > 0 {
copy(b.buf, b.buf[b.r:b.w])
b.w -= b.r
b.r = 0
}
if b.w >= len(b.buf) {
panic("bufio: tried to fill full buffer")
}
// Read new data: try a limited number of times.
for i := maxConsecutiveEmptyReads; i > 0; i-- {
n, err := b.rd.Read(b.buf[b.w:])
if n < 0 {
panic(errNegativeRead)
}
b.w += n
if err != nil {
b.err = err
return
}
if n > 0 {
return
}
}
b.err = io.ErrNoProgress
}
func (b *Reader) readErr() error {
err := b.err
b.err = nil
return err
}
// Peek returns the next n bytes without advancing the reader. The bytes stop
// being valid at the next read call. If Peek returns fewer than n bytes, it
// also returns an error explaining why the read is short. The error is
// ErrBufferFull if n is larger than b's buffer size.
func (b *Reader) Peek(n int) ([]byte, error) {
if n < 0 {
return nil, ErrNegativeCount
}
if n > len(b.buf) {
return nil, ErrBufferFull
}
// 0 <= n <= len(b.buf)
for b.w-b.r < n && b.err == nil {
b.fill() // b.w-b.r < len(b.buf) => buffer is not full
}
m := b.w - b.r
if m > n {
m = n
}
var err error
if m < n {
err = b.readErr()
if err == nil {
err = ErrBufferFull
}
}
return b.buf[b.r : b.r+m], err
}
// Read reads data into p.
// It returns the number of bytes read into p.
// It calls Read at most once on the underlying Reader,
// hence n may be less than len(p).
// At EOF, the count will be zero and err will be io.EOF.
func (b *Reader) Read(p []byte) (n int, err error) {
n = len(p)
if n == 0 {
return 0, b.readErr()
}
if b.r == b.w {
if b.err != nil {
return 0, b.readErr()
}
if len(p) >= len(b.buf) {
// Large read, empty buffer.
// Read directly into p to avoid copy.
n, b.err = b.rd.Read(p)
if n < 0 {
panic(errNegativeRead)
}
if n > 0 {
b.lastByte = int(p[n-1])
b.lastRuneSize = -1
}
return n, b.readErr()
}
b.fill() // buffer is empty
if b.w == b.r {
return 0, b.readErr()
}
}
if n > b.w-b.r {
n = b.w - b.r
}
copy(p[0:n], b.buf[b.r:])
b.r += n
b.lastByte = int(b.buf[b.r-1])
b.lastRuneSize = -1
return n, nil
}
// ReadByte reads and returns a single byte.
// If no byte is available, returns an error.
func (b *Reader) ReadByte() (c byte, err error) {
b.lastRuneSize = -1
for b.r == b.w {
if b.err != nil {
return 0, b.readErr()
}
b.fill() // buffer is empty
}
c = b.buf[b.r]
b.r++
b.lastByte = int(c)
return c, nil
}
// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
func (b *Reader) UnreadByte() error {
if b.lastByte < 0 || b.r == 0 && b.w > 0 {
return ErrInvalidUnreadByte
}
// b.r > 0 || b.w == 0
if b.r > 0 {
b.r--
} else {
// b.r == 0 && b.w == 0
b.w = 1
}
b.buf[b.r] = byte(b.lastByte)
b.lastByte = -1
b.lastRuneSize = -1
return nil
}
// ReadRune reads a single UTF-8 encoded Unicode character and returns the
// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
func (b *Reader) ReadRune() (r rune, size int, err error) {
for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
b.fill() // b.w-b.r < len(buf) => buffer is not full
}
b.lastRuneSize = -1
if b.r == b.w {
return 0, 0, b.readErr()
}
r, size = rune(b.buf[b.r]), 1
if r >= 0x80 {
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
}
b.r += size
b.lastByte = int(b.buf[b.r-1])
b.lastRuneSize = size
return r, size, nil
}
// UnreadRune unreads the last rune. If the most recent read operation on
// the buffer was not a ReadRune, UnreadRune returns an error. (In this
// regard it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Reader) UnreadRune() error {
if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
return ErrInvalidUnreadRune
}
b.r -= b.lastRuneSize
b.lastByte = -1
b.lastRuneSize = -1
return nil
}
// Buffered returns the number of bytes that can be read from the current buffer.
func (b *Reader) Buffered() int { return b.w - b.r }
// ReadSlice reads until the first occurrence of delim in the input,
// returning a slice pointing at the bytes in the buffer.
// The bytes stop being valid at the next read.
// If ReadSlice encounters an error before finding a delimiter,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
// ReadBytes or ReadString instead.
// ReadSlice returns err != nil if and only if line does not end in delim.
func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
for {
// Search buffer.
if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
line = b.buf[b.r : b.r+i+1]
b.r += i + 1
break
}
// Pending error?
if b.err != nil {
line = b.buf[b.r:b.w]
b.r = b.w
err = b.readErr()
break
}
// Buffer full?
if n := b.Buffered(); n >= len(b.buf) {
b.r = b.w
line = b.buf
err = ErrBufferFull
break
}
b.fill() // buffer is not full
}
// Handle last byte, if any.
if i := len(line) - 1; i >= 0 {
b.lastByte = int(line[i])
}
return
}
// ReadN tries to read exactly n bytes.
// The bytes stop being valid at the next read call.
// If ReadN encounters an error before reading n bytes,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadN fails with error ErrBufferFull if the buffer fills
// without reading N bytes.
// Because the data returned from ReadN will be overwritten
// by the next I/O operation, most clients should use
// ReadBytes or ReadString instead.
func (b *Reader) ReadN(n int) ([]byte, error) {
for b.Buffered() < n {
if b.err != nil {
buf := b.buf[b.r:b.w]
b.r = b.w
return buf, b.readErr()
}
// Buffer is full?
if b.Buffered() >= len(b.buf) {
b.r = b.w
return b.buf, ErrBufferFull
}
b.fill()
}
buf := b.buf[b.r : b.r+n]
b.r += n
return buf, nil
}
// ReadLine is a low-level line-reading primitive. Most callers should use
// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
//
// ReadLine tries to return a single line, not including the end-of-line bytes.
// If the line was too long for the buffer then isPrefix is set and the
// beginning of the line is returned. The rest of the line will be returned
// from future calls. isPrefix will be false when returning the last fragment
// of the line. The returned buffer is only valid until the next call to
// ReadLine. ReadLine either returns a non-nil line or it returns an error,
// never both.
//
// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
// No indication or error is given if the input ends without a final line end.
// Calling UnreadByte after ReadLine will always unread the last byte read
// (possibly a character belonging to the line end) even if that byte is not
// part of the line returned by ReadLine.
func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
line, err = b.ReadSlice('\n')
if err == ErrBufferFull {
// Handle the case where "\r\n" straddles the buffer.
if len(line) > 0 && line[len(line)-1] == '\r' {
// Put the '\r' back on buf and drop it from line.
// Let the next call to ReadLine check for "\r\n".
if b.r == 0 {
// should be unreachable
panic("bufio: tried to rewind past start of buffer")
}
b.r--
line = line[:len(line)-1]
}
return line, true, nil
}
if len(line) == 0 {
if err != nil {
line = nil
}
return
}
err = nil
if line[len(line)-1] == '\n' {
drop := 1
if len(line) > 1 && line[len(line)-2] == '\r' {
drop = 2
}
line = line[:len(line)-drop]
}
return
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
// Use ReadSlice to look for array,
// accumulating full buffers.
var frag []byte
var full [][]byte
err = nil
for {
var e error
frag, e = b.ReadSlice(delim)
if e == nil { // got final fragment
break
}
if e != ErrBufferFull { // unexpected error
err = e
break
}
// Make a copy of the buffer.
buf := make([]byte, len(frag))
copy(buf, frag)
full = append(full, buf)
}
// Allocate new buffer to hold the full pieces and the fragment.
n := 0
for i := range full {
n += len(full[i])
}
n += len(frag)
// Copy full pieces and fragment in.
buf := make([]byte, n)
n = 0
for i := range full {
n += copy(buf[n:], full[i])
}
copy(buf[n:], frag)
return buf, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadString(delim byte) (line string, err error) {
bytes, err := b.ReadBytes(delim)
line = string(bytes)
return line, err
}
// WriteTo implements io.WriterTo.
func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
n, err = b.writeBuf(w)
if err != nil {
return
}
if r, ok := b.rd.(io.WriterTo); ok {
m, err := r.WriteTo(w)
n += m
return n, err
}
if w, ok := w.(io.ReaderFrom); ok {
m, err := w.ReadFrom(b.rd)
n += m
return n, err
}
if b.w-b.r < len(b.buf) {
b.fill() // buffer not full
}
for b.r < b.w {
// b.r < b.w => buffer is not empty
m, err := b.writeBuf(w)
n += m
if err != nil {
return n, err
}
b.fill() // buffer is empty
}
if b.err == io.EOF {
b.err = nil
}
return n, b.readErr()
}
// writeBuf writes the Reader's buffer to the writer.
func (b *Reader) writeBuf(w io.Writer) (int64, error) {
n, err := w.Write(b.buf[b.r:b.w])
if n < b.r-b.w {
panic(errors.New("bufio: writer did not write all data"))
}
b.r += n
return int64(n), err
}
// buffered output
// Writer implements buffering for an io.Writer object.
// If an error occurs writing to a Writer, no more data will be
// accepted and all subsequent writes will return the error.
// After all data has been written, the client should call the
// Flush method to guarantee all data has been forwarded to
// the underlying io.Writer.
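//
// A minimal usage sketch (error handling elided for brevity):
//
//	w := bufio.NewWriter(os.Stdout)
//	fmt.Fprintln(w, "hello, world")
//	w.Flush() // flush, or the buffered bytes may never reach os.Stdout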
type Writer struct {
err error
buf []byte
n int
wr io.Writer
}
// NewWriterSize returns a new Writer whose buffer has at least the specified
// size. If the argument io.Writer is already a Writer with large enough
// size, it returns the underlying Writer.
func NewWriterSize(w io.Writer, size int) *Writer {
// Is it already a Writer?
b, ok := w.(*Writer)
if ok && len(b.buf) >= size {
return b
}
if size <= 0 {
size = defaultBufSize
}
return &Writer{
buf: make([]byte, size),
wr: w,
}
}
// NewWriter returns a new Writer whose buffer has the default size.
func NewWriter(w io.Writer) *Writer {
return NewWriterSize(w, defaultBufSize)
}
// Reset discards any unflushed buffered data, clears any error, and
// resets b to write its output to w.
func (b *Writer) Reset(w io.Writer) {
b.err = nil
b.n = 0
b.wr = w
}
// Flush writes any buffered data to the underlying io.Writer.
func (b *Writer) Flush() error {
err := b.flush()
return err
}
func (b *Writer) flush() error {
if b.err != nil {
return b.err
}
if b.n == 0 {
return nil
}
n, err := b.wr.Write(b.buf[0:b.n])
if n < b.n && err == nil {
err = io.ErrShortWrite
}
if err != nil {
if n > 0 && n < b.n {
copy(b.buf[0:b.n-n], b.buf[n:b.n])
}
b.n -= n
b.err = err
return err
}
b.n = 0
return nil
}
// Available returns how many bytes are unused in the buffer.
func (b *Writer) Available() int { return len(b.buf) - b.n }
// Buffered returns the number of bytes that have been written into the current buffer.
func (b *Writer) Buffered() int { return b.n }
// Write writes the contents of p into the buffer.
// It returns the number of bytes written.
// If nn < len(p), it also returns an error explaining
// why the write is short.
func (b *Writer) Write(p []byte) (nn int, err error) {
for len(p) > b.Available() && b.err == nil {
var n int
if b.Buffered() == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, b.err = b.wr.Write(p)
} else {
n = copy(b.buf[b.n:], p)
b.n += n
b.flush()
}
nn += n
p = p[n:]
}
if b.err != nil {
return nn, b.err
}
n := copy(b.buf[b.n:], p)
b.n += n
nn += n
return nn, nil
}
// WriteByte writes a single byte.
func (b *Writer) WriteByte(c byte) error {
if b.err != nil {
return b.err
}
if b.Available() <= 0 && b.flush() != nil {
return b.err
}
b.buf[b.n] = c
b.n++
return nil
}
// WriteRune writes a single Unicode code point, returning
// the number of bytes written and any error.
func (b *Writer) WriteRune(r rune) (size int, err error) {
if r < utf8.RuneSelf {
err = b.WriteByte(byte(r))
if err != nil {
return 0, err
}
return 1, nil
}
if b.err != nil {
return 0, b.err
}
n := b.Available()
if n < utf8.UTFMax {
if b.flush(); b.err != nil {
return 0, b.err
}
n = b.Available()
if n < utf8.UTFMax {
// Can only happen if buffer is silly small.
return b.WriteString(string(r))
}
}
size = utf8.EncodeRune(b.buf[b.n:], r)
b.n += size
return size, nil
}
// WriteString writes a string.
// It returns the number of bytes written.
// If the count is less than len(s), it also returns an error explaining
// why the write is short.
func (b *Writer) WriteString(s string) (int, error) {
nn := 0
for len(s) > b.Available() && b.err == nil {
n := copy(b.buf[b.n:], s)
b.n += n
nn += n
s = s[n:]
b.flush()
}
if b.err != nil {
return nn, b.err
}
n := copy(b.buf[b.n:], s)
b.n += n
nn += n
return nn, nil
}
// ReadFrom implements io.ReaderFrom.
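//
// In practice this lets io.Copy fill the buffer without an extra
// intermediate allocation, e.g. (a sketch; src is any io.Reader):
//
//	n, err := io.Copy(w, src) // io.Copy detects io.ReaderFrom and calls w.ReadFrom(src)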
func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
if b.Buffered() == 0 {
if w, ok := b.wr.(io.ReaderFrom); ok {
return w.ReadFrom(r)
}
}
var m int
for {
if b.Available() == 0 {
if err1 := b.flush(); err1 != nil {
return n, err1
}
}
nr := 0
for nr < maxConsecutiveEmptyReads {
m, err = r.Read(b.buf[b.n:])
if m != 0 || err != nil {
break
}
nr++
}
if nr == maxConsecutiveEmptyReads {
return n, io.ErrNoProgress
}
b.n += m
n += int64(m)
if err != nil {
break
}
}
if err == io.EOF {
// If we filled the buffer exactly, flush pre-emptively.
if b.Available() == 0 {
err = b.flush()
} else {
err = nil
}
}
return n, err
}
// buffered input and output
// ReadWriter stores pointers to a Reader and a Writer.
// It implements io.ReadWriter.
type ReadWriter struct {
*Reader
*Writer
}
// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
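//
// For example, to get buffered reads and writes over a single connection
// (a sketch; conn is any io.ReadWriter, such as a net.Conn):
//
//	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))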
func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
return &ReadWriter{r, w}
}

1418
vendor/gopkg.in/bufio.v1/bufio_test.go generated vendored Normal file

File diff suppressed because it is too large

9
vendor/gopkg.in/bufio.v1/export_test.go generated vendored Normal file

@@ -0,0 +1,9 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
func (b *Buffer) Cap() int {
return cap(b.buf)
}

5
vendor/gopkg.in/ini.v1/.gitignore generated vendored Normal file

@@ -0,0 +1,5 @@
testdata/conf_out.ini
ini.sublime-project
ini.sublime-workspace
testdata/conf_reflect.ini
.idea

16
vendor/gopkg.in/ini.v1/.travis.yml generated vendored Normal file

@@ -0,0 +1,16 @@
sudo: false
language: go
go:
- 1.4
- 1.5
- 1.6
- tip
script:
- go get -v github.com/smartystreets/goconvey
- go test -v -cover -race
notifications:
email:
- u@gogs.io

191
vendor/gopkg.in/ini.v1/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

12
vendor/gopkg.in/ini.v1/Makefile generated vendored Normal file

@@ -0,0 +1,12 @@
.PHONY: build test bench vet
build: vet bench
test:
go test -v -cover -race
bench:
go test -v -cover -race -test.bench=. -test.benchmem
vet:
go vet

703
vendor/gopkg.in/ini.v1/README.md generated vendored Normal file

@@ -0,0 +1,703 @@
INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
===
![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
Package ini provides INI file read and write functionality in Go.
[简体中文](README_ZH.md)
## Feature
- Load multiple data sources (`[]byte` or file) with overwrites.
- Read with recursive values.
- Read with parent-child sections.
- Read with auto-increment key names.
- Read with multiple-line values.
- Read with tons of helper methods.
- Read and convert values to Go types.
- Read and **WRITE** comments of sections and keys.
- Manipulate sections, keys and comments with ease.
- Keep sections and keys in order as you parse and save.
## Installation
To use a tagged revision:
go get gopkg.in/ini.v1
To use with latest changes:
go get github.com/go-ini/ini
Please add `-u` flag to update in the future.
### Testing
If you want to test on your machine, please apply `-t` flag:
go get -t gopkg.in/ini.v1
Please add `-u` flag to update in the future.
## Getting Started
### Loading from data sources
A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many data sources as you want**. Passing any other type will simply return an error.
```go
cfg, err := ini.Load([]byte("raw data"), "filename")
```
Or start with an empty object:
```go
cfg := ini.Empty()
```
When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
```go
err := cfg.Append("other file", []byte("other raw data"))
```
If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
```go
cfg, err := ini.LooseLoad("filename", "filename_404")
```
The cool thing is, whenever a previously missing file becomes available to load, the next call to the `Reload` method will pick it up as usual.
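For example, a rough sketch of that flow (the file names are placeholders):
```go
cfg, err := ini.LooseLoad("filename", "filename_404")
// ... later, once "filename_404" has been created on disk:
err = cfg.Reload() // this time both files are loaded
```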
#### Ignore cases of key name
When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
```go
cfg, err := ini.InsensitiveLoad("filename")
//...
// sec1 and sec2 are the exactly same section object
sec1, err := cfg.GetSection("Section")
sec2, err := cfg.GetSection("SecTIOn")
// key1 and key2 are the exactly same key object
key1, err := cfg.GetKey("Key")
key2, err := cfg.GetKey("KeY")
```
#### MySQL-like boolean key
MySQL's configuration allows a key without value as follows:
```ini
[mysqld]
...
skip-host-cache
skip-name-resolve
```
By default, such a key is considered to have a missing value. But if you know you're going to deal with those cases, you can enable an advanced load option:
```go
cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
```
The value of such keys is always `true`, and when you save to a file, they are kept in the same format as they were read.
### Working with sections
To get a section, you would need to:
```go
section, err := cfg.GetSection("section name")
```
For a shortcut to the default section, just give an empty string as the name:
```go
section, err := cfg.GetSection("")
```
When you're pretty sure the section exists, the following code could make your life easier:
```go
section := cfg.Section("")
```
What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
To create a new section:
```go
err := cfg.NewSection("new section")
```
To get a list of sections or section names:
```go
sections := cfg.Sections()
names := cfg.SectionStrings()
```
### Working with keys
To get a key under a section:
```go
key, err := cfg.Section("").GetKey("key name")
```
Same rule applies to key operations:
```go
key := cfg.Section("").Key("key name")
```
To check if a key exists:
```go
yes := cfg.Section("").HasKey("key name")
```
To create a new key:
```go
err := cfg.Section("").NewKey("name", "value")
```
To get a list of keys or key names:
```go
keys := cfg.Section("").Keys()
names := cfg.Section("").KeyStrings()
```
To get a clone hash of keys and corresponding values:
```go
hash := cfg.Section("").KeysHash()
```
### Working with values
To get a string value:
```go
val := cfg.Section("").Key("key name").String()
```
To validate key value on the fly:
```go
val := cfg.Section("").Key("key name").Validate(func(in string) string {
if len(in) == 0 {
return "default"
}
return in
})
```
If you do not want any auto-transformation (such as recursive read) for the values, you can get the raw value directly (this way you get much better performance):
```go
val := cfg.Section("").Key("key name").Value()
```
To check if raw value exists:
```go
yes := cfg.Section("").HasValue("test value")
```
To get value with types:
```go
// For boolean values:
// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
v, err = cfg.Section("").Key("BOOL").Bool()
v, err = cfg.Section("").Key("FLOAT64").Float64()
v, err = cfg.Section("").Key("INT").Int()
v, err = cfg.Section("").Key("INT64").Int64()
v, err = cfg.Section("").Key("UINT").Uint()
v, err = cfg.Section("").Key("UINT64").Uint64()
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
v = cfg.Section("").Key("BOOL").MustBool()
v = cfg.Section("").Key("FLOAT64").MustFloat64()
v = cfg.Section("").Key("INT").MustInt()
v = cfg.Section("").Key("INT64").MustInt64()
v = cfg.Section("").Key("UINT").MustUint()
v = cfg.Section("").Key("UINT64").MustUint64()
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
// Methods starting with Must also accept one argument as the default value,
// used when the key is not found or the value fails to parse to the given type.
// The exception is MustString, which always requires a default value.
v = cfg.Section("").Key("String").MustString("default")
v = cfg.Section("").Key("BOOL").MustBool(true)
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
v = cfg.Section("").Key("INT").MustInt(10)
v = cfg.Section("").Key("INT64").MustInt64(99)
v = cfg.Section("").Key("UINT").MustUint(3)
v = cfg.Section("").Key("UINT64").MustUint64(6)
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
```
What if my value is three lines long?
```ini
[advance]
ADDRESS = """404 road,
NotFound, State, 5000
Earth"""
```
Not a problem!
```go
cfg.Section("advance").Key("ADDRESS").String()
/* --- start ---
404 road,
NotFound, State, 5000
Earth
------ end --- */
```
That's cool, how about continuation lines?
```ini
[advance]
two_lines = how about \
continuation lines?
lots_of_lines = 1 \
2 \
3 \
4
```
Piece of cake!
```go
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
```
Well, I hate continuation lines, how do I disable that?
```go
cfg, err := ini.LoadSources(ini.LoadOptions{
IgnoreContinuation: true,
}, "filename")
```
Holy crap!
Note that quotes (single or double) around values will be stripped:
```ini
foo = "some value" // foo: some value
bar = 'some value' // bar: some value
```
That's all? Hmm, no.
#### Helper methods of working with values
To get value with given candidates:
```go
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
```
The default value is returned if the value of the key is not among the candidates you give, and the default value does not need to be one of the candidates.
To validate value in a given range:
```go
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
```
##### Auto-split values into a slice
To use zero value of type for invalid inputs:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
vals = cfg.Section("").Key("STRINGS").Strings(",")
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
vals = cfg.Section("").Key("INTS").Ints(",")
vals = cfg.Section("").Key("INT64S").Int64s(",")
vals = cfg.Section("").Key("UINTS").Uints(",")
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
vals = cfg.Section("").Key("TIMES").Times(",")
```
To exclude invalid values from the result slice:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [2.2]
vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
vals = cfg.Section("").Key("INTS").ValidInts(",")
vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
vals = cfg.Section("").Key("UINTS").ValidUints(",")
vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
vals = cfg.Section("").Key("TIMES").ValidTimes(",")
```
Or to return nothing but an error when there are invalid inputs:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> error
vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
vals = cfg.Section("").Key("INTS").StrictInts(",")
vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
vals = cfg.Section("").Key("UINTS").StrictUints(",")
vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
vals = cfg.Section("").Key("TIMES").StrictTimes(",")
```
### Save your configuration
Finally, it's time to save your configuration somewhere.
A typical way to save configuration is writing it to a file:
```go
// ...
err = cfg.SaveTo("my.ini")
err = cfg.SaveToIndent("my.ini", "\t")
```
Another way to save is writing to an `io.Writer`:
```go
// ...
cfg.WriteTo(writer)
cfg.WriteToIndent(writer, "\t")
```
## Advanced Usage
### Recursive Values
For all key values there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or in the default section; `%(<name>)s` will be replaced by the corresponding value (or an empty string if the key is not found). You can nest this syntax up to 99 levels deep.
```ini
NAME = ini
[author]
NAME = Unknwon
GITHUB = https://github.com/%(NAME)s
[package]
FULL_NAME = github.com/go-ini/%(NAME)s
```
```go
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
```
### Parent-child Sections
You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will look it up in the parent section, and so on, until there is no parent section left.
```ini
NAME = ini
VERSION = v1
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
[package]
CLONE_URL = https://%(IMPORT_PATH)s
[package.sub]
```
```go
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
```
#### Retrieve parent keys available to a child section
```go
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
```
### Auto-increment Key Names
If a key's name is `-` in the data source, it is treated as the special auto-increment syntax: generated key names start from 1, and every section keeps its own counter.
```ini
[features]
-: Support read/write comments of keys and sections
-: Support auto-increment of key names
-: Support load multiple files to overwrite key values
```
```go
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
```
### Map To Struct
Want a more object-oriented way to play with INI? Cool.
```ini
Name = Unknwon
age = 21
Male = true
Born = 1993-01-01T20:17:05Z
[Note]
Content = Hi is a good man!
Cities = HangZhou, Boston
```
```go
type Note struct {
Content string
Cities []string
}
type Person struct {
Name string
Age int `ini:"age"`
Male bool
Born time.Time
Note
Created time.Time `ini:"-"`
}
func main() {
cfg, err := ini.Load("path/to/ini")
// ...
p := new(Person)
err = cfg.MapTo(p)
// ...
// Things can be simpler.
err = ini.MapTo(p, "path/to/ini")
// ...
// Just map a section? Fine.
n := new(Note)
err = cfg.Section("Note").MapTo(n)
// ...
}
```
Can I have a default value for a field? Absolutely.
Assign it before you map to the struct. The value is kept as it is if the key is not present or has the wrong type.
```go
// ...
p := &Person{
Name: "Joe",
}
// ...
```
It's really cool, but what's the point if you can't give me my file back from the struct?
### Reflect From Struct
Why not?
```go
type Embeded struct {
Dates []time.Time `delim:"|"`
Places []string `ini:"places,omitempty"`
None []int `ini:",omitempty"`
}
type Author struct {
Name string `ini:"NAME"`
Male bool
Age int
GPA float64
NeverMind string `ini:"-"`
*Embeded
}
func main() {
a := &Author{"Unknwon", true, 21, 2.8, "",
&Embeded{
[]time.Time{time.Now(), time.Now()},
[]string{"HangZhou", "Boston"},
[]int{},
}}
cfg := ini.Empty()
err = ini.ReflectFrom(cfg, a)
// ...
}
```
So, what do I get?
```ini
NAME = Unknwon
Male = true
Age = 21
GPA = 2.8
[Embeded]
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
places = HangZhou,Boston
```
#### Name Mapper
To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) to map between struct field names and actual section and key names.
There are 2 built-in name mappers:
- `AllCapsUnderscore`: converts the field name to the `ALL_CAPS_UNDERSCORE` format, then matches the section or key.
- `TitleUnderscore`: converts the field name to the `title_underscore` format, then matches the section or key.
To use them:
```go
type Info struct {
PackageName string
}
func main() {
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
// ...
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
// ...
info := new(Info)
cfg.NameMapper = ini.AllCapsUnderscore
err = cfg.MapTo(info)
// ...
}
```
Same rules of name mapper apply to `ini.ReflectFromWithMapper` function.
#### Value Mapper
To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
```go
type Env struct {
Foo string `ini:"foo"`
}
func main() {
cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
cfg.ValueMapper = os.ExpandEnv
// ...
env := &Env{}
err = cfg.Section("env").MapTo(env)
}
```
This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
#### Other Notes On Map/Reflect
Any embedded struct is treated as its own section by default, and there are no automatic parent-child relations in the map/reflect feature:
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child
}
type Config struct {
City string
Parent
}
```
Example configuration:
```ini
City = Boston
[Parent]
Name = Unknwon
[Child]
Age = 21
```
What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child `ini:"Parent"`
}
type Config struct {
City string
Parent
}
```
Example configuration:
```ini
City = Boston
[Parent]
Name = Unknwon
Age = 21
```
## Getting Help
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
- [File An Issue](https://github.com/go-ini/ini/issues/new)
## FAQs
### What does `BlockMode` field do?
By default, the library lets you both read and write values, so it uses a lock to keep your data safe. But if you are sure you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
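For example (a minimal sketch):
```go
cfg, err := ini.Load("my.ini")
// ...
cfg.BlockMode = false // read-only usage from here on; skips locking for faster reads
```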
### Why another INI library?
Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
Making those changes meant breaking the API, so it was safer to keep the new code in another place and start using `gopkg.in` to version the package. (PS: the import path is shorter, too.)
## License
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

690
vendor/gopkg.in/ini.v1/README_ZH.md generated vendored Normal file

@@ -0,0 +1,690 @@
本包提供了 Go 语言中读写 INI 文件的功能。
## 功能特性
- 支持覆盖加载多个数据源(`[]byte` 或文件)
- 支持递归读取键值
- 支持读取父子分区
- 支持读取自增键名
- 支持读取多行的键值
- 支持大量辅助方法
- 支持在读取时直接转换为 Go 语言类型
- 支持读取和 **写入** 分区和键的注释
- 轻松操作分区、键值和注释
- 在保存文件时分区和键值会保持原有的顺序
## 下载安装
使用一个特定版本:
go get gopkg.in/ini.v1
使用最新版:
go get github.com/go-ini/ini
如需更新请添加 `-u` 选项。
### 测试安装
如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
go get -t gopkg.in/ini.v1
如需更新请添加 `-u` 选项。
## 开始使用
### 从数据源加载
一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
```go
cfg, err := ini.Load([]byte("raw data"), "filename")
```
或者从一个空白的文件开始:
```go
cfg := ini.Empty()
```
当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
```go
err := cfg.Append("other file", []byte("other raw data"))
```
当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
```go
cfg, err := ini.LooseLoad("filename", "filename_404")
```
更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
#### 忽略键名的大小写
有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
```go
cfg, err := ini.InsensitiveLoad("filename")
//...
// sec1 和 sec2 指向同一个分区对象
sec1, err := cfg.GetSection("Section")
sec2, err := cfg.GetSection("SecTIOn")
// key1 和 key2 指向同一个键对象
key1, err := cfg.GetKey("Key")
key2, err := cfg.GetKey("KeY")
```
#### 类似 MySQL 配置中的布尔值键
MySQL 的配置文件中会出现没有具体值的布尔类型的键:
```ini
[mysqld]
...
skip-host-cache
skip-name-resolve
```
默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
```go
cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
```
这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
### 操作分区Section
获取指定分区:
```go
section, err := cfg.GetSection("section name")
```
如果您想要获取默认分区,则可以用空字符串代替分区名:
```go
section, err := cfg.GetSection("")
```
当您非常确定某个分区是存在的,可以使用以下简便方法:
```go
section := cfg.Section("")
```
如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
创建一个分区:
```go
err := cfg.NewSection("new section")
```
获取所有分区对象或名称:
```go
sections := cfg.Sections()
names := cfg.SectionStrings()
```
### 操作键Key
获取某个分区下的键:
```go
key, err := cfg.Section("").GetKey("key name")
```
和分区一样,您也可以直接获取键而忽略错误处理:
```go
key := cfg.Section("").Key("key name")
```
判断某个键是否存在:
```go
yes := cfg.Section("").HasKey("key name")
```
创建一个新的键:
```go
err := cfg.Section("").NewKey("name", "value")
```
获取分区下的所有键或键名:
```go
keys := cfg.Section("").Keys()
names := cfg.Section("").KeyStrings()
```
获取分区下的所有键值对的克隆:
```go
hash := cfg.Section("").KeysHash()
```
### 操作键值Value
获取一个类型为字符串string的值
```go
val := cfg.Section("").Key("key name").String()
```
获取值的同时通过自定义函数进行处理验证:
```go
val := cfg.Section("").Key("key name").Validate(func(in string) string {
if len(in) == 0 {
return "default"
}
return in
})
```
如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
```go
val := cfg.Section("").Key("key name").Value()
```
判断某个原值是否存在:
```go
yes := cfg.Section("").HasValue("test value")
```
获取其它类型的值:
```go
// 布尔值的规则:
// true 当值为1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
// false 当值为0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
v, err = cfg.Section("").Key("BOOL").Bool()
v, err = cfg.Section("").Key("FLOAT64").Float64()
v, err = cfg.Section("").Key("INT").Int()
v, err = cfg.Section("").Key("INT64").Int64()
v, err = cfg.Section("").Key("UINT").Uint()
v, err = cfg.Section("").Key("UINT64").Uint64()
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
v = cfg.Section("").Key("BOOL").MustBool()
v = cfg.Section("").Key("FLOAT64").MustFloat64()
v = cfg.Section("").Key("INT").MustInt()
v = cfg.Section("").Key("INT64").MustInt64()
v = cfg.Section("").Key("UINT").MustUint()
v = cfg.Section("").Key("UINT64").MustUint64()
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
// 当键不存在或者转换失败时,则会直接返回该默认值。
// 但是MustString 方法必须传递一个默认值。
v = cfg.Section("").Key("String").MustString("default")
v = cfg.Section("").Key("BOOL").MustBool(true)
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
v = cfg.Section("").Key("INT").MustInt(10)
v = cfg.Section("").Key("INT64").MustInt64(99)
v = cfg.Section("").Key("UINT").MustUint(3)
v = cfg.Section("").Key("UINT64").MustUint64(6)
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
```
如果我的值有好多行怎么办?
```ini
[advance]
ADDRESS = """404 road,
NotFound, State, 5000
Earth"""
```
嗯哼?小 case
```go
cfg.Section("advance").Key("ADDRESS").String()
/* --- start ---
404 road,
NotFound, State, 5000
Earth
------ end --- */
```
赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
```ini
[advance]
two_lines = how about \
continuation lines?
lots_of_lines = 1 \
2 \
3 \
4
```
简直是小菜一碟!
```go
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
```
可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
```go
cfg, err := ini.LoadSources(ini.LoadOptions{
IgnoreContinuation: true,
}, "filename")
```
哇靠给力啊!
需要注意的是,值两侧的单引号会被自动剔除:
```ini
foo = "some value" // foo: some value
bar = 'some value' // bar: some value
```
这就是全部了?哈哈,当然不是。
#### 操作键值的辅助方法
获取键值时设定候选值:
```go
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
```
如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
验证获取的值是否在指定范围内:
```go
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
```
##### 自动分割键值到切片slice
当存在无效输入时,使用零值代替:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
vals = cfg.Section("").Key("STRINGS").Strings(",")
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
vals = cfg.Section("").Key("INTS").Ints(",")
vals = cfg.Section("").Key("INT64S").Int64s(",")
vals = cfg.Section("").Key("UINTS").Uints(",")
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
vals = cfg.Section("").Key("TIMES").Times(",")
```
从结果切片中剔除无效输入:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> [2.2]
vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
vals = cfg.Section("").Key("INTS").ValidInts(",")
vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
vals = cfg.Section("").Key("UINTS").ValidUints(",")
vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
vals = cfg.Section("").Key("TIMES").ValidTimes(",")
```
当存在无效输入时,直接返回错误:
```go
// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
// Input: how, 2.2, are, you -> error
vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
vals = cfg.Section("").Key("INTS").StrictInts(",")
vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
vals = cfg.Section("").Key("UINTS").StrictUints(",")
vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
vals = cfg.Section("").Key("TIMES").StrictTimes(",")
```
### 保存配置
终于到了这个时刻,是时候保存一下配置了。
比较原始的做法是输出配置到某个文件:
```go
// ...
err = cfg.SaveTo("my.ini")
err = cfg.SaveToIndent("my.ini", "\t")
```
另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
```go
// ...
cfg.WriteTo(writer)
cfg.WriteToIndent(writer, "\t")
```
### 高级用法
#### 递归读取键值
在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
```ini
NAME = ini
[author]
NAME = Unknwon
GITHUB = https://github.com/%(NAME)s
[package]
FULL_NAME = github.com/go-ini/%(NAME)s
```
```go
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
```
#### 读取父子分区
您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
```ini
NAME = ini
VERSION = v1
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
[package]
CLONE_URL = https://%(IMPORT_PATH)s
[package.sub]
```
```go
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
```
#### 获取上级父分区下的所有键名
```go
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
```
#### 读取自增键名
如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
```ini
[features]
-: Support read/write comments of keys and sections
-: Support auto-increment of key names
-: Support load multiple files to overwrite key values
```
```go
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
```
### 映射到结构
想要使用更加面向对象的方式玩转 INI 吗?好主意。
```ini
Name = Unknwon
age = 21
Male = true
Born = 1993-01-01T20:17:05Z
[Note]
Content = Hi is a good man!
Cities = HangZhou, Boston
```
```go
type Note struct {
Content string
Cities []string
}
type Person struct {
Name string
Age int `ini:"age"`
Male bool
Born time.Time
Note
Created time.Time `ini:"-"`
}
func main() {
cfg, err := ini.Load("path/to/ini")
// ...
p := new(Person)
err = cfg.MapTo(p)
// ...
// 一切竟可以如此的简单。
err = ini.MapTo(p, "path/to/ini")
// ...
// 嗯哼?只需要映射一个分区吗?
n := new(Note)
err = cfg.Section("Note").MapTo(n)
// ...
}
```
结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
```go
// ...
p := &Person{
Name: "Joe",
}
// ...
```
这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
### 从结构反射
可是,我有说不能吗?
```go
type Embeded struct {
Dates []time.Time `delim:"|"`
Places []string `ini:"places,omitempty"`
None []int `ini:",omitempty"`
}
type Author struct {
Name string `ini:"NAME"`
Male bool
Age int
GPA float64
NeverMind string `ini:"-"`
*Embeded
}
func main() {
a := &Author{"Unknwon", true, 21, 2.8, "",
&Embeded{
[]time.Time{time.Now(), time.Now()},
[]string{"HangZhou", "Boston"},
[]int{},
}}
cfg := ini.Empty()
err = ini.ReflectFrom(cfg, a)
// ...
}
```
瞧瞧,奇迹发生了。
```ini
NAME = Unknwon
Male = true
Age = 21
GPA = 2.8
[Embeded]
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
places = HangZhou,Boston
```
#### 名称映射器Name Mapper
为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
目前有 2 款内置的映射器:
- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
使用方法:
```go
type Info struct{
PackageName string
}
func main() {
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
// ...
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
// ...
info := new(Info)
cfg.NameMapper = ini.AllCapsUnderscore
err = cfg.MapTo(info)
// ...
}
```
使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
#### 值映射器Value Mapper
值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量:
```go
type Env struct {
Foo string `ini:"foo"`
}
func main() {
cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
cfg.ValueMapper = os.ExpandEnv
// ...
env := &Env{}
err = cfg.Section("env").MapTo(env)
}
```
本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。
#### 映射/反射的其它说明
任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child
}
type Config struct {
City string
Parent
}
```
示例配置文件:
```ini
City = Boston
[Parent]
Name = Unknwon
[Child]
Age = 21
```
很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
```go
type Child struct {
Age string
}
type Parent struct {
Name string
Child `ini:"Parent"`
}
type Config struct {
City string
Parent
}
```
示例配置文件:
```ini
City = Boston
[Parent]
Name = Unknwon
Age = 21
```
## 获取帮助
- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
- [创建工单](https://github.com/go-ini/ini/issues/new)
## 常见问题
### 字段 `BlockMode` 是什么?
默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
### 为什么要写另一个 INI 解析库?
许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)

32
vendor/gopkg.in/ini.v1/error.go generated vendored Normal file

@@ -0,0 +1,32 @@
// Copyright 2016 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
)
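// ErrDelimiterNotFound indicates that a line contains no key-value delimiter.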
type ErrDelimiterNotFound struct {
Line string
}
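// IsErrDelimiterNotFound reports whether err is of type ErrDelimiterNotFound.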
func IsErrDelimiterNotFound(err error) bool {
_, ok := err.(ErrDelimiterNotFound)
return ok
}
func (err ErrDelimiterNotFound) Error() string {
return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
}

501
vendor/gopkg.in/ini.v1/ini.go generated vendored Normal file

@@ -0,0 +1,501 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package ini provides INI file read and write functionality in Go.
package ini
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
const (
// Name for default section. You can use this constant or the string literal.
// In most cases, an empty string is all you need to access the section.
DEFAULT_SECTION = "DEFAULT"
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
_VERSION = "1.21.1"
)
// Version returns current package version literal.
func Version() string {
return _VERSION
}
var (
// Delimiter to determine or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows
// at package init time.
LineBreak = "\n"
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
// Indicate whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// Explicitly write DEFAULT section header
DefaultHeader = false
)
func init() {
if runtime.GOOS == "windows" {
LineBreak = "\r\n"
}
}
func inSlice(str string, s []string) bool {
for _, v := range s {
if str == v {
return true
}
}
return false
}
// dataSource is an interface that returns an object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
type bytesReadCloser struct {
reader io.Reader
}
func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
return rc.reader.Read(p)
}
func (rc *bytesReadCloser) Close() error {
return nil
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return &bytesReadCloser{bytes.NewReader(s.data)}, nil
}
// File represents a combination of one or more INI files in memory.
type File struct {
// Should make things safe, but sometimes doesn't matter.
BlockMode bool
// Make sure data is safe in multiple goroutines.
lock sync.RWMutex
// Allow combination of multiple data sources.
dataSources []dataSource
// Actual data is stored here.
sections map[string]*Section
// To keep data in order.
sectionList []string
options LoadOptions
NameMapper
ValueMapper
}
// newFile initializes File object with given data sources.
func newFile(dataSources []dataSource, opts LoadOptions) *File {
return &File{
BlockMode: true,
dataSources: dataSources,
sections: make(map[string]*Section),
sectionList: make([]string, 0, 10),
options: opts,
}
}
func parseDataSource(source interface{}) (dataSource, error) {
switch s := source.(type) {
case string:
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
}
}
type LoadOptions struct {
// Loose indicates whether the parser should ignore nonexistent files or return error.
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// AllowBooleanKeys indicates whether to allow boolean-type keys or to treat them as keys with missing values.
// This type of key is mostly used in my.cnf.
AllowBooleanKeys bool
}
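// LoadSources allows the caller to apply customized options while loading from one or more data sources.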
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
sources := make([]dataSource, len(others)+1)
sources[0], err = parseDataSource(source)
if err != nil {
return nil, err
}
for i := range others {
sources[i+1], err = parseDataSource(others[i])
if err != nil {
return nil, err
}
}
f := newFile(sources, opts)
if err = f.Reload(); err != nil {
return nil, err
}
return f, nil
}
// Load loads and parses from INI data sources.
// Arguments can be mixed of file name with string type, or raw data in []byte.
// It will return error if list contains nonexistent files.
func Load(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{}, source, others...)
}
// LooseLoad has exactly the same functionality as the Load function,
// except it ignores nonexistent files instead of returning an error.
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Loose: true}, source, others...)
}
// InsensitiveLoad has exactly the same functionality as the Load function,
// except it forces all section and key names to be lowercased.
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
// Empty returns an empty file object.
func Empty() *File {
// Ignore the error here; we are sure our data is good.
f, _ := Load([]byte(""))
return f
}
// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
return nil, errors.New("error creating new section: empty section name")
} else if f.options.Insensitive && name != DEFAULT_SECTION {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if inSlice(name, f.sectionList) {
return f.sections[name], nil
}
f.sectionList = append(f.sectionList, name)
f.sections[name] = newSection(f, name)
return f.sections[name], nil
}
// NewSections creates a list of sections.
func (f *File) NewSections(names ...string) (err error) {
for _, name := range names {
if _, err = f.NewSection(name); err != nil {
return err
}
}
return nil
}
// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
if len(name) == 0 {
name = DEFAULT_SECTION
} else if f.options.Insensitive {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sec := f.sections[name]
if sec == nil {
return nil, fmt.Errorf("section '%s' does not exist", name)
}
return sec, nil
}
// Section assumes the named section exists and creates and returns an empty one when it does not.
func (f *File) Section(name string) *Section {
sec, err := f.GetSection(name)
if err != nil {
// Note: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
sec, _ = f.NewSection(name)
return sec
}
return sec
}
// Sections returns the list of sections.
func (f *File) Sections() []*Section {
sections := make([]*Section, len(f.sectionList))
for i := range f.sectionList {
sections[i] = f.Section(f.sectionList[i])
}
return sections
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
copy(list, f.sectionList)
return list
}
// DeleteSection deletes a section.
func (f *File) DeleteSection(name string) {
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if len(name) == 0 {
name = DEFAULT_SECTION
}
for i, s := range f.sectionList {
if s == name {
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
delete(f.sections, name)
return
}
}
}
func (f *File) reload(s dataSource) error {
r, err := s.ReadCloser()
if err != nil {
return err
}
defer r.Close()
return f.parse(r)
}
// Reload reloads and parses all data sources.
func (f *File) Reload() (err error) {
for _, s := range f.dataSources {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.options.Loose {
f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
}
return nil
}
// Append appends one or more data sources and reloads automatically.
func (f *File) Append(source interface{}, others ...interface{}) error {
ds, err := parseDataSource(source)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
for _, s := range others {
ds, err = parseDataSource(s)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
}
return f.Reload()
}
// WriteToIndent writes content into io.Writer with the given indentation.
// If PrettyFormat has been set to be true,
// it will align "=" sign with spaces under each section.
func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
equalSign := "="
if PrettyFormat {
equalSign = " = "
}
// Use buffer to make sure target is safe until finish encoding.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
sec := f.Section(sname)
if len(sec.Comment) > 0 {
if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
sec.Comment = "; " + sec.Comment
}
if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
return 0, err
}
}
if i > 0 || DefaultHeader {
if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return 0, err
}
} else {
// Write nothing if default section is empty
if len(sec.keyList) == 0 {
continue
}
}
// Count and generate alignment length and buffer spaces using the
// longest key. Keys may be modified if they contain certain characters so
// we need to take that into account in our calculation.
alignLength := 0
if PrettyFormat {
for _, kname := range sec.keyList {
keyLength := len(kname)
// First case will surround key by ` and second by """
if strings.ContainsAny(kname, "\"=:") {
keyLength += 2
} else if strings.Contains(kname, "`") {
keyLength += 6
}
if keyLength > alignLength {
alignLength = keyLength
}
}
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
if key.Comment[0] != '#' && key.Comment[0] != ';' {
key.Comment = "; " + key.Comment
}
if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
return 0, err
}
}
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
switch {
case key.isAutoIncrement:
kname = "-"
case strings.ContainsAny(kname, "\"=:"):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
if _, err = buf.WriteString(kname); err != nil {
return 0, err
}
if key.isBooleanType {
continue
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
val := key.value
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
return 0, err
}
}
// Put a line between sections
if _, err = buf.WriteString(LineBreak); err != nil {
return 0, err
}
}
return buf.WriteTo(w)
}
// WriteTo writes file content into io.Writer.
func (f *File) WriteTo(w io.Writer) (int64, error) {
return f.WriteToIndent(w, "")
}
// SaveToIndent writes content to the file system with the given value indentation.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because os.Create truncates the target file,
// it's safer to save to a temporary file location and rename it after we're done.
tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
defer os.Remove(tmpPath)
fw, err := os.Create(tmpPath)
if err != nil {
return err
}
if _, err = f.WriteToIndent(fw, indent); err != nil {
fw.Close()
return err
}
fw.Close()
// Remove old file and rename the new one.
os.Remove(filename)
return os.Rename(tmpPath, filename)
}
// SaveTo writes content to file system.
func (f *File) SaveTo(filename string) error {
return f.SaveToIndent(filename, "")
}
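The File API above (Section, Key, SetValue, WriteToIndent, SaveToIndent) composes into the usual read-modify-write flow. A minimal sketch, assuming the package is imported from its vendor path gopkg.in/ini.v1 and that the package-level Load function (referenced later in struct.go, not shown in this hunk) accepts a file path; "my.ini", "server" and "http_addr" are placeholder names:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// "my.ini" is a hypothetical path; Load returns an error if it is missing.
	cfg, err := ini.Load("my.ini")
	if err != nil {
		panic(err)
	}

	// Section and Key never return nil, so chained access is safe.
	addr := cfg.Section("server").Key("http_addr").MustString("0.0.0.0")
	fmt.Println("listen on", addr)

	// Update a value and write the file back, indenting keys of
	// non-default sections with a tab.
	cfg.Section("server").Key("http_addr").SetValue("127.0.0.1")
	if err := cfg.SaveToIndent("my.ini", "\t"); err != nil {
		panic(err)
	}
}
```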

633
vendor/gopkg.in/ini.v1/key.go generated vendored Normal file
View File

@@ -0,0 +1,633 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
"strconv"
"strings"
"time"
)
// Key represents a key under a section.
type Key struct {
s *Section
name string
value string
isAutoIncrement bool
isBooleanType bool
Comment string
}
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
// Name returns name of key.
func (k *Key) Name() string {
return k.name
}
// Value returns raw value of key for performance purpose.
func (k *Key) Value() string {
return k.value
}
// String returns string representation of value.
func (k *Key) String() string {
val := k.value
if k.s.f.ValueMapper != nil {
val = k.s.f.ValueMapper(val)
}
if strings.Index(val, "%") == -1 {
return val
}
for i := 0; i < _DEPTH_VALUES; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
break
}
// Take off leading '%(' and trailing ')s'.
noption := strings.TrimLeft(vr, "%(")
noption = strings.TrimRight(noption, ")s")
// Search in the same section.
nk, err := k.s.GetKey(noption)
if err != nil {
// Search again in default section.
nk, _ = k.s.f.Section("").GetKey(noption)
}
// Substitute by new value and take off leading '%(' and trailing ')s'.
val = strings.Replace(val, vr, nk.value, -1)
}
return val
}
// Validate accepts a validate function which can
// return a modified result as the key value.
func (k *Key) Validate(fn func(string) string) string {
return fn(k.String())
}
// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
return true, nil
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
return false, nil
}
return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
}
// Bool returns bool type value.
func (k *Key) Bool() (bool, error) {
return parseBool(k.String())
}
// Float64 returns float64 type value.
func (k *Key) Float64() (float64, error) {
return strconv.ParseFloat(k.String(), 64)
}
// Int returns int type value.
func (k *Key) Int() (int, error) {
return strconv.Atoi(k.String())
}
// Int64 returns int64 type value.
func (k *Key) Int64() (int64, error) {
return strconv.ParseInt(k.String(), 10, 64)
}
// Uint returns uint type value.
func (k *Key) Uint() (uint, error) {
u, e := strconv.ParseUint(k.String(), 10, 64)
return uint(u), e
}
// Uint64 returns uint64 type value.
func (k *Key) Uint64() (uint64, error) {
return strconv.ParseUint(k.String(), 10, 64)
}
// Duration returns time.Duration type value.
func (k *Key) Duration() (time.Duration, error) {
return time.ParseDuration(k.String())
}
// TimeFormat parses with given format and returns time.Time type value.
func (k *Key) TimeFormat(format string) (time.Time, error) {
return time.Parse(format, k.String())
}
// Time parses with RFC3339 format and returns time.Time type value.
func (k *Key) Time() (time.Time, error) {
return k.TimeFormat(time.RFC3339)
}
// MustString returns default value if key value is empty.
func (k *Key) MustString(defaultVal string) string {
val := k.String()
if len(val) == 0 {
k.value = defaultVal
return defaultVal
}
return val
}
// MustBool always returns value without error,
// it returns false if error occurs.
func (k *Key) MustBool(defaultVal ...bool) bool {
val, err := k.Bool()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatBool(defaultVal[0])
return defaultVal[0]
}
return val
}
// MustFloat64 always returns value without error,
// it returns 0.0 if error occurs.
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
val, err := k.Float64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
return defaultVal[0]
}
return val
}
// MustInt always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt(defaultVal ...int) int {
val, err := k.Int()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustInt64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt64(defaultVal ...int64) int64 {
val, err := k.Int64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustUint always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint(defaultVal ...uint) uint {
val, err := k.Uint()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustUint64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
val, err := k.Uint64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustDuration always returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
val, err := k.Duration()
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].String()
return defaultVal[0]
}
return val
}
// MustTimeFormat always parses with given format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
val, err := k.TimeFormat(format)
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].Format(format)
return defaultVal[0]
}
return val
}
// MustTime always parses with RFC3339 format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
return k.MustTimeFormat(time.RFC3339, defaultVal...)
}
// In always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) In(defaultVal string, candidates []string) string {
val := k.String()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InFloat64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
val := k.MustFloat64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt(defaultVal int, candidates []int) int {
val := k.MustInt()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
val := k.MustInt64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
val := k.MustUint()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
val := k.MustUint64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTimeFormat always parses with given format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
val := k.MustTimeFormat(format)
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTime always parses with RFC3339 format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
}
// RangeFloat64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
val := k.MustFloat64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt(defaultVal, min, max int) int {
val := k.MustInt()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
val := k.MustInt64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeTimeFormat checks if value with given format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
val := k.MustTimeFormat(format)
if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
return defaultVal
}
return val
}
// RangeTime checks if value with RFC3339 format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}
// Strings returns list of string divided by given delimiter.
func (k *Key) Strings(delim string) []string {
str := k.String()
if len(str) == 0 {
return []string{}
}
vals := strings.Split(str, delim)
for i := range vals {
vals[i] = strings.TrimSpace(vals[i])
}
return vals
}
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
vals, _ := k.getInts(delim, true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
vals, _ := k.getUints(delim, true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, true, false)
return vals
}
// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) Times(delim string) []time.Time {
return k.TimesFormat(time.RFC3339, delim)
}
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
vals, _ := k.getInts(delim, false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
vals, _ := k.getUints(delim, false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, false, false)
return vals
}
// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimes(delim string) []time.Time {
return k.ValidTimesFormat(time.RFC3339, delim)
}
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
return k.getFloat64s(delim, false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
return k.getInts(delim, false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
return k.getInt64s(delim, false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
return k.getUints(delim, false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
return k.getUint64s(delim, false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
return k.getTimesFormat(format, delim, false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
// getFloat64s returns list of float64 divided by given delimiter.
func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
strs := k.Strings(delim)
vals := make([]float64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseFloat(str, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getInts returns list of int divided by given delimiter.
func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
strs := k.Strings(delim)
vals := make([]int, 0, len(strs))
for _, str := range strs {
val, err := strconv.Atoi(str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getInt64s returns list of int64 divided by given delimiter.
func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
strs := k.Strings(delim)
vals := make([]int64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseInt(str, 10, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getUints returns list of uint divided by given delimiter.
func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
strs := k.Strings(delim)
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 0)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, uint(val))
}
}
return vals, nil
}
// getUint64s returns list of uint64 divided by given delimiter.
func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
strs := k.Strings(delim)
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
strs := k.Strings(delim)
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
val, err := time.Parse(format, str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// SetValue changes key value.
func (k *Key) SetValue(v string) {
if k.s.f.BlockMode {
k.s.f.lock.Lock()
defer k.s.f.lock.Unlock()
}
k.value = v
k.s.keysHash[k.name] = v
}
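The getters above fall into a few families: typed getters that report parse errors, Must* variants that fall back to an optional default (and write it back into the key), list and Strict* variants, and In*/Range* constraints. A short sketch of the differences, self-contained but with hypothetical file, section and key names:

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load("limits.ini") // hypothetical file
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("limits")

	// Typed getters report parse errors ...
	timeout, err := sec.Key("timeout").Duration()
	if err != nil {
		timeout = 30 * time.Second
	}

	// ... while Must* variants swallow the error, fall back to the optional
	// default, and also write that default back into the key.
	retries := sec.Key("retries").MustInt(3)

	// List helpers split on a delimiter; Strict* variants return the first
	// parse error instead of skipping or zeroing invalid elements.
	hosts := sec.Key("hosts").Strings(",")
	ports, err := sec.Key("ports").StrictInts(",")
	if err != nil {
		ports = []int{8080}
	}

	// Constrain values to a candidate set or an inclusive range.
	level := sec.Key("level").In("info", []string{"debug", "info", "warn"})
	workers := sec.Key("workers").RangeInt(4, 1, 64)

	fmt.Println(timeout, retries, hosts, ports, level, workers)
}
```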

325
vendor/gopkg.in/ini.v1/parser.go generated vendored Normal file
View File

@@ -0,0 +1,325 @@
// Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"unicode"
)
type tokenType int
const (
_TOKEN_INVALID tokenType = iota
_TOKEN_COMMENT
_TOKEN_SECTION
_TOKEN_KEY
)
type parser struct {
buf *bufio.Reader
isEOF bool
count int
comment *bytes.Buffer
}
func newParser(r io.Reader) *parser {
return &parser{
buf: bufio.NewReader(r),
count: 1,
comment: &bytes.Buffer{},
}
}
// BOM handles header of BOM-UTF8 format.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 3 {
return nil
} else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
p.buf.Read(mask)
}
return nil
}
func (p *parser) readUntil(delim byte) ([]byte, error) {
data, err := p.buf.ReadBytes(delim)
if err != nil {
if err == io.EOF {
p.isEOF = true
} else {
return nil, err
}
}
return data, nil
}
func cleanComment(in []byte) ([]byte, bool) {
i := bytes.IndexAny(in, "#;")
if i == -1 {
return nil, false
}
return in[i:], true
}
func readKeyName(in []byte) (string, int, error) {
line := string(in)
// Check if the key name is surrounded by quotes.
var keyQuote string
if line[0] == '"' {
if len(line) > 6 && string(line[0:3]) == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
// Get out key name
endIdx := -1
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
pos := strings.Index(line[startIdx:], keyQuote)
if pos == -1 {
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
}
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], "=:")
if i < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
endIdx = pos + i
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, "=:")
if endIdx < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := string(data)
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
comment, has := cleanComment([]byte(next[pos:]))
if has {
p.comment.Write(bytes.TrimSpace(comment))
}
break
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
}
}
return val, nil
}
func (p *parser) readContinuationLines(val string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := strings.TrimSpace(string(data))
if len(next) == 0 {
break
}
val += next
if val[len(val)-1] != '\\' {
break
}
val = val[:len(val)-1]
}
return val, nil
}
// hasSurroundedQuote checks whether the first and last characters
// are quotes \" or \'.
// It returns false if any other part also contains the same kind of quote.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
}
var valQuote string
if len(line) > 3 && string(line[0:3]) == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
}
if len(valQuote) > 0 {
startIdx := len(valQuote)
pos := strings.LastIndex(line[startIdx:], valQuote)
// Check for multi-line value
if pos == -1 {
return p.readMultilines(line, line[startIdx:], valQuote)
}
return line[startIdx : pos+startIdx], nil
}
// Won't be able to reach here if value only contains whitespace.
line = strings.TrimSpace(line)
// Check continuation lines when desired.
if !ignoreContinuation && line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
// Trim single quotes
if hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"') {
line = line[1 : len(line)-1]
}
return line, nil
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader)
if err = p.BOM(); err != nil {
return fmt.Errorf("BOM: %v", err)
}
// Ignore error because default section name is never empty string.
section, _ := f.NewSection(DEFAULT_SECTION)
var line []byte
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
return err
}
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
if len(line) == 0 {
continue
}
// Comments
if line[0] == '#' || line[0] == ';' {
// Note: we keep the trailing line break here,
// because it is needed when a second comment line is appended;
// it is cleaned up once at the end when assigned to a value.
p.comment.Write(line)
continue
}
// Section
if line[0] == '[' {
// Read to the next ']' (TODO: support quoted strings)
// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
closeIdx := bytes.LastIndex(line, []byte("]"))
if closeIdx == -1 {
return fmt.Errorf("unclosed section: %s", line)
}
name := string(line[1:closeIdx])
section, err = f.NewSection(name)
if err != nil {
return err
}
comment, has := cleanComment(line[closeIdx+1:])
if has {
p.comment.Write(comment)
}
section.Comment = strings.TrimSpace(p.comment.String())
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1
continue
}
kname, offset, err := readKeyName(line)
if err != nil {
// Treat as a boolean key when desired; the whole line is the key name.
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
key, err := section.NewKey(string(line), "true")
if err != nil {
return err
}
key.isBooleanType = true
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
continue
}
return err
}
// Auto increment.
isAutoIncr := false
if kname == "-" {
isAutoIncr = true
kname = "#" + strconv.Itoa(p.count)
p.count++
}
key, err := section.NewKey(kname, "")
if err != nil {
return err
}
key.isAutoIncrement = isAutoIncr
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
if err != nil {
return err
}
key.SetValue(value)
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
}
return nil
}
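The parser above handles quoted key names, triple-quoted multi-line values, backslash continuation lines, "-" auto-increment keys, and comments; %(name)s substitution is resolved later by Key.String. A sketch exercising those forms, assuming ini.Load also accepts a raw []byte source (as in upstream go-ini; the Load body is not part of this hunk):

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// A raw source exercising the parser: comments, a quoted key name, a
// triple-quoted multi-line value, a backslash continuation line,
// "-" auto-increment keys, and %(name)s substitution.
var src = []byte(`
; global comment
app_name = demo

[paths]
"log dir" = /var/log/demo
data = """line one
line two"""
search = /usr/local/bin, \
         /usr/bin

[seeds]
- = first
- = second

[urls]
home = http://%(app_name)s.example.com
`)

func main() {
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.Section("paths").Key("log dir").String()) // /var/log/demo
	fmt.Println(cfg.Section("paths").Key("data").String())    // both lines, newline preserved
	fmt.Println(cfg.Section("paths").Key("search").String())  // continuation lines joined
	fmt.Println(cfg.Section("seeds").KeyStrings())            // [#1 #2] -- auto-increment names
	fmt.Println(cfg.Section("urls").Key("home").String())     // %(app_name)s expanded from the default section
}
```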

206
vendor/gopkg.in/ini.v1/section.go generated vendored Normal file
View File

@@ -0,0 +1,206 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"errors"
"fmt"
"strings"
)
// Section represents a config section.
type Section struct {
f *File
Comment string
name string
keys map[string]*Key
keyList []string
keysHash map[string]string
}
func newSection(f *File, name string) *Section {
return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
}
// Name returns name of Section.
func (s *Section) Name() string {
return s.name
}
// NewKey creates a new key to given section.
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
} else if s.f.options.Insensitive {
name = strings.ToLower(name)
}
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
if inSlice(name, s.keyList) {
s.keys[name].value = val
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
s.keys[name] = &Key{
s: s,
name: name,
value: val,
}
s.keysHash[name] = val
return s.keys[name], nil
}
// GetKey returns key in section by given name.
func (s *Section) GetKey(name string) (*Key, error) {
// FIXME: change to section level lock?
if s.f.BlockMode {
s.f.lock.RLock()
}
if s.f.options.Insensitive {
name = strings.ToLower(name)
}
key := s.keys[name]
if s.f.BlockMode {
s.f.lock.RUnlock()
}
if key == nil {
// Check if it is a child-section.
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
return sec.GetKey(name)
} else {
break
}
}
return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
}
return key, nil
}
// HasKey returns true if section contains a key with given name.
func (s *Section) HasKey(name string) bool {
key, _ := s.GetKey(name)
return key != nil
}
// Haskey is a backwards-compatible name for HasKey.
func (s *Section) Haskey(name string) bool {
return s.HasKey(name)
}
// HasValue returns true if section contains given raw value.
func (s *Section) HasValue(value string) bool {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
for _, k := range s.keys {
if value == k.value {
return true
}
}
return false
}
// Key assumes named Key exists in section and returns a zero-value when not.
func (s *Section) Key(name string) *Key {
key, err := s.GetKey(name)
if err != nil {
// It's OK here because the only possible error is empty key name,
// but if it's empty, this piece of code won't be executed.
key, _ = s.NewKey(name, "")
return key
}
return key
}
// Keys returns list of keys of section.
func (s *Section) Keys() []*Key {
keys := make([]*Key, len(s.keyList))
for i := range s.keyList {
keys[i] = s.Key(s.keyList[i])
}
return keys
}
// ParentKeys returns list of keys of parent section.
func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
parentKeys = append(parentKeys, sec.Keys()...)
} else {
break
}
}
return parentKeys
}
// KeyStrings returns list of key names of section.
func (s *Section) KeyStrings() []string {
list := make([]string, len(s.keyList))
copy(list, s.keyList)
return list
}
// KeysHash returns keys hash consisting of names and values.
func (s *Section) KeysHash() map[string]string {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
hash := map[string]string{}
for key, value := range s.keysHash {
hash[key] = value
}
return hash
}
// DeleteKey deletes a key from section.
func (s *Section) DeleteKey(name string) {
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
for i, k := range s.keyList {
if k == name {
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
delete(s.keys, name)
return
}
}
}
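GetKey above falls back through dot-separated parent sections when a key is missing, and ParentKeys collects the inherited keys directly. A sketch of that lookup, under the same []byte-source assumption as before; section and key names are placeholders:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

var src = []byte(`
[database]
host = localhost
port = 5432

[database.replica]
host = replica.internal
`)

func main() {
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}

	replica := cfg.Section("database.replica")

	// "host" is overridden in the child section ...
	fmt.Println(replica.Key("host").String()) // replica.internal

	// ... while "port" is missing there, so GetKey walks up the dotted
	// section name and resolves it from the parent [database] section.
	fmt.Println(replica.Key("port").MustInt(0)) // 5432

	// ParentKeys lists the keys inherited from ancestor sections.
	for _, k := range replica.ParentKeys() {
		fmt.Println("inherited:", k.Name(), "=", k.Value())
	}
}
```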

431
vendor/gopkg.in/ini.v1/struct.go generated vendored Normal file
View File

@@ -0,0 +1,431 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"time"
"unicode"
)
// NameMapper represents a ini tag name mapper.
type NameMapper func(string) string
// Built-in name getters.
var (
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
AllCapsUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
}
newstr = append(newstr, unicode.ToUpper(chr))
}
return string(newstr)
}
// TitleUnderscore converts to format title_underscore.
TitleUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
chr -= ('A' - 'a')
}
newstr = append(newstr, chr)
}
return string(newstr)
}
)
func (s *Section) parseFieldName(raw, actual string) string {
if len(actual) > 0 {
return actual
}
if s.f.NameMapper != nil {
return s.f.NameMapper(raw)
}
return raw
}
func parseDelim(actual string) string {
if len(actual) > 0 {
return actual
}
return ","
}
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
strs := key.Strings(delim)
numVals := len(strs)
if numVals == 0 {
return nil
}
var vals interface{}
sliceOf := field.Type().Elem().Kind()
switch sliceOf {
case reflect.String:
vals = strs
case reflect.Int:
vals = key.Ints(delim)
case reflect.Int64:
vals = key.Int64s(delim)
case reflect.Uint:
vals = key.Uints(delim)
case reflect.Uint64:
vals = key.Uint64s(delim)
case reflect.Float64:
vals = key.Float64s(delim)
case reflectTime:
vals = key.Times(delim)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
switch sliceOf {
case reflect.String:
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
case reflect.Int:
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
case reflect.Int64:
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
case reflect.Uint:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
case reflect.Uint64:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
case reflect.Float64:
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
case reflectTime:
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
}
}
field.Set(slice)
return nil
}
// setWithProperType sets a proper value to the field based on its type,
// but it does not return an error when parsing fails,
// because we want to keep the default value that is already assigned to the struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {
return nil
}
field.SetString(key.String())
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
return nil
}
field.SetBool(boolVal)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
durationVal, err := key.Duration()
// Skip zero value
if err == nil && int(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
return nil
}
intVal, err := key.Int64()
if err != nil || intVal == 0 {
return nil
}
field.SetInt(intVal)
// byte is an alias for uint8, so supporting uint8 breaks support for byte
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
durationVal, err := key.Duration()
// Skip zero value
if err == nil && int(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
return nil
}
uintVal, err := key.Uint64()
if err != nil {
return nil
}
field.SetUint(uintVal)
case reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return nil
}
field.SetFloat(floatVal)
case reflectTime:
timeVal, err := key.Time()
if err != nil {
return nil
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
return setSliceWithProperType(key, field, delim)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
func (s *Section) mapTo(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
fieldName := s.parseFieldName(tpField.Name, opts[0])
if len(fieldName) == 0 || !field.CanSet() {
continue
}
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
isStruct := tpField.Type.Kind() == reflect.Struct
if isAnonymous {
field.Set(reflect.New(tpField.Type.Elem()))
}
if isAnonymous || isStruct {
if sec, err := s.f.GetSection(fieldName); err == nil {
if err = sec.mapTo(field); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
continue
}
}
if key, err := s.GetKey(fieldName); err == nil {
if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}
}
return nil
}
// MapTo maps section to given struct.
func (s *Section) MapTo(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
}
return s.mapTo(val)
}
// MapTo maps file to given struct.
func (f *File) MapTo(v interface{}) error {
return f.Section("").MapTo(v)
}
// MapTo maps data sources to given struct with name mapper.
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.MapTo(v)
}
// MapTo maps data sources to given struct.
func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}
// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
var buf bytes.Buffer
sliceOf := field.Type().Elem().Kind()
for i := 0; i < field.Len(); i++ {
switch sliceOf {
case reflect.String:
buf.WriteString(slice.Index(i).String())
case reflect.Int, reflect.Int64:
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
case reflect.Uint, reflect.Uint64:
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
case reflect.Float64:
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
case reflectTime:
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-1])
return nil
}
// reflectWithProperType does the opposite thing as setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
switch t.Kind() {
case reflect.String:
key.SetValue(field.String())
case reflect.Bool:
key.SetValue(fmt.Sprint(field.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
key.SetValue(fmt.Sprint(field.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
key.SetValue(fmt.Sprint(field.Uint()))
case reflect.Float32, reflect.Float64:
key.SetValue(fmt.Sprint(field.Float()))
case reflectTime:
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
case reflect.Slice:
return reflectSliceWithProperType(key, field, delim)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
// CR: copied from encoding/json/encode.go with modifications of time.Time support.
// TODO: add more test coverage.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflectTime:
return v.Interface().(time.Time).IsZero()
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (s *Section) reflectFrom(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
opts := strings.SplitN(tag, ",", 2)
if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
continue
}
fieldName := s.parseFieldName(tpField.Name, opts[0])
if len(fieldName) == 0 || !field.CanSet() {
continue
}
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
// Note: The only error here is section doesn't exist.
sec, err := s.f.GetSection(fieldName)
if err != nil {
// Note: fieldName can never be empty here, ignore error.
sec, _ = s.f.NewSection(fieldName)
}
if err = sec.reflectFrom(field); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
}
continue
}
// Note: Same reason as section.
key, err := s.GetKey(fieldName)
if err != nil {
key, _ = s.NewKey(fieldName, "")
}
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
}
}
return nil
}
// ReflectFrom reflects section from given struct.
func (s *Section) ReflectFrom(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot reflect from non-pointer struct")
}
return s.reflectFrom(val)
}
// ReflectFrom reflects file from given struct.
func (f *File) ReflectFrom(v interface{}) error {
return f.Section("").ReflectFrom(v)
}
// ReflectFrom reflects data sources from given struct with name mapper.
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
cfg.NameMapper = mapper
return cfg.ReflectFrom(v)
}
// ReflectFrom reflects data sources from given struct.
func ReflectFrom(cfg *File, v interface{}) error {
return ReflectFromWithMapper(cfg, v, nil)
}
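MapTo and ReflectFrom above drive struct mapping through the `ini`, `delim` and `,omitempty` tag options. A sketch of a round trip under the same assumptions as the previous examples; the Server type and its fields are made up for illustration:

```go
package main

import (
	"fmt"
	"os"
	"time"

	"gopkg.in/ini.v1"
)

// Server is a made-up type: the `ini` tag overrides the matched key name,
// `delim` sets the list separator, and `ini:"-"` excludes a field.
type Server struct {
	Name     string        `ini:"name"`
	Hosts    []string      `ini:"hosts" delim:","`
	Timeout  time.Duration `ini:"timeout"`
	Internal string        `ini:"-"`
}

var src = []byte(`
[server]
name    = demo
hosts   = a.example.com,b.example.com
timeout = 30s
`)

func main() {
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}

	// Section -> struct.
	srv := new(Server)
	if err := cfg.Section("server").MapTo(srv); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", srv)

	// Struct -> section; note that ReflectFrom writes the Duration back
	// as its integer (nanosecond) representation.
	srv.Hosts = append(srv.Hosts, "c.example.com")
	if err := cfg.Section("server").ReflectFrom(srv); err != nil {
		panic(err)
	}
	_, _ = cfg.WriteTo(os.Stdout)
}
```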

2
vendor/gopkg.in/macaron.v1/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,2 @@
macaron.sublime-project
macaron.sublime-workspace

13
vendor/gopkg.in/macaron.v1/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,13 @@
sudo: false
language: go
go:
- 1.3
- 1.4
- 1.5
script: go test -v -cover -race
notifications:
email:
- u@gogs.io

191
vendor/gopkg.in/macaron.v1/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

94
vendor/gopkg.in/macaron.v1/README.md generated vendored Normal file
View File

@@ -0,0 +1,94 @@
Macaron [![Build Status](https://travis-ci.org/go-macaron/macaron.svg?branch=v1)](https://travis-ci.org/go-macaron/macaron) [![](http://gocover.io/_badge/github.com/go-macaron/macaron)](http://gocover.io/github.com/go-macaron/macaron)
=======================
![Macaron Logo](https://raw.githubusercontent.com/go-macaron/macaron/v1/macaronlogo.png)
Package macaron is a highly productive and modular web framework in Go.
##### Current version: 0.8.0
## Getting Started
The minimum requirement of Go is **1.3**.
To install Macaron:
go get gopkg.in/macaron.v1
The very basic usage of Macaron:
```go
package main
import "gopkg.in/macaron.v1"
func main() {
m := macaron.Classic()
m.Get("/", func() string {
return "Hello world!"
})
m.Run()
}
```
## Features
- Powerful routing with suburl.
- Flexible routes combinations.
- Unlimited nested group routers.
- Directly integrate with existing services.
- Dynamically change template files at runtime.
- Allow using in-memory templates and static files.
- Easy to plugin/unplugin features with modular design.
- Handy dependency injection powered by [inject](https://github.com/codegangsta/inject).
- Better router layer and less reflection for better performance.
## Middlewares
Middlewares allow you to easily plug features into (and out of) your Macaron applications; a usage sketch follows the list below.
There are already many [middlewares](https://github.com/go-macaron) to simplify your work:
- render - Go template engine
- static - Serves static files
- [gzip](https://github.com/go-macaron/gzip) - Gzip compression to all responses
- [binding](https://github.com/go-macaron/binding) - Request data binding and validation
- [i18n](https://github.com/go-macaron/i18n) - Internationalization and Localization
- [cache](https://github.com/go-macaron/cache) - Cache manager
- [session](https://github.com/go-macaron/session) - Session manager
- [csrf](https://github.com/go-macaron/csrf) - Generates and validates csrf tokens
- [captcha](https://github.com/go-macaron/captcha) - Captcha service
- [pongo2](https://github.com/go-macaron/pongo2) - Pongo2 template engine support
- [sockets](https://github.com/go-macaron/sockets) - WebSockets channels binding
- [bindata](https://github.com/go-macaron/bindata) - Embed binary data as static and template files
- [toolbox](https://github.com/go-macaron/toolbox) - Health check, pprof, profile and statistic services
- [oauth2](https://github.com/go-macaron/oauth2) - OAuth 2.0 backend
- [switcher](https://github.com/go-macaron/switcher) - Multiple-site support
- [method](https://github.com/go-macaron/method) - HTTP method override
- [permissions2](https://github.com/xyproto/permissions2) - Cookies, users and permissions
- [renders](https://github.com/go-macaron/renders) - Beego-like render engine (Macaron has a built-in template engine; this is another option)
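A minimal sketch of plugging a middleware into the Classic stack; macaron.Renderer and the exact set of Classic defaults are assumptions based on the upstream Macaron API rather than code shown in this diff:

```go
package main

import "gopkg.in/macaron.v1"

func main() {
	m := macaron.Classic() // assumed to bundle logging, recovery and static file middleware

	// Renderer is the built-in render middleware; once registered, the
	// Render field of *macaron.Context (see context.go below) is non-nil.
	m.Use(macaron.Renderer())

	m.Get("/ping", func() string {
		return "pong"
	})

	m.Run()
}
```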
## Use Cases
- [Gogs](http://gogs.io): A painless self-hosted Git Service
- [Peach](http://peachdocs.org): A modern web documentation server
- [Go Walker](https://gowalker.org): Go online API documentation
- [Switch](http://gopm.io): Gopm registry
- [YouGam](http://yougam.com): Online Forum
- [Critical Stack Intel](https://intel.criticalstack.com/): A 100% free intel marketplace from Critical Stack, Inc.
## Getting Help
- [API Reference](https://gowalker.org/gopkg.in/macaron.v1)
- [Documentation](http://go-macaron.com)
- [FAQs](http://go-macaron.com/docs/faqs)
- [![Join the chat at https://gitter.im/Unknwon/macaron](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-macaron/macaron?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Credits
- Basic design is based on [Martini](https://github.com/go-martini/martini).
- Logo is modified by [@insionng](https://github.com/insionng) based on [Tribal Dragon](http://xtremeyamazaki.deviantart.com/art/Tribal-Dragon-27005087).
## License
This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.

510
vendor/gopkg.in/macaron.v1/context.go generated vendored Normal file
View File

@@ -0,0 +1,510 @@
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"crypto/md5"
"encoding/hex"
"html/template"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
"github.com/Unknwon/com"
"github.com/go-macaron/inject"
)
// Locale represents a localization interface.
type Locale interface {
Language() string
Tr(string, ...interface{}) string
}
// RequestBody represents a request body.
type RequestBody struct {
reader io.ReadCloser
}
// Bytes reads and returns the content of the request body as bytes.
func (rb *RequestBody) Bytes() ([]byte, error) {
return ioutil.ReadAll(rb.reader)
}
// String reads and returns the content of the request body as a string.
func (rb *RequestBody) String() (string, error) {
data, err := rb.Bytes()
return string(data), err
}
// ReadCloser returns a ReadCloser for request body.
func (rb *RequestBody) ReadCloser() io.ReadCloser {
return rb.reader
}
// Request represents an HTTP request received by a server or to be sent by a client.
type Request struct {
*http.Request
}
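// Body returns a RequestBody that wraps the underlying request body so it can be read as bytes, a string, or an io.ReadCloser.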
func (r *Request) Body() *RequestBody {
return &RequestBody{r.Request.Body}
}
// Context represents the runtime context of the current request of a Macaron instance.
// It is the integration of most frequently used middlewares and helper methods.
type Context struct {
inject.Injector
handlers []Handler
action Handler
index int
*Router
Req Request
Resp ResponseWriter
params Params
Render // Not nil only if you use the macaron.Render middleware.
Locale
Data map[string]interface{}
}
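// handler returns the handler at the current index; once all middleware handlers have run it returns the final action, and it panics if the index is out of range.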
func (c *Context) handler() Handler {
if c.index < len(c.handlers) {
return c.handlers[c.index]
}
if c.index == len(c.handlers) {
return c.action
}
panic("invalid index for context handler")
}
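// Next advances to and runs the next handler in the chain. It is only useful inside middleware that needs to run code after the remaining handlers have completed (see Logger).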
func (c *Context) Next() {
c.index += 1
c.run()
}
func (c *Context) Written() bool {
return c.Resp.Written()
}
func (c *Context) run() {
for c.index <= len(c.handlers) {
vals, err := c.Invoke(c.handler())
if err != nil {
panic(err)
}
c.index += 1
// if the handler returned something, write it to the http response
if len(vals) > 0 {
ev := c.GetVal(reflect.TypeOf(ReturnHandler(nil)))
handleReturn := ev.Interface().(ReturnHandler)
handleReturn(c, vals)
}
if c.Written() {
return
}
}
}
// RemoteAddr returns the real client IP address, preferring the X-Real-IP and X-Forwarded-For headers over the raw remote address.
func (ctx *Context) RemoteAddr() string {
addr := ctx.Req.Header.Get("X-Real-IP")
if len(addr) == 0 {
addr = ctx.Req.Header.Get("X-Forwarded-For")
if addr == "" {
addr = ctx.Req.RemoteAddr
if i := strings.LastIndex(addr, ":"); i > -1 {
addr = addr[:i]
}
}
}
return addr
}
func (ctx *Context) renderHTML(status int, setName, tplName string, data ...interface{}) {
if ctx.Render == nil {
panic("renderer middleware hasn't been registered")
}
if len(data) <= 0 {
ctx.Render.HTMLSet(status, setName, tplName, ctx.Data)
} else if len(data) == 1 {
ctx.Render.HTMLSet(status, setName, tplName, data[0])
} else {
ctx.Render.HTMLSet(status, setName, tplName, data[0], data[1].(HTMLOptions))
}
}
// HTML calls Render.HTML but allows fewer arguments.
func (ctx *Context) HTML(status int, name string, data ...interface{}) {
ctx.renderHTML(status, _DEFAULT_TPL_SET_NAME, name, data...)
}
// HTMLSet calls Render.HTMLSet but allows fewer arguments.
func (ctx *Context) HTMLSet(status int, setName, tplName string, data ...interface{}) {
ctx.renderHTML(status, setName, tplName, data...)
}
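// Redirect replies to the request with an HTTP redirect to location, using 302 Found unless a different status code is given.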
func (ctx *Context) Redirect(location string, status ...int) {
code := http.StatusFound
if len(status) == 1 {
code = status[0]
}
http.Redirect(ctx.Resp, ctx.Req.Request, location, code)
}
// Maximum amount of memory to use when parsing a multipart form.
// Set this to whatever value you prefer; default is 10 MB.
var MaxMemory = int64(1024 * 1024 * 10)
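// parseForm lazily parses the request form, using multipart parsing (bounded by MaxMemory) for POST and PUT requests with a multipart/form-data content type.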
func (ctx *Context) parseForm() {
if ctx.Req.Form != nil {
return
}
contentType := ctx.Req.Header.Get(_CONTENT_TYPE)
if (ctx.Req.Method == "POST" || ctx.Req.Method == "PUT") &&
len(contentType) > 0 && strings.Contains(contentType, "multipart/form-data") {
ctx.Req.ParseMultipartForm(MaxMemory)
} else {
ctx.Req.ParseForm()
}
}
// Query queries the form parameter by the given name.
func (ctx *Context) Query(name string) string {
ctx.parseForm()
return ctx.Req.Form.Get(name)
}
// QueryTrim queries the form parameter and trims surrounding spaces.
func (ctx *Context) QueryTrim(name string) string {
return strings.TrimSpace(ctx.Query(name))
}
// QueryStrings returns a list of results by given query name.
func (ctx *Context) QueryStrings(name string) []string {
ctx.parseForm()
vals, ok := ctx.Req.Form[name]
if !ok {
return []string{}
}
return vals
}
// QueryEscape returns the HTML-escaped query result.
func (ctx *Context) QueryEscape(name string) string {
return template.HTMLEscapeString(ctx.Query(name))
}
// QueryInt returns query result in int type.
func (ctx *Context) QueryInt(name string) int {
return com.StrTo(ctx.Query(name)).MustInt()
}
// QueryInt64 returns query result in int64 type.
func (ctx *Context) QueryInt64(name string) int64 {
return com.StrTo(ctx.Query(name)).MustInt64()
}
// QueryFloat64 returns query result in float64 type.
func (ctx *Context) QueryFloat64(name string) float64 {
v, _ := strconv.ParseFloat(ctx.Query(name), 64)
return v
}
// Params returns value of given param name.
// e.g. ctx.Params(":uid") or ctx.Params("uid")
func (ctx *Context) Params(name string) string {
if len(name) == 0 {
return ""
}
if len(name) > 1 && name[0] != ':' {
name = ":" + name
}
return ctx.params[name]
}
// SetParams sets value of param with given name.
func (ctx *Context) SetParams(name, val string) {
if !strings.HasPrefix(name, ":") {
name = ":" + name
}
ctx.params[name] = val
}
// ParamsEscape returns the HTML-escaped params result.
// e.g. ctx.ParamsEscape(":uname")
func (ctx *Context) ParamsEscape(name string) string {
return template.HTMLEscapeString(ctx.Params(name))
}
// ParamsInt returns params result in int type.
// e.g. ctx.ParamsInt(":uid")
func (ctx *Context) ParamsInt(name string) int {
return com.StrTo(ctx.Params(name)).MustInt()
}
// ParamsInt64 returns params result in int64 type.
// e.g. ctx.ParamsInt64(":uid")
func (ctx *Context) ParamsInt64(name string) int64 {
return com.StrTo(ctx.Params(name)).MustInt64()
}
// ParamsFloat64 returns params result in float64 type.
// e.g. ctx.ParamsFloat64(":uid")
func (ctx *Context) ParamsFloat64(name string) float64 {
v, _ := strconv.ParseFloat(ctx.Params(name), 64)
return v
}
// GetFile returns the uploaded file and its header for the given form field name.
func (ctx *Context) GetFile(name string) (multipart.File, *multipart.FileHeader, error) {
return ctx.Req.FormFile(name)
}
// SaveToFile reads a file from the request by field name and saves it to the given path.
func (ctx *Context) SaveToFile(name, savePath string) error {
fr, _, err := ctx.GetFile(name)
if err != nil {
return err
}
defer fr.Close()
fw, err := os.OpenFile(savePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
defer fw.Close()
_, err = io.Copy(fw, fr)
return err
}
// SetCookie sets given cookie value to response header.
// FIXME: IE support? http://golanghome.com/post/620#reply2
func (ctx *Context) SetCookie(name string, value string, others ...interface{}) {
cookie := http.Cookie{}
cookie.Name = name
cookie.Value = url.QueryEscape(value)
if len(others) > 0 {
switch v := others[0].(type) {
case int:
cookie.MaxAge = v
case int64:
cookie.MaxAge = int(v)
case int32:
cookie.MaxAge = int(v)
}
}
cookie.Path = "/"
if len(others) > 1 {
if v, ok := others[1].(string); ok && len(v) > 0 {
cookie.Path = v
}
}
if len(others) > 2 {
if v, ok := others[2].(string); ok && len(v) > 0 {
cookie.Domain = v
}
}
if len(others) > 3 {
switch v := others[3].(type) {
case bool:
cookie.Secure = v
default:
if others[3] != nil {
cookie.Secure = true
}
}
}
if len(others) > 4 {
if v, ok := others[4].(bool); ok && v {
cookie.HttpOnly = true
}
}
ctx.Resp.Header().Add("Set-Cookie", cookie.String())
}
// GetCookie returns given cookie value from request header.
func (ctx *Context) GetCookie(name string) string {
cookie, err := ctx.Req.Cookie(name)
if err != nil {
return ""
}
val, _ := url.QueryUnescape(cookie.Value)
return val
}
// GetCookieInt returns cookie result in int type.
func (ctx *Context) GetCookieInt(name string) int {
return com.StrTo(ctx.GetCookie(name)).MustInt()
}
// GetCookieInt64 returns cookie result in int64 type.
func (ctx *Context) GetCookieInt64(name string) int64 {
return com.StrTo(ctx.GetCookie(name)).MustInt64()
}
// GetCookieFloat64 returns cookie result in float64 type.
func (ctx *Context) GetCookieFloat64(name string) float64 {
v, _ := strconv.ParseFloat(ctx.GetCookie(name), 64)
return v
}
var defaultCookieSecret string
// SetDefaultCookieSecret sets global default secure cookie secret.
func (m *Macaron) SetDefaultCookieSecret(secret string) {
defaultCookieSecret = secret
}
// SetSecureCookie sets given cookie value to response header with default secret string.
func (ctx *Context) SetSecureCookie(name, value string, others ...interface{}) {
ctx.SetSuperSecureCookie(defaultCookieSecret, name, value, others...)
}
// GetSecureCookie returns given cookie value from request header with default secret string.
func (ctx *Context) GetSecureCookie(key string) (string, bool) {
return ctx.GetSuperSecureCookie(defaultCookieSecret, key)
}
// SetSuperSecureCookie sets given cookie value to response header with secret string.
func (ctx *Context) SetSuperSecureCookie(secret, name, value string, others ...interface{}) {
m := md5.Sum([]byte(secret))
secret = hex.EncodeToString(m[:])
text, err := com.AESEncrypt([]byte(secret), []byte(value))
if err != nil {
panic("error encrypting cookie: " + err.Error())
}
ctx.SetCookie(name, hex.EncodeToString(text), others...)
}
// GetSuperSecureCookie returns given cookie value from request header with secret string.
func (ctx *Context) GetSuperSecureCookie(secret, key string) (string, bool) {
val := ctx.GetCookie(key)
if val == "" {
return "", false
}
data, err := hex.DecodeString(val)
if err != nil {
return "", false
}
m := md5.Sum([]byte(secret))
secret = hex.EncodeToString(m[:])
text, err := com.AESDecrypt([]byte(secret), data)
return string(text), err == nil
}
func (ctx *Context) setRawContentHeader() {
ctx.Resp.Header().Set("Content-Description", "Raw content")
ctx.Resp.Header().Set("Content-Type", "text/plain")
ctx.Resp.Header().Set("Expires", "0")
ctx.Resp.Header().Set("Cache-Control", "must-revalidate")
ctx.Resp.Header().Set("Pragma", "public")
}
// ServeContent serves given content to response.
func (ctx *Context) ServeContent(name string, r io.ReadSeeker, params ...interface{}) {
modtime := time.Now()
for _, p := range params {
switch v := p.(type) {
case time.Time:
modtime = v
}
}
ctx.setRawContentHeader()
http.ServeContent(ctx.Resp, ctx.Req.Request, name, modtime, r)
}
// ServeFileContent serves given file as content to response.
func (ctx *Context) ServeFileContent(file string, names ...string) {
var name string
if len(names) > 0 {
name = names[0]
} else {
name = path.Base(file)
}
f, err := os.Open(file)
if err != nil {
if Env == PROD {
http.Error(ctx.Resp, "Internal Server Error", 500)
} else {
http.Error(ctx.Resp, err.Error(), 500)
}
return
}
defer f.Close()
ctx.setRawContentHeader()
http.ServeContent(ctx.Resp, ctx.Req.Request, name, time.Now(), f)
}
// ServeFile serves given file to response.
func (ctx *Context) ServeFile(file string, names ...string) {
var name string
if len(names) > 0 {
name = names[0]
} else {
name = path.Base(file)
}
ctx.Resp.Header().Set("Content-Description", "File Transfer")
ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+name)
ctx.Resp.Header().Set("Content-Transfer-Encoding", "binary")
ctx.Resp.Header().Set("Expires", "0")
ctx.Resp.Header().Set("Cache-Control", "must-revalidate")
ctx.Resp.Header().Set("Pragma", "public")
http.ServeFile(ctx.Resp, ctx.Req.Request, file)
}
// ChangeStaticPath changes static path from old to new one.
func (ctx *Context) ChangeStaticPath(oldPath, newPath string) {
if !filepath.IsAbs(oldPath) {
oldPath = filepath.Join(Root, oldPath)
}
dir := statics.Get(oldPath)
if dir != nil {
statics.Delete(oldPath)
if !filepath.IsAbs(newPath) {
newPath = filepath.Join(Root, newPath)
}
*dir = http.Dir(newPath)
statics.Set(dir)
}
}

View File

@@ -0,0 +1 @@
<h1>Admin {{.}}</h1>

View File

@@ -0,0 +1 @@
another head{{ yield }}another foot

View File

@@ -0,0 +1 @@
<h1>{{ . }}</h1>

View File

@@ -0,0 +1 @@
{{ current }} head{{ yield }}{{ current }} foot

View File

@@ -0,0 +1 @@
<h1>Hello {[{.}]}</h1>

1
vendor/gopkg.in/macaron.v1/fixtures/basic/hello.tmpl generated vendored Normal file
View File

@@ -0,0 +1 @@
<h1>Hello {{.}}</h1>

View File

@@ -0,0 +1 @@
Hypertext!

View File

@@ -0,0 +1 @@
head{{ yield }}foot

View File

@@ -0,0 +1 @@
<h1>What's up, {{.}}</h1>

View File

@@ -0,0 +1 @@
<h1>Hello {{.Name}}</h1>

View File

@@ -0,0 +1 @@
{{ myCustomFunc }}

61
vendor/gopkg.in/macaron.v1/logger.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
// Copyright 2013 Martini Authors
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"fmt"
"log"
"net/http"
"runtime"
"time"
)
var ColorLog = true
func init() {
ColorLog = runtime.GOOS != "windows"
}
// Logger returns a middleware handler that logs the request as it goes in and the response as it goes out.
func Logger() Handler {
return func(ctx *Context, log *log.Logger) {
start := time.Now()
log.Printf("Started %s %s for %s", ctx.Req.Method, ctx.Req.RequestURI, ctx.RemoteAddr())
rw := ctx.Resp.(ResponseWriter)
ctx.Next()
content := fmt.Sprintf("Completed %s %v %s in %v", ctx.Req.RequestURI, rw.Status(), http.StatusText(rw.Status()), time.Since(start))
if ColorLog {
switch rw.Status() {
case 200, 201, 202:
content = fmt.Sprintf("\033[1;32m%s\033[0m", content)
case 301, 302:
content = fmt.Sprintf("\033[1;37m%s\033[0m", content)
case 304:
content = fmt.Sprintf("\033[1;33m%s\033[0m", content)
case 401, 403:
content = fmt.Sprintf("\033[4;31m%s\033[0m", content)
case 404:
content = fmt.Sprintf("\033[1;31m%s\033[0m", content)
case 500:
content = fmt.Sprintf("\033[1;36m%s\033[0m", content)
}
}
log.Println(content)
}
}

278
vendor/gopkg.in/macaron.v1/macaron.go generated vendored Normal file
View File

@@ -0,0 +1,278 @@
// +build go1.3
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package macaron is a high productive and modular web framework in Go.
package macaron
import (
"io"
"log"
"net/http"
"os"
"reflect"
"strings"
"github.com/Unknwon/com"
"gopkg.in/ini.v1"
"github.com/go-macaron/inject"
)
const _VERSION = "0.8.0.1013"
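// Version returns the current version string of the Macaron framework.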
func Version() string {
return _VERSION
}
// Handler can be any callable function.
// Macaron attempts to inject services into the handler's argument list,
// and panics if an argument could not be fulfilled via dependency injection.
type Handler interface{}
// validateHandler makes sure a handler is a callable function,
// and panics if it is not.
func validateHandler(h Handler) {
if reflect.TypeOf(h).Kind() != reflect.Func {
panic("Macaron handler must be a callable function")
}
}
// validateHandlers makes sure handlers are callable functions,
// and panics if any of them is not.
func validateHandlers(handlers []Handler) {
for _, h := range handlers {
validateHandler(h)
}
}
// Macaron represents the top level web application.
// inject.Injector methods can be invoked to map services on a global level.
type Macaron struct {
inject.Injector
befores []BeforeHandler
handlers []Handler
action Handler
hasURLPrefix bool
urlPrefix string // For suburl support.
*Router
logger *log.Logger
}
// NewWithLogger creates a bare bones Macaron instance.
// Use this method if you want to have full control over the middleware that is used.
// You can specify logger output writer with this function.
func NewWithLogger(out io.Writer) *Macaron {
m := &Macaron{
Injector: inject.New(),
action: func() {},
Router: NewRouter(),
logger: log.New(out, "[Macaron] ", 0),
}
m.Router.m = m
m.Map(m.logger)
m.Map(defaultReturnHandler())
m.NotFound(http.NotFound)
m.InternalServerError(func(rw http.ResponseWriter, err error) {
http.Error(rw, err.Error(), 500)
})
return m
}
// New creates a bare bones Macaron instance.
// Use this method if you want to have full control over the middleware that is used.
func New() *Macaron {
return NewWithLogger(os.Stdout)
}
// Classic creates a classic Macaron with some basic default middleware:
// macaron.Logger, macaron.Recovery and macaron.Static.
func Classic() *Macaron {
m := New()
m.Use(Logger())
m.Use(Recovery())
m.Use(Static("public"))
return m
}
// Handlers sets the entire middleware stack with the given Handlers.
// This will clear any current middleware handlers,
// and panics if any of the handlers is not a callable function
func (m *Macaron) Handlers(handlers ...Handler) {
m.handlers = make([]Handler, 0)
for _, handler := range handlers {
m.Use(handler)
}
}
// Action sets the handler that will be called after all the middleware has been invoked.
// This is set to macaron.Router in a macaron.Classic().
func (m *Macaron) Action(handler Handler) {
validateHandler(handler)
m.action = handler
}
// BeforeHandler represents a handler that executes at the beginning of every request.
// Macaron stops further processing when it returns true.
type BeforeHandler func(rw http.ResponseWriter, req *http.Request) bool
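// Before registers a BeforeHandler that runs ahead of the router; if it returns true, request processing stops.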
func (m *Macaron) Before(handler BeforeHandler) {
m.befores = append(m.befores, handler)
}
// Use adds a middleware Handler to the stack,
// and panics if the handler is not a callable func.
// Middleware Handlers are invoked in the order that they are added.
func (m *Macaron) Use(handler Handler) {
validateHandler(handler)
m.handlers = append(m.handlers, handler)
}
func (m *Macaron) createContext(rw http.ResponseWriter, req *http.Request) *Context {
c := &Context{
Injector: inject.New(),
handlers: m.handlers,
action: m.action,
index: 0,
Router: m.Router,
Req: Request{req},
Resp: NewResponseWriter(rw),
Data: make(map[string]interface{}),
}
c.SetParent(m)
c.Map(c)
c.MapTo(c.Resp, (*http.ResponseWriter)(nil))
c.Map(req)
return c
}
// ServeHTTP is the HTTP entry point for a Macaron instance.
// Useful if you want to control your own HTTP server.
// Be aware that none of the middleware will run without registering any routes.
func (m *Macaron) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if m.hasURLPrefix {
req.URL.Path = strings.TrimPrefix(req.URL.Path, m.urlPrefix)
}
for _, h := range m.befores {
if h(rw, req) {
return
}
}
m.Router.ServeHTTP(rw, req)
}
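// GetDefaultListenInfo returns the listen host and port taken from the HOST and PORT environment variables, defaulting to "0.0.0.0" and 4000.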
func GetDefaultListenInfo() (string, int) {
host := os.Getenv("HOST")
if len(host) == 0 {
host = "0.0.0.0"
}
port := com.StrTo(os.Getenv("PORT")).MustInt()
if port == 0 {
port = 4000
}
return host, port
}
// Run starts the HTTP server. It listens on os.Getenv("PORT"), or 4000 by default.
func (m *Macaron) Run(args ...interface{}) {
host, port := GetDefaultListenInfo()
if len(args) == 1 {
switch arg := args[0].(type) {
case string:
host = arg
case int:
port = arg
}
} else if len(args) >= 2 {
if arg, ok := args[0].(string); ok {
host = arg
}
if arg, ok := args[1].(int); ok {
port = arg
}
}
addr := host + ":" + com.ToStr(port)
logger := m.GetVal(reflect.TypeOf(m.logger)).Interface().(*log.Logger)
logger.Printf("listening on %s (%s)\n", addr, Env)
logger.Fatalln(http.ListenAndServe(addr, m))
}
// SetURLPrefix sets the URL prefix of the router layer so that it supports suburl.
func (m *Macaron) SetURLPrefix(prefix string) {
m.urlPrefix = prefix
m.hasURLPrefix = len(m.urlPrefix) > 0
}
// ____ ____ .__ ___. .__
// \ \ / /____ _______|__|____ \_ |__ | | ____ ______
// \ Y /\__ \\_ __ \ \__ \ | __ \| | _/ __ \ / ___/
// \ / / __ \| | \/ |/ __ \| \_\ \ |_\ ___/ \___ \
// \___/ (____ /__| |__(____ /___ /____/\___ >____ >
// \/ \/ \/ \/ \/
const (
DEV = "development"
PROD = "production"
TEST = "test"
)
var (
// Env is the environment that Macaron is executing in.
// The MACARON_ENV environment variable is read on initialization to set this variable.
Env = DEV
// Root is the path of the working directory.
Root string
// Flash applies to current request.
FlashNow bool
// Configuration convention object.
cfg *ini.File
)
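// setENV overrides Env when the given value is non-empty.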
func setENV(e string) {
if len(e) > 0 {
Env = e
}
}
func init() {
setENV(os.Getenv("MACARON_ENV"))
var err error
Root, err = os.Getwd()
if err != nil {
panic("error getting work directory: " + err.Error())
}
}
// SetConfig sets data sources for configuration.
func SetConfig(source interface{}, others ...interface{}) (_ *ini.File, err error) {
cfg, err = ini.Load(source, others...)
return Config(), err
}
// Config returns configuration convention object.
// It returns an empty object if there is no one available.
func Config() *ini.File {
if cfg == nil {
return ini.Empty()
}
return cfg
}

BIN
vendor/gopkg.in/macaron.v1/macaronlogo.png generated vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

163
vendor/gopkg.in/macaron.v1/recovery.go generated vendored Normal file
View File

@@ -0,0 +1,163 @@
// Copyright 2013 Martini Authors
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"runtime"
"github.com/go-macaron/inject"
)
const (
panicHtml = `<html>
<head><title>PANIC: %s</title>
<meta charset="utf-8" />
<style type="text/css">
html, body {
font-family: "Roboto", sans-serif;
color: #333333;
background-color: #ea5343;
margin: 0px;
}
h1 {
color: #d04526;
background-color: #ffffff;
padding: 20px;
border-bottom: 1px dashed #2b3848;
}
pre {
margin: 20px;
padding: 20px;
border: 2px solid #2b3848;
background-color: #ffffff;
white-space: pre-wrap; /* css-3 */
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
white-space: -pre-wrap; /* Opera 4-6 */
white-space: -o-pre-wrap; /* Opera 7 */
word-wrap: break-word; /* Internet Explorer 5.5+ */
}
</style>
</head><body>
<h1>PANIC</h1>
<pre style="font-weight: bold;">%s</pre>
<pre>%s</pre>
</body>
</html>`
)
var (
dunno = []byte("???")
centerDot = []byte("·")
dot = []byte(".")
slash = []byte("/")
)
// stack returns a nicely formatted stack frame, skipping skip frames
func stack(skip int) []byte {
buf := new(bytes.Buffer) // the returned data
// As we loop, we open files and read them. These variables record the currently
// loaded file.
var lines [][]byte
var lastFile string
for i := skip; ; i++ { // Skip the expected number of frames
pc, file, line, ok := runtime.Caller(i)
if !ok {
break
}
// Print this much at least. If we can't find the source, it won't show.
fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
if file != lastFile {
data, err := ioutil.ReadFile(file)
if err != nil {
continue
}
lines = bytes.Split(data, []byte{'\n'})
lastFile = file
}
fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
}
return buf.Bytes()
}
// source returns a space-trimmed slice of the n'th line.
func source(lines [][]byte, n int) []byte {
n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
if n < 0 || n >= len(lines) {
return dunno
}
return bytes.TrimSpace(lines[n])
}
// function returns, if possible, the name of the function containing the PC.
func function(pc uintptr) []byte {
fn := runtime.FuncForPC(pc)
if fn == nil {
return dunno
}
name := []byte(fn.Name())
// The name includes the path name to the package, which is unnecessary
// since the file name is already included. Plus, it has center dots.
// That is, we see
// runtime/debug.*T·ptrmethod
// and want
// *T.ptrmethod
// Also the package path might contain dots (e.g. code.google.com/...),
// so first eliminate the path prefix
if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
name = name[lastslash+1:]
}
if period := bytes.Index(name, dot); period >= 0 {
name = name[period+1:]
}
name = bytes.Replace(name, centerDot, dot, -1)
return name
}
// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
// While Macaron is in development mode, Recovery will also output the panic as HTML.
func Recovery() Handler {
return func(c *Context, log *log.Logger) {
defer func() {
if err := recover(); err != nil {
stack := stack(3)
log.Printf("PANIC: %s\n%s", err, stack)
// Lookup the current responsewriter
val := c.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil)))
res := val.Interface().(http.ResponseWriter)
// respond with panic message while in development mode
var body []byte
if Env == DEV {
res.Header().Set("Content-Type", "text/html")
body = []byte(fmt.Sprintf(panicHtml, err, err, stack))
}
res.WriteHeader(http.StatusInternalServerError)
if nil != body {
res.Write(body)
}
}
}()
c.Next()
}
}

593
vendor/gopkg.in/macaron.v1/render.go generated vendored Normal file
View File

@@ -0,0 +1,593 @@
// Copyright 2013 Martini Authors
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/Unknwon/com"
)
const (
_CONTENT_TYPE = "Content-Type"
_CONTENT_LENGTH = "Content-Length"
_CONTENT_BINARY = "application/octet-stream"
_CONTENT_JSON = "application/json"
_CONTENT_HTML = "text/html"
_CONTENT_PLAIN = "text/plain"
_CONTENT_XHTML = "application/xhtml+xml"
_CONTENT_XML = "text/xml"
_DEFAULT_CHARSET = "UTF-8"
)
var (
// Provides a temporary buffer to execute templates into and catch errors.
bufpool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
// Included helper functions for use when rendering HTML
helperFuncs = template.FuncMap{
"yield": func() (string, error) {
return "", fmt.Errorf("yield called with no layout defined")
},
"current": func() (string, error) {
return "", nil
},
}
)
type (
// TemplateFile represents an interface for a template file that has a name and can be read.
TemplateFile interface {
Name() string
Data() []byte
Ext() string
}
// TemplateFileSystem represents an interface for a template file system that is able to list all files.
TemplateFileSystem interface {
ListFiles() []TemplateFile
}
// Delims represents a set of Left and Right delimiters for HTML template rendering
Delims struct {
// Left delimiter, defaults to {{
Left string
// Right delimiter, defaults to }}
Right string
}
// RenderOptions represents a struct for specifying configuration options for the Render middleware.
RenderOptions struct {
// Directory to load templates. Default is "templates".
Directory string
// Layout template name. No layout is rendered if it is "". Default is "".
Layout string
// Extensions to parse template files from. Defaults are [".tmpl", ".html"].
Extensions []string
// Funcs is a slice of FuncMaps to apply to the template upon compilation. This is useful for helper functions. Default is [].
Funcs []template.FuncMap
// Delims sets the action delimiters to the specified strings in the Delims struct.
Delims Delims
// Appends the given charset to the Content-Type header. Default is "UTF-8".
Charset string
// Outputs human readable JSON.
IndentJSON bool
// Outputs human readable XML.
IndentXML bool
// Prefixes the JSON output with the given bytes.
PrefixJSON []byte
// Prefixes the XML output with the given bytes.
PrefixXML []byte
// Allows changing the output to XHTML instead of HTML. Default is "text/html".
HTMLContentType string
// TemplateFileSystem is the interface for supporting any implementation of a template file system.
TemplateFileSystem
}
// HTMLOptions is a struct for overriding some rendering Options for a specific HTML call.
HTMLOptions struct {
// Layout template name. Overrides Options.Layout.
Layout string
}
Render interface {
http.ResponseWriter
SetResponseWriter(http.ResponseWriter)
JSON(int, interface{})
JSONString(interface{}) (string, error)
RawData(int, []byte) // Serve content as binary
PlainText(int, []byte) // Serve content as plain text
HTML(int, string, interface{}, ...HTMLOptions)
HTMLSet(int, string, string, interface{}, ...HTMLOptions)
HTMLSetString(string, string, interface{}, ...HTMLOptions) (string, error)
HTMLString(string, interface{}, ...HTMLOptions) (string, error)
HTMLSetBytes(string, string, interface{}, ...HTMLOptions) ([]byte, error)
HTMLBytes(string, interface{}, ...HTMLOptions) ([]byte, error)
XML(int, interface{})
Error(int, ...string)
Status(int)
SetTemplatePath(string, string)
HasTemplateSet(string) bool
}
)
// TplFile implements TemplateFile interface.
type TplFile struct {
name string
data []byte
ext string
}
// NewTplFile creates a new template file with the given name and data.
func NewTplFile(name string, data []byte, ext string) *TplFile {
return &TplFile{name, data, ext}
}
func (f *TplFile) Name() string {
return f.name
}
func (f *TplFile) Data() []byte {
return f.data
}
func (f *TplFile) Ext() string {
return f.ext
}
// TplFileSystem implements TemplateFileSystem interface.
type TplFileSystem struct {
files []TemplateFile
}
// NewTemplateFileSystem creates a new template file system with the given options.
func NewTemplateFileSystem(opt RenderOptions, omitData bool) TplFileSystem {
fs := TplFileSystem{}
fs.files = make([]TemplateFile, 0, 10)
if err := filepath.Walk(opt.Directory, func(path string, info os.FileInfo, err error) error {
r, err := filepath.Rel(opt.Directory, path)
if err != nil {
return err
}
ext := GetExt(r)
for _, extension := range opt.Extensions {
if ext == extension {
var data []byte
if !omitData {
data, err = ioutil.ReadFile(path)
if err != nil {
return err
}
}
name := filepath.ToSlash((r[0 : len(r)-len(ext)]))
fs.files = append(fs.files, NewTplFile(name, data, ext))
break
}
}
return nil
}); err != nil {
panic("NewTemplateFileSystem: " + err.Error())
}
return fs
}
func (fs TplFileSystem) ListFiles() []TemplateFile {
return fs.files
}
func PrepareCharset(charset string) string {
if len(charset) != 0 {
return "; charset=" + charset
}
return "; charset=" + _DEFAULT_CHARSET
}
func GetExt(s string) string {
index := strings.Index(s, ".")
if index == -1 {
return ""
}
return s[index:]
}
func compile(opt RenderOptions) *template.Template {
dir := opt.Directory
t := template.New(dir)
t.Delims(opt.Delims.Left, opt.Delims.Right)
// Parse an initial template in case we don't have any.
template.Must(t.Parse("Macaron"))
if opt.TemplateFileSystem == nil {
opt.TemplateFileSystem = NewTemplateFileSystem(opt, false)
}
for _, f := range opt.TemplateFileSystem.ListFiles() {
tmpl := t.New(f.Name())
for _, funcs := range opt.Funcs {
tmpl.Funcs(funcs)
}
// Bomb out if parse fails. We don't want any silent server starts.
template.Must(tmpl.Funcs(helperFuncs).Parse(string(f.Data())))
}
return t
}
const (
_DEFAULT_TPL_SET_NAME = "DEFAULT"
)
// templateSet represents a template set of type *template.Template.
type templateSet struct {
lock sync.RWMutex
sets map[string]*template.Template
dirs map[string]string
}
func newTemplateSet() *templateSet {
return &templateSet{
sets: make(map[string]*template.Template),
dirs: make(map[string]string),
}
}
func (ts *templateSet) Set(name string, opt *RenderOptions) *template.Template {
t := compile(*opt)
ts.lock.Lock()
defer ts.lock.Unlock()
ts.sets[name] = t
ts.dirs[name] = opt.Directory
return t
}
func (ts *templateSet) Get(name string) *template.Template {
ts.lock.RLock()
defer ts.lock.RUnlock()
return ts.sets[name]
}
func (ts *templateSet) GetDir(name string) string {
ts.lock.RLock()
defer ts.lock.RUnlock()
return ts.dirs[name]
}
func prepareRenderOptions(options []RenderOptions) RenderOptions {
var opt RenderOptions
if len(options) > 0 {
opt = options[0]
}
// Defaults.
if len(opt.Directory) == 0 {
opt.Directory = "templates"
}
if len(opt.Extensions) == 0 {
opt.Extensions = []string{".tmpl", ".html"}
}
if len(opt.HTMLContentType) == 0 {
opt.HTMLContentType = _CONTENT_HTML
}
return opt
}
func ParseTplSet(tplSet string) (tplName string, tplDir string) {
tplSet = strings.TrimSpace(tplSet)
if len(tplSet) == 0 {
panic("empty template set argument")
}
infos := strings.Split(tplSet, ":")
if len(infos) == 1 {
tplDir = infos[0]
tplName = path.Base(tplDir)
} else {
tplName = infos[0]
tplDir = infos[1]
}
if !com.IsDir(tplDir) {
panic("template set path does not exist or is not a directory")
}
return tplName, tplDir
}
func renderHandler(opt RenderOptions, tplSets []string) Handler {
cs := PrepareCharset(opt.Charset)
ts := newTemplateSet()
ts.Set(_DEFAULT_TPL_SET_NAME, &opt)
var tmpOpt RenderOptions
for _, tplSet := range tplSets {
tplName, tplDir := ParseTplSet(tplSet)
tmpOpt = opt
tmpOpt.Directory = tplDir
ts.Set(tplName, &tmpOpt)
}
return func(ctx *Context) {
r := &TplRender{
ResponseWriter: ctx.Resp,
templateSet: ts,
Opt: &opt,
CompiledCharset: cs,
}
ctx.Data["TmplLoadTimes"] = func() string {
if r.startTime.IsZero() {
return ""
}
return fmt.Sprint(time.Since(r.startTime).Nanoseconds()/1e6) + "ms"
}
ctx.Render = r
ctx.MapTo(r, (*Render)(nil))
}
}
// Renderer is a Middleware that maps a macaron.Render service into the Macaron handler chain.
// A single variadic macaron.RenderOptions struct can be optionally provided to configure
// HTML rendering. The default directory for templates is "templates" and the default
// file extensions are ".tmpl" and ".html".
//
// If MACARON_ENV is set to "" or "development" then templates will be recompiled on every request. For more performance, set the
// MACARON_ENV environment variable to "production".
func Renderer(options ...RenderOptions) Handler {
return renderHandler(prepareRenderOptions(options), []string{})
}
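// Renderers is like Renderer but additionally registers the given template sets,
// each specified as "name:dir" (or just "dir", in which case the base directory name becomes the set name).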
func Renderers(options RenderOptions, tplSets ...string) Handler {
return renderHandler(prepareRenderOptions([]RenderOptions{options}), tplSets)
}
type TplRender struct {
http.ResponseWriter
*templateSet
Opt *RenderOptions
CompiledCharset string
startTime time.Time
}
func (r *TplRender) SetResponseWriter(rw http.ResponseWriter) {
r.ResponseWriter = rw
}
func (r *TplRender) JSON(status int, v interface{}) {
var (
result []byte
err error
)
if r.Opt.IndentJSON {
result, err = json.MarshalIndent(v, "", " ")
} else {
result, err = json.Marshal(v)
}
if err != nil {
http.Error(r, err.Error(), 500)
return
}
// json rendered fine, write out the result
r.Header().Set(_CONTENT_TYPE, _CONTENT_JSON+r.CompiledCharset)
r.WriteHeader(status)
if len(r.Opt.PrefixJSON) > 0 {
r.Write(r.Opt.PrefixJSON)
}
r.Write(result)
}
func (r *TplRender) JSONString(v interface{}) (string, error) {
var result []byte
var err error
if r.Opt.IndentJSON {
result, err = json.MarshalIndent(v, "", " ")
} else {
result, err = json.Marshal(v)
}
if err != nil {
return "", err
}
return string(result), nil
}
func (r *TplRender) XML(status int, v interface{}) {
var result []byte
var err error
if r.Opt.IndentXML {
result, err = xml.MarshalIndent(v, "", " ")
} else {
result, err = xml.Marshal(v)
}
if err != nil {
http.Error(r, err.Error(), 500)
return
}
// XML rendered fine, write out the result
r.Header().Set(_CONTENT_TYPE, _CONTENT_XML+r.CompiledCharset)
r.WriteHeader(status)
if len(r.Opt.PrefixXML) > 0 {
r.Write(r.Opt.PrefixXML)
}
r.Write(result)
}
func (r *TplRender) data(status int, contentType string, v []byte) {
if r.Header().Get(_CONTENT_TYPE) == "" {
r.Header().Set(_CONTENT_TYPE, contentType)
}
r.WriteHeader(status)
r.Write(v)
}
func (r *TplRender) RawData(status int, v []byte) {
r.data(status, _CONTENT_BINARY, v)
}
func (r *TplRender) PlainText(status int, v []byte) {
r.data(status, _CONTENT_PLAIN, v)
}
func (r *TplRender) execute(t *template.Template, name string, data interface{}) (*bytes.Buffer, error) {
buf := bufpool.Get().(*bytes.Buffer)
return buf, t.ExecuteTemplate(buf, name, data)
}
func (r *TplRender) addYield(t *template.Template, tplName string, data interface{}) {
funcs := template.FuncMap{
"yield": func() (template.HTML, error) {
buf, err := r.execute(t, tplName, data)
// return safe html here since we are rendering our own template
return template.HTML(buf.String()), err
},
"current": func() (string, error) {
return tplName, nil
},
}
t.Funcs(funcs)
}
func (r *TplRender) renderBytes(setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) (*bytes.Buffer, error) {
t := r.templateSet.Get(setName)
if Env == DEV {
opt := *r.Opt
opt.Directory = r.templateSet.GetDir(setName)
t = r.templateSet.Set(setName, &opt)
}
if t == nil {
return nil, fmt.Errorf("html/template: template \"%s\" is undefined", tplName)
}
opt := r.prepareHTMLOptions(htmlOpt)
if len(opt.Layout) > 0 {
r.addYield(t, tplName, data)
tplName = opt.Layout
}
out, err := r.execute(t, tplName, data)
if err != nil {
return nil, err
}
return out, nil
}
func (r *TplRender) renderHTML(status int, setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) {
r.startTime = time.Now()
out, err := r.renderBytes(setName, tplName, data, htmlOpt...)
if err != nil {
http.Error(r, err.Error(), http.StatusInternalServerError)
return
}
r.Header().Set(_CONTENT_TYPE, r.Opt.HTMLContentType+r.CompiledCharset)
r.WriteHeader(status)
out.WriteTo(r)
bufpool.Put(out)
}
func (r *TplRender) HTML(status int, name string, data interface{}, htmlOpt ...HTMLOptions) {
r.renderHTML(status, _DEFAULT_TPL_SET_NAME, name, data, htmlOpt...)
}
func (r *TplRender) HTMLSet(status int, setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) {
r.renderHTML(status, setName, tplName, data, htmlOpt...)
}
func (r *TplRender) HTMLSetBytes(setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) ([]byte, error) {
out, err := r.renderBytes(setName, tplName, data, htmlOpt...)
if err != nil {
return []byte(""), err
}
return out.Bytes(), nil
}
func (r *TplRender) HTMLBytes(name string, data interface{}, htmlOpt ...HTMLOptions) ([]byte, error) {
return r.HTMLSetBytes(_DEFAULT_TPL_SET_NAME, name, data, htmlOpt...)
}
func (r *TplRender) HTMLSetString(setName, tplName string, data interface{}, htmlOpt ...HTMLOptions) (string, error) {
p, err := r.HTMLSetBytes(setName, tplName, data, htmlOpt...)
return string(p), err
}
func (r *TplRender) HTMLString(name string, data interface{}, htmlOpt ...HTMLOptions) (string, error) {
p, err := r.HTMLBytes(name, data, htmlOpt...)
return string(p), err
}
// Error writes the given HTTP status to the current ResponseWriter
func (r *TplRender) Error(status int, message ...string) {
r.WriteHeader(status)
if len(message) > 0 {
r.Write([]byte(message[0]))
}
}
func (r *TplRender) Status(status int) {
r.WriteHeader(status)
}
func (r *TplRender) prepareHTMLOptions(htmlOpt []HTMLOptions) HTMLOptions {
if len(htmlOpt) > 0 {
return htmlOpt[0]
}
return HTMLOptions{
Layout: r.Opt.Layout,
}
}
func (r *TplRender) SetTemplatePath(setName, dir string) {
if len(setName) == 0 {
setName = _DEFAULT_TPL_SET_NAME
}
opt := *r.Opt
opt.Directory = dir
r.templateSet.Set(setName, &opt)
}
func (r *TplRender) HasTemplateSet(name string) bool {
return r.templateSet.Get(name) != nil
}

111
vendor/gopkg.in/macaron.v1/response_writer.go generated vendored Normal file
View File

@@ -0,0 +1,111 @@
// Copyright 2013 Martini Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"bufio"
"fmt"
"net"
"net/http"
)
// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about
// the response. It is recommended that middleware handlers use this construct to wrap a ResponseWriter
// if the functionality calls for it.
type ResponseWriter interface {
http.ResponseWriter
http.Flusher
// Status returns the status code of the response or 0 if the response has not been written.
Status() int
// Written returns whether or not the ResponseWriter has been written.
Written() bool
// Size returns the size of the response body.
Size() int
// Before allows for a function to be called before the ResponseWriter has been written to. This is
// useful for setting headers or any other operations that must happen before a response has been written.
Before(BeforeFunc)
}
// BeforeFunc is a function that is called before the ResponseWriter has been written to.
type BeforeFunc func(ResponseWriter)
// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter
func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {
return &responseWriter{rw, 0, 0, nil}
}
type responseWriter struct {
http.ResponseWriter
status int
size int
beforeFuncs []BeforeFunc
}
func (rw *responseWriter) WriteHeader(s int) {
rw.callBefore()
rw.ResponseWriter.WriteHeader(s)
rw.status = s
}
func (rw *responseWriter) Write(b []byte) (int, error) {
if !rw.Written() {
// The status will be StatusOK if WriteHeader has not been called yet
rw.WriteHeader(http.StatusOK)
}
size, err := rw.ResponseWriter.Write(b)
rw.size += size
return size, err
}
func (rw *responseWriter) Status() int {
return rw.status
}
func (rw *responseWriter) Size() int {
return rw.size
}
func (rw *responseWriter) Written() bool {
return rw.status != 0
}
func (rw *responseWriter) Before(before BeforeFunc) {
rw.beforeFuncs = append(rw.beforeFuncs, before)
}
func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hijacker, ok := rw.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface")
}
return hijacker.Hijack()
}
func (rw *responseWriter) CloseNotify() <-chan bool {
return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (rw *responseWriter) callBefore() {
for i := len(rw.beforeFuncs) - 1; i >= 0; i-- {
rw.beforeFuncs[i](rw)
}
}
func (rw *responseWriter) Flush() {
flusher, ok := rw.ResponseWriter.(http.Flusher)
if ok {
flusher.Flush()
}
}

76
vendor/gopkg.in/macaron.v1/return_handler.go generated vendored Normal file
View File

@@ -0,0 +1,76 @@
// Copyright 2013 Martini Authors
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"net/http"
"reflect"
"github.com/go-macaron/inject"
)
// ReturnHandler is a service that Macaron provides that is called
// when a route handler returns something. The ReturnHandler is
// responsible for writing to the ResponseWriter based on the values
// that are passed into this function.
type ReturnHandler func(*Context, []reflect.Value)
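// canDeref reports whether the value can be dereferenced, i.e. it is an interface or a pointer.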
func canDeref(val reflect.Value) bool {
return val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr
}
func isError(val reflect.Value) bool {
_, ok := val.Interface().(error)
return ok
}
func isByteSlice(val reflect.Value) bool {
return val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Uint8
}
func defaultReturnHandler() ReturnHandler {
return func(ctx *Context, vals []reflect.Value) {
rv := ctx.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil)))
resp := rv.Interface().(http.ResponseWriter)
var respVal reflect.Value
if len(vals) > 1 && vals[0].Kind() == reflect.Int {
resp.WriteHeader(int(vals[0].Int()))
respVal = vals[1]
} else if len(vals) > 0 {
respVal = vals[0]
if isError(respVal) {
err := respVal.Interface().(error)
if err != nil {
ctx.internalServerError(ctx, err)
}
return
} else if canDeref(respVal) {
if respVal.IsNil() {
return // Ignore nil error
}
}
}
if canDeref(respVal) {
respVal = respVal.Elem()
}
if isByteSlice(respVal) {
resp.Write(respVal.Bytes())
} else {
resp.Write([]byte(respVal.String()))
}
}
}

360
vendor/gopkg.in/macaron.v1/router.go generated vendored Normal file
View File

@@ -0,0 +1,360 @@
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"net/http"
"strings"
"sync"
)
var (
// Known HTTP methods.
_HTTP_METHODS = map[string]bool{
"GET": true,
"POST": true,
"PUT": true,
"DELETE": true,
"PATCH": true,
"OPTIONS": true,
"HEAD": true,
}
)
// routeMap represents a thread-safe map for route tree.
type routeMap struct {
lock sync.RWMutex
routes map[string]map[string]*Leaf
}
// NewRouteMap initializes and returns a new routeMap.
func NewRouteMap() *routeMap {
rm := &routeMap{
routes: make(map[string]map[string]*Leaf),
}
for m := range _HTTP_METHODS {
rm.routes[m] = make(map[string]*Leaf)
}
return rm
}
// getLeaf returns Leaf object if a route has been registered.
func (rm *routeMap) getLeaf(method, pattern string) *Leaf {
rm.lock.RLock()
defer rm.lock.RUnlock()
return rm.routes[method][pattern]
}
// add adds new route to route tree map.
func (rm *routeMap) add(method, pattern string, leaf *Leaf) {
rm.lock.Lock()
defer rm.lock.Unlock()
rm.routes[method][pattern] = leaf
}
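// group holds a URL pattern prefix and the handlers shared by the routes registered inside a Group call.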
type group struct {
pattern string
handlers []Handler
}
// Router represents a Macaron router layer.
type Router struct {
m *Macaron
autoHead bool
routers map[string]*Tree
*routeMap
namedRoutes map[string]*Leaf
groups []group
notFound http.HandlerFunc
internalServerError func(*Context, error)
}
func NewRouter() *Router {
return &Router{
routers: make(map[string]*Tree),
routeMap: NewRouteMap(),
namedRoutes: make(map[string]*Leaf),
}
}
// SetAutoHead sets the value that determines whether to add a HEAD route automatically
// when a GET route is added. Combo routers will not be affected by this value.
func (r *Router) SetAutoHead(v bool) {
r.autoHead = v
}
type Params map[string]string
// Handle is a function that can be registered to a route to handle HTTP requests.
// Like http.HandlerFunc, but has a third parameter for the values of wildcards (variables).
type Handle func(http.ResponseWriter, *http.Request, Params)
// Route represents a wrapper of leaf route and upper level router.
type Route struct {
router *Router
leaf *Leaf
}
// Name sets name of route.
func (r *Route) Name(name string) {
if len(name) == 0 {
panic("route name cannot be empty")
} else if r.router.namedRoutes[name] != nil {
panic("route with given name already exists")
}
r.router.namedRoutes[name] = r.leaf
}
// handle adds new route to the router tree.
func (r *Router) handle(method, pattern string, handle Handle) *Route {
method = strings.ToUpper(method)
var leaf *Leaf
// Prevent duplicate routes.
if leaf = r.getLeaf(method, pattern); leaf != nil {
return &Route{r, leaf}
}
// Validate HTTP methods.
if !_HTTP_METHODS[method] && method != "*" {
panic("unknown HTTP method: " + method)
}
// Generate the set of methods that need to be registered.
methods := make(map[string]bool)
if method == "*" {
for m := range _HTTP_METHODS {
methods[m] = true
}
} else {
methods[method] = true
}
// Add to router tree.
for m := range methods {
if t, ok := r.routers[m]; ok {
leaf = t.Add(pattern, handle)
} else {
t := NewTree()
leaf = t.Add(pattern, handle)
r.routers[m] = t
}
r.add(m, pattern, leaf)
}
return &Route{r, leaf}
}
// Handle registers a new request handle with the given pattern, method and handlers.
func (r *Router) Handle(method string, pattern string, handlers []Handler) *Route {
if len(r.groups) > 0 {
groupPattern := ""
h := make([]Handler, 0)
for _, g := range r.groups {
groupPattern += g.pattern
h = append(h, g.handlers...)
}
pattern = groupPattern + pattern
h = append(h, handlers...)
handlers = h
}
validateHandlers(handlers)
return r.handle(method, pattern, func(resp http.ResponseWriter, req *http.Request, params Params) {
c := r.m.createContext(resp, req)
c.params = params
c.handlers = make([]Handler, 0, len(r.m.handlers)+len(handlers))
c.handlers = append(c.handlers, r.m.handlers...)
c.handlers = append(c.handlers, handlers...)
c.run()
})
}
func (r *Router) Group(pattern string, fn func(), h ...Handler) {
r.groups = append(r.groups, group{pattern, h})
fn()
r.groups = r.groups[:len(r.groups)-1]
}
// Get is a shortcut for r.Handle("GET", pattern, handlers)
func (r *Router) Get(pattern string, h ...Handler) (leaf *Route) {
leaf = r.Handle("GET", pattern, h)
if r.autoHead {
r.Head(pattern, h...)
}
return leaf
}
// Patch is a shortcut for r.Handle("PATCH", pattern, handlers)
func (r *Router) Patch(pattern string, h ...Handler) *Route {
return r.Handle("PATCH", pattern, h)
}
// Post is a shortcut for r.Handle("POST", pattern, handlers)
func (r *Router) Post(pattern string, h ...Handler) *Route {
return r.Handle("POST", pattern, h)
}
// Put is a shortcut for r.Handle("PUT", pattern, handlers)
func (r *Router) Put(pattern string, h ...Handler) *Route {
return r.Handle("PUT", pattern, h)
}
// Delete is a shortcut for r.Handle("DELETE", pattern, handlers)
func (r *Router) Delete(pattern string, h ...Handler) *Route {
return r.Handle("DELETE", pattern, h)
}
// Options is a shortcut for r.Handle("OPTIONS", pattern, handlers)
func (r *Router) Options(pattern string, h ...Handler) *Route {
return r.Handle("OPTIONS", pattern, h)
}
// Head is a shortcut for r.Handle("HEAD", pattern, handlers)
func (r *Router) Head(pattern string, h ...Handler) *Route {
return r.Handle("HEAD", pattern, h)
}
// Any is a shortcut for r.Handle("*", pattern, handlers)
func (r *Router) Any(pattern string, h ...Handler) *Route {
return r.Handle("*", pattern, h)
}
// Route is a shortcut for registering the same handlers for multiple HTTP methods.
//
// Example:
// m.Route("/", "GET,POST", h)
func (r *Router) Route(pattern, methods string, h ...Handler) (route *Route) {
for _, m := range strings.Split(methods, ",") {
route = r.Handle(strings.TrimSpace(m), pattern, h)
}
return route
}
// Combo returns a combo router.
func (r *Router) Combo(pattern string, h ...Handler) *ComboRouter {
return &ComboRouter{r, pattern, h, map[string]bool{}, nil}
}
// NotFound sets the configurable http.HandlerFunc which is called when no matching route is
// found. If it is not set, http.NotFound is used.
// Be sure to set the 404 response code in your handler.
func (r *Router) NotFound(handlers ...Handler) {
validateHandlers(handlers)
r.notFound = func(rw http.ResponseWriter, req *http.Request) {
c := r.m.createContext(rw, req)
c.handlers = append(r.m.handlers, handlers...)
c.run()
}
}
// InternalServerError sets the configurable handler which is called when a route handler returns
// an error. If it is not set, a default handler is used.
// Be sure to set the 500 response code in your handler.
func (r *Router) InternalServerError(handlers ...Handler) {
validateHandlers(handlers)
r.internalServerError = func(c *Context, err error) {
c.index = 0
c.handlers = handlers
c.Map(err)
c.run()
}
}
func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if t, ok := r.routers[req.Method]; ok {
h, p, ok := t.Match(req.URL.Path)
if ok {
if splat, ok := p["*0"]; ok {
p["*"] = splat // Easy name.
}
h(rw, req, p)
return
}
}
r.notFound(rw, req)
}
// URLFor builds the path part of a URL from the given pair values.
func (r *Router) URLFor(name string, pairs ...string) string {
leaf, ok := r.namedRoutes[name]
if !ok {
panic("route with given name does not exists: " + name)
}
return leaf.URLPath(pairs...)
}
// ComboRouter represents a combo router.
type ComboRouter struct {
router *Router
pattern string
handlers []Handler
methods map[string]bool // Registered methods.
lastRoute *Route
}
func (cr *ComboRouter) checkMethod(name string) {
if cr.methods[name] {
panic("method '" + name + "' has already been registered")
}
cr.methods[name] = true
}
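// route registers the combo pattern for one HTTP method via fn, combining the shared handlers with the method-specific ones, and remembers the resulting route.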
func (cr *ComboRouter) route(fn func(string, ...Handler) *Route, method string, h ...Handler) *ComboRouter {
cr.checkMethod(method)
cr.lastRoute = fn(cr.pattern, append(cr.handlers, h...)...)
return cr
}
func (cr *ComboRouter) Get(h ...Handler) *ComboRouter {
return cr.route(cr.router.Get, "GET", h...)
}
func (cr *ComboRouter) Patch(h ...Handler) *ComboRouter {
return cr.route(cr.router.Patch, "PATCH", h...)
}
func (cr *ComboRouter) Post(h ...Handler) *ComboRouter {
return cr.route(cr.router.Post, "POST", h...)
}
func (cr *ComboRouter) Put(h ...Handler) *ComboRouter {
return cr.route(cr.router.Put, "PUT", h...)
}
func (cr *ComboRouter) Delete(h ...Handler) *ComboRouter {
return cr.route(cr.router.Delete, "DELETE", h...)
}
func (cr *ComboRouter) Options(h ...Handler) *ComboRouter {
return cr.route(cr.router.Options, "OPTIONS", h...)
}
func (cr *ComboRouter) Head(h ...Handler) *ComboRouter {
return cr.route(cr.router.Head, "HEAD", h...)
}
// Name sets name of ComboRouter route.
func (cr *ComboRouter) Name(name string) {
if cr.lastRoute == nil {
panic("no corresponding route to be named")
}
cr.lastRoute.Name(name)
}

205
vendor/gopkg.in/macaron.v1/static.go generated vendored Normal file
View File

@@ -0,0 +1,205 @@
// Copyright 2013 Martini Authors
// Copyright 2014 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"log"
"net/http"
"path"
"path/filepath"
"strings"
"sync"
)
// StaticOptions is a struct for specifying configuration options for the macaron.Static middleware.
type StaticOptions struct {
// Prefix is the optional prefix used to serve the static directory content
Prefix string
// SkipLogging will disable [Static] log messages when a static file is served.
SkipLogging bool
// IndexFile defines which file to serve as index if it exists.
IndexFile string
// Expires defines which user-defined function to use for producing an HTTP Expires header
// https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
Expires func() string
// FileSystem is the interface for supporting any implementation of a file system.
FileSystem http.FileSystem
}
// FIXME: to be deleted.
type staticMap struct {
lock sync.RWMutex
data map[string]*http.Dir
}
func (sm *staticMap) Set(dir *http.Dir) {
sm.lock.Lock()
defer sm.lock.Unlock()
sm.data[string(*dir)] = dir
}
func (sm *staticMap) Get(name string) *http.Dir {
sm.lock.RLock()
defer sm.lock.RUnlock()
return sm.data[name]
}
func (sm *staticMap) Delete(name string) {
sm.lock.Lock()
defer sm.lock.Unlock()
delete(sm.data, name)
}
var statics = staticMap{sync.RWMutex{}, map[string]*http.Dir{}}
// staticFileSystem implements http.FileSystem interface.
type staticFileSystem struct {
dir *http.Dir
}
func newStaticFileSystem(directory string) staticFileSystem {
if !filepath.IsAbs(directory) {
directory = filepath.Join(Root, directory)
}
dir := http.Dir(directory)
statics.Set(&dir)
return staticFileSystem{&dir}
}
func (fs staticFileSystem) Open(name string) (http.File, error) {
return fs.dir.Open(name)
}
func prepareStaticOption(dir string, opt StaticOptions) StaticOptions {
// Defaults
if len(opt.IndexFile) == 0 {
opt.IndexFile = "index.html"
}
// Normalize the prefix if provided
if opt.Prefix != "" {
// Ensure we have a leading '/'
if opt.Prefix[0] != '/' {
opt.Prefix = "/" + opt.Prefix
}
// Remove any trailing '/'
opt.Prefix = strings.TrimRight(opt.Prefix, "/")
}
if opt.FileSystem == nil {
opt.FileSystem = newStaticFileSystem(dir)
}
return opt
}
func prepareStaticOptions(dir string, options []StaticOptions) StaticOptions {
var opt StaticOptions
if len(options) > 0 {
opt = options[0]
}
return prepareStaticOption(dir, opt)
}
func staticHandler(ctx *Context, log *log.Logger, opt StaticOptions) bool {
if ctx.Req.Method != "GET" && ctx.Req.Method != "HEAD" {
return false
}
file := ctx.Req.URL.Path
// if we have a prefix, filter requests by stripping the prefix
if opt.Prefix != "" {
if !strings.HasPrefix(file, opt.Prefix) {
return false
}
file = file[len(opt.Prefix):]
if file != "" && file[0] != '/' {
return false
}
}
f, err := opt.FileSystem.Open(file)
if err != nil {
return false
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return true // File exists but failed to open.
}
// Try to serve index file
if fi.IsDir() {
// Redirect if missing trailing slash.
if !strings.HasSuffix(ctx.Req.URL.Path, "/") {
http.Redirect(ctx.Resp, ctx.Req.Request, ctx.Req.URL.Path+"/", http.StatusFound)
return true
}
file = path.Join(file, opt.IndexFile)
f, err = opt.FileSystem.Open(file)
if err != nil {
return false // Discard error.
}
defer f.Close()
fi, err = f.Stat()
if err != nil || fi.IsDir() {
return true
}
}
if !opt.SkipLogging {
log.Println("[Static] Serving " + file)
}
// Add an Expires header to the static content
if opt.Expires != nil {
ctx.Resp.Header().Set("Expires", opt.Expires())
}
http.ServeContent(ctx.Resp, ctx.Req.Request, file, fi.ModTime(), f)
return true
}
// Static returns a middleware handler that serves static files in the given directory.
func Static(directory string, staticOpt ...StaticOptions) Handler {
opt := prepareStaticOptions(directory, staticOpt)
return func(ctx *Context, log *log.Logger) {
staticHandler(ctx, log, opt)
}
}
// Statics registers multiple static middleware handlers all at once.
func Statics(opt StaticOptions, dirs ...string) Handler {
if len(dirs) == 0 {
panic("no static directory is given")
}
opts := make([]StaticOptions, len(dirs))
for i := range dirs {
opts[i] = prepareStaticOption(dirs[i], opt)
}
return func(ctx *Context, log *log.Logger) {
for i := range opts {
if staticHandler(ctx, log, opts[i]) {
return
}
}
}
}
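
A short sketch of wiring the Static and Statics handlers above into an application; the directory names and the prefix are illustrative:

package main

import "gopkg.in/macaron.v1"

func main() {
	m := macaron.New()
	m.Use(macaron.Logger())

	// Serve ./public at the site root.
	m.Use(macaron.Static("public"))

	// Serve ./assets under the /static prefix without per-file log lines.
	m.Use(macaron.Static("assets", macaron.StaticOptions{
		Prefix:      "static",
		SkipLogging: true,
	}))

	m.Run()
}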

379
vendor/gopkg.in/macaron.v1/tree.go generated vendored Normal file
View File

@@ -0,0 +1,379 @@
// Copyright 2015 The Macaron Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package macaron
import (
"regexp"
"strings"
"github.com/Unknwon/com"
)
type patternType int8
const (
_PATTERN_STATIC patternType = iota // /home
_PATTERN_REGEXP // /:id([0-9]+)
_PATTERN_PATH_EXT // /*.*
_PATTERN_HOLDER // /:user
_PATTERN_MATCH_ALL // /*
)
// Leaf represents the information of a leaf route.
type Leaf struct {
parent *Tree
typ patternType
pattern string
rawPattern string // Contains wildcard instead of regexp
wildcards []string
reg *regexp.Regexp
optional bool
handle Handle
}
var wildcardPattern = regexp.MustCompile(`:[a-zA-Z0-9]+`)
func isSpecialRegexp(pattern, regStr string, pos []int) bool {
return len(pattern) >= pos[1]+len(regStr) && pattern[pos[1]:pos[1]+len(regStr)] == regStr
}
// getNextWildcard tries to find the next wildcard and updates the pattern with the corresponding regexp.
func getNextWildcard(pattern string) (wildcard, _ string) {
pos := wildcardPattern.FindStringIndex(pattern)
if pos == nil {
return "", pattern
}
wildcard = pattern[pos[0]:pos[1]]
// Reached the last character, or no regexp is given.
if len(pattern) == pos[1] {
return wildcard, strings.Replace(pattern, wildcard, `(.+)`, 1)
} else if pattern[pos[1]] != '(' {
switch {
case isSpecialRegexp(pattern, ":int", pos):
pattern = strings.Replace(pattern, ":int", "([0-9]+)", 1)
case isSpecialRegexp(pattern, ":string", pos):
pattern = strings.Replace(pattern, ":string", "([\\w]+)", 1)
default:
return wildcard, strings.Replace(pattern, wildcard, `(.+)`, 1)
}
}
// Cut out placeholder directly.
return wildcard, pattern[:pos[0]] + pattern[pos[1]:]
}
func getWildcards(pattern string) (string, []string) {
wildcards := make([]string, 0, 2)
// Keep getting next wildcard until nothing is left.
var wildcard string
for {
wildcard, pattern = getNextWildcard(pattern)
if len(wildcard) > 0 {
wildcards = append(wildcards, wildcard)
} else {
break
}
}
return pattern, wildcards
}
// getRawPattern removes all regexps but keeps wildcards for building the URL path.
func getRawPattern(rawPattern string) string {
rawPattern = strings.Replace(rawPattern, ":int", "", -1)
rawPattern = strings.Replace(rawPattern, ":string", "", -1)
for {
startIdx := strings.Index(rawPattern, "(")
if startIdx == -1 {
break
}
closeIdx := strings.Index(rawPattern, ")")
if closeIdx > -1 {
rawPattern = rawPattern[:startIdx] + rawPattern[closeIdx+1:]
}
}
return rawPattern
}
func checkPattern(pattern string) (typ patternType, rawPattern string, wildcards []string, reg *regexp.Regexp) {
pattern = strings.TrimLeft(pattern, "?")
rawPattern = getRawPattern(pattern)
if pattern == "*" {
typ = _PATTERN_MATCH_ALL
} else if pattern == "*.*" {
typ = _PATTERN_PATH_EXT
} else if strings.Contains(pattern, ":") {
typ = _PATTERN_REGEXP
pattern, wildcards = getWildcards(pattern)
if pattern == "(.+)" {
typ = _PATTERN_HOLDER
} else {
reg = regexp.MustCompile(pattern)
}
}
return typ, rawPattern, wildcards, reg
}
func NewLeaf(parent *Tree, pattern string, handle Handle) *Leaf {
typ, rawPattern, wildcards, reg := checkPattern(pattern)
optional := false
if len(pattern) > 0 && pattern[0] == '?' {
optional = true
}
return &Leaf{parent, typ, pattern, rawPattern, wildcards, reg, optional, handle}
}
// URLPath builds the path part of a URL from the given pair values.
func (l *Leaf) URLPath(pairs ...string) string {
if len(pairs)%2 != 0 {
panic("number of pairs does not match")
}
urlPath := l.rawPattern
parent := l.parent
for parent != nil {
urlPath = parent.rawPattern + "/" + urlPath
parent = parent.parent
}
for i := 0; i < len(pairs); i += 2 {
if len(pairs[i]) == 0 {
panic("pair value cannot be empty: " + com.ToStr(i))
} else if pairs[i][0] != ':' && pairs[i] != "*" && pairs[i] != "*.*" {
pairs[i] = ":" + pairs[i]
}
urlPath = strings.Replace(urlPath, pairs[i], pairs[i+1], 1)
}
return urlPath
}
// Tree represents a router tree in Macaron.
type Tree struct {
parent *Tree
typ patternType
pattern string
rawPattern string
wildcards []string
reg *regexp.Regexp
subtrees []*Tree
leaves []*Leaf
}
func NewSubtree(parent *Tree, pattern string) *Tree {
typ, rawPattern, wildcards, reg := checkPattern(pattern)
return &Tree{parent, typ, pattern, rawPattern, wildcards, reg, make([]*Tree, 0, 5), make([]*Leaf, 0, 5)}
}
func NewTree() *Tree {
return NewSubtree(nil, "")
}
func (t *Tree) addLeaf(pattern string, handle Handle) *Leaf {
for i := 0; i < len(t.leaves); i++ {
if t.leaves[i].pattern == pattern {
return t.leaves[i]
}
}
leaf := NewLeaf(t, pattern, handle)
// Add the exact same leaf to the grandparent/parent level without the optional mark.
if leaf.optional {
parent := leaf.parent
if parent.parent != nil {
parent.parent.addLeaf(parent.pattern, handle)
} else {
parent.addLeaf("", handle) // The root tree can add it as an empty pattern.
}
}
i := 0
for ; i < len(t.leaves); i++ {
if leaf.typ < t.leaves[i].typ {
break
}
}
if i == len(t.leaves) {
t.leaves = append(t.leaves, leaf)
} else {
t.leaves = append(t.leaves[:i], append([]*Leaf{leaf}, t.leaves[i:]...)...)
}
return leaf
}
func (t *Tree) addSubtree(segment, pattern string, handle Handle) *Leaf {
for i := 0; i < len(t.subtrees); i++ {
if t.subtrees[i].pattern == segment {
return t.subtrees[i].addNextSegment(pattern, handle)
}
}
subtree := NewSubtree(t, segment)
i := 0
for ; i < len(t.subtrees); i++ {
if subtree.typ < t.subtrees[i].typ {
break
}
}
if i == len(t.subtrees) {
t.subtrees = append(t.subtrees, subtree)
} else {
t.subtrees = append(t.subtrees[:i], append([]*Tree{subtree}, t.subtrees[i:]...)...)
}
return subtree.addNextSegment(pattern, handle)
}
func (t *Tree) addNextSegment(pattern string, handle Handle) *Leaf {
pattern = strings.TrimPrefix(pattern, "/")
i := strings.Index(pattern, "/")
if i == -1 {
return t.addLeaf(pattern, handle)
}
return t.addSubtree(pattern[:i], pattern[i+1:], handle)
}
func (t *Tree) Add(pattern string, handle Handle) *Leaf {
pattern = strings.TrimSuffix(pattern, "/")
return t.addNextSegment(pattern, handle)
}
func (t *Tree) matchLeaf(globLevel int, url string, params Params) (Handle, bool) {
for i := 0; i < len(t.leaves); i++ {
switch t.leaves[i].typ {
case _PATTERN_STATIC:
if t.leaves[i].pattern == url {
return t.leaves[i].handle, true
}
case _PATTERN_REGEXP:
results := t.leaves[i].reg.FindStringSubmatch(url)
// Number of results and wildcards should be exactly the same.
if len(results)-1 != len(t.leaves[i].wildcards) {
break
}
for j := 0; j < len(t.leaves[i].wildcards); j++ {
params[t.leaves[i].wildcards[j]] = results[j+1]
}
return t.leaves[i].handle, true
case _PATTERN_PATH_EXT:
j := strings.LastIndex(url, ".")
if j > -1 {
params[":path"] = url[:j]
params[":ext"] = url[j+1:]
} else {
params[":path"] = url
}
return t.leaves[i].handle, true
case _PATTERN_HOLDER:
params[t.leaves[i].wildcards[0]] = url
return t.leaves[i].handle, true
case _PATTERN_MATCH_ALL:
params["*"] = url
params["*"+com.ToStr(globLevel)] = url
return t.leaves[i].handle, true
}
}
return nil, false
}
func (t *Tree) matchSubtree(globLevel int, segment, url string, params Params) (Handle, bool) {
for i := 0; i < len(t.subtrees); i++ {
switch t.subtrees[i].typ {
case _PATTERN_STATIC:
if t.subtrees[i].pattern == segment {
if handle, ok := t.subtrees[i].matchNextSegment(globLevel, url, params); ok {
return handle, true
}
}
case _PATTERN_REGEXP:
results := t.subtrees[i].reg.FindStringSubmatch(segment)
if len(results)-1 != len(t.subtrees[i].wildcards) {
break
}
for j := 0; j < len(t.subtrees[i].wildcards); j++ {
params[t.subtrees[i].wildcards[j]] = results[j+1]
}
if handle, ok := t.subtrees[i].matchNextSegment(globLevel, url, params); ok {
return handle, true
}
case _PATTERN_HOLDER:
if handle, ok := t.subtrees[i].matchNextSegment(globLevel+1, url, params); ok {
params[t.subtrees[i].wildcards[0]] = segment
return handle, true
}
case _PATTERN_MATCH_ALL:
if handle, ok := t.subtrees[i].matchNextSegment(globLevel+1, url, params); ok {
params["*"+com.ToStr(globLevel)] = segment
return handle, true
}
}
}
if len(t.leaves) > 0 {
leaf := t.leaves[len(t.leaves)-1]
if leaf.typ == _PATTERN_PATH_EXT {
url = segment + "/" + url
j := strings.LastIndex(url, ".")
if j > -1 {
params[":path"] = url[:j]
params[":ext"] = url[j+1:]
} else {
params[":path"] = url
}
return leaf.handle, true
} else if leaf.typ == _PATTERN_MATCH_ALL {
params["*"] = segment + "/" + url
params["*"+com.ToStr(globLevel)] = segment + "/" + url
return leaf.handle, true
}
}
return nil, false
}
func (t *Tree) matchNextSegment(globLevel int, url string, params Params) (Handle, bool) {
i := strings.Index(url, "/")
if i == -1 {
return t.matchLeaf(globLevel, url, params)
}
return t.matchSubtree(globLevel, url[:i], url[i+1:], params)
}
func (t *Tree) Match(url string) (Handle, Params, bool) {
url = strings.TrimPrefix(url, "/")
url = strings.TrimSuffix(url, "/")
params := make(Params)
handle, ok := t.matchNextSegment(0, url, params)
return handle, params, ok
}
// MatchTest returns true if the given URL is matched by the given pattern.
func MatchTest(pattern, url string) bool {
t := NewTree()
t.Add(pattern, nil)
_, _, ok := t.Match(url)
return ok
}
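
To see how the tree above turns wildcards into Params, here is a small standalone sketch; the patterns and URLs are illustrative:

package main

import (
	"fmt"

	"gopkg.in/macaron.v1"
)

func main() {
	// MatchTest is the quickest way to check a pattern against a URL.
	fmt.Println(macaron.MatchTest("/users/:id([0-9]+)", "/users/42"))  // true
	fmt.Println(macaron.MatchTest("/users/:id([0-9]+)", "/users/abc")) // false

	// Tree.Match also reports the captured parameters.
	t := macaron.NewTree()
	t.Add("/repos/:owner/:name/*", nil)
	_, params, ok := t.Match("/repos/unknwon/macaron/docs/index.html")
	fmt.Println(ok, params[":owner"], params[":name"], params["*"])
	// true unknwon macaron docs/index.html
}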

19
vendor/gopkg.in/redis.v2/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,19 @@
language: go
services:
- redis-server
go:
- 1.1
- 1.2
- 1.3
- tip
install:
- go get gopkg.in/bufio.v1
- go get gopkg.in/check.v1
- mkdir -p $HOME/gopath/src/gopkg.in
- ln -s `pwd` $HOME/gopath/src/gopkg.in/redis.v2
before_script:
- redis-server testdata/sentinel.conf --sentinel &

27
vendor/gopkg.in/redis.v2/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2012 The Redis Go Client Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

3
vendor/gopkg.in/redis.v2/Makefile generated vendored Normal file
View File

@@ -0,0 +1,3 @@
all:
go test gopkg.in/redis.v2 -cpu=1,2,4
go test gopkg.in/redis.v2 -short -race

46
vendor/gopkg.in/redis.v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,46 @@
Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
=======================
Supports:
- Redis 2.8 commands except QUIT, MONITOR, SLOWLOG and SYNC.
- Pub/sub.
- Transactions.
- Pipelining.
- Connection pool.
- TLS connections.
- Thread safety.
- Timeouts.
- Redis Sentinel.
API docs: http://godoc.org/gopkg.in/redis.v2.
Examples: http://godoc.org/gopkg.in/redis.v2#pkg-examples.
Installation
------------
Install:
go get gopkg.in/redis.v2
Look and feel
-------------
Some corner cases:
SORT list LIMIT 0 2 ASC
vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
Min: "-inf",
Max: "+inf",
Offset: 0,
Count: 2,
}).Result()
ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result()

597
vendor/gopkg.in/redis.v2/command.go generated vendored Normal file
View File

@@ -0,0 +1,597 @@
package redis
import (
"fmt"
"strconv"
"strings"
"time"
"gopkg.in/bufio.v1"
)
var (
_ Cmder = (*Cmd)(nil)
_ Cmder = (*SliceCmd)(nil)
_ Cmder = (*StatusCmd)(nil)
_ Cmder = (*IntCmd)(nil)
_ Cmder = (*DurationCmd)(nil)
_ Cmder = (*BoolCmd)(nil)
_ Cmder = (*StringCmd)(nil)
_ Cmder = (*FloatCmd)(nil)
_ Cmder = (*StringSliceCmd)(nil)
_ Cmder = (*BoolSliceCmd)(nil)
_ Cmder = (*StringStringMapCmd)(nil)
_ Cmder = (*ZSliceCmd)(nil)
_ Cmder = (*ScanCmd)(nil)
)
type Cmder interface {
args() []string
parseReply(*bufio.Reader) error
setErr(error)
writeTimeout() *time.Duration
readTimeout() *time.Duration
Err() error
String() string
}
func setCmdsErr(cmds []Cmder, e error) {
for _, cmd := range cmds {
cmd.setErr(e)
}
}
func cmdString(cmd Cmder, val interface{}) string {
s := strings.Join(cmd.args(), " ")
if err := cmd.Err(); err != nil {
return s + ": " + err.Error()
}
if val != nil {
return s + ": " + fmt.Sprint(val)
}
return s
}
//------------------------------------------------------------------------------
type baseCmd struct {
_args []string
err error
_writeTimeout, _readTimeout *time.Duration
}
func newBaseCmd(args ...string) *baseCmd {
return &baseCmd{
_args: args,
}
}
func (cmd *baseCmd) Err() error {
if cmd.err != nil {
return cmd.err
}
return nil
}
func (cmd *baseCmd) args() []string {
return cmd._args
}
func (cmd *baseCmd) readTimeout() *time.Duration {
return cmd._readTimeout
}
func (cmd *baseCmd) setReadTimeout(d time.Duration) {
cmd._readTimeout = &d
}
func (cmd *baseCmd) writeTimeout() *time.Duration {
return cmd._writeTimeout
}
func (cmd *baseCmd) setWriteTimeout(d time.Duration) {
cmd._writeTimeout = &d
}
func (cmd *baseCmd) setErr(e error) {
cmd.err = e
}
//------------------------------------------------------------------------------
type Cmd struct {
*baseCmd
val interface{}
}
func NewCmd(args ...string) *Cmd {
return &Cmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *Cmd) Val() interface{} {
return cmd.val
}
func (cmd *Cmd) Result() (interface{}, error) {
return cmd.val, cmd.err
}
func (cmd *Cmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *Cmd) parseReply(rd *bufio.Reader) error {
cmd.val, cmd.err = parseReply(rd, parseSlice)
return cmd.err
}
//------------------------------------------------------------------------------
type SliceCmd struct {
*baseCmd
val []interface{}
}
func NewSliceCmd(args ...string) *SliceCmd {
return &SliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *SliceCmd) Val() []interface{} {
return cmd.val
}
func (cmd *SliceCmd) Result() ([]interface{}, error) {
return cmd.val, cmd.err
}
func (cmd *SliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]interface{})
return nil
}
//------------------------------------------------------------------------------
type StatusCmd struct {
*baseCmd
val string
}
func NewStatusCmd(args ...string) *StatusCmd {
return &StatusCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StatusCmd) Val() string {
return cmd.val
}
func (cmd *StatusCmd) Result() (string, error) {
return cmd.val, cmd.err
}
func (cmd *StatusCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(string)
return nil
}
//------------------------------------------------------------------------------
type IntCmd struct {
*baseCmd
val int64
}
func NewIntCmd(args ...string) *IntCmd {
return &IntCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *IntCmd) Val() int64 {
return cmd.val
}
func (cmd *IntCmd) Result() (int64, error) {
return cmd.val, cmd.err
}
func (cmd *IntCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *IntCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(int64)
return nil
}
//------------------------------------------------------------------------------
type DurationCmd struct {
*baseCmd
val time.Duration
precision time.Duration
}
func NewDurationCmd(precision time.Duration, args ...string) *DurationCmd {
return &DurationCmd{
baseCmd: newBaseCmd(args...),
precision: precision,
}
}
func (cmd *DurationCmd) Val() time.Duration {
return cmd.val
}
func (cmd *DurationCmd) Result() (time.Duration, error) {
return cmd.val, cmd.err
}
func (cmd *DurationCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = time.Duration(v.(int64)) * cmd.precision
return nil
}
//------------------------------------------------------------------------------
type BoolCmd struct {
*baseCmd
val bool
}
func NewBoolCmd(args ...string) *BoolCmd {
return &BoolCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *BoolCmd) Val() bool {
return cmd.val
}
func (cmd *BoolCmd) Result() (bool, error) {
return cmd.val, cmd.err
}
func (cmd *BoolCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(int64) == 1
return nil
}
//------------------------------------------------------------------------------
type StringCmd struct {
*baseCmd
val string
}
func NewStringCmd(args ...string) *StringCmd {
return &StringCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StringCmd) Val() string {
return cmd.val
}
func (cmd *StringCmd) Result() (string, error) {
return cmd.val, cmd.err
}
func (cmd *StringCmd) Int64() (int64, error) {
if cmd.err != nil {
return 0, cmd.err
}
return strconv.ParseInt(cmd.val, 10, 64)
}
func (cmd *StringCmd) Uint64() (uint64, error) {
if cmd.err != nil {
return 0, cmd.err
}
return strconv.ParseUint(cmd.val, 10, 64)
}
func (cmd *StringCmd) Float64() (float64, error) {
if cmd.err != nil {
return 0, cmd.err
}
return strconv.ParseFloat(cmd.val, 64)
}
func (cmd *StringCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StringCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(string)
return nil
}
//------------------------------------------------------------------------------
type FloatCmd struct {
*baseCmd
val float64
}
func NewFloatCmd(args ...string) *FloatCmd {
return &FloatCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *FloatCmd) Val() float64 {
return cmd.val
}
func (cmd *FloatCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val, cmd.err = strconv.ParseFloat(v.(string), 64)
return cmd.err
}
//------------------------------------------------------------------------------
type StringSliceCmd struct {
*baseCmd
val []string
}
func NewStringSliceCmd(args ...string) *StringSliceCmd {
return &StringSliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StringSliceCmd) Val() []string {
return cmd.val
}
func (cmd *StringSliceCmd) Result() ([]string, error) {
return cmd.Val(), cmd.Err()
}
func (cmd *StringSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseStringSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]string)
return nil
}
//------------------------------------------------------------------------------
type BoolSliceCmd struct {
*baseCmd
val []bool
}
func NewBoolSliceCmd(args ...string) *BoolSliceCmd {
return &BoolSliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *BoolSliceCmd) Val() []bool {
return cmd.val
}
func (cmd *BoolSliceCmd) Result() ([]bool, error) {
return cmd.val, cmd.err
}
func (cmd *BoolSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseBoolSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]bool)
return nil
}
//------------------------------------------------------------------------------
type StringStringMapCmd struct {
*baseCmd
val map[string]string
}
func NewStringStringMapCmd(args ...string) *StringStringMapCmd {
return &StringStringMapCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StringStringMapCmd) Val() map[string]string {
return cmd.val
}
func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
return cmd.val, cmd.err
}
func (cmd *StringStringMapCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseStringStringMap)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(map[string]string)
return nil
}
//------------------------------------------------------------------------------
type ZSliceCmd struct {
*baseCmd
val []Z
}
func NewZSliceCmd(args ...string) *ZSliceCmd {
return &ZSliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *ZSliceCmd) Val() []Z {
return cmd.val
}
func (cmd *ZSliceCmd) Result() ([]Z, error) {
return cmd.val, cmd.err
}
func (cmd *ZSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseZSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]Z)
return nil
}
//------------------------------------------------------------------------------
type ScanCmd struct {
*baseCmd
cursor int64
keys []string
}
func NewScanCmd(args ...string) *ScanCmd {
return &ScanCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *ScanCmd) Val() (int64, []string) {
return cmd.cursor, cmd.keys
}
func (cmd *ScanCmd) Result() (int64, []string, error) {
return cmd.cursor, cmd.keys, cmd.err
}
func (cmd *ScanCmd) String() string {
return cmdString(cmd, cmd.keys)
}
func (cmd *ScanCmd) parseReply(rd *bufio.Reader) error {
vi, err := parseReply(rd, parseSlice)
if err != nil {
cmd.err = err
return cmd.err
}
v := vi.([]interface{})
cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64)
if cmd.err != nil {
return cmd.err
}
keys := v[1].([]interface{})
for _, keyi := range keys {
cmd.keys = append(cmd.keys, keyi.(string))
}
return nil
}

1246
vendor/gopkg.in/redis.v2/commands.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

4
vendor/gopkg.in/redis.v2/doc.go generated vendored Normal file
View File

@@ -0,0 +1,4 @@
/*
Package redis implements a Redis client.
*/
package redis

23
vendor/gopkg.in/redis.v2/error.go generated vendored Normal file
View File

@@ -0,0 +1,23 @@
package redis
import (
"fmt"
)
// Nil is returned for a Redis nil reply.
var Nil = errorf("redis: nil")
// TxFailedErr is returned when a Redis transaction fails.
var TxFailedErr = errorf("redis: transaction failed")
type redisError struct {
s string
}
func errorf(s string, args ...interface{}) redisError {
return redisError{s: fmt.Sprintf(s, args...)}
}
func (err redisError) Error() string {
return err.s
}

180
vendor/gopkg.in/redis.v2/example_test.go generated vendored Normal file
View File

@@ -0,0 +1,180 @@
package redis_test
import (
"fmt"
"strconv"
"gopkg.in/redis.v2"
)
var client *redis.Client
func init() {
client = redis.NewTCPClient(&redis.Options{
Addr: ":6379",
})
client.FlushDb()
}
func ExampleNewTCPClient() {
client := redis.NewTCPClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
pong, err := client.Ping().Result()
fmt.Println(pong, err)
// Output: PONG <nil>
}
func ExampleNewFailoverClient() {
client := redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: "master",
SentinelAddrs: []string{":26379"},
})
pong, err := client.Ping().Result()
fmt.Println(pong, err)
// Output: PONG <nil>
}
func ExampleClient() {
if err := client.Set("foo", "bar").Err(); err != nil {
panic(err)
}
v, err := client.Get("hello").Result()
fmt.Printf("%q %q %v", v, err, err == redis.Nil)
// Output: "" "redis: nil" true
}
func ExampleClient_Incr() {
if err := client.Incr("counter").Err(); err != nil {
panic(err)
}
n, err := client.Get("counter").Int64()
fmt.Println(n, err)
// Output: 1 <nil>
}
func ExampleClient_Pipelined() {
cmds, err := client.Pipelined(func(c *redis.Pipeline) error {
c.Set("key1", "hello1")
c.Get("key1")
return nil
})
fmt.Println(err)
set := cmds[0].(*redis.StatusCmd)
fmt.Println(set)
get := cmds[1].(*redis.StringCmd)
fmt.Println(get)
// Output: <nil>
// SET key1 hello1: OK
// GET key1: hello1
}
func ExamplePipeline() {
pipeline := client.Pipeline()
set := pipeline.Set("key1", "hello1")
get := pipeline.Get("key1")
cmds, err := pipeline.Exec()
fmt.Println(cmds, err)
fmt.Println(set)
fmt.Println(get)
// Output: [SET key1 hello1: OK GET key1: hello1] <nil>
// SET key1 hello1: OK
// GET key1: hello1
}
func ExampleMulti() {
incr := func(tx *redis.Multi) ([]redis.Cmder, error) {
s, err := tx.Get("key").Result()
if err != nil && err != redis.Nil {
return nil, err
}
n, _ := strconv.ParseInt(s, 10, 64)
return tx.Exec(func() error {
tx.Set("key", strconv.FormatInt(n+1, 10))
return nil
})
}
client.Del("key")
tx := client.Multi()
defer tx.Close()
watch := tx.Watch("key")
_ = watch.Err()
for {
cmds, err := incr(tx)
if err == redis.TxFailedErr {
continue
} else if err != nil {
panic(err)
}
fmt.Println(cmds, err)
break
}
// Output: [SET key 1: OK] <nil>
}
func ExamplePubSub() {
pubsub := client.PubSub()
defer pubsub.Close()
err := pubsub.Subscribe("mychannel")
_ = err
msg, err := pubsub.Receive()
fmt.Println(msg, err)
pub := client.Publish("mychannel", "hello")
_ = pub.Err()
msg, err = pubsub.Receive()
fmt.Println(msg, err)
// Output: subscribe: mychannel <nil>
// Message<mychannel: hello> <nil>
}
func ExampleScript() {
setnx := redis.NewScript(`
if redis.call("get", KEYS[1]) == false then
redis.call("set", KEYS[1], ARGV[1])
return 1
end
return 0
`)
v1, err := setnx.Run(client, []string{"keynx"}, []string{"foo"}).Result()
fmt.Println(v1.(int64), err)
v2, err := setnx.Run(client, []string{"keynx"}, []string{"bar"}).Result()
fmt.Println(v2.(int64), err)
get := client.Get("keynx")
fmt.Println(get)
// Output: 1 <nil>
// 0 <nil>
// GET keynx: foo
}
func Example_customCommand() {
Get := func(client *redis.Client, key string) *redis.StringCmd {
cmd := redis.NewStringCmd("GET", key)
client.Process(cmd)
return cmd
}
v, err := Get(client, "key_does_not_exist").Result()
fmt.Printf("%q %s", v, err)
// Output: "" redis: nil
}

5
vendor/gopkg.in/redis.v2/export_test.go generated vendored Normal file
View File

@@ -0,0 +1,5 @@
package redis
func (c *baseClient) Pool() pool {
return c.connPool
}

138
vendor/gopkg.in/redis.v2/multi.go generated vendored Normal file
View File

@@ -0,0 +1,138 @@
package redis
import (
"errors"
"fmt"
)
var errDiscard = errors.New("redis: Discard can be used only inside Exec")
// Not thread-safe.
type Multi struct {
*Client
}
func (c *Client) Multi() *Multi {
return &Multi{
Client: &Client{
baseClient: &baseClient{
opt: c.opt,
connPool: newSingleConnPool(c.connPool, true),
},
},
}
}
func (c *Multi) Close() error {
if err := c.Unwatch().Err(); err != nil {
return err
}
return c.Client.Close()
}
func (c *Multi) Watch(keys ...string) *StatusCmd {
args := append([]string{"WATCH"}, keys...)
cmd := NewStatusCmd(args...)
c.Process(cmd)
return cmd
}
func (c *Multi) Unwatch(keys ...string) *StatusCmd {
args := append([]string{"UNWATCH"}, keys...)
cmd := NewStatusCmd(args...)
c.Process(cmd)
return cmd
}
func (c *Multi) Discard() error {
if c.cmds == nil {
return errDiscard
}
c.cmds = c.cmds[:1]
return nil
}
// Exec always returns a list of commands. If the transaction fails,
// TxFailedErr is returned. Otherwise Exec returns the error of the first
// failed command, or nil.
func (c *Multi) Exec(f func() error) ([]Cmder, error) {
c.cmds = []Cmder{NewStatusCmd("MULTI")}
if err := f(); err != nil {
return nil, err
}
c.cmds = append(c.cmds, NewSliceCmd("EXEC"))
cmds := c.cmds
c.cmds = nil
if len(cmds) == 2 {
return []Cmder{}, nil
}
cn, err := c.conn()
if err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return cmds[1 : len(cmds)-1], err
}
err = c.execCmds(cn, cmds)
if err != nil {
c.freeConn(cn, err)
return cmds[1 : len(cmds)-1], err
}
c.putConn(cn)
return cmds[1 : len(cmds)-1], nil
}
func (c *Multi) execCmds(cn *conn, cmds []Cmder) error {
err := c.writeCmd(cn, cmds...)
if err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
statusCmd := NewStatusCmd()
// Omit last command (EXEC).
cmdsLen := len(cmds) - 1
// Parse queued replies.
for i := 0; i < cmdsLen; i++ {
if err := statusCmd.parseReply(cn.rd); err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
}
// Parse number of replies.
line, err := readLine(cn.rd)
if err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
if line[0] != '*' {
err := fmt.Errorf("redis: expected '*', but got line %q", line)
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
if len(line) == 3 && line[1] == '-' && line[2] == '1' {
setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr)
return TxFailedErr
}
var firstCmdErr error
// Parse replies.
// Loop starts from 1 to omit MULTI cmd.
for i := 1; i < cmdsLen; i++ {
cmd := cmds[i]
if err := cmd.parseReply(cn.rd); err != nil {
if firstCmdErr == nil {
firstCmdErr = err
}
}
}
return firstCmdErr
}

262
vendor/gopkg.in/redis.v2/parser.go generated vendored Normal file
View File

@@ -0,0 +1,262 @@
package redis
import (
"errors"
"fmt"
"strconv"
"gopkg.in/bufio.v1"
)
type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error)
var (
errReaderTooSmall = errors.New("redis: reader is too small")
)
//------------------------------------------------------------------------------
func appendArgs(buf []byte, args []string) []byte {
buf = append(buf, '*')
buf = strconv.AppendUint(buf, uint64(len(args)), 10)
buf = append(buf, '\r', '\n')
for _, arg := range args {
buf = append(buf, '$')
buf = strconv.AppendUint(buf, uint64(len(arg)), 10)
buf = append(buf, '\r', '\n')
buf = append(buf, arg...)
buf = append(buf, '\r', '\n')
}
return buf
}
//------------------------------------------------------------------------------
func readLine(rd *bufio.Reader) ([]byte, error) {
line, isPrefix, err := rd.ReadLine()
if err != nil {
return line, err
}
if isPrefix {
return line, errReaderTooSmall
}
return line, nil
}
func readN(rd *bufio.Reader, n int) ([]byte, error) {
b, err := rd.ReadN(n)
if err == bufio.ErrBufferFull {
tmp := make([]byte, n)
r := copy(tmp, b)
b = tmp
for {
nn, err := rd.Read(b[r:])
r += nn
if r >= n {
// Ignore error if we read enough.
break
}
if err != nil {
return nil, err
}
}
} else if err != nil {
return nil, err
}
return b, nil
}
//------------------------------------------------------------------------------
func parseReq(rd *bufio.Reader) ([]string, error) {
line, err := readLine(rd)
if err != nil {
return nil, err
}
if line[0] != '*' {
return []string{string(line)}, nil
}
numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64)
if err != nil {
return nil, err
}
args := make([]string, 0, numReplies)
for i := int64(0); i < numReplies; i++ {
line, err = readLine(rd)
if err != nil {
return nil, err
}
if line[0] != '$' {
return nil, fmt.Errorf("redis: expected '$', but got %q", line)
}
argLen, err := strconv.ParseInt(string(line[1:]), 10, 32)
if err != nil {
return nil, err
}
arg, err := readN(rd, int(argLen)+2)
if err != nil {
return nil, err
}
args = append(args, string(arg[:argLen]))
}
return args, nil
}
//------------------------------------------------------------------------------
func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) {
line, err := readLine(rd)
if err != nil {
return nil, err
}
switch line[0] {
case '-':
return nil, errorf(string(line[1:]))
case '+':
return string(line[1:]), nil
case ':':
v, err := strconv.ParseInt(string(line[1:]), 10, 64)
if err != nil {
return nil, err
}
return v, nil
case '$':
if len(line) == 3 && line[1] == '-' && line[2] == '1' {
return nil, Nil
}
replyLen, err := strconv.Atoi(string(line[1:]))
if err != nil {
return nil, err
}
b, err := readN(rd, replyLen+2)
if err != nil {
return nil, err
}
return string(b[:replyLen]), nil
case '*':
if len(line) == 3 && line[1] == '-' && line[2] == '1' {
return nil, Nil
}
repliesNum, err := strconv.ParseInt(string(line[1:]), 10, 64)
if err != nil {
return nil, err
}
return p(rd, repliesNum)
}
return nil, fmt.Errorf("redis: can't parse %q", line)
}
func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) {
vals := make([]interface{}, 0, n)
for i := int64(0); i < n; i++ {
v, err := parseReply(rd, parseSlice)
if err == Nil {
vals = append(vals, nil)
} else if err != nil {
return nil, err
} else {
vals = append(vals, v)
}
}
return vals, nil
}
func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) {
vals := make([]string, 0, n)
for i := int64(0); i < n; i++ {
viface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
v, ok := viface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", viface)
}
vals = append(vals, v)
}
return vals, nil
}
func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, error) {
vals := make([]bool, 0, n)
for i := int64(0); i < n; i++ {
viface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
v, ok := viface.(int64)
if !ok {
return nil, fmt.Errorf("got %T, expected int64", viface)
}
vals = append(vals, v == 1)
}
return vals, nil
}
func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) {
m := make(map[string]string, n/2)
for i := int64(0); i < n; i += 2 {
keyiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
key, ok := keyiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", keyiface)
}
valueiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
value, ok := valueiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", valueiface)
}
m[key] = value
}
return m, nil
}
func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) {
zz := make([]Z, n/2)
for i := int64(0); i < n; i += 2 {
z := &zz[i/2]
memberiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
member, ok := memberiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", memberiface)
}
z.Member = member
scoreiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
scorestr, ok := scoreiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", scoreiface)
}
score, err := strconv.ParseFloat(scorestr, 64)
if err != nil {
return nil, err
}
z.Score = score
}
return zz, nil
}
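
For reference, the RESP request framing built by appendArgs above can be illustrated with a standalone sketch; encodeArgs is a hypothetical copy of that unexported helper, shown only to print the wire format:

package main

import (
	"fmt"
	"strconv"
)

// encodeArgs mirrors appendArgs: *<argc>\r\n followed by $<len>\r\n<arg>\r\n per argument.
func encodeArgs(buf []byte, args []string) []byte {
	buf = append(buf, '*')
	buf = strconv.AppendUint(buf, uint64(len(args)), 10)
	buf = append(buf, '\r', '\n')
	for _, arg := range args {
		buf = append(buf, '$')
		buf = strconv.AppendUint(buf, uint64(len(arg)), 10)
		buf = append(buf, '\r', '\n')
		buf = append(buf, arg...)
		buf = append(buf, '\r', '\n')
	}
	return buf
}

func main() {
	fmt.Printf("%q\n", encodeArgs(nil, []string{"SET", "key", "value"}))
	// "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"
}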

54
vendor/gopkg.in/redis.v2/parser_test.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
package redis
import (
"testing"
"gopkg.in/bufio.v1"
)
func BenchmarkParseReplyStatus(b *testing.B) {
benchmarkParseReply(b, "+OK\r\n", nil, false)
}
func BenchmarkParseReplyInt(b *testing.B) {
benchmarkParseReply(b, ":1\r\n", nil, false)
}
func BenchmarkParseReplyError(b *testing.B) {
benchmarkParseReply(b, "-Error message\r\n", nil, true)
}
func BenchmarkParseReplyString(b *testing.B) {
benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
}
func BenchmarkParseReplySlice(b *testing.B) {
benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", parseSlice, false)
}
func benchmarkParseReply(b *testing.B, reply string, p multiBulkParser, wanterr bool) {
b.StopTimer()
buf := &bufio.Buffer{}
rd := bufio.NewReader(buf)
for i := 0; i < b.N; i++ {
buf.WriteString(reply)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
_, err := parseReply(rd, p)
if !wanterr && err != nil {
panic(err)
}
}
}
func BenchmarkAppendArgs(b *testing.B) {
buf := make([]byte, 0, 64)
args := []string{"hello", "world", "foo", "bar"}
for i := 0; i < b.N; i++ {
appendArgs(buf, args)
}
}

91
vendor/gopkg.in/redis.v2/pipeline.go generated vendored Normal file
View File

@@ -0,0 +1,91 @@
package redis
// Not thread-safe.
type Pipeline struct {
*Client
closed bool
}
func (c *Client) Pipeline() *Pipeline {
return &Pipeline{
Client: &Client{
baseClient: &baseClient{
opt: c.opt,
connPool: c.connPool,
cmds: make([]Cmder, 0),
},
},
}
}
func (c *Client) Pipelined(f func(*Pipeline) error) ([]Cmder, error) {
pc := c.Pipeline()
if err := f(pc); err != nil {
return nil, err
}
cmds, err := pc.Exec()
pc.Close()
return cmds, err
}
func (c *Pipeline) Close() error {
c.closed = true
return nil
}
func (c *Pipeline) Discard() error {
if c.closed {
return errClosed
}
c.cmds = c.cmds[:0]
return nil
}
// Exec always returns a list of commands and the error of the first failed
// command, if any.
func (c *Pipeline) Exec() ([]Cmder, error) {
if c.closed {
return nil, errClosed
}
cmds := c.cmds
c.cmds = make([]Cmder, 0)
if len(cmds) == 0 {
return []Cmder{}, nil
}
cn, err := c.conn()
if err != nil {
setCmdsErr(cmds, err)
return cmds, err
}
if err := c.execCmds(cn, cmds); err != nil {
c.freeConn(cn, err)
return cmds, err
}
c.putConn(cn)
return cmds, nil
}
func (c *Pipeline) execCmds(cn *conn, cmds []Cmder) error {
if err := c.writeCmd(cn, cmds...); err != nil {
setCmdsErr(cmds, err)
return err
}
var firstCmdErr error
for _, cmd := range cmds {
if err := cmd.parseReply(cn.rd); err != nil {
if firstCmdErr == nil {
firstCmdErr = err
}
}
}
return firstCmdErr
}

405
vendor/gopkg.in/redis.v2/pool.go generated vendored Normal file
View File

@@ -0,0 +1,405 @@
package redis
import (
"container/list"
"errors"
"log"
"net"
"sync"
"time"
"gopkg.in/bufio.v1"
)
var (
errClosed = errors.New("redis: client is closed")
errRateLimited = errors.New("redis: you are opening connections too fast")
)
var (
zeroTime = time.Time{}
)
type pool interface {
Get() (*conn, bool, error)
Put(*conn) error
Remove(*conn) error
Len() int
Size() int
Close() error
Filter(func(*conn) bool)
}
//------------------------------------------------------------------------------
type conn struct {
netcn net.Conn
rd *bufio.Reader
buf []byte
inUse bool
usedAt time.Time
readTimeout time.Duration
writeTimeout time.Duration
elem *list.Element
}
func newConnFunc(dial func() (net.Conn, error)) func() (*conn, error) {
return func() (*conn, error) {
netcn, err := dial()
if err != nil {
return nil, err
}
cn := &conn{
netcn: netcn,
buf: make([]byte, 0, 64),
}
cn.rd = bufio.NewReader(cn)
return cn, nil
}
}
func (cn *conn) Read(b []byte) (int, error) {
if cn.readTimeout != 0 {
cn.netcn.SetReadDeadline(time.Now().Add(cn.readTimeout))
} else {
cn.netcn.SetReadDeadline(zeroTime)
}
return cn.netcn.Read(b)
}
func (cn *conn) Write(b []byte) (int, error) {
if cn.writeTimeout != 0 {
cn.netcn.SetWriteDeadline(time.Now().Add(cn.writeTimeout))
} else {
cn.netcn.SetWriteDeadline(zeroTime)
}
return cn.netcn.Write(b)
}
func (cn *conn) RemoteAddr() net.Addr {
return cn.netcn.RemoteAddr()
}
func (cn *conn) Close() error {
return cn.netcn.Close()
}
//------------------------------------------------------------------------------
type connPool struct {
dial func() (*conn, error)
rl *rateLimiter
opt *options
cond *sync.Cond
conns *list.List
idleNum int
closed bool
}
func newConnPool(dial func() (*conn, error), opt *options) *connPool {
return &connPool{
dial: dial,
rl: newRateLimiter(time.Second, 2*opt.PoolSize),
opt: opt,
cond: sync.NewCond(&sync.Mutex{}),
conns: list.New(),
}
}
func (p *connPool) new() (*conn, error) {
if !p.rl.Check() {
return nil, errRateLimited
}
return p.dial()
}
func (p *connPool) Get() (*conn, bool, error) {
p.cond.L.Lock()
if p.closed {
p.cond.L.Unlock()
return nil, false, errClosed
}
if p.opt.IdleTimeout > 0 {
for el := p.conns.Front(); el != nil; el = el.Next() {
cn := el.Value.(*conn)
if cn.inUse {
break
}
if time.Since(cn.usedAt) > p.opt.IdleTimeout {
if err := p.remove(cn); err != nil {
log.Printf("remove failed: %s", err)
}
}
}
}
for p.conns.Len() >= p.opt.PoolSize && p.idleNum == 0 {
p.cond.Wait()
}
if p.idleNum > 0 {
elem := p.conns.Front()
cn := elem.Value.(*conn)
if cn.inUse {
panic("pool: precondition failed")
}
cn.inUse = true
p.conns.MoveToBack(elem)
p.idleNum--
p.cond.L.Unlock()
return cn, false, nil
}
if p.conns.Len() < p.opt.PoolSize {
cn, err := p.new()
if err != nil {
p.cond.L.Unlock()
return nil, false, err
}
cn.inUse = true
cn.elem = p.conns.PushBack(cn)
p.cond.L.Unlock()
return cn, true, nil
}
panic("not reached")
}
func (p *connPool) Put(cn *conn) error {
if cn.rd.Buffered() != 0 {
b, _ := cn.rd.ReadN(cn.rd.Buffered())
log.Printf("redis: connection has unread data: %q", b)
return p.Remove(cn)
}
if p.opt.IdleTimeout > 0 {
cn.usedAt = time.Now()
}
p.cond.L.Lock()
if p.closed {
p.cond.L.Unlock()
return errClosed
}
cn.inUse = false
p.conns.MoveToFront(cn.elem)
p.idleNum++
p.cond.Signal()
p.cond.L.Unlock()
return nil
}
func (p *connPool) Remove(cn *conn) error {
p.cond.L.Lock()
if p.closed {
// Noop, connection is already closed.
p.cond.L.Unlock()
return nil
}
err := p.remove(cn)
p.cond.Signal()
p.cond.L.Unlock()
return err
}
func (p *connPool) remove(cn *conn) error {
p.conns.Remove(cn.elem)
cn.elem = nil
if !cn.inUse {
p.idleNum--
}
return cn.Close()
}
// Len returns the number of idle connections.
func (p *connPool) Len() int {
defer p.cond.L.Unlock()
p.cond.L.Lock()
return p.idleNum
}
// Size returns the number of connections in the pool.
func (p *connPool) Size() int {
defer p.cond.L.Unlock()
p.cond.L.Lock()
return p.conns.Len()
}
func (p *connPool) Filter(f func(*conn) bool) {
p.cond.L.Lock()
for el, next := p.conns.Front(), p.conns.Front(); el != nil; el = next {
next = el.Next()
cn := el.Value.(*conn)
if !f(cn) {
p.remove(cn)
}
}
p.cond.L.Unlock()
}
func (p *connPool) Close() error {
defer p.cond.L.Unlock()
p.cond.L.Lock()
if p.closed {
return nil
}
p.closed = true
p.rl.Close()
var retErr error
for {
e := p.conns.Front()
if e == nil {
break
}
if err := p.remove(e.Value.(*conn)); err != nil {
log.Printf("cn.Close failed: %s", err)
retErr = err
}
}
return retErr
}
//------------------------------------------------------------------------------
type singleConnPool struct {
pool pool
cnMtx sync.Mutex
cn *conn
reusable bool
closed bool
}
func newSingleConnPool(pool pool, reusable bool) *singleConnPool {
return &singleConnPool{
pool: pool,
reusable: reusable,
}
}
func (p *singleConnPool) SetConn(cn *conn) {
p.cnMtx.Lock()
p.cn = cn
p.cnMtx.Unlock()
}
func (p *singleConnPool) Get() (*conn, bool, error) {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.closed {
return nil, false, errClosed
}
if p.cn != nil {
return p.cn, false, nil
}
cn, isNew, err := p.pool.Get()
if err != nil {
return nil, false, err
}
p.cn = cn
return p.cn, isNew, nil
}
func (p *singleConnPool) Put(cn *conn) error {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn != cn {
panic("p.cn != cn")
}
if p.closed {
return errClosed
}
return nil
}
func (p *singleConnPool) put() error {
err := p.pool.Put(p.cn)
p.cn = nil
return err
}
func (p *singleConnPool) Remove(cn *conn) error {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn == nil {
panic("p.cn == nil")
}
if p.cn != cn {
panic("p.cn != cn")
}
if p.closed {
return errClosed
}
return p.remove()
}
func (p *singleConnPool) remove() error {
err := p.pool.Remove(p.cn)
p.cn = nil
return err
}
func (p *singleConnPool) Len() int {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn == nil {
return 0
}
return 1
}
func (p *singleConnPool) Size() int {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn == nil {
return 0
}
return 1
}
func (p *singleConnPool) Filter(f func(*conn) bool) {
p.cnMtx.Lock()
if p.cn != nil {
if !f(p.cn) {
p.remove()
}
}
p.cnMtx.Unlock()
}
func (p *singleConnPool) Close() error {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.closed {
return nil
}
p.closed = true
var err error
if p.cn != nil {
if p.reusable {
err = p.put()
} else {
err = p.remove()
}
}
return err
}

134
vendor/gopkg.in/redis.v2/pubsub.go generated vendored Normal file
View File

@@ -0,0 +1,134 @@
package redis
import (
"fmt"
"time"
)
// Not thread-safe.
type PubSub struct {
*baseClient
}
func (c *Client) PubSub() *PubSub {
return &PubSub{
baseClient: &baseClient{
opt: c.opt,
connPool: newSingleConnPool(c.connPool, false),
},
}
}
func (c *Client) Publish(channel, message string) *IntCmd {
req := NewIntCmd("PUBLISH", channel, message)
c.Process(req)
return req
}
type Message struct {
Channel string
Payload string
}
func (m *Message) String() string {
return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
}
type PMessage struct {
Channel string
Pattern string
Payload string
}
func (m *PMessage) String() string {
return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload)
}
type Subscription struct {
Kind string
Channel string
Count int
}
func (m *Subscription) String() string {
return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
}
func (c *PubSub) Receive() (interface{}, error) {
return c.ReceiveTimeout(0)
}
func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
cn, err := c.conn()
if err != nil {
return nil, err
}
cn.readTimeout = timeout
cmd := NewSliceCmd()
if err := cmd.parseReply(cn.rd); err != nil {
return nil, err
}
reply := cmd.Val()
msgName := reply[0].(string)
switch msgName {
case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
return &Subscription{
Kind: msgName,
Channel: reply[1].(string),
Count: int(reply[2].(int64)),
}, nil
case "message":
return &Message{
Channel: reply[1].(string),
Payload: reply[2].(string),
}, nil
case "pmessage":
return &PMessage{
Pattern: reply[1].(string),
Channel: reply[2].(string),
Payload: reply[3].(string),
}, nil
}
return nil, fmt.Errorf("redis: unsupported message name: %q", msgName)
}
func (c *PubSub) subscribe(cmd string, channels ...string) error {
cn, err := c.conn()
if err != nil {
return err
}
args := append([]string{cmd}, channels...)
req := NewSliceCmd(args...)
return c.writeCmd(cn, req)
}
func (c *PubSub) Subscribe(channels ...string) error {
return c.subscribe("SUBSCRIBE", channels...)
}
func (c *PubSub) PSubscribe(patterns ...string) error {
return c.subscribe("PSUBSCRIBE", patterns...)
}
func (c *PubSub) unsubscribe(cmd string, channels ...string) error {
cn, err := c.conn()
if err != nil {
return err
}
args := append([]string{cmd}, channels...)
req := NewSliceCmd(args...)
return c.writeCmd(cn, req)
}
func (c *PubSub) Unsubscribe(channels ...string) error {
return c.unsubscribe("UNSUBSCRIBE", channels...)
}
func (c *PubSub) PUnsubscribe(patterns ...string) error {
return c.unsubscribe("PUNSUBSCRIBE", patterns...)
}

53
vendor/gopkg.in/redis.v2/rate_limit.go generated vendored Normal file
View File

@@ -0,0 +1,53 @@
package redis
import (
"sync/atomic"
"time"
)
type rateLimiter struct {
v int64
_closed int64
}
func newRateLimiter(limit time.Duration, bucketSize int) *rateLimiter {
rl := &rateLimiter{
v: int64(bucketSize),
}
go rl.loop(limit, int64(bucketSize))
return rl
}
func (rl *rateLimiter) loop(limit time.Duration, bucketSize int64) {
for {
if rl.closed() {
break
}
if v := atomic.LoadInt64(&rl.v); v < bucketSize {
atomic.AddInt64(&rl.v, 1)
}
time.Sleep(limit)
}
}
func (rl *rateLimiter) Check() bool {
for {
if v := atomic.LoadInt64(&rl.v); v > 0 {
if atomic.CompareAndSwapInt64(&rl.v, v, v-1) {
return true
}
} else {
return false
}
}
}
func (rl *rateLimiter) Close() error {
atomic.StoreInt64(&rl._closed, 1)
return nil
}
func (rl *rateLimiter) closed() bool {
return atomic.LoadInt64(&rl._closed) == 1
}

31
vendor/gopkg.in/redis.v2/rate_limit_test.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
package redis
import (
"sync"
"testing"
"time"
)
func TestRateLimiter(t *testing.T) {
var n = 100000
if testing.Short() {
n = 1000
}
rl := newRateLimiter(time.Minute, n)
wg := &sync.WaitGroup{}
for i := 0; i < n; i++ {
wg.Add(1)
go func() {
if !rl.Check() {
panic("check failed")
}
wg.Done()
}()
}
wg.Wait()
if rl.Check() && rl.Check() {
t.Fatal("check passed")
}
}

231
vendor/gopkg.in/redis.v2/redis.go generated vendored Normal file
View File

@@ -0,0 +1,231 @@
package redis
import (
"log"
"net"
"time"
)
type baseClient struct {
connPool pool
opt *options
cmds []Cmder
}
func (c *baseClient) writeCmd(cn *conn, cmds ...Cmder) error {
buf := cn.buf[:0]
for _, cmd := range cmds {
buf = appendArgs(buf, cmd.args())
}
_, err := cn.Write(buf)
return err
}
func (c *baseClient) conn() (*conn, error) {
cn, isNew, err := c.connPool.Get()
if err != nil {
return nil, err
}
if isNew {
if err := c.initConn(cn); err != nil {
c.removeConn(cn)
return nil, err
}
}
return cn, nil
}
func (c *baseClient) initConn(cn *conn) error {
if c.opt.Password == "" && c.opt.DB == 0 {
return nil
}
pool := newSingleConnPool(c.connPool, false)
pool.SetConn(cn)
// Client is not closed because we want to reuse the underlying connection.
client := &Client{
baseClient: &baseClient{
opt: c.opt,
connPool: pool,
},
}
if c.opt.Password != "" {
if err := client.Auth(c.opt.Password).Err(); err != nil {
return err
}
}
if c.opt.DB > 0 {
if err := client.Select(c.opt.DB).Err(); err != nil {
return err
}
}
return nil
}
func (c *baseClient) freeConn(cn *conn, ei error) error {
if cn.rd.Buffered() > 0 {
return c.connPool.Remove(cn)
}
if _, ok := ei.(redisError); ok {
return c.connPool.Put(cn)
}
return c.connPool.Remove(cn)
}
func (c *baseClient) removeConn(cn *conn) {
if err := c.connPool.Remove(cn); err != nil {
log.Printf("pool.Remove failed: %s", err)
}
}
func (c *baseClient) putConn(cn *conn) {
if err := c.connPool.Put(cn); err != nil {
log.Printf("pool.Put failed: %s", err)
}
}
func (c *baseClient) Process(cmd Cmder) {
if c.cmds == nil {
c.run(cmd)
} else {
c.cmds = append(c.cmds, cmd)
}
}
func (c *baseClient) run(cmd Cmder) {
cn, err := c.conn()
if err != nil {
cmd.setErr(err)
return
}
if timeout := cmd.writeTimeout(); timeout != nil {
cn.writeTimeout = *timeout
} else {
cn.writeTimeout = c.opt.WriteTimeout
}
if timeout := cmd.readTimeout(); timeout != nil {
cn.readTimeout = *timeout
} else {
cn.readTimeout = c.opt.ReadTimeout
}
if err := c.writeCmd(cn, cmd); err != nil {
c.freeConn(cn, err)
cmd.setErr(err)
return
}
if err := cmd.parseReply(cn.rd); err != nil {
c.freeConn(cn, err)
return
}
c.putConn(cn)
}
// Close closes the client, releasing any open resources.
func (c *baseClient) Close() error {
return c.connPool.Close()
}
//------------------------------------------------------------------------------
type options struct {
Password string
DB int64
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
IdleTimeout time.Duration
}
type Options struct {
Network string
Addr string
// Dialer creates a new network connection and has priority over the
// Network and Addr options.
Dialer func() (net.Conn, error)
Password string
DB int64
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
IdleTimeout time.Duration
}
func (opt *Options) getPoolSize() int {
if opt.PoolSize == 0 {
return 10
}
return opt.PoolSize
}
func (opt *Options) getDialTimeout() time.Duration {
if opt.DialTimeout == 0 {
return 5 * time.Second
}
return opt.DialTimeout
}
func (opt *Options) options() *options {
return &options{
DB: opt.DB,
Password: opt.Password,
DialTimeout: opt.getDialTimeout(),
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.getPoolSize(),
IdleTimeout: opt.IdleTimeout,
}
}
type Client struct {
*baseClient
}
func NewClient(clOpt *Options) *Client {
opt := clOpt.options()
dialer := clOpt.Dialer
if dialer == nil {
dialer = func() (net.Conn, error) {
return net.DialTimeout(clOpt.Network, clOpt.Addr, opt.DialTimeout)
}
}
return &Client{
baseClient: &baseClient{
opt: opt,
connPool: newConnPool(newConnFunc(dialer), opt),
},
}
}
// Deprecated. Use NewClient instead.
func NewTCPClient(opt *Options) *Client {
opt.Network = "tcp"
return NewClient(opt)
}
// Deprecated. Use NewClient instead.
func NewUnixClient(opt *Options) *Client {
opt.Network = "unix"
return NewClient(opt)
}
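As a point of reference, a minimal usage sketch of the client API defined above. The address and key names are illustrative assumptions, not values from this repository; Set and Get come from the package's command set, exercised by the tests further below.

package main

import (
	"log"

	"gopkg.in/redis.v2"
)

func main() {
	// Illustrative address; any reachable Redis instance works.
	client := redis.NewClient(&redis.Options{
		Network: "tcp",
		Addr:    "127.0.0.1:6379",
	})
	defer client.Close()

	if err := client.Set("example-key", "example-value").Err(); err != nil {
		log.Fatal(err)
	}
	val, err := client.Get("example-key").Result()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("example-key =", val)
}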

3333
vendor/gopkg.in/redis.v2/redis_test.go generated vendored Normal file

File diff suppressed because it is too large

52
vendor/gopkg.in/redis.v2/script.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
package redis
import (
"crypto/sha1"
"encoding/hex"
"io"
"strings"
)
type scripter interface {
Eval(script string, keys []string, args []string) *Cmd
EvalSha(sha1 string, keys []string, args []string) *Cmd
ScriptExists(scripts ...string) *BoolSliceCmd
ScriptLoad(script string) *StringCmd
}
type Script struct {
src, hash string
}
func NewScript(src string) *Script {
h := sha1.New()
io.WriteString(h, src)
return &Script{
src: src,
hash: hex.EncodeToString(h.Sum(nil)),
}
}
func (s *Script) Load(c scripter) *StringCmd {
return c.ScriptLoad(s.src)
}
func (s *Script) Exists(c scripter) *BoolSliceCmd {
// SCRIPT EXISTS expects SHA1 digests, so pass the hash rather than the source.
return c.ScriptExists(s.hash)
}
func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd {
return c.Eval(s.src, keys, args)
}
func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd {
return c.EvalSha(s.hash, keys, args)
}
func (s *Script) Run(c *Client, keys []string, args []string) *Cmd {
r := s.EvalSha(c, keys, args)
if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
return s.Eval(c, keys, args)
}
return r
}
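A short sketch of how the Script helper above might be used. The Lua body, key name, and server address are illustrative assumptions:

package main

import (
	"log"

	"gopkg.in/redis.v2"
)

var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	client := redis.NewTCPClient(&redis.Options{Addr: "127.0.0.1:6379"})
	defer client.Close()

	// Run tries EVALSHA first and falls back to EVAL on a NOSCRIPT reply.
	res, err := incrBy.Run(client, []string{"example-counter"}, []string{"5"}).Result()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("counter is now", res)
}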

291
vendor/gopkg.in/redis.v2/sentinel.go generated vendored Normal file
View File

@@ -0,0 +1,291 @@
package redis
import (
"errors"
"log"
"net"
"strings"
"sync"
"time"
)
//------------------------------------------------------------------------------
type FailoverOptions struct {
MasterName string
SentinelAddrs []string
Password string
DB int64
PoolSize int
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
IdleTimeout time.Duration
}
func (opt *FailoverOptions) getPoolSize() int {
if opt.PoolSize == 0 {
return 10
}
return opt.PoolSize
}
func (opt *FailoverOptions) getDialTimeout() time.Duration {
if opt.DialTimeout == 0 {
return 5 * time.Second
}
return opt.DialTimeout
}
func (opt *FailoverOptions) options() *options {
return &options{
DB: opt.DB,
Password: opt.Password,
DialTimeout: opt.getDialTimeout(),
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.getPoolSize(),
IdleTimeout: opt.IdleTimeout,
}
}
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
opt := failoverOpt.options()
failover := &sentinelFailover{
masterName: failoverOpt.MasterName,
sentinelAddrs: failoverOpt.SentinelAddrs,
opt: opt,
}
return &Client{
baseClient: &baseClient{
opt: opt,
connPool: failover.Pool(),
},
}
}
//------------------------------------------------------------------------------
type sentinelClient struct {
*baseClient
}
func newSentinel(clOpt *Options) *sentinelClient {
opt := clOpt.options()
opt.Password = ""
opt.DB = 0
dialer := func() (net.Conn, error) {
return net.DialTimeout("tcp", clOpt.Addr, opt.DialTimeout)
}
return &sentinelClient{
baseClient: &baseClient{
opt: opt,
connPool: newConnPool(newConnFunc(dialer), opt),
},
}
}
func (c *sentinelClient) PubSub() *PubSub {
return &PubSub{
baseClient: &baseClient{
opt: c.opt,
connPool: newSingleConnPool(c.connPool, false),
},
}
}
func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
c.Process(cmd)
return cmd
}
func (c *sentinelClient) Sentinels(name string) *SliceCmd {
cmd := NewSliceCmd("SENTINEL", "sentinels", name)
c.Process(cmd)
return cmd
}
type sentinelFailover struct {
masterName string
sentinelAddrs []string
opt *options
pool pool
poolOnce sync.Once
lock sync.RWMutex
_sentinel *sentinelClient
}
func (d *sentinelFailover) dial() (net.Conn, error) {
addr, err := d.MasterAddr()
if err != nil {
return nil, err
}
return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
}
func (d *sentinelFailover) Pool() pool {
d.poolOnce.Do(func() {
d.pool = newConnPool(newConnFunc(d.dial), d.opt)
})
return d.pool
}
func (d *sentinelFailover) MasterAddr() (string, error) {
d.lock.Lock()
defer d.lock.Unlock()
// Try last working sentinel.
if d._sentinel != nil {
addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result()
if err != nil {
log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
d.resetSentinel()
} else {
addr := net.JoinHostPort(addr[0], addr[1])
log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
return addr, nil
}
}
for i, sentinelAddr := range d.sentinelAddrs {
sentinel := newSentinel(&Options{
Addr: sentinelAddr,
DB: d.opt.DB,
Password: d.opt.Password,
DialTimeout: d.opt.DialTimeout,
ReadTimeout: d.opt.ReadTimeout,
WriteTimeout: d.opt.WriteTimeout,
PoolSize: d.opt.PoolSize,
IdleTimeout: d.opt.IdleTimeout,
})
masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
if err != nil {
log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
sentinel.Close()
continue
}
// Push working sentinel to the top.
d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
d.setSentinel(sentinel)
addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
return addr, nil
}
return "", errors.New("redis: all sentinels are unreachable")
}
func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) {
d.discoverSentinels(sentinel)
d._sentinel = sentinel
go d.listen()
}
func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
sentinels, err := sentinel.Sentinels(d.masterName).Result()
if err != nil {
log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err)
return
}
for _, sentinel := range sentinels {
vals := sentinel.([]interface{})
for i := 0; i < len(vals); i += 2 {
key := vals[i].(string)
if key == "name" {
sentinelAddr := vals[i+1].(string)
if !contains(d.sentinelAddrs, sentinelAddr) {
log.Printf(
"redis-sentinel: discovered new %q sentinel: %s",
d.masterName, sentinelAddr,
)
d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
}
}
}
}
}
func (d *sentinelFailover) listen() {
var pubsub *PubSub
for {
if pubsub == nil {
pubsub = d._sentinel.PubSub()
if err := pubsub.Subscribe("+switch-master"); err != nil {
log.Printf("redis-sentinel: Subscribe failed: %s", err)
d.lock.Lock()
d.resetSentinel()
d.lock.Unlock()
return
}
}
msgIface, err := pubsub.Receive()
if err != nil {
log.Printf("redis-sentinel: Receive failed: %s", err)
pubsub.Close()
return
}
switch msg := msgIface.(type) {
case *Message:
switch msg.Channel {
case "+switch-master":
parts := strings.Split(msg.Payload, " ")
if parts[0] != d.masterName {
log.Printf("redis-sentinel: ignore new %s addr", parts[0])
continue
}
addr := net.JoinHostPort(parts[3], parts[4])
log.Printf(
"redis-sentinel: new %q addr is %s",
d.masterName, addr,
)
d.pool.Filter(func(cn *conn) bool {
if cn.RemoteAddr().String() != addr {
log.Printf(
"redis-sentinel: closing connection to old master %s",
cn.RemoteAddr(),
)
return false
}
return true
})
default:
log.Printf("redis-sentinel: unsupported message: %s", msg)
}
case *Subscription:
// Ignore.
default:
log.Printf("redis-sentinel: unsupported message: %s", msgIface)
}
}
}
func (d *sentinelFailover) resetSentinel() {
d._sentinel.Close()
d._sentinel = nil
}
func contains(slice []string, str string) bool {
for _, s := range slice {
if s == str {
return true
}
}
return false
}
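A minimal construction sketch for the failover client defined above. The master name and sentinel address are illustrative; the sentinel_test.go file below exercises the full path end to end.

package main

import (
	"log"

	"gopkg.in/redis.v2"
)

func main() {
	// The client resolves the current master through the listed sentinels
	// and re-resolves it after a +switch-master notification.
	client := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"127.0.0.1:26379"},
	})
	defer client.Close()

	if err := client.Ping().Err(); err != nil {
		log.Fatal(err)
	}
}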

185
vendor/gopkg.in/redis.v2/sentinel_test.go generated vendored Normal file
View File

@@ -0,0 +1,185 @@
package redis_test
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"text/template"
"time"
"gopkg.in/redis.v2"
)
func startRedis(port string) (*exec.Cmd, error) {
cmd := exec.Command("redis-server", "--port", port)
if false {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Start(); err != nil {
return nil, err
}
return cmd, nil
}
func startRedisSlave(port, slave string) (*exec.Cmd, error) {
cmd := exec.Command("redis-server", "--port", port, "--slaveof", "127.0.0.1", slave)
if false {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Start(); err != nil {
return nil, err
}
return cmd, nil
}
func startRedisSentinel(port, masterName, masterPort string) (*exec.Cmd, error) {
dir, err := ioutil.TempDir("", "sentinel")
if err != nil {
return nil, err
}
sentinelConfFilepath := filepath.Join(dir, "sentinel.conf")
tpl, err := template.New("sentinel.conf").Parse(sentinelConf)
if err != nil {
return nil, err
}
data := struct {
Port string
MasterName string
MasterPort string
}{
Port: port,
MasterName: masterName,
MasterPort: masterPort,
}
if err := writeTemplateToFile(sentinelConfFilepath, tpl, data); err != nil {
return nil, err
}
cmd := exec.Command("redis-server", sentinelConfFilepath, "--sentinel")
if true {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Start(); err != nil {
return nil, err
}
return cmd, nil
}
func writeTemplateToFile(path string, t *template.Template, data interface{}) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
return t.Execute(f, data)
}
func TestSentinel(t *testing.T) {
masterName := "mymaster"
masterPort := "8123"
slavePort := "8124"
sentinelPort := "8125"
masterCmd, err := startRedis(masterPort)
if err != nil {
t.Fatal(err)
}
defer masterCmd.Process.Kill()
// Wait for master to start.
time.Sleep(200 * time.Millisecond)
master := redis.NewTCPClient(&redis.Options{
Addr: ":" + masterPort,
})
if err := master.Ping().Err(); err != nil {
t.Fatal(err)
}
slaveCmd, err := startRedisSlave(slavePort, masterPort)
if err != nil {
t.Fatal(err)
}
defer slaveCmd.Process.Kill()
// Wait for slave to start.
time.Sleep(200 * time.Millisecond)
slave := redis.NewTCPClient(&redis.Options{
Addr: ":" + slavePort,
})
if err := slave.Ping().Err(); err != nil {
t.Fatal(err)
}
sentinelCmd, err := startRedisSentinel(sentinelPort, masterName, masterPort)
if err != nil {
t.Fatal(err)
}
defer sentinelCmd.Process.Kill()
// Wait for sentinel to start.
time.Sleep(200 * time.Millisecond)
sentinel := redis.NewTCPClient(&redis.Options{
Addr: ":" + sentinelPort,
})
if err := sentinel.Ping().Err(); err != nil {
t.Fatal(err)
}
defer sentinel.Shutdown()
client := redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: masterName,
SentinelAddrs: []string{":" + sentinelPort},
})
if err := client.Set("foo", "master").Err(); err != nil {
t.Fatal(err)
}
val, err := master.Get("foo").Result()
if err != nil {
t.Fatal(err)
}
if val != "master" {
t.Fatalf(`got %q, expected "master"`, val)
}
// Kill Redis master.
if err := masterCmd.Process.Kill(); err != nil {
t.Fatal(err)
}
if err := master.Ping().Err(); err == nil {
t.Fatalf("master was not killed")
}
// Wait for Redis sentinel to elect new master.
time.Sleep(5 * time.Second)
// Check that client picked up new master.
val, err = client.Get("foo").Result()
if err != nil {
t.Fatal(err)
}
if val != "master" {
t.Fatalf(`got %q, expected "master"`, val)
}
}
var sentinelConf = `
port {{ .Port }}
sentinel monitor {{ .MasterName }} 127.0.0.1 {{ .MasterPort }} 1
sentinel down-after-milliseconds {{ .MasterName }} 1000
sentinel failover-timeout {{ .MasterName }} 2000
sentinel parallel-syncs {{ .MasterName }} 1
`
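With the values used in TestSentinel above (sentinel port 8125, master "mymaster" on port 8123), this template renders to roughly:

port 8125
sentinel monitor mymaster 127.0.0.1 8123 1
sentinel down-after-milliseconds mymaster 1000
sentinel failover-timeout mymaster 2000
sentinel parallel-syncs mymaster 1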

6
vendor/gopkg.in/redis.v2/testdata/sentinel.conf generated vendored Normal file
View File

@@ -0,0 +1,6 @@
port 26379
sentinel monitor master 127.0.0.1 6379 1
sentinel down-after-milliseconds master 2000
sentinel failover-timeout master 5000
sentinel parallel-syncs master 4