added postgres and redis session options

Torkel Ödegaard 2015-04-01 09:45:42 +02:00
parent c3fa68ade8
commit cf877e6567
36 changed files with 10569 additions and 3 deletions

14
Godeps/Godeps.json generated

@@ -1,6 +1,6 @@
{
-"ImportPath": "github.com/torkelo/grafana-pro",
-"GoVersion": "go1.3",
+"ImportPath": "github.com/grafana/grafana",
+"GoVersion": "go1.4.2",
"Packages": [
"./pkg/..."
],
@@ -70,10 +70,20 @@
"ImportPath": "golang.org/x/oauth2",
"Rev": "e5909d4679a1926c774c712b343f10b8298687a3"
},
{
"ImportPath": "gopkg.in/bufio.v1",
"Comment": "v1",
"Rev": "567b2bfa514e796916c4747494d6ff5132a1dfce"
},
{
"ImportPath": "gopkg.in/ini.v1",
"Comment": "v0-16-g1772191",
"Rev": "177219109c97e7920c933e21c9b25f874357b237"
},
{
"ImportPath": "gopkg.in/redis.v2",
"Comment": "v2.3.2",
"Rev": "e6179049628164864e6e84e973cfb56335748dea"
}
]
}

11
Godeps/_workspace/src/gopkg.in/bufio.v1/.travis.yml generated vendored Normal file

@@ -0,0 +1,11 @@
language: go
go:
- 1.0
- 1.1
- 1.2
- tip
install:
- go get launchpad.net/gocheck
- go get gopkg.in/bufio.v1

27
Godeps/_workspace/src/gopkg.in/bufio.v1/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2013 The bufio Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

2
Godeps/_workspace/src/gopkg.in/bufio.v1/Makefile generated vendored Normal file

@@ -0,0 +1,2 @@
all:
go test gopkg.in/bufio.v1

4
Godeps/_workspace/src/gopkg.in/bufio.v1/README.md generated vendored Normal file

@@ -0,0 +1,4 @@
bufio
=====
This is a fork of the http://golang.org/pkg/bufio/ package. It adds a `ReadN` method that allows reading the next `n` bytes from the internal buffer without allocating an intermediate buffer. This method works just like the [Buffer.Next](http://golang.org/pkg/bytes/#Buffer.Next) method, but has a slightly different signature.
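For a quick sense of the difference, here is a minimal usage sketch (not part of the vendored package; the sample input and byte count are illustrative). Unlike `Buffer.Next`, `ReadN` lives on the `Reader` type and returns an error alongside the slice:

package main

import (
	"fmt"
	"strings"

	"gopkg.in/bufio.v1"
)

func main() {
	// Wrap any io.Reader; the returned slice points into the internal
	// buffer and is only valid until the next read call.
	r := bufio.NewReader(strings.NewReader("+OK\r\n"))

	head, err := r.ReadN(3) // read exactly the next 3 bytes
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Printf("%q\n", head) // "+OK"
}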

413
Godeps/_workspace/src/gopkg.in/bufio.v1/buffer.go generated vendored Normal file

@@ -0,0 +1,413 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
// Simple byte buffer for marshaling data.
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
// The zero value for Buffer is an empty buffer ready to use.
type Buffer struct {
buf []byte // contents are the bytes buf[off : len(buf)]
off int // read at &buf[off], write at &buf[len(buf)]
runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune
bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
lastRead readOp // last read operation, so that Unread* can work correctly.
}
// The readOp constants describe the last action performed on
// the buffer, so that UnreadRune and UnreadByte can
// check for invalid usage.
type readOp int
const (
opInvalid readOp = iota // Non-read operation.
opReadRune // Read rune.
opRead // Any other read operation.
)
// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer.
var ErrTooLarge = errors.New("bytes.Buffer: too large")
// Bytes returns a slice of the contents of the unread portion of the buffer;
// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
// returned slice, the contents of the buffer will change provided there
// are no intervening method calls on the Buffer.
func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
// String returns the contents of the unread portion of the buffer
// as a string. If the Buffer is a nil pointer, it returns "<nil>".
func (b *Buffer) String() string {
if b == nil {
// Special case, useful in debugging.
return "<nil>"
}
return string(b.buf[b.off:])
}
// Len returns the number of bytes of the unread portion of the buffer;
// b.Len() == len(b.Bytes()).
func (b *Buffer) Len() int { return len(b.buf) - b.off }
// Truncate discards all but the first n unread bytes from the buffer.
// It panics if n is negative or greater than the length of the buffer.
func (b *Buffer) Truncate(n int) {
b.lastRead = opInvalid
switch {
case n < 0 || n > b.Len():
panic("bytes.Buffer: truncation out of range")
case n == 0:
// Reuse buffer space.
b.off = 0
}
b.buf = b.buf[0 : b.off+n]
}
// Reset resets the buffer so it has no content.
// b.Reset() is the same as b.Truncate(0).
func (b *Buffer) Reset() { b.Truncate(0) }
// grow grows the buffer to guarantee space for n more bytes.
// It returns the index where bytes should be written.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) grow(n int) int {
m := b.Len()
// If buffer is empty, reset to recover space.
if m == 0 && b.off != 0 {
b.Truncate(0)
}
if len(b.buf)+n > cap(b.buf) {
var buf []byte
if b.buf == nil && n <= len(b.bootstrap) {
buf = b.bootstrap[0:]
} else if m+n <= cap(b.buf)/2 {
// We can slide things down instead of allocating a new
// slice. We only need m+n <= cap(b.buf) to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
copy(b.buf[:], b.buf[b.off:])
buf = b.buf[:m]
} else {
// not enough space anywhere
buf = makeSlice(2*cap(b.buf) + n)
copy(buf, b.buf[b.off:])
}
b.buf = buf
b.off = 0
}
b.buf = b.buf[0 : b.off+m+n]
return b.off + m
}
// Grow grows the buffer's capacity, if necessary, to guarantee space for
// another n bytes. After Grow(n), at least n bytes can be written to the
// buffer without another allocation.
// If n is negative, Grow will panic.
// If the buffer can't grow it will panic with ErrTooLarge.
func (b *Buffer) Grow(n int) {
if n < 0 {
panic("bytes.Buffer.Grow: negative count")
}
m := b.grow(n)
b.buf = b.buf[0:m]
}
// Write appends the contents of p to the buffer, growing the buffer as
// needed. The return value n is the length of p; err is always nil. If the
// buffer becomes too large, Write will panic with ErrTooLarge.
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lastRead = opInvalid
m := b.grow(len(p))
return copy(b.buf[m:], p), nil
}
// WriteString appends the contents of s to the buffer, growing the buffer as
// needed. The return value n is the length of s; err is always nil. If the
// buffer becomes too large, WriteString will panic with ErrTooLarge.
func (b *Buffer) WriteString(s string) (n int, err error) {
b.lastRead = opInvalid
m := b.grow(len(s))
return copy(b.buf[m:], s), nil
}
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads data from r until EOF and appends it to the buffer, growing
// the buffer as needed. The return value n is the number of bytes read. Any
// error except io.EOF encountered during the read is also returned. If the
// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
b.lastRead = opInvalid
// If buffer is empty, reset to recover space.
if b.off >= len(b.buf) {
b.Truncate(0)
}
for {
if free := cap(b.buf) - len(b.buf); free < MinRead {
// not enough space at end
newBuf := b.buf
if b.off+free < MinRead {
// not enough space using beginning of buffer;
// double buffer capacity
newBuf = makeSlice(2*cap(b.buf) + MinRead)
}
copy(newBuf, b.buf[b.off:])
b.buf = newBuf[:len(b.buf)-b.off]
b.off = 0
}
m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
b.buf = b.buf[0 : len(b.buf)+m]
n += int64(m)
if e == io.EOF {
break
}
if e != nil {
return n, e
}
}
return n, nil // err is EOF, so return nil explicitly
}
// makeSlice allocates a slice of size n. If the allocation fails, it panics
// with ErrTooLarge.
func makeSlice(n int) []byte {
// If the make fails, give a known error.
defer func() {
if recover() != nil {
panic(ErrTooLarge)
}
}()
return make([]byte, n)
}
// WriteTo writes data to w until the buffer is drained or an error occurs.
// The return value n is the number of bytes written; it always fits into an
// int, but it is int64 to match the io.WriterTo interface. Any error
// encountered during the write is also returned.
func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
b.lastRead = opInvalid
if b.off < len(b.buf) {
nBytes := b.Len()
m, e := w.Write(b.buf[b.off:])
if m > nBytes {
panic("bytes.Buffer.WriteTo: invalid Write count")
}
b.off += m
n = int64(m)
if e != nil {
return n, e
}
// all bytes should have been written, by definition of
// Write method in io.Writer
if m != nBytes {
return n, io.ErrShortWrite
}
}
// Buffer is now empty; reset.
b.Truncate(0)
return
}
// WriteByte appends the byte c to the buffer, growing the buffer as needed.
// The returned error is always nil, but is included to match bufio.Writer's
// WriteByte. If the buffer becomes too large, WriteByte will panic with
// ErrTooLarge.
func (b *Buffer) WriteByte(c byte) error {
b.lastRead = opInvalid
m := b.grow(1)
b.buf[m] = c
return nil
}
// WriteRune appends the UTF-8 encoding of Unicode code point r to the
// buffer, returning its length and an error, which is always nil but is
// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
// if it becomes too large, WriteRune will panic with ErrTooLarge.
func (b *Buffer) WriteRune(r rune) (n int, err error) {
if r < utf8.RuneSelf {
b.WriteByte(byte(r))
return 1, nil
}
n = utf8.EncodeRune(b.runeBytes[0:], r)
b.Write(b.runeBytes[0:n])
return n, nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value n is the number of bytes read. If the
// buffer has no data to return, err is io.EOF (unless len(p) is zero);
// otherwise it is nil.
func (b *Buffer) Read(p []byte) (n int, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
if len(p) == 0 {
return
}
return 0, io.EOF
}
n = copy(p, b.buf[b.off:])
b.off += n
if n > 0 {
b.lastRead = opRead
}
return
}
// Next returns a slice containing the next n bytes from the buffer,
// advancing the buffer as if the bytes had been returned by Read.
// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
// The slice is only valid until the next call to a read or write method.
func (b *Buffer) Next(n int) []byte {
b.lastRead = opInvalid
m := b.Len()
if n > m {
n = m
}
data := b.buf[b.off : b.off+n]
b.off += n
if n > 0 {
b.lastRead = opRead
}
return data
}
// ReadByte reads and returns the next byte from the buffer.
// If no byte is available, it returns error io.EOF.
func (b *Buffer) ReadByte() (c byte, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
return 0, io.EOF
}
c = b.buf[b.off]
b.off++
b.lastRead = opRead
return c, nil
}
// ReadRune reads and returns the next UTF-8-encoded
// Unicode code point from the buffer.
// If no bytes are available, the error returned is io.EOF.
// If the bytes are an erroneous UTF-8 encoding, it
// consumes one byte and returns U+FFFD, 1.
func (b *Buffer) ReadRune() (r rune, size int, err error) {
b.lastRead = opInvalid
if b.off >= len(b.buf) {
// Buffer is empty, reset to recover space.
b.Truncate(0)
return 0, 0, io.EOF
}
b.lastRead = opReadRune
c := b.buf[b.off]
if c < utf8.RuneSelf {
b.off++
return rune(c), 1, nil
}
r, n := utf8.DecodeRune(b.buf[b.off:])
b.off += n
return r, n, nil
}
// UnreadRune unreads the last rune returned by ReadRune.
// If the most recent read or write operation on the buffer was
// not a ReadRune, UnreadRune returns an error. (In this regard
// it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Buffer) UnreadRune() error {
if b.lastRead != opReadRune {
return errors.New("bytes.Buffer: UnreadRune: previous operation was not ReadRune")
}
b.lastRead = opInvalid
if b.off > 0 {
_, n := utf8.DecodeLastRune(b.buf[0:b.off])
b.off -= n
}
return nil
}
// UnreadByte unreads the last byte returned by the most recent
// read operation. If write has happened since the last read, UnreadByte
// returns an error.
func (b *Buffer) UnreadByte() error {
if b.lastRead != opReadRune && b.lastRead != opRead {
return errors.New("bytes.Buffer: UnreadByte: previous operation was not a read")
}
b.lastRead = opInvalid
if b.off > 0 {
b.off--
}
return nil
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
slice, err := b.readSlice(delim)
// return a copy of slice. The buffer's backing array may
// be overwritten by later calls.
line = append(line, slice...)
return
}
// readSlice is like ReadBytes but returns a reference to internal buffer data.
func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
i := bytes.IndexByte(b.buf[b.off:], delim)
end := b.off + i + 1
if i < 0 {
end = len(b.buf)
err = io.EOF
}
line = b.buf[b.off:end]
b.off = end
b.lastRead = opRead
return line, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end
// in delim.
func (b *Buffer) ReadString(delim byte) (line string, err error) {
slice, err := b.readSlice(delim)
return string(slice), err
}
// NewBuffer creates and initializes a new Buffer using buf as its initial
// contents. It is intended to prepare a Buffer to read existing data. It
// can also be used to size the internal buffer for writing. To do that,
// buf should have the desired capacity but a length of zero.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
// NewBufferString creates and initializes a new Buffer using string s as its
// initial contents. It is intended to prepare a buffer to read an existing
// string.
//
// In most cases, new(Buffer) (or just declaring a Buffer variable) is
// sufficient to initialize a Buffer.
func NewBufferString(s string) *Buffer {
return &Buffer{buf: []byte(s)}
}

527
Godeps/_workspace/src/gopkg.in/bufio.v1/buffer_test.go generated vendored Normal file

@@ -0,0 +1,527 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
import (
"bytes"
"io"
"math/rand"
"runtime"
"testing"
"unicode/utf8"
)
const N = 10000 // make this bigger for a larger (and slower) test
var data string // test data for write tests
var testBytes []byte // test data; same as data but as a slice.
func init() {
testBytes = make([]byte, N)
for i := 0; i < N; i++ {
testBytes[i] = 'a' + byte(i%26)
}
data = string(testBytes)
}
// Verify that contents of buf match the string s.
func check(t *testing.T, testname string, buf *Buffer, s string) {
bytes := buf.Bytes()
str := buf.String()
if buf.Len() != len(bytes) {
t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
}
if buf.Len() != len(str) {
t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
}
if buf.Len() != len(s) {
t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
}
if string(bytes) != s {
t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
}
}
// Fill buf through n writes of string fus.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.WriteString(fus)
if m != len(fus) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += fus
check(t, testname+" (fill 4)", buf, s)
}
return s
}
// Fill buf through n writes of byte slice fub.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.Write(fub)
if m != len(fub) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += string(fub)
check(t, testname+" (fill 4)", buf, s)
}
return s
}
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(testBytes)
check(t, "NewBuffer", buf, data)
}
func TestNewBufferString(t *testing.T) {
buf := NewBufferString(data)
check(t, "NewBufferString", buf, data)
}
// Empty buf through repeated reads into fub.
// The initial contents of buf corresponds to the string s.
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
check(t, testname+" (empty 1)", buf, s)
for {
n, err := buf.Read(fub)
if n == 0 {
break
}
if err != nil {
t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
}
s = s[n:]
check(t, testname+" (empty 3)", buf, s)
}
check(t, testname+" (empty 4)", buf, "")
}
func TestBasicOperations(t *testing.T) {
var buf Buffer
for i := 0; i < 5; i++ {
check(t, "TestBasicOperations (1)", &buf, "")
buf.Reset()
check(t, "TestBasicOperations (2)", &buf, "")
buf.Truncate(0)
check(t, "TestBasicOperations (3)", &buf, "")
n, err := buf.Write([]byte(data[0:1]))
if n != 1 {
t.Errorf("wrote 1 byte, but n == %d", n)
}
if err != nil {
t.Errorf("err should always be nil, but err == %s", err)
}
check(t, "TestBasicOperations (4)", &buf, "a")
buf.WriteByte(data[1])
check(t, "TestBasicOperations (5)", &buf, "ab")
n, err = buf.Write([]byte(data[2:26]))
if n != 24 {
t.Errorf("wrote 25 bytes, but n == %d", n)
}
check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
buf.Truncate(26)
check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
buf.Truncate(20)
check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
buf.WriteByte(data[1])
c, err := buf.ReadByte()
if err != nil {
t.Error("ReadByte unexpected eof")
}
if c != data[1] {
t.Errorf("ReadByte wrong value c=%v", c)
}
c, err = buf.ReadByte()
if err == nil {
t.Error("ReadByte unexpected not eof")
}
}
}
func TestLargeStringWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
}
check(t, "TestLargeStringWrites (3)", &buf, "")
}
func TestLargeByteWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
}
check(t, "TestLargeByteWrites (3)", &buf, "")
}
func TestLargeStringReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
}
check(t, "TestLargeStringReads (3)", &buf, "")
}
func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
}
check(t, "TestLargeByteReads (3)", &buf, "")
}
func TestMixedReadsAndWrites(t *testing.T) {
var buf Buffer
s := ""
for i := 0; i < 50; i++ {
wlen := rand.Intn(len(data))
if i%2 == 0 {
s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen])
} else {
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
}
rlen := rand.Intn(len(data))
fub := make([]byte, rlen)
n, _ := buf.Read(fub)
s = s[n:]
}
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
}
func TestNil(t *testing.T) {
var b *Buffer
if b.String() != "<nil>" {
t.Errorf("expected <nil>; got %q", b.String())
}
}
func TestReadFrom(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
b.ReadFrom(&buf)
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
}
}
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
buf.WriteTo(&b)
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
}
}
func TestRuneIO(t *testing.T) {
const NRune = 1000
// Build a test slice while we write the data
b := make([]byte, utf8.UTFMax*NRune)
var buf Buffer
n := 0
for r := rune(0); r < NRune; r++ {
size := utf8.EncodeRune(b[n:], r)
nbytes, err := buf.WriteRune(r)
if err != nil {
t.Fatalf("WriteRune(%U) error: %s", r, err)
}
if nbytes != size {
t.Fatalf("WriteRune(%U) expected %d, got %d", r, size, nbytes)
}
n += size
}
b = b[0:n]
// Check the resulting bytes
if !bytes.Equal(buf.Bytes(), b) {
t.Fatalf("incorrect result from WriteRune: %q not %q", buf.Bytes(), b)
}
p := make([]byte, utf8.UTFMax)
// Read it back with ReadRune
for r := rune(0); r < NRune; r++ {
size := utf8.EncodeRune(p, r)
nr, nbytes, err := buf.ReadRune()
if nr != r || nbytes != size || err != nil {
t.Fatalf("ReadRune(%U) got %U,%d not %U,%d (err=%s)", r, nr, nbytes, r, size, err)
}
}
// Check that UnreadRune works
buf.Reset()
buf.Write(b)
for r := rune(0); r < NRune; r++ {
r1, size, _ := buf.ReadRune()
if err := buf.UnreadRune(); err != nil {
t.Fatalf("UnreadRune(%U) got error %q", r, err)
}
r2, nbytes, err := buf.ReadRune()
if r1 != r2 || r1 != r || nbytes != size || err != nil {
t.Fatalf("ReadRune(%U) after UnreadRune got %U,%d not %U,%d (err=%s)", r, r2, nbytes, r, size, err)
}
}
}
func TestNext(t *testing.T) {
b := []byte{0, 1, 2, 3, 4}
tmp := make([]byte, 5)
for i := 0; i <= 5; i++ {
for j := i; j <= 5; j++ {
for k := 0; k <= 6; k++ {
// 0 <= i <= j <= 5; 0 <= k <= 6
// Check that if we start with a buffer
// of length j at offset i and ask for
// Next(k), we get the right bytes.
buf := NewBuffer(b[0:j])
n, _ := buf.Read(tmp[0:i])
if n != i {
t.Fatalf("Read %d returned %d", i, n)
}
bb := buf.Next(k)
want := k
if want > j-i {
want = j - i
}
if len(bb) != want {
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
}
for l, v := range bb {
if v != byte(l+i) {
t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
}
}
}
}
}
}
var readBytesTests = []struct {
buffer string
delim byte
expected []string
err error
}{
{"", 0, []string{""}, io.EOF},
{"a\x00", 0, []string{"a\x00"}, nil},
{"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil},
{"hello\x01world", 1, []string{"hello\x01"}, nil},
{"foo\nbar", 0, []string{"foo\nbar"}, io.EOF},
{"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil},
{"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF},
}
func TestReadBytes(t *testing.T) {
for _, test := range readBytesTests {
buf := NewBufferString(test.buffer)
var err error
for _, expected := range test.expected {
var bytes []byte
bytes, err = buf.ReadBytes(test.delim)
if string(bytes) != expected {
t.Errorf("expected %q, got %q", expected, bytes)
}
if err != nil {
break
}
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
}
}
func TestReadString(t *testing.T) {
for _, test := range readBytesTests {
buf := NewBufferString(test.buffer)
var err error
for _, expected := range test.expected {
var s string
s, err = buf.ReadString(test.delim)
if s != expected {
t.Errorf("expected %q, got %q", expected, s)
}
if err != nil {
break
}
}
if err != test.err {
t.Errorf("expected error %v, got %v", test.err, err)
}
}
}
func BenchmarkReadString(b *testing.B) {
const n = 32 << 10
data := make([]byte, n)
data[n-1] = 'x'
b.SetBytes(int64(n))
for i := 0; i < b.N; i++ {
buf := NewBuffer(data)
_, err := buf.ReadString('x')
if err != nil {
b.Fatal(err)
}
}
}
func TestGrow(t *testing.T) {
x := []byte{'x'}
y := []byte{'y'}
tmp := make([]byte, 72)
for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
xBytes := bytes.Repeat(x, startLen)
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
buf := NewBuffer(xBytes)
// If we read, this affects buf.off, which is good to test.
readBytes, _ := buf.Read(tmp)
buf.Grow(growLen)
yBytes := bytes.Repeat(y, growLen)
// Check no allocation occurs in write, as long as we're single-threaded.
var m1, m2 runtime.MemStats
runtime.ReadMemStats(&m1)
buf.Write(yBytes)
runtime.ReadMemStats(&m2)
if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
t.Errorf("allocation occurred during write")
}
// Check that buffer has correct data.
if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
t.Errorf("bad initial data at %d %d", startLen, growLen)
}
if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
t.Errorf("bad written data at %d %d", startLen, growLen)
}
}
}
}
// Was a bug: used to give EOF reading empty slice at EOF.
func TestReadEmptyAtEOF(t *testing.T) {
b := new(Buffer)
slice := make([]byte, 0)
n, err := b.Read(slice)
if err != nil {
t.Errorf("read error: %v", err)
}
if n != 0 {
t.Errorf("wrong count; got %d want 0", n)
}
}
func TestBufferUnreadByte(t *testing.T) {
b := new(Buffer)
b.WriteString("abcdefghijklmnopqrstuvwxyz")
_, err := b.ReadBytes('m')
if err != nil {
t.Fatalf("ReadBytes: %v", err)
}
err = b.UnreadByte()
if err != nil {
t.Fatalf("UnreadByte: %v", err)
}
c, err := b.ReadByte()
if err != nil {
t.Fatalf("ReadByte: %v", err)
}
if c != 'm' {
t.Errorf("ReadByte = %q; want %q", c, 'm')
}
}
// Tests that we occasionally compact. Issue 5154.
func TestBufferGrowth(t *testing.T) {
var b Buffer
buf := make([]byte, 1024)
b.Write(buf[0:1])
var cap0 int
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
if i == 0 {
cap0 = b.Cap()
}
}
cap1 := b.Cap()
// (*Buffer).grow allows for 2x capacity slop before sliding,
// so set our error threshold at 3x.
if cap1 > cap0*3 {
t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
}
}
// From Issue 5154.
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf[0:1])
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
}
}
}
// Check that we don't compact too often. From Issue 5154.
func BenchmarkBufferFullSmallReads(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf)
for b.Len()+20 < b.Cap() {
b.Write(buf[:10])
}
for i := 0; i < 5<<10; i++ {
b.Read(buf[:1])
b.Write(buf[:1])
}
}
}

728
Godeps/_workspace/src/gopkg.in/bufio.v1/bufio.go generated vendored Normal file

@@ -0,0 +1,728 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer
// object, creating another object (Reader or Writer) that also implements
// the interface but provides buffering and some help for textual I/O.
package bufio
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
const (
defaultBufSize = 4096
)
var (
ErrInvalidUnreadByte = errors.New("bufio: invalid use of UnreadByte")
ErrInvalidUnreadRune = errors.New("bufio: invalid use of UnreadRune")
ErrBufferFull = errors.New("bufio: buffer full")
ErrNegativeCount = errors.New("bufio: negative count")
)
// Buffered input.
// Reader implements buffering for an io.Reader object.
type Reader struct {
buf []byte
rd io.Reader
r, w int
err error
lastByte int
lastRuneSize int
}
const minReadBufferSize = 16
const maxConsecutiveEmptyReads = 100
// NewReaderSize returns a new Reader whose buffer has at least the specified
// size. If the argument io.Reader is already a Reader with large enough
// size, it returns the underlying Reader.
func NewReaderSize(rd io.Reader, size int) *Reader {
// Is it already a Reader?
b, ok := rd.(*Reader)
if ok && len(b.buf) >= size {
return b
}
if size < minReadBufferSize {
size = minReadBufferSize
}
r := new(Reader)
r.reset(make([]byte, size), rd)
return r
}
// NewReader returns a new Reader whose buffer has the default size.
func NewReader(rd io.Reader) *Reader {
return NewReaderSize(rd, defaultBufSize)
}
// Reset discards any buffered data, resets all state, and switches
// the buffered reader to read from r.
func (b *Reader) Reset(r io.Reader) {
b.reset(b.buf, r)
}
func (b *Reader) reset(buf []byte, r io.Reader) {
*b = Reader{
buf: buf,
rd: r,
lastByte: -1,
lastRuneSize: -1,
}
}
var errNegativeRead = errors.New("bufio: reader returned negative count from Read")
// fill reads a new chunk into the buffer.
func (b *Reader) fill() {
// Slide existing data to beginning.
if b.r > 0 {
copy(b.buf, b.buf[b.r:b.w])
b.w -= b.r
b.r = 0
}
if b.w >= len(b.buf) {
panic("bufio: tried to fill full buffer")
}
// Read new data: try a limited number of times.
for i := maxConsecutiveEmptyReads; i > 0; i-- {
n, err := b.rd.Read(b.buf[b.w:])
if n < 0 {
panic(errNegativeRead)
}
b.w += n
if err != nil {
b.err = err
return
}
if n > 0 {
return
}
}
b.err = io.ErrNoProgress
}
func (b *Reader) readErr() error {
err := b.err
b.err = nil
return err
}
// Peek returns the next n bytes without advancing the reader. The bytes stop
// being valid at the next read call. If Peek returns fewer than n bytes, it
// also returns an error explaining why the read is short. The error is
// ErrBufferFull if n is larger than b's buffer size.
func (b *Reader) Peek(n int) ([]byte, error) {
if n < 0 {
return nil, ErrNegativeCount
}
if n > len(b.buf) {
return nil, ErrBufferFull
}
// 0 <= n <= len(b.buf)
for b.w-b.r < n && b.err == nil {
b.fill() // b.w-b.r < len(b.buf) => buffer is not full
}
m := b.w - b.r
if m > n {
m = n
}
var err error
if m < n {
err = b.readErr()
if err == nil {
err = ErrBufferFull
}
}
return b.buf[b.r : b.r+m], err
}
// Read reads data into p.
// It returns the number of bytes read into p.
// It calls Read at most once on the underlying Reader,
// hence n may be less than len(p).
// At EOF, the count will be zero and err will be io.EOF.
func (b *Reader) Read(p []byte) (n int, err error) {
n = len(p)
if n == 0 {
return 0, b.readErr()
}
if b.r == b.w {
if b.err != nil {
return 0, b.readErr()
}
if len(p) >= len(b.buf) {
// Large read, empty buffer.
// Read directly into p to avoid copy.
n, b.err = b.rd.Read(p)
if n < 0 {
panic(errNegativeRead)
}
if n > 0 {
b.lastByte = int(p[n-1])
b.lastRuneSize = -1
}
return n, b.readErr()
}
b.fill() // buffer is empty
if b.w == b.r {
return 0, b.readErr()
}
}
if n > b.w-b.r {
n = b.w - b.r
}
copy(p[0:n], b.buf[b.r:])
b.r += n
b.lastByte = int(b.buf[b.r-1])
b.lastRuneSize = -1
return n, nil
}
// ReadByte reads and returns a single byte.
// If no byte is available, returns an error.
func (b *Reader) ReadByte() (c byte, err error) {
b.lastRuneSize = -1
for b.r == b.w {
if b.err != nil {
return 0, b.readErr()
}
b.fill() // buffer is empty
}
c = b.buf[b.r]
b.r++
b.lastByte = int(c)
return c, nil
}
// UnreadByte unreads the last byte. Only the most recently read byte can be unread.
func (b *Reader) UnreadByte() error {
if b.lastByte < 0 || b.r == 0 && b.w > 0 {
return ErrInvalidUnreadByte
}
// b.r > 0 || b.w == 0
if b.r > 0 {
b.r--
} else {
// b.r == 0 && b.w == 0
b.w = 1
}
b.buf[b.r] = byte(b.lastByte)
b.lastByte = -1
b.lastRuneSize = -1
return nil
}
// ReadRune reads a single UTF-8 encoded Unicode character and returns the
// rune and its size in bytes. If the encoded rune is invalid, it consumes one byte
// and returns unicode.ReplacementChar (U+FFFD) with a size of 1.
func (b *Reader) ReadRune() (r rune, size int, err error) {
for b.r+utf8.UTFMax > b.w && !utf8.FullRune(b.buf[b.r:b.w]) && b.err == nil && b.w-b.r < len(b.buf) {
b.fill() // b.w-b.r < len(buf) => buffer is not full
}
b.lastRuneSize = -1
if b.r == b.w {
return 0, 0, b.readErr()
}
r, size = rune(b.buf[b.r]), 1
if r >= 0x80 {
r, size = utf8.DecodeRune(b.buf[b.r:b.w])
}
b.r += size
b.lastByte = int(b.buf[b.r-1])
b.lastRuneSize = size
return r, size, nil
}
// UnreadRune unreads the last rune. If the most recent read operation on
// the buffer was not a ReadRune, UnreadRune returns an error. (In this
// regard it is stricter than UnreadByte, which will unread the last byte
// from any read operation.)
func (b *Reader) UnreadRune() error {
if b.lastRuneSize < 0 || b.r < b.lastRuneSize {
return ErrInvalidUnreadRune
}
b.r -= b.lastRuneSize
b.lastByte = -1
b.lastRuneSize = -1
return nil
}
// Buffered returns the number of bytes that can be read from the current buffer.
func (b *Reader) Buffered() int { return b.w - b.r }
// ReadSlice reads until the first occurrence of delim in the input,
// returning a slice pointing at the bytes in the buffer.
// The bytes stop being valid at the next read.
// If ReadSlice encounters an error before finding a delimiter,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim.
// Because the data returned from ReadSlice will be overwritten
// by the next I/O operation, most clients should use
// ReadBytes or ReadString instead.
// ReadSlice returns err != nil if and only if line does not end in delim.
func (b *Reader) ReadSlice(delim byte) (line []byte, err error) {
for {
// Search buffer.
if i := bytes.IndexByte(b.buf[b.r:b.w], delim); i >= 0 {
line = b.buf[b.r : b.r+i+1]
b.r += i + 1
break
}
// Pending error?
if b.err != nil {
line = b.buf[b.r:b.w]
b.r = b.w
err = b.readErr()
break
}
// Buffer full?
if n := b.Buffered(); n >= len(b.buf) {
b.r = b.w
line = b.buf
err = ErrBufferFull
break
}
b.fill() // buffer is not full
}
// Handle last byte, if any.
if i := len(line) - 1; i >= 0 {
b.lastByte = int(line[i])
}
return
}
// ReadN tries to read exactly n bytes.
// The bytes stop being valid at the next read call.
// If ReadN encounters an error before reading n bytes,
// it returns all the data in the buffer and the error itself (often io.EOF).
// ReadN fails with error ErrBufferFull if the buffer fills
// without reading N bytes.
// Because the data returned from ReadN will be overwritten
// by the next I/O operation, most clients should use
// ReadBytes or ReadString instead.
func (b *Reader) ReadN(n int) ([]byte, error) {
for b.Buffered() < n {
if b.err != nil {
buf := b.buf[b.r:b.w]
b.r = b.w
return buf, b.readErr()
}
// Buffer is full?
if b.Buffered() >= len(b.buf) {
b.r = b.w
return b.buf, ErrBufferFull
}
b.fill()
}
buf := b.buf[b.r : b.r+n]
b.r += n
return buf, nil
}
// ReadLine is a low-level line-reading primitive. Most callers should use
// ReadBytes('\n') or ReadString('\n') instead or use a Scanner.
//
// ReadLine tries to return a single line, not including the end-of-line bytes.
// If the line was too long for the buffer then isPrefix is set and the
// beginning of the line is returned. The rest of the line will be returned
// from future calls. isPrefix will be false when returning the last fragment
// of the line. The returned buffer is only valid until the next call to
// ReadLine. ReadLine either returns a non-nil line or it returns an error,
// never both.
//
// The text returned from ReadLine does not include the line end ("\r\n" or "\n").
// No indication or error is given if the input ends without a final line end.
// Calling UnreadByte after ReadLine will always unread the last byte read
// (possibly a character belonging to the line end) even if that byte is not
// part of the line returned by ReadLine.
func (b *Reader) ReadLine() (line []byte, isPrefix bool, err error) {
line, err = b.ReadSlice('\n')
if err == ErrBufferFull {
// Handle the case where "\r\n" straddles the buffer.
if len(line) > 0 && line[len(line)-1] == '\r' {
// Put the '\r' back on buf and drop it from line.
// Let the next call to ReadLine check for "\r\n".
if b.r == 0 {
// should be unreachable
panic("bufio: tried to rewind past start of buffer")
}
b.r--
line = line[:len(line)-1]
}
return line, true, nil
}
if len(line) == 0 {
if err != nil {
line = nil
}
return
}
err = nil
if line[len(line)-1] == '\n' {
drop := 1
if len(line) > 1 && line[len(line)-2] == '\r' {
drop = 2
}
line = line[:len(line)-drop]
}
return
}
// ReadBytes reads until the first occurrence of delim in the input,
// returning a slice containing the data up to and including the delimiter.
// If ReadBytes encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadBytes returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadBytes(delim byte) (line []byte, err error) {
// Use ReadSlice to look for array,
// accumulating full buffers.
var frag []byte
var full [][]byte
err = nil
for {
var e error
frag, e = b.ReadSlice(delim)
if e == nil { // got final fragment
break
}
if e != ErrBufferFull { // unexpected error
err = e
break
}
// Make a copy of the buffer.
buf := make([]byte, len(frag))
copy(buf, frag)
full = append(full, buf)
}
// Allocate new buffer to hold the full pieces and the fragment.
n := 0
for i := range full {
n += len(full[i])
}
n += len(frag)
// Copy full pieces and fragment in.
buf := make([]byte, n)
n = 0
for i := range full {
n += copy(buf[n:], full[i])
}
copy(buf[n:], frag)
return buf, err
}
// ReadString reads until the first occurrence of delim in the input,
// returning a string containing the data up to and including the delimiter.
// If ReadString encounters an error before finding a delimiter,
// it returns the data read before the error and the error itself (often io.EOF).
// ReadString returns err != nil if and only if the returned data does not end in
// delim.
// For simple uses, a Scanner may be more convenient.
func (b *Reader) ReadString(delim byte) (line string, err error) {
bytes, err := b.ReadBytes(delim)
line = string(bytes)
return line, err
}
// WriteTo implements io.WriterTo.
func (b *Reader) WriteTo(w io.Writer) (n int64, err error) {
n, err = b.writeBuf(w)
if err != nil {
return
}
if r, ok := b.rd.(io.WriterTo); ok {
m, err := r.WriteTo(w)
n += m
return n, err
}
if w, ok := w.(io.ReaderFrom); ok {
m, err := w.ReadFrom(b.rd)
n += m
return n, err
}
if b.w-b.r < len(b.buf) {
b.fill() // buffer not full
}
for b.r < b.w {
// b.r < b.w => buffer is not empty
m, err := b.writeBuf(w)
n += m
if err != nil {
return n, err
}
b.fill() // buffer is empty
}
if b.err == io.EOF {
b.err = nil
}
return n, b.readErr()
}
// writeBuf writes the Reader's buffer to the writer.
func (b *Reader) writeBuf(w io.Writer) (int64, error) {
n, err := w.Write(b.buf[b.r:b.w])
if n < b.w-b.r {
panic(errors.New("bufio: writer did not write all data"))
}
b.r += n
return int64(n), err
}
// buffered output
// Writer implements buffering for an io.Writer object.
// If an error occurs writing to a Writer, no more data will be
// accepted and all subsequent writes will return the error.
// After all data has been written, the client should call the
// Flush method to guarantee all data has been forwarded to
// the underlying io.Writer.
type Writer struct {
err error
buf []byte
n int
wr io.Writer
}
// NewWriterSize returns a new Writer whose buffer has at least the specified
// size. If the argument io.Writer is already a Writer with large enough
// size, it returns the underlying Writer.
func NewWriterSize(w io.Writer, size int) *Writer {
// Is it already a Writer?
b, ok := w.(*Writer)
if ok && len(b.buf) >= size {
return b
}
if size <= 0 {
size = defaultBufSize
}
return &Writer{
buf: make([]byte, size),
wr: w,
}
}
// NewWriter returns a new Writer whose buffer has the default size.
func NewWriter(w io.Writer) *Writer {
return NewWriterSize(w, defaultBufSize)
}
// Reset discards any unflushed buffered data, clears any error, and
// resets b to write its output to w.
func (b *Writer) Reset(w io.Writer) {
b.err = nil
b.n = 0
b.wr = w
}
// Flush writes any buffered data to the underlying io.Writer.
func (b *Writer) Flush() error {
err := b.flush()
return err
}
func (b *Writer) flush() error {
if b.err != nil {
return b.err
}
if b.n == 0 {
return nil
}
n, err := b.wr.Write(b.buf[0:b.n])
if n < b.n && err == nil {
err = io.ErrShortWrite
}
if err != nil {
if n > 0 && n < b.n {
copy(b.buf[0:b.n-n], b.buf[n:b.n])
}
b.n -= n
b.err = err
return err
}
b.n = 0
return nil
}
// Available returns how many bytes are unused in the buffer.
func (b *Writer) Available() int { return len(b.buf) - b.n }
// Buffered returns the number of bytes that have been written into the current buffer.
func (b *Writer) Buffered() int { return b.n }
// Write writes the contents of p into the buffer.
// It returns the number of bytes written.
// If nn < len(p), it also returns an error explaining
// why the write is short.
func (b *Writer) Write(p []byte) (nn int, err error) {
for len(p) > b.Available() && b.err == nil {
var n int
if b.Buffered() == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, b.err = b.wr.Write(p)
} else {
n = copy(b.buf[b.n:], p)
b.n += n
b.flush()
}
nn += n
p = p[n:]
}
if b.err != nil {
return nn, b.err
}
n := copy(b.buf[b.n:], p)
b.n += n
nn += n
return nn, nil
}
// WriteByte writes a single byte.
func (b *Writer) WriteByte(c byte) error {
if b.err != nil {
return b.err
}
if b.Available() <= 0 && b.flush() != nil {
return b.err
}
b.buf[b.n] = c
b.n++
return nil
}
// WriteRune writes a single Unicode code point, returning
// the number of bytes written and any error.
func (b *Writer) WriteRune(r rune) (size int, err error) {
if r < utf8.RuneSelf {
err = b.WriteByte(byte(r))
if err != nil {
return 0, err
}
return 1, nil
}
if b.err != nil {
return 0, b.err
}
n := b.Available()
if n < utf8.UTFMax {
if b.flush(); b.err != nil {
return 0, b.err
}
n = b.Available()
if n < utf8.UTFMax {
// Can only happen if buffer is silly small.
return b.WriteString(string(r))
}
}
size = utf8.EncodeRune(b.buf[b.n:], r)
b.n += size
return size, nil
}
// WriteString writes a string.
// It returns the number of bytes written.
// If the count is less than len(s), it also returns an error explaining
// why the write is short.
func (b *Writer) WriteString(s string) (int, error) {
nn := 0
for len(s) > b.Available() && b.err == nil {
n := copy(b.buf[b.n:], s)
b.n += n
nn += n
s = s[n:]
b.flush()
}
if b.err != nil {
return nn, b.err
}
n := copy(b.buf[b.n:], s)
b.n += n
nn += n
return nn, nil
}
// ReadFrom implements io.ReaderFrom.
func (b *Writer) ReadFrom(r io.Reader) (n int64, err error) {
if b.Buffered() == 0 {
if w, ok := b.wr.(io.ReaderFrom); ok {
return w.ReadFrom(r)
}
}
var m int
for {
if b.Available() == 0 {
if err1 := b.flush(); err1 != nil {
return n, err1
}
}
nr := 0
for nr < maxConsecutiveEmptyReads {
m, err = r.Read(b.buf[b.n:])
if m != 0 || err != nil {
break
}
nr++
}
if nr == maxConsecutiveEmptyReads {
return n, io.ErrNoProgress
}
b.n += m
n += int64(m)
if err != nil {
break
}
}
if err == io.EOF {
// If we filled the buffer exactly, flush pre-emptively.
if b.Available() == 0 {
err = b.flush()
} else {
err = nil
}
}
return n, err
}
// buffered input and output
// ReadWriter stores pointers to a Reader and a Writer.
// It implements io.ReadWriter.
type ReadWriter struct {
*Reader
*Writer
}
// NewReadWriter allocates a new ReadWriter that dispatches to r and w.
func NewReadWriter(r *Reader, w *Writer) *ReadWriter {
return &ReadWriter{r, w}
}

1418
Godeps/_workspace/src/gopkg.in/bufio.v1/bufio_test.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,9 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
func (b *Buffer) Cap() int {
return cap(b.buf)
}

19
Godeps/_workspace/src/gopkg.in/redis.v2/.travis.yml generated vendored Normal file

@@ -0,0 +1,19 @@
language: go
services:
- redis-server
go:
- 1.1
- 1.2
- 1.3
- tip
install:
- go get gopkg.in/bufio.v1
- go get gopkg.in/check.v1
- mkdir -p $HOME/gopath/src/gopkg.in
- ln -s `pwd` $HOME/gopath/src/gopkg.in/redis.v2
before_script:
- redis-server testdata/sentinel.conf --sentinel &

27
Godeps/_workspace/src/gopkg.in/redis.v2/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2012 The Redis Go Client Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

3
Godeps/_workspace/src/gopkg.in/redis.v2/Makefile generated vendored Normal file

@@ -0,0 +1,3 @@
all:
go test gopkg.in/redis.v2 -cpu=1,2,4
go test gopkg.in/redis.v2 -short -race

46
Godeps/_workspace/src/gopkg.in/redis.v2/README.md generated vendored Normal file

@@ -0,0 +1,46 @@
Redis client for Golang [![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
=======================
Supports:
- Redis 2.8 commands except QUIT, MONITOR, SLOWLOG and SYNC.
- Pub/sub.
- Transactions.
- Pipelining.
- Connection pool.
- TLS connections.
- Thread safety.
- Timeouts.
- Redis Sentinel.
API docs: http://godoc.org/gopkg.in/redis.v2.
Examples: http://godoc.org/gopkg.in/redis.v2#pkg-examples.
Installation
------------
Install:
go get gopkg.in/redis.v2
Look and feel
-------------
Some corner cases:
SORT list LIMIT 0 2 ASC
vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeByScore{
Min: "-inf",
Max: "+inf",
Offset: 0,
Count: 2,
}).Result()
ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, []string{"hello"}).Result()
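As a complement to the corner cases above, a minimal end-to-end sketch (not part of this commit; it assumes the v2 client API documented at the godoc link above, i.e. `NewTCPClient`, `Options`, `Set`, `Get`, and the address, key and value here are placeholders):

package main

import (
	"fmt"

	"gopkg.in/redis.v2"
)

func main() {
	// Assumed v2 constructor and options; Addr/DB values are illustrative.
	client := redis.NewTCPClient(&redis.Options{
		Addr: "localhost:6379",
		DB:   0,
	})
	defer client.Close()

	// Commands return command objects; Err()/Result() surface the reply.
	if err := client.Set("example:key", "value").Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}

	val, err := client.Get("example:key").Result()
	if err == redis.Nil {
		fmt.Println("key does not exist")
	} else if err != nil {
		fmt.Println("get failed:", err)
	} else {
		fmt.Println("example:key =", val)
	}
}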

597
Godeps/_workspace/src/gopkg.in/redis.v2/command.go generated vendored Normal file

@@ -0,0 +1,597 @@
package redis
import (
"fmt"
"strconv"
"strings"
"time"
"gopkg.in/bufio.v1"
)
var (
_ Cmder = (*Cmd)(nil)
_ Cmder = (*SliceCmd)(nil)
_ Cmder = (*StatusCmd)(nil)
_ Cmder = (*IntCmd)(nil)
_ Cmder = (*DurationCmd)(nil)
_ Cmder = (*BoolCmd)(nil)
_ Cmder = (*StringCmd)(nil)
_ Cmder = (*FloatCmd)(nil)
_ Cmder = (*StringSliceCmd)(nil)
_ Cmder = (*BoolSliceCmd)(nil)
_ Cmder = (*StringStringMapCmd)(nil)
_ Cmder = (*ZSliceCmd)(nil)
_ Cmder = (*ScanCmd)(nil)
)
type Cmder interface {
args() []string
parseReply(*bufio.Reader) error
setErr(error)
writeTimeout() *time.Duration
readTimeout() *time.Duration
Err() error
String() string
}
func setCmdsErr(cmds []Cmder, e error) {
for _, cmd := range cmds {
cmd.setErr(e)
}
}
func cmdString(cmd Cmder, val interface{}) string {
s := strings.Join(cmd.args(), " ")
if err := cmd.Err(); err != nil {
return s + ": " + err.Error()
}
if val != nil {
return s + ": " + fmt.Sprint(val)
}
return s
}
//------------------------------------------------------------------------------
type baseCmd struct {
_args []string
err error
_writeTimeout, _readTimeout *time.Duration
}
func newBaseCmd(args ...string) *baseCmd {
return &baseCmd{
_args: args,
}
}
func (cmd *baseCmd) Err() error {
if cmd.err != nil {
return cmd.err
}
return nil
}
func (cmd *baseCmd) args() []string {
return cmd._args
}
func (cmd *baseCmd) readTimeout() *time.Duration {
return cmd._readTimeout
}
func (cmd *baseCmd) setReadTimeout(d time.Duration) {
cmd._readTimeout = &d
}
func (cmd *baseCmd) writeTimeout() *time.Duration {
return cmd._writeTimeout
}
func (cmd *baseCmd) setWriteTimeout(d time.Duration) {
cmd._writeTimeout = &d
}
func (cmd *baseCmd) setErr(e error) {
cmd.err = e
}
//------------------------------------------------------------------------------
type Cmd struct {
*baseCmd
val interface{}
}
func NewCmd(args ...string) *Cmd {
return &Cmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *Cmd) Val() interface{} {
return cmd.val
}
func (cmd *Cmd) Result() (interface{}, error) {
return cmd.val, cmd.err
}
func (cmd *Cmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *Cmd) parseReply(rd *bufio.Reader) error {
cmd.val, cmd.err = parseReply(rd, parseSlice)
return cmd.err
}
//------------------------------------------------------------------------------
type SliceCmd struct {
*baseCmd
val []interface{}
}
func NewSliceCmd(args ...string) *SliceCmd {
return &SliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *SliceCmd) Val() []interface{} {
return cmd.val
}
func (cmd *SliceCmd) Result() ([]interface{}, error) {
return cmd.val, cmd.err
}
func (cmd *SliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *SliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]interface{})
return nil
}
//------------------------------------------------------------------------------
type StatusCmd struct {
*baseCmd
val string
}
func NewStatusCmd(args ...string) *StatusCmd {
return &StatusCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StatusCmd) Val() string {
return cmd.val
}
func (cmd *StatusCmd) Result() (string, error) {
return cmd.val, cmd.err
}
func (cmd *StatusCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StatusCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(string)
return nil
}
//------------------------------------------------------------------------------
type IntCmd struct {
*baseCmd
val int64
}
func NewIntCmd(args ...string) *IntCmd {
return &IntCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *IntCmd) Val() int64 {
return cmd.val
}
func (cmd *IntCmd) Result() (int64, error) {
return cmd.val, cmd.err
}
func (cmd *IntCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *IntCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(int64)
return nil
}
//------------------------------------------------------------------------------
type DurationCmd struct {
*baseCmd
val time.Duration
precision time.Duration
}
func NewDurationCmd(precision time.Duration, args ...string) *DurationCmd {
return &DurationCmd{
baseCmd: newBaseCmd(args...),
precision: precision,
}
}
func (cmd *DurationCmd) Val() time.Duration {
return cmd.val
}
func (cmd *DurationCmd) Result() (time.Duration, error) {
return cmd.val, cmd.err
}
func (cmd *DurationCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *DurationCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = time.Duration(v.(int64)) * cmd.precision
return nil
}
//------------------------------------------------------------------------------
type BoolCmd struct {
*baseCmd
val bool
}
func NewBoolCmd(args ...string) *BoolCmd {
return &BoolCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *BoolCmd) Val() bool {
return cmd.val
}
func (cmd *BoolCmd) Result() (bool, error) {
return cmd.val, cmd.err
}
func (cmd *BoolCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *BoolCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(int64) == 1
return nil
}
//------------------------------------------------------------------------------
type StringCmd struct {
*baseCmd
val string
}
func NewStringCmd(args ...string) *StringCmd {
return &StringCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StringCmd) Val() string {
return cmd.val
}
func (cmd *StringCmd) Result() (string, error) {
return cmd.val, cmd.err
}
func (cmd *StringCmd) Int64() (int64, error) {
if cmd.err != nil {
return 0, cmd.err
}
return strconv.ParseInt(cmd.val, 10, 64)
}
func (cmd *StringCmd) Uint64() (uint64, error) {
if cmd.err != nil {
return 0, cmd.err
}
return strconv.ParseUint(cmd.val, 10, 64)
}
func (cmd *StringCmd) Float64() (float64, error) {
if cmd.err != nil {
return 0, cmd.err
}
return strconv.ParseFloat(cmd.val, 64)
}
func (cmd *StringCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StringCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(string)
return nil
}
//------------------------------------------------------------------------------
type FloatCmd struct {
*baseCmd
val float64
}
func NewFloatCmd(args ...string) *FloatCmd {
return &FloatCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *FloatCmd) Val() float64 {
return cmd.val
}
func (cmd *FloatCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *FloatCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, nil)
if err != nil {
cmd.err = err
return err
}
cmd.val, cmd.err = strconv.ParseFloat(v.(string), 64)
return cmd.err
}
//------------------------------------------------------------------------------
type StringSliceCmd struct {
*baseCmd
val []string
}
func NewStringSliceCmd(args ...string) *StringSliceCmd {
return &StringSliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StringSliceCmd) Val() []string {
return cmd.val
}
func (cmd *StringSliceCmd) Result() ([]string, error) {
return cmd.Val(), cmd.Err()
}
func (cmd *StringSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StringSliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseStringSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]string)
return nil
}
//------------------------------------------------------------------------------
type BoolSliceCmd struct {
*baseCmd
val []bool
}
func NewBoolSliceCmd(args ...string) *BoolSliceCmd {
return &BoolSliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *BoolSliceCmd) Val() []bool {
return cmd.val
}
func (cmd *BoolSliceCmd) Result() ([]bool, error) {
return cmd.val, cmd.err
}
func (cmd *BoolSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *BoolSliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseBoolSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]bool)
return nil
}
//------------------------------------------------------------------------------
type StringStringMapCmd struct {
*baseCmd
val map[string]string
}
func NewStringStringMapCmd(args ...string) *StringStringMapCmd {
return &StringStringMapCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *StringStringMapCmd) Val() map[string]string {
return cmd.val
}
func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
return cmd.val, cmd.err
}
func (cmd *StringStringMapCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *StringStringMapCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseStringStringMap)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.(map[string]string)
return nil
}
//------------------------------------------------------------------------------
type ZSliceCmd struct {
*baseCmd
val []Z
}
func NewZSliceCmd(args ...string) *ZSliceCmd {
return &ZSliceCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *ZSliceCmd) Val() []Z {
return cmd.val
}
func (cmd *ZSliceCmd) Result() ([]Z, error) {
return cmd.val, cmd.err
}
func (cmd *ZSliceCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *ZSliceCmd) parseReply(rd *bufio.Reader) error {
v, err := parseReply(rd, parseZSlice)
if err != nil {
cmd.err = err
return err
}
cmd.val = v.([]Z)
return nil
}
//------------------------------------------------------------------------------
type ScanCmd struct {
*baseCmd
cursor int64
keys []string
}
func NewScanCmd(args ...string) *ScanCmd {
return &ScanCmd{
baseCmd: newBaseCmd(args...),
}
}
func (cmd *ScanCmd) Val() (int64, []string) {
return cmd.cursor, cmd.keys
}
func (cmd *ScanCmd) Result() (int64, []string, error) {
return cmd.cursor, cmd.keys, cmd.err
}
func (cmd *ScanCmd) String() string {
return cmdString(cmd, cmd.keys)
}
func (cmd *ScanCmd) parseReply(rd *bufio.Reader) error {
vi, err := parseReply(rd, parseSlice)
if err != nil {
cmd.err = err
return cmd.err
}
v := vi.([]interface{})
cmd.cursor, cmd.err = strconv.ParseInt(v[0].(string), 10, 64)
if cmd.err != nil {
return cmd.err
}
keys := v[1].([]interface{})
for _, keyi := range keys {
cmd.keys = append(cmd.keys, keyi.(string))
}
return nil
}

1246
Godeps/_workspace/src/gopkg.in/redis.v2/commands.go generated vendored Normal file

File diff suppressed because it is too large
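The suppressed commands.go is essentially a long series of thin wrappers that build a typed command, run it through Process, and return it for the caller to inspect; Example_customCommand later in this diff shows the same pattern from outside the package. A minimal sketch of that pattern (illustrative only, not the vendored file's exact contents):

// Sketch of the wrapper pattern used throughout commands.go (illustrative only):
// build a typed command, hand it to Process, return it so the caller can read
// Val()/Err()/Result().
func (c *Client) Get(key string) *StringCmd {
	cmd := NewStringCmd("GET", key)
	c.Process(cmd)
	return cmd
}

func (c *Client) Set(key, value string) *StatusCmd {
	cmd := NewStatusCmd("SET", key, value)
	c.Process(cmd)
	return cmd
}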

4
Godeps/_workspace/src/gopkg.in/redis.v2/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
/*
Package redis implements a Redis client.
*/
package redis

23
Godeps/_workspace/src/gopkg.in/redis.v2/error.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
package redis
import (
"fmt"
)
// Redis nil reply.
var Nil = errorf("redis: nil")
// Redis transaction failed.
var TxFailedErr = errorf("redis: transaction failed")
type redisError struct {
s string
}
func errorf(s string, args ...interface{}) redisError {
return redisError{s: fmt.Sprintf(s, args...)}
}
func (err redisError) Error() string {
return err.s
}

180
Godeps/_workspace/src/gopkg.in/redis.v2/example_test.go generated vendored Normal file
View File

@ -0,0 +1,180 @@
package redis_test
import (
"fmt"
"strconv"
"gopkg.in/redis.v2"
)
var client *redis.Client
func init() {
client = redis.NewTCPClient(&redis.Options{
Addr: ":6379",
})
client.FlushDb()
}
func ExampleNewTCPClient() {
client := redis.NewTCPClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
pong, err := client.Ping().Result()
fmt.Println(pong, err)
// Output: PONG <nil>
}
func ExampleNewFailoverClient() {
client := redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: "master",
SentinelAddrs: []string{":26379"},
})
pong, err := client.Ping().Result()
fmt.Println(pong, err)
// Output: PONG <nil>
}
func ExampleClient() {
if err := client.Set("foo", "bar").Err(); err != nil {
panic(err)
}
v, err := client.Get("hello").Result()
fmt.Printf("%q %q %v", v, err, err == redis.Nil)
// Output: "" "redis: nil" true
}
func ExampleClient_Incr() {
if err := client.Incr("counter").Err(); err != nil {
panic(err)
}
n, err := client.Get("counter").Int64()
fmt.Println(n, err)
// Output: 1 <nil>
}
func ExampleClient_Pipelined() {
cmds, err := client.Pipelined(func(c *redis.Pipeline) error {
c.Set("key1", "hello1")
c.Get("key1")
return nil
})
fmt.Println(err)
set := cmds[0].(*redis.StatusCmd)
fmt.Println(set)
get := cmds[1].(*redis.StringCmd)
fmt.Println(get)
// Output: <nil>
// SET key1 hello1: OK
// GET key1: hello1
}
func ExamplePipeline() {
pipeline := client.Pipeline()
set := pipeline.Set("key1", "hello1")
get := pipeline.Get("key1")
cmds, err := pipeline.Exec()
fmt.Println(cmds, err)
fmt.Println(set)
fmt.Println(get)
// Output: [SET key1 hello1: OK GET key1: hello1] <nil>
// SET key1 hello1: OK
// GET key1: hello1
}
func ExampleMulti() {
incr := func(tx *redis.Multi) ([]redis.Cmder, error) {
s, err := tx.Get("key").Result()
if err != nil && err != redis.Nil {
return nil, err
}
n, _ := strconv.ParseInt(s, 10, 64)
return tx.Exec(func() error {
tx.Set("key", strconv.FormatInt(n+1, 10))
return nil
})
}
client.Del("key")
tx := client.Multi()
defer tx.Close()
watch := tx.Watch("key")
_ = watch.Err()
for {
cmds, err := incr(tx)
if err == redis.TxFailedErr {
continue
} else if err != nil {
panic(err)
}
fmt.Println(cmds, err)
break
}
// Output: [SET key 1: OK] <nil>
}
func ExamplePubSub() {
pubsub := client.PubSub()
defer pubsub.Close()
err := pubsub.Subscribe("mychannel")
_ = err
msg, err := pubsub.Receive()
fmt.Println(msg, err)
pub := client.Publish("mychannel", "hello")
_ = pub.Err()
msg, err = pubsub.Receive()
fmt.Println(msg, err)
// Output: subscribe: mychannel <nil>
// Message<mychannel: hello> <nil>
}
func ExampleScript() {
setnx := redis.NewScript(`
if redis.call("get", KEYS[1]) == false then
redis.call("set", KEYS[1], ARGV[1])
return 1
end
return 0
`)
v1, err := setnx.Run(client, []string{"keynx"}, []string{"foo"}).Result()
fmt.Println(v1.(int64), err)
v2, err := setnx.Run(client, []string{"keynx"}, []string{"bar"}).Result()
fmt.Println(v2.(int64), err)
get := client.Get("keynx")
fmt.Println(get)
// Output: 1 <nil>
// 0 <nil>
// GET keynx: foo
}
func Example_customCommand() {
Get := func(client *redis.Client, key string) *redis.StringCmd {
cmd := redis.NewStringCmd("GET", key)
client.Process(cmd)
return cmd
}
v, err := Get(client, "key_does_not_exist").Result()
fmt.Printf("%q %s", v, err)
// Output: "" redis: nil
}

View File

@ -0,0 +1,5 @@
package redis
func (c *baseClient) Pool() pool {
return c.connPool
}

138
Godeps/_workspace/src/gopkg.in/redis.v2/multi.go generated vendored Normal file
View File

@ -0,0 +1,138 @@
package redis
import (
"errors"
"fmt"
)
var errDiscard = errors.New("redis: Discard can be used only inside Exec")
// Not thread-safe.
type Multi struct {
*Client
}
func (c *Client) Multi() *Multi {
return &Multi{
Client: &Client{
baseClient: &baseClient{
opt: c.opt,
connPool: newSingleConnPool(c.connPool, true),
},
},
}
}
func (c *Multi) Close() error {
if err := c.Unwatch().Err(); err != nil {
return err
}
return c.Client.Close()
}
func (c *Multi) Watch(keys ...string) *StatusCmd {
args := append([]string{"WATCH"}, keys...)
cmd := NewStatusCmd(args...)
c.Process(cmd)
return cmd
}
func (c *Multi) Unwatch(keys ...string) *StatusCmd {
args := append([]string{"UNWATCH"}, keys...)
cmd := NewStatusCmd(args...)
c.Process(cmd)
return cmd
}
func (c *Multi) Discard() error {
if c.cmds == nil {
return errDiscard
}
c.cmds = c.cmds[:1]
return nil
}
// Exec always returns a list of commands. If the transaction fails,
// TxFailedErr is returned. Otherwise Exec returns the error of the
// first failed command, or nil.
func (c *Multi) Exec(f func() error) ([]Cmder, error) {
c.cmds = []Cmder{NewStatusCmd("MULTI")}
if err := f(); err != nil {
return nil, err
}
c.cmds = append(c.cmds, NewSliceCmd("EXEC"))
cmds := c.cmds
c.cmds = nil
if len(cmds) == 2 {
return []Cmder{}, nil
}
cn, err := c.conn()
if err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return cmds[1 : len(cmds)-1], err
}
err = c.execCmds(cn, cmds)
if err != nil {
c.freeConn(cn, err)
return cmds[1 : len(cmds)-1], err
}
c.putConn(cn)
return cmds[1 : len(cmds)-1], nil
}
func (c *Multi) execCmds(cn *conn, cmds []Cmder) error {
err := c.writeCmd(cn, cmds...)
if err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
statusCmd := NewStatusCmd()
// Omit last command (EXEC).
cmdsLen := len(cmds) - 1
// Parse queued replies.
for i := 0; i < cmdsLen; i++ {
if err := statusCmd.parseReply(cn.rd); err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
}
// Parse number of replies.
line, err := readLine(cn.rd)
if err != nil {
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
if line[0] != '*' {
err := fmt.Errorf("redis: expected '*', but got line %q", line)
setCmdsErr(cmds[1:len(cmds)-1], err)
return err
}
if len(line) == 3 && line[1] == '-' && line[2] == '1' {
setCmdsErr(cmds[1:len(cmds)-1], TxFailedErr)
return TxFailedErr
}
var firstCmdErr error
// Parse replies.
// Loop starts from 1 to omit MULTI cmd.
for i := 1; i < cmdsLen; i++ {
cmd := cmds[i]
if err := cmd.parseReply(cn.rd); err != nil {
if firstCmdErr == nil {
firstCmdErr = err
}
}
}
return firstCmdErr
}

262
Godeps/_workspace/src/gopkg.in/redis.v2/parser.go generated vendored Normal file
View File

@ -0,0 +1,262 @@
package redis
import (
"errors"
"fmt"
"strconv"
"gopkg.in/bufio.v1"
)
type multiBulkParser func(rd *bufio.Reader, n int64) (interface{}, error)
var (
errReaderTooSmall = errors.New("redis: reader is too small")
)
//------------------------------------------------------------------------------
func appendArgs(buf []byte, args []string) []byte {
buf = append(buf, '*')
buf = strconv.AppendUint(buf, uint64(len(args)), 10)
buf = append(buf, '\r', '\n')
for _, arg := range args {
buf = append(buf, '$')
buf = strconv.AppendUint(buf, uint64(len(arg)), 10)
buf = append(buf, '\r', '\n')
buf = append(buf, arg...)
buf = append(buf, '\r', '\n')
}
return buf
}
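// Worked example (added for illustration, not part of the vendored file):
// appendArgs(nil, []string{"SET", "key", "value"}) yields the RESP request
// "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n", i.e. an array header
// followed by one length-prefixed bulk string per argument.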
//------------------------------------------------------------------------------
func readLine(rd *bufio.Reader) ([]byte, error) {
line, isPrefix, err := rd.ReadLine()
if err != nil {
return line, err
}
if isPrefix {
return line, errReaderTooSmall
}
return line, nil
}
func readN(rd *bufio.Reader, n int) ([]byte, error) {
b, err := rd.ReadN(n)
if err == bufio.ErrBufferFull {
tmp := make([]byte, n)
r := copy(tmp, b)
b = tmp
for {
nn, err := rd.Read(b[r:])
r += nn
if r >= n {
// Ignore error if we read enough.
break
}
if err != nil {
return nil, err
}
}
} else if err != nil {
return nil, err
}
return b, nil
}
//------------------------------------------------------------------------------
func parseReq(rd *bufio.Reader) ([]string, error) {
line, err := readLine(rd)
if err != nil {
return nil, err
}
if line[0] != '*' {
return []string{string(line)}, nil
}
numReplies, err := strconv.ParseInt(string(line[1:]), 10, 64)
if err != nil {
return nil, err
}
args := make([]string, 0, numReplies)
for i := int64(0); i < numReplies; i++ {
line, err = readLine(rd)
if err != nil {
return nil, err
}
if line[0] != '$' {
return nil, fmt.Errorf("redis: expected '$', but got %q", line)
}
argLen, err := strconv.ParseInt(string(line[1:]), 10, 32)
if err != nil {
return nil, err
}
arg, err := readN(rd, int(argLen)+2)
if err != nil {
return nil, err
}
args = append(args, string(arg[:argLen]))
}
return args, nil
}
//------------------------------------------------------------------------------
func parseReply(rd *bufio.Reader, p multiBulkParser) (interface{}, error) {
line, err := readLine(rd)
if err != nil {
return nil, err
}
switch line[0] {
case '-':
return nil, errorf(string(line[1:]))
case '+':
return string(line[1:]), nil
case ':':
v, err := strconv.ParseInt(string(line[1:]), 10, 64)
if err != nil {
return nil, err
}
return v, nil
case '$':
if len(line) == 3 && line[1] == '-' && line[2] == '1' {
return nil, Nil
}
replyLen, err := strconv.Atoi(string(line[1:]))
if err != nil {
return nil, err
}
b, err := readN(rd, replyLen+2)
if err != nil {
return nil, err
}
return string(b[:replyLen]), nil
case '*':
if len(line) == 3 && line[1] == '-' && line[2] == '1' {
return nil, Nil
}
repliesNum, err := strconv.ParseInt(string(line[1:]), 10, 64)
if err != nil {
return nil, err
}
return p(rd, repliesNum)
}
return nil, fmt.Errorf("redis: can't parse %q", line)
}
func parseSlice(rd *bufio.Reader, n int64) (interface{}, error) {
vals := make([]interface{}, 0, n)
for i := int64(0); i < n; i++ {
v, err := parseReply(rd, parseSlice)
if err == Nil {
vals = append(vals, nil)
} else if err != nil {
return nil, err
} else {
vals = append(vals, v)
}
}
return vals, nil
}
func parseStringSlice(rd *bufio.Reader, n int64) (interface{}, error) {
vals := make([]string, 0, n)
for i := int64(0); i < n; i++ {
viface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
v, ok := viface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", viface)
}
vals = append(vals, v)
}
return vals, nil
}
func parseBoolSlice(rd *bufio.Reader, n int64) (interface{}, error) {
vals := make([]bool, 0, n)
for i := int64(0); i < n; i++ {
viface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
v, ok := viface.(int64)
if !ok {
return nil, fmt.Errorf("got %T, expected int64", viface)
}
vals = append(vals, v == 1)
}
return vals, nil
}
func parseStringStringMap(rd *bufio.Reader, n int64) (interface{}, error) {
m := make(map[string]string, n/2)
for i := int64(0); i < n; i += 2 {
keyiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
key, ok := keyiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", keyiface)
}
valueiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
value, ok := valueiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", valueiface)
}
m[key] = value
}
return m, nil
}
func parseZSlice(rd *bufio.Reader, n int64) (interface{}, error) {
zz := make([]Z, n/2)
for i := int64(0); i < n; i += 2 {
z := &zz[i/2]
memberiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
member, ok := memberiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", memberiface)
}
z.Member = member
scoreiface, err := parseReply(rd, nil)
if err != nil {
return nil, err
}
scorestr, ok := scoreiface.(string)
if !ok {
return nil, fmt.Errorf("got %T, expected string", scoreiface)
}
score, err := strconv.ParseFloat(scorestr, 64)
if err != nil {
return nil, err
}
z.Score = score
}
return zz, nil
}

54
Godeps/_workspace/src/gopkg.in/redis.v2/parser_test.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
package redis
import (
"testing"
"gopkg.in/bufio.v1"
)
func BenchmarkParseReplyStatus(b *testing.B) {
benchmarkParseReply(b, "+OK\r\n", nil, false)
}
func BenchmarkParseReplyInt(b *testing.B) {
benchmarkParseReply(b, ":1\r\n", nil, false)
}
func BenchmarkParseReplyError(b *testing.B) {
benchmarkParseReply(b, "-Error message\r\n", nil, true)
}
func BenchmarkParseReplyString(b *testing.B) {
benchmarkParseReply(b, "$5\r\nhello\r\n", nil, false)
}
func BenchmarkParseReplySlice(b *testing.B) {
benchmarkParseReply(b, "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n", parseSlice, false)
}
func benchmarkParseReply(b *testing.B, reply string, p multiBulkParser, wanterr bool) {
b.StopTimer()
buf := &bufio.Buffer{}
rd := bufio.NewReader(buf)
for i := 0; i < b.N; i++ {
buf.WriteString(reply)
}
b.StartTimer()
for i := 0; i < b.N; i++ {
_, err := parseReply(rd, p)
if !wanterr && err != nil {
panic(err)
}
}
}
func BenchmarkAppendArgs(b *testing.B) {
buf := make([]byte, 0, 64)
args := []string{"hello", "world", "foo", "bar"}
for i := 0; i < b.N; i++ {
appendArgs(buf, args)
}
}

91
Godeps/_workspace/src/gopkg.in/redis.v2/pipeline.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package redis
// Not thread-safe.
type Pipeline struct {
*Client
closed bool
}
func (c *Client) Pipeline() *Pipeline {
return &Pipeline{
Client: &Client{
baseClient: &baseClient{
opt: c.opt,
connPool: c.connPool,
cmds: make([]Cmder, 0),
},
},
}
}
func (c *Client) Pipelined(f func(*Pipeline) error) ([]Cmder, error) {
pc := c.Pipeline()
if err := f(pc); err != nil {
return nil, err
}
cmds, err := pc.Exec()
pc.Close()
return cmds, err
}
func (c *Pipeline) Close() error {
c.closed = true
return nil
}
func (c *Pipeline) Discard() error {
if c.closed {
return errClosed
}
c.cmds = c.cmds[:0]
return nil
}
// Exec always returns a list of commands and the error of the first
// failed command, if any.
func (c *Pipeline) Exec() ([]Cmder, error) {
if c.closed {
return nil, errClosed
}
cmds := c.cmds
c.cmds = make([]Cmder, 0)
if len(cmds) == 0 {
return []Cmder{}, nil
}
cn, err := c.conn()
if err != nil {
setCmdsErr(cmds, err)
return cmds, err
}
if err := c.execCmds(cn, cmds); err != nil {
c.freeConn(cn, err)
return cmds, err
}
c.putConn(cn)
return cmds, nil
}
func (c *Pipeline) execCmds(cn *conn, cmds []Cmder) error {
if err := c.writeCmd(cn, cmds...); err != nil {
setCmdsErr(cmds, err)
return err
}
var firstCmdErr error
for _, cmd := range cmds {
if err := cmd.parseReply(cn.rd); err != nil {
if firstCmdErr == nil {
firstCmdErr = err
}
}
}
return firstCmdErr
}

405
Godeps/_workspace/src/gopkg.in/redis.v2/pool.go generated vendored Normal file
View File

@ -0,0 +1,405 @@
package redis
import (
"container/list"
"errors"
"log"
"net"
"sync"
"time"
"gopkg.in/bufio.v1"
)
var (
errClosed = errors.New("redis: client is closed")
errRateLimited = errors.New("redis: you open connections too fast")
)
var (
zeroTime = time.Time{}
)
type pool interface {
Get() (*conn, bool, error)
Put(*conn) error
Remove(*conn) error
Len() int
Size() int
Close() error
Filter(func(*conn) bool)
}
//------------------------------------------------------------------------------
type conn struct {
netcn net.Conn
rd *bufio.Reader
buf []byte
inUse bool
usedAt time.Time
readTimeout time.Duration
writeTimeout time.Duration
elem *list.Element
}
func newConnFunc(dial func() (net.Conn, error)) func() (*conn, error) {
return func() (*conn, error) {
netcn, err := dial()
if err != nil {
return nil, err
}
cn := &conn{
netcn: netcn,
buf: make([]byte, 0, 64),
}
cn.rd = bufio.NewReader(cn)
return cn, nil
}
}
func (cn *conn) Read(b []byte) (int, error) {
if cn.readTimeout != 0 {
cn.netcn.SetReadDeadline(time.Now().Add(cn.readTimeout))
} else {
cn.netcn.SetReadDeadline(zeroTime)
}
return cn.netcn.Read(b)
}
func (cn *conn) Write(b []byte) (int, error) {
if cn.writeTimeout != 0 {
cn.netcn.SetWriteDeadline(time.Now().Add(cn.writeTimeout))
} else {
cn.netcn.SetWriteDeadline(zeroTime)
}
return cn.netcn.Write(b)
}
func (cn *conn) RemoteAddr() net.Addr {
return cn.netcn.RemoteAddr()
}
func (cn *conn) Close() error {
return cn.netcn.Close()
}
//------------------------------------------------------------------------------
type connPool struct {
dial func() (*conn, error)
rl *rateLimiter
opt *options
cond *sync.Cond
conns *list.List
idleNum int
closed bool
}
func newConnPool(dial func() (*conn, error), opt *options) *connPool {
return &connPool{
dial: dial,
rl: newRateLimiter(time.Second, 2*opt.PoolSize),
opt: opt,
cond: sync.NewCond(&sync.Mutex{}),
conns: list.New(),
}
}
func (p *connPool) new() (*conn, error) {
if !p.rl.Check() {
return nil, errRateLimited
}
return p.dial()
}
func (p *connPool) Get() (*conn, bool, error) {
p.cond.L.Lock()
if p.closed {
p.cond.L.Unlock()
return nil, false, errClosed
}
if p.opt.IdleTimeout > 0 {
for el := p.conns.Front(); el != nil; el = el.Next() {
cn := el.Value.(*conn)
if cn.inUse {
break
}
if time.Since(cn.usedAt) > p.opt.IdleTimeout {
if err := p.remove(cn); err != nil {
log.Printf("remove failed: %s", err)
}
}
}
}
for p.conns.Len() >= p.opt.PoolSize && p.idleNum == 0 {
p.cond.Wait()
}
if p.idleNum > 0 {
elem := p.conns.Front()
cn := elem.Value.(*conn)
if cn.inUse {
panic("pool: precondition failed")
}
cn.inUse = true
p.conns.MoveToBack(elem)
p.idleNum--
p.cond.L.Unlock()
return cn, false, nil
}
if p.conns.Len() < p.opt.PoolSize {
cn, err := p.new()
if err != nil {
p.cond.L.Unlock()
return nil, false, err
}
cn.inUse = true
cn.elem = p.conns.PushBack(cn)
p.cond.L.Unlock()
return cn, true, nil
}
panic("not reached")
}
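// Note (added for illustration): under the lock, Get has three outcomes: reuse
// the idle connection at the front of the list, dial a new connection while the
// pool is still below PoolSize, or block on the condition variable until Put or
// Remove signals that a slot has freed up.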
func (p *connPool) Put(cn *conn) error {
if cn.rd.Buffered() != 0 {
b, _ := cn.rd.ReadN(cn.rd.Buffered())
log.Printf("redis: connection has unread data: %q", b)
return p.Remove(cn)
}
if p.opt.IdleTimeout > 0 {
cn.usedAt = time.Now()
}
p.cond.L.Lock()
if p.closed {
p.cond.L.Unlock()
return errClosed
}
cn.inUse = false
p.conns.MoveToFront(cn.elem)
p.idleNum++
p.cond.Signal()
p.cond.L.Unlock()
return nil
}
func (p *connPool) Remove(cn *conn) error {
p.cond.L.Lock()
if p.closed {
// Noop, connection is already closed.
p.cond.L.Unlock()
return nil
}
err := p.remove(cn)
p.cond.Signal()
p.cond.L.Unlock()
return err
}
func (p *connPool) remove(cn *conn) error {
p.conns.Remove(cn.elem)
cn.elem = nil
if !cn.inUse {
p.idleNum--
}
return cn.Close()
}
// Len returns number of idle connections.
func (p *connPool) Len() int {
defer p.cond.L.Unlock()
p.cond.L.Lock()
return p.idleNum
}
// Size returns number of connections in the pool.
func (p *connPool) Size() int {
defer p.cond.L.Unlock()
p.cond.L.Lock()
return p.conns.Len()
}
func (p *connPool) Filter(f func(*conn) bool) {
p.cond.L.Lock()
for el, next := p.conns.Front(), p.conns.Front(); el != nil; el = next {
next = el.Next()
cn := el.Value.(*conn)
if !f(cn) {
p.remove(cn)
}
}
p.cond.L.Unlock()
}
func (p *connPool) Close() error {
defer p.cond.L.Unlock()
p.cond.L.Lock()
if p.closed {
return nil
}
p.closed = true
p.rl.Close()
var retErr error
for {
e := p.conns.Front()
if e == nil {
break
}
if err := p.remove(e.Value.(*conn)); err != nil {
log.Printf("cn.Close failed: %s", err)
retErr = err
}
}
return retErr
}
//------------------------------------------------------------------------------
type singleConnPool struct {
pool pool
cnMtx sync.Mutex
cn *conn
reusable bool
closed bool
}
func newSingleConnPool(pool pool, reusable bool) *singleConnPool {
return &singleConnPool{
pool: pool,
reusable: reusable,
}
}
func (p *singleConnPool) SetConn(cn *conn) {
p.cnMtx.Lock()
p.cn = cn
p.cnMtx.Unlock()
}
func (p *singleConnPool) Get() (*conn, bool, error) {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.closed {
return nil, false, errClosed
}
if p.cn != nil {
return p.cn, false, nil
}
cn, isNew, err := p.pool.Get()
if err != nil {
return nil, false, err
}
p.cn = cn
return p.cn, isNew, nil
}
func (p *singleConnPool) Put(cn *conn) error {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn != cn {
panic("p.cn != cn")
}
if p.closed {
return errClosed
}
return nil
}
func (p *singleConnPool) put() error {
err := p.pool.Put(p.cn)
p.cn = nil
return err
}
func (p *singleConnPool) Remove(cn *conn) error {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn == nil {
panic("p.cn == nil")
}
if p.cn != cn {
panic("p.cn != cn")
}
if p.closed {
return errClosed
}
return p.remove()
}
func (p *singleConnPool) remove() error {
err := p.pool.Remove(p.cn)
p.cn = nil
return err
}
func (p *singleConnPool) Len() int {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn == nil {
return 0
}
return 1
}
func (p *singleConnPool) Size() int {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.cn == nil {
return 0
}
return 1
}
func (p *singleConnPool) Filter(f func(*conn) bool) {
p.cnMtx.Lock()
if p.cn != nil {
if !f(p.cn) {
p.remove()
}
}
p.cnMtx.Unlock()
}
func (p *singleConnPool) Close() error {
defer p.cnMtx.Unlock()
p.cnMtx.Lock()
if p.closed {
return nil
}
p.closed = true
var err error
if p.cn != nil {
if p.reusable {
err = p.put()
} else {
err = p.remove()
}
}
return err
}

134
Godeps/_workspace/src/gopkg.in/redis.v2/pubsub.go generated vendored Normal file
View File

@ -0,0 +1,134 @@
package redis
import (
"fmt"
"time"
)
// Not thread-safe.
type PubSub struct {
*baseClient
}
func (c *Client) PubSub() *PubSub {
return &PubSub{
baseClient: &baseClient{
opt: c.opt,
connPool: newSingleConnPool(c.connPool, false),
},
}
}
func (c *Client) Publish(channel, message string) *IntCmd {
req := NewIntCmd("PUBLISH", channel, message)
c.Process(req)
return req
}
type Message struct {
Channel string
Payload string
}
func (m *Message) String() string {
return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
}
type PMessage struct {
Channel string
Pattern string
Payload string
}
func (m *PMessage) String() string {
return fmt.Sprintf("PMessage<%s: %s>", m.Channel, m.Payload)
}
type Subscription struct {
Kind string
Channel string
Count int
}
func (m *Subscription) String() string {
return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
}
func (c *PubSub) Receive() (interface{}, error) {
return c.ReceiveTimeout(0)
}
func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
cn, err := c.conn()
if err != nil {
return nil, err
}
cn.readTimeout = timeout
cmd := NewSliceCmd()
if err := cmd.parseReply(cn.rd); err != nil {
return nil, err
}
reply := cmd.Val()
msgName := reply[0].(string)
switch msgName {
case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
return &Subscription{
Kind: msgName,
Channel: reply[1].(string),
Count: int(reply[2].(int64)),
}, nil
case "message":
return &Message{
Channel: reply[1].(string),
Payload: reply[2].(string),
}, nil
case "pmessage":
return &PMessage{
Pattern: reply[1].(string),
Channel: reply[2].(string),
Payload: reply[3].(string),
}, nil
}
return nil, fmt.Errorf("redis: unsupported message name: %q", msgName)
}
func (c *PubSub) subscribe(cmd string, channels ...string) error {
cn, err := c.conn()
if err != nil {
return err
}
args := append([]string{cmd}, channels...)
req := NewSliceCmd(args...)
return c.writeCmd(cn, req)
}
func (c *PubSub) Subscribe(channels ...string) error {
return c.subscribe("SUBSCRIBE", channels...)
}
func (c *PubSub) PSubscribe(patterns ...string) error {
return c.subscribe("PSUBSCRIBE", patterns...)
}
func (c *PubSub) unsubscribe(cmd string, channels ...string) error {
cn, err := c.conn()
if err != nil {
return err
}
args := append([]string{cmd}, channels...)
req := NewSliceCmd(args...)
return c.writeCmd(cn, req)
}
func (c *PubSub) Unsubscribe(channels ...string) error {
return c.unsubscribe("UNSUBSCRIBE", channels...)
}
func (c *PubSub) PUnsubscribe(patterns ...string) error {
return c.unsubscribe("PUNSUBSCRIBE", patterns...)
}
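// Note (added for illustration): Subscribe/PSubscribe and their unsubscribe
// counterparts only write the command on the dedicated connection; the
// confirmation, like every later message, is delivered through
// Receive/ReceiveTimeout as a *Subscription, *Message or *PMessage value.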

53
Godeps/_workspace/src/gopkg.in/redis.v2/rate_limit.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
package redis
import (
"sync/atomic"
"time"
)
type rateLimiter struct {
v int64
_closed int64
}
func newRateLimiter(limit time.Duration, bucketSize int) *rateLimiter {
rl := &rateLimiter{
v: int64(bucketSize),
}
go rl.loop(limit, int64(bucketSize))
return rl
}
func (rl *rateLimiter) loop(limit time.Duration, bucketSize int64) {
for {
if rl.closed() {
break
}
if v := atomic.LoadInt64(&rl.v); v < bucketSize {
atomic.AddInt64(&rl.v, 1)
}
time.Sleep(limit)
}
}
func (rl *rateLimiter) Check() bool {
for {
if v := atomic.LoadInt64(&rl.v); v > 0 {
if atomic.CompareAndSwapInt64(&rl.v, v, v-1) {
return true
}
} else {
return false
}
}
}
func (rl *rateLimiter) Close() error {
atomic.StoreInt64(&rl._closed, 1)
return nil
}
func (rl *rateLimiter) closed() bool {
return atomic.LoadInt64(&rl._closed) == 1
}
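// Usage note (added for illustration): newConnPool creates this limiter as
// newRateLimiter(time.Second, 2*opt.PoolSize), i.e. a token bucket that starts
// full and regains one token per second, so bursts of new connections beyond
// roughly twice the pool size fail fast with errRateLimited instead of dialing.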

View File

@ -0,0 +1,31 @@
package redis
import (
"sync"
"testing"
"time"
)
func TestRateLimiter(t *testing.T) {
var n = 100000
if testing.Short() {
n = 1000
}
rl := newRateLimiter(time.Minute, n)
wg := &sync.WaitGroup{}
for i := 0; i < n; i++ {
wg.Add(1)
go func() {
if !rl.Check() {
panic("check failed")
}
wg.Done()
}()
}
wg.Wait()
if rl.Check() && rl.Check() {
t.Fatal("check passed")
}
}

231
Godeps/_workspace/src/gopkg.in/redis.v2/redis.go generated vendored Normal file
View File

@ -0,0 +1,231 @@
package redis
import (
"log"
"net"
"time"
)
type baseClient struct {
connPool pool
opt *options
cmds []Cmder
}
func (c *baseClient) writeCmd(cn *conn, cmds ...Cmder) error {
buf := cn.buf[:0]
for _, cmd := range cmds {
buf = appendArgs(buf, cmd.args())
}
_, err := cn.Write(buf)
return err
}
func (c *baseClient) conn() (*conn, error) {
cn, isNew, err := c.connPool.Get()
if err != nil {
return nil, err
}
if isNew {
if err := c.initConn(cn); err != nil {
c.removeConn(cn)
return nil, err
}
}
return cn, nil
}
func (c *baseClient) initConn(cn *conn) error {
if c.opt.Password == "" && c.opt.DB == 0 {
return nil
}
pool := newSingleConnPool(c.connPool, false)
pool.SetConn(cn)
// Client is not closed because we want to reuse underlying connection.
client := &Client{
baseClient: &baseClient{
opt: c.opt,
connPool: pool,
},
}
if c.opt.Password != "" {
if err := client.Auth(c.opt.Password).Err(); err != nil {
return err
}
}
if c.opt.DB > 0 {
if err := client.Select(c.opt.DB).Err(); err != nil {
return err
}
}
return nil
}
func (c *baseClient) freeConn(cn *conn, ei error) error {
if cn.rd.Buffered() > 0 {
return c.connPool.Remove(cn)
}
if _, ok := ei.(redisError); ok {
return c.connPool.Put(cn)
}
return c.connPool.Remove(cn)
}
func (c *baseClient) removeConn(cn *conn) {
if err := c.connPool.Remove(cn); err != nil {
log.Printf("pool.Remove failed: %s", err)
}
}
func (c *baseClient) putConn(cn *conn) {
if err := c.connPool.Put(cn); err != nil {
log.Printf("pool.Put failed: %s", err)
}
}
func (c *baseClient) Process(cmd Cmder) {
if c.cmds == nil {
c.run(cmd)
} else {
c.cmds = append(c.cmds, cmd)
}
}
func (c *baseClient) run(cmd Cmder) {
cn, err := c.conn()
if err != nil {
cmd.setErr(err)
return
}
if timeout := cmd.writeTimeout(); timeout != nil {
cn.writeTimeout = *timeout
} else {
cn.writeTimeout = c.opt.WriteTimeout
}
if timeout := cmd.readTimeout(); timeout != nil {
cn.readTimeout = *timeout
} else {
cn.readTimeout = c.opt.ReadTimeout
}
if err := c.writeCmd(cn, cmd); err != nil {
c.freeConn(cn, err)
cmd.setErr(err)
return
}
if err := cmd.parseReply(cn.rd); err != nil {
c.freeConn(cn, err)
return
}
c.putConn(cn)
}
// Close closes the client, releasing any open resources.
func (c *baseClient) Close() error {
return c.connPool.Close()
}
//------------------------------------------------------------------------------
type options struct {
Password string
DB int64
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
IdleTimeout time.Duration
}
type Options struct {
Network string
Addr string
// Dialer creates a new network connection and takes priority over
// the Network and Addr options.
Dialer func() (net.Conn, error)
Password string
DB int64
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
IdleTimeout time.Duration
}
func (opt *Options) getPoolSize() int {
if opt.PoolSize == 0 {
return 10
}
return opt.PoolSize
}
func (opt *Options) getDialTimeout() time.Duration {
if opt.DialTimeout == 0 {
return 5 * time.Second
}
return opt.DialTimeout
}
func (opt *Options) options() *options {
return &options{
DB: opt.DB,
Password: opt.Password,
DialTimeout: opt.getDialTimeout(),
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.getPoolSize(),
IdleTimeout: opt.IdleTimeout,
}
}
type Client struct {
*baseClient
}
func NewClient(clOpt *Options) *Client {
opt := clOpt.options()
dialer := clOpt.Dialer
if dialer == nil {
dialer = func() (net.Conn, error) {
return net.DialTimeout(clOpt.Network, clOpt.Addr, opt.DialTimeout)
}
}
return &Client{
baseClient: &baseClient{
opt: opt,
connPool: newConnPool(newConnFunc(dialer), opt),
},
}
}
// Deprecated. Use NewClient instead.
func NewTCPClient(opt *Options) *Client {
opt.Network = "tcp"
return NewClient(opt)
}
// Deprecated. Use NewClient instead.
func NewUnixClient(opt *Options) *Client {
opt.Network = "unix"
return NewClient(opt)
}
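As the comment on Options notes, a caller-supplied Dialer takes priority over Network and Addr, so the client can be pointed at any net.Conn source. A minimal caller-side sketch (the unix socket path is a made-up example, not something this commit configures):

package redis_test

import (
	"net"

	"gopkg.in/redis.v2"
)

// newUnixSocketClient builds a client that connects through a custom Dialer
// instead of Network/Addr; the socket path below is illustrative only.
func newUnixSocketClient() *redis.Client {
	return redis.NewClient(&redis.Options{
		Dialer: func() (net.Conn, error) {
			return net.Dial("unix", "/tmp/redis.sock")
		},
	})
}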

3333
Godeps/_workspace/src/gopkg.in/redis.v2/redis_test.go generated vendored Normal file

File diff suppressed because it is too large

52
Godeps/_workspace/src/gopkg.in/redis.v2/script.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package redis
import (
"crypto/sha1"
"encoding/hex"
"io"
"strings"
)
type scripter interface {
Eval(script string, keys []string, args []string) *Cmd
EvalSha(sha1 string, keys []string, args []string) *Cmd
ScriptExists(scripts ...string) *BoolSliceCmd
ScriptLoad(script string) *StringCmd
}
type Script struct {
src, hash string
}
func NewScript(src string) *Script {
h := sha1.New()
io.WriteString(h, src)
return &Script{
src: src,
hash: hex.EncodeToString(h.Sum(nil)),
}
}
func (s *Script) Load(c scripter) *StringCmd {
return c.ScriptLoad(s.src)
}
func (s *Script) Exists(c scripter) *BoolSliceCmd {
return c.ScriptExists(s.src)
}
func (s *Script) Eval(c scripter, keys []string, args []string) *Cmd {
return c.Eval(s.src, keys, args)
}
func (s *Script) EvalSha(c scripter, keys []string, args []string) *Cmd {
return c.EvalSha(s.hash, keys, args)
}
func (s *Script) Run(c *Client, keys []string, args []string) *Cmd {
r := s.EvalSha(c, keys, args)
if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
return s.Eval(c, keys, args)
}
return r
}

291
Godeps/_workspace/src/gopkg.in/redis.v2/sentinel.go generated vendored Normal file
View File

@ -0,0 +1,291 @@
package redis
import (
"errors"
"log"
"net"
"strings"
"sync"
"time"
)
//------------------------------------------------------------------------------
type FailoverOptions struct {
MasterName string
SentinelAddrs []string
Password string
DB int64
PoolSize int
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
IdleTimeout time.Duration
}
func (opt *FailoverOptions) getPoolSize() int {
if opt.PoolSize == 0 {
return 10
}
return opt.PoolSize
}
func (opt *FailoverOptions) getDialTimeout() time.Duration {
if opt.DialTimeout == 0 {
return 5 * time.Second
}
return opt.DialTimeout
}
func (opt *FailoverOptions) options() *options {
return &options{
DB: opt.DB,
Password: opt.Password,
DialTimeout: opt.getDialTimeout(),
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.getPoolSize(),
IdleTimeout: opt.IdleTimeout,
}
}
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
opt := failoverOpt.options()
failover := &sentinelFailover{
masterName: failoverOpt.MasterName,
sentinelAddrs: failoverOpt.SentinelAddrs,
opt: opt,
}
return &Client{
baseClient: &baseClient{
opt: opt,
connPool: failover.Pool(),
},
}
}
//------------------------------------------------------------------------------
type sentinelClient struct {
*baseClient
}
func newSentinel(clOpt *Options) *sentinelClient {
opt := clOpt.options()
opt.Password = ""
opt.DB = 0
dialer := func() (net.Conn, error) {
return net.DialTimeout("tcp", clOpt.Addr, opt.DialTimeout)
}
return &sentinelClient{
baseClient: &baseClient{
opt: opt,
connPool: newConnPool(newConnFunc(dialer), opt),
},
}
}
func (c *sentinelClient) PubSub() *PubSub {
return &PubSub{
baseClient: &baseClient{
opt: c.opt,
connPool: newSingleConnPool(c.connPool, false),
},
}
}
func (c *sentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
cmd := NewStringSliceCmd("SENTINEL", "get-master-addr-by-name", name)
c.Process(cmd)
return cmd
}
func (c *sentinelClient) Sentinels(name string) *SliceCmd {
cmd := NewSliceCmd("SENTINEL", "sentinels", name)
c.Process(cmd)
return cmd
}
type sentinelFailover struct {
masterName string
sentinelAddrs []string
opt *options
pool pool
poolOnce sync.Once
lock sync.RWMutex
_sentinel *sentinelClient
}
func (d *sentinelFailover) dial() (net.Conn, error) {
addr, err := d.MasterAddr()
if err != nil {
return nil, err
}
return net.DialTimeout("tcp", addr, d.opt.DialTimeout)
}
func (d *sentinelFailover) Pool() pool {
d.poolOnce.Do(func() {
d.pool = newConnPool(newConnFunc(d.dial), d.opt)
})
return d.pool
}
func (d *sentinelFailover) MasterAddr() (string, error) {
defer d.lock.Unlock()
d.lock.Lock()
// Try last working sentinel.
if d._sentinel != nil {
addr, err := d._sentinel.GetMasterAddrByName(d.masterName).Result()
if err != nil {
log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
d.resetSentinel()
} else {
addr := net.JoinHostPort(addr[0], addr[1])
log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
return addr, nil
}
}
for i, sentinelAddr := range d.sentinelAddrs {
sentinel := newSentinel(&Options{
Addr: sentinelAddr,
DB: d.opt.DB,
Password: d.opt.Password,
DialTimeout: d.opt.DialTimeout,
ReadTimeout: d.opt.ReadTimeout,
WriteTimeout: d.opt.WriteTimeout,
PoolSize: d.opt.PoolSize,
IdleTimeout: d.opt.IdleTimeout,
})
masterAddr, err := sentinel.GetMasterAddrByName(d.masterName).Result()
if err != nil {
log.Printf("redis-sentinel: GetMasterAddrByName %q failed: %s", d.masterName, err)
sentinel.Close()
continue
}
// Push working sentinel to the top.
d.sentinelAddrs[0], d.sentinelAddrs[i] = d.sentinelAddrs[i], d.sentinelAddrs[0]
d.setSentinel(sentinel)
addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
log.Printf("redis-sentinel: %q addr is %s", d.masterName, addr)
return addr, nil
}
return "", errors.New("redis: all sentinels are unreachable")
}
func (d *sentinelFailover) setSentinel(sentinel *sentinelClient) {
d.discoverSentinels(sentinel)
d._sentinel = sentinel
go d.listen()
}
func (d *sentinelFailover) discoverSentinels(sentinel *sentinelClient) {
sentinels, err := sentinel.Sentinels(d.masterName).Result()
if err != nil {
log.Printf("redis-sentinel: Sentinels %q failed: %s", d.masterName, err)
return
}
for _, sentinel := range sentinels {
vals := sentinel.([]interface{})
for i := 0; i < len(vals); i += 2 {
key := vals[i].(string)
if key == "name" {
sentinelAddr := vals[i+1].(string)
if !contains(d.sentinelAddrs, sentinelAddr) {
log.Printf(
"redis-sentinel: discovered new %q sentinel: %s",
d.masterName, sentinelAddr,
)
d.sentinelAddrs = append(d.sentinelAddrs, sentinelAddr)
}
}
}
}
}
func (d *sentinelFailover) listen() {
var pubsub *PubSub
for {
if pubsub == nil {
pubsub = d._sentinel.PubSub()
if err := pubsub.Subscribe("+switch-master"); err != nil {
log.Printf("redis-sentinel: Subscribe failed: %s", err)
d.lock.Lock()
d.resetSentinel()
d.lock.Unlock()
return
}
}
msgIface, err := pubsub.Receive()
if err != nil {
log.Printf("redis-sentinel: Receive failed: %s", err)
pubsub.Close()
return
}
switch msg := msgIface.(type) {
case *Message:
switch msg.Channel {
case "+switch-master":
parts := strings.Split(msg.Payload, " ")
if parts[0] != d.masterName {
log.Printf("redis-sentinel: ignore new %s addr", parts[0])
continue
}
addr := net.JoinHostPort(parts[3], parts[4])
log.Printf(
"redis-sentinel: new %q addr is %s",
d.masterName, addr,
)
d.pool.Filter(func(cn *conn) bool {
if cn.RemoteAddr().String() != addr {
log.Printf(
"redis-sentinel: closing connection to old master %s",
cn.RemoteAddr(),
)
return false
}
return true
})
default:
log.Printf("redis-sentinel: unsupported message: %s", msg)
}
case *Subscription:
// Ignore.
default:
log.Printf("redis-sentinel: unsupported message: %s", msgIface)
}
}
}
func (d *sentinelFailover) resetSentinel() {
d._sentinel.Close()
d._sentinel = nil
}
func contains(slice []string, str string) bool {
for _, s := range slice {
if s == str {
return true
}
}
return false
}
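// Flow summary (added for illustration): the failover pool dials through
// sentinelFailover.dial, which resolves the current master via MasterAddr
// (last working sentinel first, then each configured address), while listen
// subscribes to "+switch-master" and uses Filter to drop pooled connections
// that still point at the old master address.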

View File

@ -0,0 +1,185 @@
package redis_test
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"text/template"
"time"
"gopkg.in/redis.v2"
)
func startRedis(port string) (*exec.Cmd, error) {
cmd := exec.Command("redis-server", "--port", port)
if false {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Start(); err != nil {
return nil, err
}
return cmd, nil
}
func startRedisSlave(port, slave string) (*exec.Cmd, error) {
cmd := exec.Command("redis-server", "--port", port, "--slaveof", "127.0.0.1", slave)
if false {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Start(); err != nil {
return nil, err
}
return cmd, nil
}
func startRedisSentinel(port, masterName, masterPort string) (*exec.Cmd, error) {
dir, err := ioutil.TempDir("", "sentinel")
if err != nil {
return nil, err
}
sentinelConfFilepath := filepath.Join(dir, "sentinel.conf")
tpl, err := template.New("sentinel.conf").Parse(sentinelConf)
if err != nil {
return nil, err
}
data := struct {
Port string
MasterName string
MasterPort string
}{
Port: port,
MasterName: masterName,
MasterPort: masterPort,
}
if err := writeTemplateToFile(sentinelConfFilepath, tpl, data); err != nil {
return nil, err
}
cmd := exec.Command("redis-server", sentinelConfFilepath, "--sentinel")
if true {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
if err := cmd.Start(); err != nil {
return nil, err
}
return cmd, nil
}
func writeTemplateToFile(path string, t *template.Template, data interface{}) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
return t.Execute(f, data)
}
func TestSentinel(t *testing.T) {
masterName := "mymaster"
masterPort := "8123"
slavePort := "8124"
sentinelPort := "8125"
masterCmd, err := startRedis(masterPort)
if err != nil {
t.Fatal(err)
}
defer masterCmd.Process.Kill()
// Wait for master to start.
time.Sleep(200 * time.Millisecond)
master := redis.NewTCPClient(&redis.Options{
Addr: ":" + masterPort,
})
if err := master.Ping().Err(); err != nil {
t.Fatal(err)
}
slaveCmd, err := startRedisSlave(slavePort, masterPort)
if err != nil {
t.Fatal(err)
}
defer slaveCmd.Process.Kill()
// Wait for slave to start.
time.Sleep(200 * time.Millisecond)
slave := redis.NewTCPClient(&redis.Options{
Addr: ":" + slavePort,
})
if err := slave.Ping().Err(); err != nil {
t.Fatal(err)
}
sentinelCmd, err := startRedisSentinel(sentinelPort, masterName, masterPort)
if err != nil {
t.Fatal(err)
}
defer sentinelCmd.Process.Kill()
// Wait for sentinel to start.
time.Sleep(200 * time.Millisecond)
sentinel := redis.NewTCPClient(&redis.Options{
Addr: ":" + sentinelPort,
})
if err := sentinel.Ping().Err(); err != nil {
t.Fatal(err)
}
defer sentinel.Shutdown()
client := redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: masterName,
SentinelAddrs: []string{":" + sentinelPort},
})
if err := client.Set("foo", "master").Err(); err != nil {
t.Fatal(err)
}
val, err := master.Get("foo").Result()
if err != nil {
t.Fatal(err)
}
if val != "master" {
t.Fatalf(`got %q, expected "master"`, val)
}
// Kill Redis master.
if err := masterCmd.Process.Kill(); err != nil {
t.Fatal(err)
}
if err := master.Ping().Err(); err == nil {
t.Fatalf("master was not killed")
}
// Wait for Redis sentinel to elect new master.
time.Sleep(5 * time.Second)
// Check that client picked up new master.
val, err = client.Get("foo").Result()
if err != nil {
t.Fatal(err)
}
if val != "master" {
t.Fatalf(`got %q, expected "master"`, val)
}
}
var sentinelConf = `
port {{ .Port }}
sentinel monitor {{ .MasterName }} 127.0.0.1 {{ .MasterPort }} 1
sentinel down-after-milliseconds {{ .MasterName }} 1000
sentinel failover-timeout {{ .MasterName }} 2000
sentinel parallel-syncs {{ .MasterName }} 1
`

View File

@ -0,0 +1,6 @@
port 26379
sentinel monitor master 127.0.0.1 6379 1
sentinel down-after-milliseconds master 2000
sentinel failover-timeout master 5000
sentinel parallel-syncs master 4

View File

@ -17,6 +17,7 @@ import (
"github.com/macaron-contrib/session"
_ "github.com/macaron-contrib/session/mysql"
_ "github.com/macaron-contrib/session/postgres"
_ "github.com/macaron-contrib/session/redis"
"github.com/grafana/grafana/pkg/api"
"github.com/grafana/grafana/pkg/api/static"

View File

@ -246,7 +246,7 @@ func NewConfigContext(config string) {
func readSessionConfig() {
sec := Cfg.Section("session")
SessionOptions = session.Options{}
SessionOptions.Provider = sec.Key("provider").In("memory", []string{"memory", "file", "redis", "mysql"})
SessionOptions.Provider = sec.Key("provider").In("memory", []string{"memory", "file", "redis", "mysql", "postgres"})
SessionOptions.ProviderConfig = strings.Trim(sec.Key("provider_config").String(), "\" ")
SessionOptions.CookieName = sec.Key("cookie_name").MustString("grafana_sess")
SessionOptions.CookiePath = AppSubUrl
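With the provider whitelist extended above, the [session] section of the Grafana configuration can now select the new backends. A hypothetical sketch (the provider names come from this diff; the provider_config formats belong to the macaron-contrib/session providers and are assumptions, not taken from this commit):

[session]
# one of: memory, file, redis, mysql, postgres
provider = redis
# redis (assumed format):    addr=127.0.0.1:6379,pool_size=100,db=0
# postgres (assumed format): user=grafana password=secret host=127.0.0.1 port=5432 dbname=grafana sslmode=disable
provider_config = addr=127.0.0.1:6379,pool_size=100,db=0
cookie_name = grafana_sess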