Use Go 1.5 vendoring instead of Godeps

Change made by:

- running `gvt fetch` on each of the packages mentioned in
  Godeps/Godeps.json (see the sketch after this list)
- `rm -rf Godeps`
- tweaking the build scripts to not mention Godeps
- tweaking the build scripts to test `./lib/...`, `./cmd/...` explicitly
  (to avoid testing vendor)
- tweaking the build scripts to not juggle GOPATH for Godeps and instead
  set GO15VENDOREXPERIMENT.

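As a concrete sketch of the fetch-and-remove steps, assuming gvt is
installed and run from the repository root, and assuming its `fetch
-revision` flag to pin the exact commits recorded in Godeps/Godeps.json
(only a few of the packages are shown):

    # One fetch per dependency, pinned to the revision from Godeps.json.
    gvt fetch -revision 74ddf82598bc4745b965729e9c6a463bedd33049 github.com/bkaradzic/go-lz4
    gvt fetch -revision 3c0690cca16228b97741327b1b6781397afbdb24 github.com/calmh/du
    gvt fetch -revision 5eb8d4684c4796dd36c74f6452f2c0fa6c79597e golang.org/x/text/unicode/norm
    # ...and so on for the remaining entries, then drop the old tree:
    rm -rf Godeps
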
This probably also results in some packages being updated at the same
time.

Building with Go 1.3 and 1.4 still *works* but won't use our vendored
dependencies - the user needs to have the actual packages in their
GOPATH then, which they'll get with a normal `go get`. Building with Go
1.6+ will get our vendored dependencies by default even when not using
our build script, which is nice.

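From a build script's perspective the difference looks roughly like
this (a sketch; the flags are standard Go tooling, the package lists
are ours):

    # Go 1.5: vendor/ is only honored when the experiment flag is set.
    export GO15VENDOREXPERIMENT=1
    go build ./cmd/...

    # Go 1.6+: vendor/ is honored by default, so a plain build works too.
    go build ./cmd/...

    # Test only our own packages; in these Go versions a bare ./...
    # would also match everything under vendor/.
    go test ./lib/... ./cmd/...
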
By doing this we gain some freedom in that we can pick and choose
manually what to include in vendor, as it's no longer based on just
dependency analysis of our own code. This is also a risk, as we might
pick up dependencies we are unaware of because the build happens to
work locally with those packages present in GOPATH. On the other hand,
the build server will detect this, as its GOPATH contains no packages
beyond what is included in the repo.

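That check can be reproduced locally with a scratch GOPATH; a minimal
sketch, assuming the canonical import path github.com/syncthing/syncthing
(paths are illustrative):

    # With an empty GOPATH, anything missing from vendor/ fails with
    # "cannot find package" instead of resolving silently from the
    # developer's usual GOPATH. In Go 1.5/1.6 vendor/ is only honored
    # for packages inside GOPATH, hence the symlink.
    tmp="$(mktemp -d)"
    mkdir -p "$tmp/src/github.com/syncthing"
    ln -s "$PWD" "$tmp/src/github.com/syncthing/syncthing"
    GO15VENDOREXPERIMENT=1 GOPATH="$tmp" \
        go build github.com/syncthing/syncthing/cmd/...
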
The recommended tool for managing dependencies is
github.com/FiloSottile/gvt.
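
For day-to-day maintenance its common subcommands are roughly the
following (recalled from gvt's help text, so treat this as a hint
rather than a reference):

    gvt fetch <importpath>    # vendor a new dependency
    gvt update <importpath>   # move it to the latest upstream revision
    gvt list                  # show vendored packages and their revisions
    gvt delete <importpath>   # remove it from vendor/ and the manifest
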
Jakob Borg 2016-03-05 21:01:58 +01:00
parent 9259425a9a
commit 65aaa607ab
694 changed files with 65763 additions and 3541 deletions

.gitattributes vendored (2 changes)

@@ -2,7 +2,7 @@
* text=auto
# Except the dependencies, which we leave alone
Godeps/** -text=auto
vendor/** -text=auto
# Diffs on these files are meaningless
gui.files.go -diff

Godeps/Godeps.json generated (93 changes)

@@ -1,93 +0,0 @@
{
"ImportPath": "github.com/syncthing/syncthing",
"GoVersion": "go1.5.2",
"Packages": [
"./cmd/..."
],
"Deps": [
{
"ImportPath": "github.com/bkaradzic/go-lz4",
"Rev": "74ddf82598bc4745b965729e9c6a463bedd33049"
},
{
"ImportPath": "github.com/calmh/du",
"Comment": "v1.0.0",
"Rev": "3c0690cca16228b97741327b1b6781397afbdb24"
},
{
"ImportPath": "github.com/calmh/luhn",
"Comment": "v1.0.0",
"Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
},
{
"ImportPath": "github.com/calmh/xdr",
"Comment": "v2.0.0",
"Rev": "b6e0c321c9b5b28ba5ee21e828323e4b982c6976"
},
{
"ImportPath": "github.com/golang/snappy",
"Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
},
{
"ImportPath": "github.com/juju/ratelimit",
"Rev": "772f5c38e468398c4511514f4f6aa9a4185bc0a0"
},
{
"ImportPath": "github.com/kardianos/osext",
"Rev": "29ae4ffbc9a6fe9fb2bc5029050ce6996ea1d3bc"
},
{
"ImportPath": "github.com/rcrowley/go-metrics",
"Rev": "1ce93efbc8f9c568886b2ef85ce305b2217b3de3"
},
{
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
"Rev": "1a9d62f03ea92815b46fcaab357cfd4df264b1a0"
},
{
"ImportPath": "github.com/thejerf/suture",
"Comment": "v1.0.1",
"Rev": "99c1f2d613756768fc4299acd9dc621e11ed3fd7"
},
{
"ImportPath": "github.com/vitrun/qart/coding",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "github.com/vitrun/qart/gf256",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "github.com/vitrun/qart/qr",
"Rev": "ccb109cf25f0cd24474da73b9fee4e7a3e8a8ce0"
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "575fdbe86e5dd89229707ebec0575ce7d088a4a6"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "575fdbe86e5dd89229707ebec0575ce7d088a4a6"
},
{
"ImportPath": "golang.org/x/net/internal/iana",
"Rev": "042ba42fa6633b34205efc66ba5719cd3afd8d38"
},
{
"ImportPath": "golang.org/x/net/ipv6",
"Rev": "042ba42fa6633b34205efc66ba5719cd3afd8d38"
},
{
"ImportPath": "golang.org/x/net/proxy",
"Rev": "042ba42fa6633b34205efc66ba5719cd3afd8d38"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "5eb8d4684c4796dd36c74f6452f2c0fa6c79597e"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "5eb8d4684c4796dd36c74f6452f2c0fa6c79597e"
}
]
}

Godeps/Readme generated (5 changes)

@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

Godeps/_workspace/.gitignore generated vendored (2 changes)

@@ -1,2 +0,0 @@
/pkg
/bin


@@ -1 +0,0 @@
/lz4-example/lz4-example


@@ -1,9 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- tip


@@ -1 +0,0 @@
coverage.out


@@ -1,19 +0,0 @@
language: go
go:
- tip
install:
- export PATH=$PATH:$HOME/gopath/bin
- go get golang.org/x/tools/cover
- go get github.com/mattn/goveralls
script:
- ./generate.sh
- go test -coverprofile=coverage.out
after_success:
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"
env:
  global:
    secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=


@@ -1,254 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"io"
)
// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15
// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
case n < 1<<16:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
case n < 1<<24:
dst[0] = 62<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
i = 4
case int64(n) < 1<<32:
dst[0] = 63<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
dst[4] = uint8(n >> 24)
i = 5
default:
panic("snappy: source buffer is too long")
}
if copy(dst[i:], lit) != len(lit) {
panic("snappy: destination buffer is too short")
}
return i + len(lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
i := 0
for length > 0 {
x := length - 4
if 0 <= x && x < 1<<3 && offset < 1<<11 {
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
dst[i+1] = uint8(offset)
i += 2
break
}
x = length
if x > 1<<6 {
x = 1 << 6
}
dst[i+0] = uint8(x-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= x
}
return i
}
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
// Return early if src is short.
if len(src) <= 4 {
if len(src) != 0 {
d += emitLiteral(dst[d:], src)
}
return dst[:d]
}
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
const maxTableSize = 1 << 14
shift, tableSize := uint(32-8), 1<<8
for tableSize < maxTableSize && tableSize < len(src) {
shift--
tableSize *= 2
}
var table [maxTableSize]int
// Iterate over the source bytes.
var (
s int // The iterator position.
t int // The last position with the same hash as s.
lit int // The start position of any pending literal bytes.
)
for s+3 < len(src) {
// Update the hash table.
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
p := &table[(h*0x1e35a7bd)>>shift]
// We need to store values in [-1, inf) in table. To save
// some initialization time, (re)use the table's zero value
// and shift the values against this zero: add 1 on writes,
// subtract 1 on reads.
t, *p = *p-1, s+1
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
s++
continue
}
// Otherwise, we have a match. First, emit any pending literal bytes.
if lit != s {
d += emitLiteral(dst[d:], src[lit:s])
}
// Extend the match to be as long as possible.
s0 := s
s, t = s+4, t+4
for s < len(src) && src[s] == src[t] {
s++
t++
}
// Emit the copied bytes.
d += emitCopy(dst[d:], s-t, s-s0)
lit = s
}
// Emit any final pending literal bytes and return.
if lit != len(src) {
d += emitLiteral(dst[d:], src[lit:])
}
return dst[:d]
}
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
// NewWriter returns a new Writer that compresses to w, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
}
}
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
enc []byte
buf [checksumSize + chunkHeaderSize]byte
wroteHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
w.wroteHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (n int, errRet error) {
if w.err != nil {
return 0, w.err
}
if !w.wroteHeader {
copy(w.enc, magicChunk)
if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
w.err = err
return n, err
}
w.wroteHeader = true
}
for len(p) > 0 {
var uncompressed []byte
if len(p) > maxUncompressedChunkLen {
uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
chunkType := uint8(chunkTypeCompressedData)
chunkBody := Encode(w.enc, uncompressed)
if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
}
chunkLen := 4 + len(chunkBody)
w.buf[0] = chunkType
w.buf[1] = uint8(chunkLen >> 0)
w.buf[2] = uint8(chunkLen >> 8)
w.buf[3] = uint8(chunkLen >> 16)
w.buf[4] = uint8(checksum >> 0)
w.buf[5] = uint8(checksum >> 8)
w.buf[6] = uint8(checksum >> 16)
w.buf[7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.buf[:]); err != nil {
w.err = err
return n, err
}
if _, err := w.w.Write(chunkBody); err != nil {
w.err = err
return n, err
}
n += len(uncompressed)
}
return n, nil
}


@@ -1,9 +0,0 @@
*.[68]
*.a
*.out
*.swp
_obj
_testmain.go
cmd/metrics-bench/metrics-bench
cmd/metrics-example/metrics-example
cmd/never-read/never-read


@@ -1,13 +0,0 @@
language: go
go:
- 1.2
- 1.3
- 1.4
script:
- ./validate.sh
# this should give us faster builds according to
# http://docs.travis-ci.com/user/migrating-from-legacy/
sudo: false


@@ -1,24 +0,0 @@
Copyright 2012 Suryandaru Triandana <syndtr@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -1,142 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"encoding/binary"
"fmt"
"github.com/syndtr/goleveldb/leveldb/errors"
)
type ErrIkeyCorrupted struct {
Ikey []byte
Reason string
}
func (e *ErrIkeyCorrupted) Error() string {
return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
}
func newErrIkeyCorrupted(ikey []byte, reason string) error {
return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
}
type kType int
func (kt kType) String() string {
switch kt {
case ktDel:
return "d"
case ktVal:
return "v"
}
return "x"
}
// Value types encoded as the last component of internal keys.
// Don't modify; these values are saved to disk.
const (
ktDel kType = iota
ktVal
)
// ktSeek defines the kType that should be passed when constructing an
// internal key for seeking to a particular sequence number (since we
// sort sequence numbers in decreasing order and the value type is
// embedded as the low 8 bits in the sequence number in internal keys,
// we need to use the highest-numbered ValueType, not the lowest).
const ktSeek = ktVal
const (
// Maximum value possible for a sequence number; 8 bits are
// used by the value type, so both can be packed together into a
// single 64-bit integer.
kMaxSeq uint64 = (uint64(1) << 56) - 1
// Maximum value possible for packed sequence number and type.
kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
)
// Maximum number encoded in bytes.
var kMaxNumBytes = make([]byte, 8)
func init() {
binary.LittleEndian.PutUint64(kMaxNumBytes, kMaxNum)
}
type iKey []byte
func newIkey(ukey []byte, seq uint64, kt kType) iKey {
if seq > kMaxSeq {
panic("leveldb: invalid sequence number")
} else if kt > ktVal {
panic("leveldb: invalid type")
}
ik := make(iKey, len(ukey)+8)
copy(ik, ukey)
binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
return ik
}
func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
if len(ik) < 8 {
return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
}
num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
seq, kt = uint64(num>>8), kType(num&0xff)
if kt > ktVal {
return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
}
ukey = ik[:len(ik)-8]
return
}
func validIkey(ik []byte) bool {
_, _, _, err := parseIkey(ik)
return err == nil
}
func (ik iKey) assert() {
if ik == nil {
panic("leveldb: nil iKey")
}
if len(ik) < 8 {
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
}
}
func (ik iKey) ukey() []byte {
ik.assert()
return ik[:len(ik)-8]
}
func (ik iKey) num() uint64 {
ik.assert()
return binary.LittleEndian.Uint64(ik[len(ik)-8:])
}
func (ik iKey) parseNum() (seq uint64, kt kType) {
num := ik.num()
seq, kt = uint64(num>>8), kType(num&0xff)
if kt > ktVal {
panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
}
return
}
func (ik iKey) String() string {
if ik == nil {
return "<nil>"
}
if ukey, seq, kt, err := parseIkey(ik); err == nil {
return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
} else {
return "<invalid>"
}
}


@@ -1,543 +0,0 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package storage
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/syndtr/goleveldb/leveldb/util"
)
var errFileOpen = errors.New("leveldb/storage: file still open")
type fileLock interface {
release() error
}
type fileStorageLock struct {
fs *fileStorage
}
func (lock *fileStorageLock) Release() {
fs := lock.fs
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.slock == lock {
fs.slock = nil
}
return
}
// fileStorage is a file-system backed storage.
type fileStorage struct {
path string
mu sync.Mutex
flock fileLock
slock *fileStorageLock
logw *os.File
buf []byte
// Opened file counter; open < 0 means closed.
open int
day int
}
// OpenFile returns a new filesystem-backed storage implementation with the given
// path. This also holds a file lock, so any subsequent attempt to open the same
// path will fail.
//
// The storage must be closed after use, by calling the Close method.
func OpenFile(path string) (Storage, error) {
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
flock, err := newFileLock(filepath.Join(path, "LOCK"))
if err != nil {
return nil, err
}
defer func() {
if err != nil {
flock.release()
}
}()
rename(filepath.Join(path, "LOG"), filepath.Join(path, "LOG.old"))
logw, err := os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return nil, err
}
fs := &fileStorage{path: path, flock: flock, logw: logw}
runtime.SetFinalizer(fs, (*fileStorage).Close)
return fs, nil
}
func (fs *fileStorage) Lock() (util.Releaser, error) {
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.open < 0 {
return nil, ErrClosed
}
if fs.slock != nil {
return nil, ErrLocked
}
fs.slock = &fileStorageLock{fs: fs}
return fs.slock, nil
}
func itoa(buf []byte, i int, wid int) []byte {
var u uint = uint(i)
if u == 0 && wid <= 1 {
return append(buf, '0')
}
// Assemble decimal in reverse order.
var b [32]byte
bp := len(b)
for ; u > 0 || wid > 0; u /= 10 {
bp--
wid--
b[bp] = byte(u%10) + '0'
}
return append(buf, b[bp:]...)
}
func (fs *fileStorage) printDay(t time.Time) {
if fs.day == t.Day() {
return
}
fs.day = t.Day()
fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
}
func (fs *fileStorage) doLog(t time.Time, str string) {
fs.printDay(t)
hour, min, sec := t.Clock()
msec := t.Nanosecond() / 1e3
// time
fs.buf = itoa(fs.buf[:0], hour, 2)
fs.buf = append(fs.buf, ':')
fs.buf = itoa(fs.buf, min, 2)
fs.buf = append(fs.buf, ':')
fs.buf = itoa(fs.buf, sec, 2)
fs.buf = append(fs.buf, '.')
fs.buf = itoa(fs.buf, msec, 6)
fs.buf = append(fs.buf, ' ')
// write
fs.buf = append(fs.buf, []byte(str)...)
fs.buf = append(fs.buf, '\n')
fs.logw.Write(fs.buf)
}
func (fs *fileStorage) Log(str string) {
t := time.Now()
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.open < 0 {
return
}
fs.doLog(t, str)
}
func (fs *fileStorage) log(str string) {
fs.doLog(time.Now(), str)
}
func (fs *fileStorage) GetFile(num uint64, t FileType) File {
return &file{fs: fs, num: num, t: t}
}
func (fs *fileStorage) GetFiles(t FileType) (ff []File, err error) {
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.open < 0 {
return nil, ErrClosed
}
dir, err := os.Open(fs.path)
if err != nil {
return
}
fnn, err := dir.Readdirnames(0)
// Close the dir first before checking for Readdirnames error.
if err := dir.Close(); err != nil {
fs.log(fmt.Sprintf("close dir: %v", err))
}
if err != nil {
return
}
f := &file{fs: fs}
for _, fn := range fnn {
if f.parse(fn) && (f.t&t) != 0 {
ff = append(ff, f)
f = &file{fs: fs}
}
}
return
}
func (fs *fileStorage) GetManifest() (f File, err error) {
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.open < 0 {
return nil, ErrClosed
}
dir, err := os.Open(fs.path)
if err != nil {
return
}
fnn, err := dir.Readdirnames(0)
// Close the dir first before checking for Readdirnames error.
if err := dir.Close(); err != nil {
fs.log(fmt.Sprintf("close dir: %v", err))
}
if err != nil {
return
}
// Find latest CURRENT file.
var rem []string
var pend bool
var cerr error
for _, fn := range fnn {
if strings.HasPrefix(fn, "CURRENT") {
pend1 := len(fn) > 7
// Make sure it is a valid name for a CURRENT file, otherwise skip it.
if pend1 {
if fn[7] != '.' || len(fn) < 9 {
fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
continue
}
if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
continue
}
}
path := filepath.Join(fs.path, fn)
r, e1 := os.OpenFile(path, os.O_RDONLY, 0)
if e1 != nil {
return nil, e1
}
b, e1 := ioutil.ReadAll(r)
if e1 != nil {
r.Close()
return nil, e1
}
f1 := &file{fs: fs}
if len(b) < 1 || b[len(b)-1] != '\n' || !f1.parse(string(b[:len(b)-1])) {
fs.log(fmt.Sprintf("skipping %s: corrupted or incomplete", fn))
if pend1 {
rem = append(rem, fn)
}
if !pend1 || cerr == nil {
cerr = &ErrCorrupted{
File: fsParseName(filepath.Base(fn)),
Err: errors.New("leveldb/storage: corrupted or incomplete manifest file"),
}
}
} else if f != nil && f1.Num() < f.Num() {
fs.log(fmt.Sprintf("skipping %s: obsolete", fn))
if pend1 {
rem = append(rem, fn)
}
} else {
f = f1
pend = pend1
}
if err := r.Close(); err != nil {
fs.log(fmt.Sprintf("close %s: %v", fn, err))
}
}
}
// Don't remove any files if there is no valid CURRENT file.
if f == nil {
if cerr != nil {
err = cerr
} else {
err = os.ErrNotExist
}
return
}
// Rename pending CURRENT file to an effective CURRENT.
if pend {
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f.Num())
if err := rename(path, filepath.Join(fs.path, "CURRENT")); err != nil {
fs.log(fmt.Sprintf("CURRENT.%d -> CURRENT: %v", f.Num(), err))
}
}
// Remove obsolete or incomplete pending CURRENT files.
for _, fn := range rem {
path := filepath.Join(fs.path, fn)
if err := os.Remove(path); err != nil {
fs.log(fmt.Sprintf("remove %s: %v", fn, err))
}
}
return
}
func (fs *fileStorage) SetManifest(f File) (err error) {
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.open < 0 {
return ErrClosed
}
f2, ok := f.(*file)
if !ok || f2.t != TypeManifest {
return ErrInvalidFile
}
defer func() {
if err != nil {
fs.log(fmt.Sprintf("CURRENT: %v", err))
}
}()
path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), f2.Num())
w, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
_, err = fmt.Fprintln(w, f2.name())
// Close the file first.
if err := w.Close(); err != nil {
fs.log(fmt.Sprintf("close CURRENT.%d: %v", f2.num, err))
}
if err != nil {
return err
}
return rename(path, filepath.Join(fs.path, "CURRENT"))
}
func (fs *fileStorage) Close() error {
fs.mu.Lock()
defer fs.mu.Unlock()
if fs.open < 0 {
return ErrClosed
}
// Clear the finalizer.
runtime.SetFinalizer(fs, nil)
if fs.open > 0 {
fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
}
fs.open = -1
e1 := fs.logw.Close()
err := fs.flock.release()
if err == nil {
err = e1
}
return err
}
type fileWrap struct {
*os.File
f *file
}
func (fw fileWrap) Sync() error {
if err := fw.File.Sync(); err != nil {
return err
}
if fw.f.Type() == TypeManifest {
// Also sync parent directory if file type is manifest.
// See: https://code.google.com/p/leveldb/issues/detail?id=190.
if err := syncDir(fw.f.fs.path); err != nil {
return err
}
}
return nil
}
func (fw fileWrap) Close() error {
f := fw.f
f.fs.mu.Lock()
defer f.fs.mu.Unlock()
if !f.open {
return ErrClosed
}
f.open = false
f.fs.open--
err := fw.File.Close()
if err != nil {
f.fs.log(fmt.Sprintf("close %s.%d: %v", f.Type(), f.Num(), err))
}
return err
}
type file struct {
fs *fileStorage
num uint64
t FileType
open bool
}
func (f *file) Open() (Reader, error) {
f.fs.mu.Lock()
defer f.fs.mu.Unlock()
if f.fs.open < 0 {
return nil, ErrClosed
}
if f.open {
return nil, errFileOpen
}
of, err := os.OpenFile(f.path(), os.O_RDONLY, 0)
if err != nil {
if f.hasOldName() && os.IsNotExist(err) {
of, err = os.OpenFile(f.oldPath(), os.O_RDONLY, 0)
if err == nil {
goto ok
}
}
return nil, err
}
ok:
f.open = true
f.fs.open++
return fileWrap{of, f}, nil
}
func (f *file) Create() (Writer, error) {
f.fs.mu.Lock()
defer f.fs.mu.Unlock()
if f.fs.open < 0 {
return nil, ErrClosed
}
if f.open {
return nil, errFileOpen
}
of, err := os.OpenFile(f.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}
f.open = true
f.fs.open++
return fileWrap{of, f}, nil
}
func (f *file) Replace(newfile File) error {
f.fs.mu.Lock()
defer f.fs.mu.Unlock()
if f.fs.open < 0 {
return ErrClosed
}
newfile2, ok := newfile.(*file)
if !ok {
return ErrInvalidFile
}
if f.open || newfile2.open {
return errFileOpen
}
return rename(newfile2.path(), f.path())
}
func (f *file) Type() FileType {
return f.t
}
func (f *file) Num() uint64 {
return f.num
}
func (f *file) Remove() error {
f.fs.mu.Lock()
defer f.fs.mu.Unlock()
if f.fs.open < 0 {
return ErrClosed
}
if f.open {
return errFileOpen
}
err := os.Remove(f.path())
if err != nil {
f.fs.log(fmt.Sprintf("remove %s.%d: %v", f.Type(), f.Num(), err))
}
// Also try removing the file with the old name, just in case.
if f.hasOldName() {
if e1 := os.Remove(f.oldPath()); !os.IsNotExist(e1) {
f.fs.log(fmt.Sprintf("remove %s.%d: %v (old name)", f.Type(), f.Num(), err))
err = e1
}
}
return err
}
func (f *file) hasOldName() bool {
return f.t == TypeTable
}
func (f *file) oldName() string {
switch f.t {
case TypeTable:
return fmt.Sprintf("%06d.sst", f.num)
}
return f.name()
}
func (f *file) oldPath() string {
return filepath.Join(f.fs.path, f.oldName())
}
func (f *file) name() string {
switch f.t {
case TypeManifest:
return fmt.Sprintf("MANIFEST-%06d", f.num)
case TypeJournal:
return fmt.Sprintf("%06d.log", f.num)
case TypeTable:
return fmt.Sprintf("%06d.ldb", f.num)
case TypeTemp:
return fmt.Sprintf("%06d.tmp", f.num)
default:
panic("invalid file type")
}
}
func (f *file) path() string {
return filepath.Join(f.fs.path, f.name())
}
func fsParseName(name string) *FileInfo {
fi := &FileInfo{}
var tail string
_, err := fmt.Sscanf(name, "%d.%s", &fi.Num, &tail)
if err == nil {
switch tail {
case "log":
fi.Type = TypeJournal
case "ldb", "sst":
fi.Type = TypeTable
case "tmp":
fi.Type = TypeTemp
default:
return nil
}
return fi
}
n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fi.Num, &tail)
if n == 1 {
fi.Type = TypeManifest
return fi
}
return nil
}
func (f *file) parse(name string) bool {
fi := fsParseName(name)
if fi == nil {
return false
}
f.t = fi.Type
f.num = fi.Num
return true
}


@@ -1,203 +0,0 @@
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package storage
import (
"bytes"
"os"
"sync"
"github.com/syndtr/goleveldb/leveldb/util"
)
const typeShift = 3
type memStorageLock struct {
ms *memStorage
}
func (lock *memStorageLock) Release() {
ms := lock.ms
ms.mu.Lock()
defer ms.mu.Unlock()
if ms.slock == lock {
ms.slock = nil
}
return
}
// memStorage is a memory-backed storage.
type memStorage struct {
mu sync.Mutex
slock *memStorageLock
files map[uint64]*memFile
manifest *memFilePtr
}
// NewMemStorage returns a new memory-backed storage implementation.
func NewMemStorage() Storage {
return &memStorage{
files: make(map[uint64]*memFile),
}
}
func (ms *memStorage) Lock() (util.Releaser, error) {
ms.mu.Lock()
defer ms.mu.Unlock()
if ms.slock != nil {
return nil, ErrLocked
}
ms.slock = &memStorageLock{ms: ms}
return ms.slock, nil
}
func (*memStorage) Log(str string) {}
func (ms *memStorage) GetFile(num uint64, t FileType) File {
return &memFilePtr{ms: ms, num: num, t: t}
}
func (ms *memStorage) GetFiles(t FileType) ([]File, error) {
ms.mu.Lock()
var ff []File
for x := range ms.files {
num, mt := x>>typeShift, FileType(x)&TypeAll
if mt&t == 0 {
continue
}
ff = append(ff, &memFilePtr{ms: ms, num: num, t: mt})
}
ms.mu.Unlock()
return ff, nil
}
func (ms *memStorage) GetManifest() (File, error) {
ms.mu.Lock()
defer ms.mu.Unlock()
if ms.manifest == nil {
return nil, os.ErrNotExist
}
return ms.manifest, nil
}
func (ms *memStorage) SetManifest(f File) error {
fm, ok := f.(*memFilePtr)
if !ok || fm.t != TypeManifest {
return ErrInvalidFile
}
ms.mu.Lock()
ms.manifest = fm
ms.mu.Unlock()
return nil
}
func (*memStorage) Close() error { return nil }
type memReader struct {
*bytes.Reader
m *memFile
}
func (mr *memReader) Close() error {
return mr.m.Close()
}
type memFile struct {
bytes.Buffer
ms *memStorage
open bool
}
func (*memFile) Sync() error { return nil }
func (m *memFile) Close() error {
m.ms.mu.Lock()
m.open = false
m.ms.mu.Unlock()
return nil
}
type memFilePtr struct {
ms *memStorage
num uint64
t FileType
}
func (p *memFilePtr) x() uint64 {
return p.Num()<<typeShift | uint64(p.Type())
}
func (p *memFilePtr) Open() (Reader, error) {
ms := p.ms
ms.mu.Lock()
defer ms.mu.Unlock()
if m, exist := ms.files[p.x()]; exist {
if m.open {
return nil, errFileOpen
}
m.open = true
return &memReader{Reader: bytes.NewReader(m.Bytes()), m: m}, nil
}
return nil, os.ErrNotExist
}
func (p *memFilePtr) Create() (Writer, error) {
ms := p.ms
ms.mu.Lock()
defer ms.mu.Unlock()
m, exist := ms.files[p.x()]
if exist {
if m.open {
return nil, errFileOpen
}
m.Reset()
} else {
m = &memFile{ms: ms}
ms.files[p.x()] = m
}
m.open = true
return m, nil
}
func (p *memFilePtr) Replace(newfile File) error {
p1, ok := newfile.(*memFilePtr)
if !ok {
return ErrInvalidFile
}
ms := p.ms
ms.mu.Lock()
defer ms.mu.Unlock()
m1, exist := ms.files[p1.x()]
if !exist {
return os.ErrNotExist
}
m0, exist := ms.files[p.x()]
if (exist && m0.open) || m1.open {
return errFileOpen
}
delete(ms.files, p1.x())
ms.files[p.x()] = m1
return nil
}
func (p *memFilePtr) Type() FileType {
return p.t
}
func (p *memFilePtr) Num() uint64 {
return p.num
}
func (p *memFilePtr) Remove() error {
ms := p.ms
ms.mu.Lock()
defer ms.mu.Unlock()
if _, exist := ms.files[p.x()]; exist {
delete(ms.files, p.x())
return nil
}
return os.ErrNotExist
}


@@ -1,187 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package testutil
import (
"fmt"
"math/rand"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/util"
)
func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
if rnd == nil {
rnd = NewRand()
}
if p == nil {
BeforeEach(func() {
p = setup(kv)
})
if teardown != nil {
AfterEach(func() {
teardown(p)
})
}
}
It("Should find all keys with Find", func() {
if db, ok := p.(Find); ok {
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
key_, key, value := kv.IndexInexact(i)
// Using exact key.
rkey, rvalue, err := db.TestFind(key)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
Expect(rkey).Should(Equal(key), "Key")
Expect(rvalue).Should(Equal(value), "Value for key %q", key)
// Using inexact key.
rkey, rvalue, err = db.TestFind(key_)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q)", key_, key)
Expect(rkey).Should(Equal(key))
Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
})
}
})
It("Should return error if the key is not present", func() {
if db, ok := p.(Find); ok {
var key []byte
if kv.Len() > 0 {
key_, _ := kv.Index(kv.Len() - 1)
key = BytesAfter(key_)
}
rkey, _, err := db.TestFind(key)
Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
Expect(err).Should(Equal(errors.ErrNotFound))
}
})
It("Should only find exact key with Get", func() {
if db, ok := p.(Get); ok {
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
key_, key, value := kv.IndexInexact(i)
// Using exact key.
rvalue, err := db.TestGet(key)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
Expect(rvalue).Should(Equal(value), "Value for key %q", key)
// Using inexact key.
if len(key_) > 0 {
_, err = db.TestGet(key_)
Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
Expect(err).Should(Equal(errors.ErrNotFound))
}
})
}
})
It("Should only find present key with Has", func() {
if db, ok := p.(Has); ok {
ShuffledIndex(nil, kv.Len(), 1, func(i int) {
key_, key, _ := kv.IndexInexact(i)
// Using exact key.
ret, err := db.TestHas(key)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
Expect(ret).Should(BeTrue(), "False for key %q", key)
// Using inexact key.
if len(key_) > 0 {
ret, err = db.TestHas(key_)
Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
Expect(ret).ShouldNot(BeTrue(), "True for key %q", key)
}
})
}
})
TestIter := func(r *util.Range, _kv KeyValue) {
if db, ok := p.(NewIterator); ok {
iter := db.TestNewIterator(r)
Expect(iter.Error()).ShouldNot(HaveOccurred())
t := IteratorTesting{
KeyValue: _kv,
Iter: iter,
}
DoIteratorTesting(&t)
iter.Release()
}
}
It("Should iterates and seeks correctly", func(done Done) {
TestIter(nil, kv.Clone())
done <- true
}, 3.0)
RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
type slice struct {
r *util.Range
start, limit int
}
key_, _, _ := kv.IndexInexact(i)
for _, x := range []slice{
{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
{&util.Range{Start: nil, Limit: key_}, 0, i},
} {
It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) {
TestIter(x.r, kv.Slice(x.start, x.limit))
done <- true
}, 3.0)
}
})
RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
r := kv.Range(start, limit)
TestIter(&r, kv.Slice(start, limit))
done <- true
}, 3.0)
})
}
func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
Test := func(kv *KeyValue) func() {
return func() {
var p DB
if setup != nil {
Defer("setup", func() {
p = setup(*kv)
})
}
if teardown != nil {
Defer("teardown", func() {
teardown(p)
})
}
if body != nil {
p = body(*kv)
}
KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
return p
}, nil)
}
}
Describe("with no key/value (empty)", Test(&KeyValue{}))
Describe("with empty key", Test(KeyValue_EmptyKey()))
Describe("with empty value", Test(KeyValue_EmptyValue()))
Describe("with one key/value", Test(KeyValue_OneKeyValue()))
Describe("with big value", Test(KeyValue_BigValue()))
Describe("with special key", Test(KeyValue_SpecialKey()))
Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
}


@@ -1,586 +0,0 @@
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package testutil
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
. "github.com/onsi/gomega"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
)
var (
storageMu sync.Mutex
storageUseFS bool = true
storageKeepFS bool = false
storageNum int
)
type StorageMode int
const (
ModeOpen StorageMode = 1 << iota
ModeCreate
ModeRemove
ModeRead
ModeWrite
ModeSync
ModeClose
)
const (
modeOpen = iota
modeCreate
modeRemove
modeRead
modeWrite
modeSync
modeClose
modeCount
)
const (
typeManifest = iota
typeJournal
typeTable
typeTemp
typeCount
)
const flattenCount = modeCount * typeCount
func flattenType(m StorageMode, t storage.FileType) int {
var x int
switch m {
case ModeOpen:
x = modeOpen
case ModeCreate:
x = modeCreate
case ModeRemove:
x = modeRemove
case ModeRead:
x = modeRead
case ModeWrite:
x = modeWrite
case ModeSync:
x = modeSync
case ModeClose:
x = modeClose
default:
panic("invalid storage mode")
}
x *= typeCount
switch t {
case storage.TypeManifest:
return x + typeManifest
case storage.TypeJournal:
return x + typeJournal
case storage.TypeTable:
return x + typeTable
case storage.TypeTemp:
return x + typeTemp
default:
panic("invalid file type")
}
}
func listFlattenType(m StorageMode, t storage.FileType) []int {
ret := make([]int, 0, flattenCount)
add := func(x int) {
x *= typeCount
switch {
case t&storage.TypeManifest != 0:
ret = append(ret, x+typeManifest)
case t&storage.TypeJournal != 0:
ret = append(ret, x+typeJournal)
case t&storage.TypeTable != 0:
ret = append(ret, x+typeTable)
case t&storage.TypeTemp != 0:
ret = append(ret, x+typeTemp)
}
}
switch {
case m&ModeOpen != 0:
add(modeOpen)
case m&ModeCreate != 0:
add(modeCreate)
case m&ModeRemove != 0:
add(modeRemove)
case m&ModeRead != 0:
add(modeRead)
case m&ModeWrite != 0:
add(modeWrite)
case m&ModeSync != 0:
add(modeSync)
case m&ModeClose != 0:
add(modeClose)
}
return ret
}
func packFile(num uint64, t storage.FileType) uint64 {
if num>>(64-typeCount) != 0 {
panic("overflow")
}
return num<<typeCount | uint64(t)
}
func unpackFile(x uint64) (uint64, storage.FileType) {
return x >> typeCount, storage.FileType(x) & storage.TypeAll
}
type emulatedError struct {
err error
}
func (err emulatedError) Error() string {
return fmt.Sprintf("emulated storage error: %v", err.err)
}
type storageLock struct {
s *Storage
r util.Releaser
}
func (l storageLock) Release() {
l.r.Release()
l.s.logI("storage lock released")
}
type reader struct {
f *file
storage.Reader
}
func (r *reader) Read(p []byte) (n int, err error) {
err = r.f.s.emulateError(ModeRead, r.f.Type())
if err == nil {
r.f.s.stall(ModeRead, r.f.Type())
n, err = r.Reader.Read(p)
}
r.f.s.count(ModeRead, r.f.Type(), n)
if err != nil && err != io.EOF {
r.f.s.logI("read error, num=%d type=%v n=%d err=%v", r.f.Num(), r.f.Type(), n, err)
}
return
}
func (r *reader) ReadAt(p []byte, off int64) (n int, err error) {
err = r.f.s.emulateError(ModeRead, r.f.Type())
if err == nil {
r.f.s.stall(ModeRead, r.f.Type())
n, err = r.Reader.ReadAt(p, off)
}
r.f.s.count(ModeRead, r.f.Type(), n)
if err != nil && err != io.EOF {
r.f.s.logI("readAt error, num=%d type=%v offset=%d n=%d err=%v", r.f.Num(), r.f.Type(), off, n, err)
}
return
}
func (r *reader) Close() (err error) {
return r.f.doClose(r.Reader)
}
type writer struct {
f *file
storage.Writer
}
func (w *writer) Write(p []byte) (n int, err error) {
err = w.f.s.emulateError(ModeWrite, w.f.Type())
if err == nil {
w.f.s.stall(ModeWrite, w.f.Type())
n, err = w.Writer.Write(p)
}
w.f.s.count(ModeWrite, w.f.Type(), n)
if err != nil && err != io.EOF {
w.f.s.logI("write error, num=%d type=%v n=%d err=%v", w.f.Num(), w.f.Type(), n, err)
}
return
}
func (w *writer) Sync() (err error) {
err = w.f.s.emulateError(ModeSync, w.f.Type())
if err == nil {
w.f.s.stall(ModeSync, w.f.Type())
err = w.Writer.Sync()
}
w.f.s.count(ModeSync, w.f.Type(), 0)
if err != nil {
w.f.s.logI("sync error, num=%d type=%v err=%v", w.f.Num(), w.f.Type(), err)
}
return
}
func (w *writer) Close() (err error) {
return w.f.doClose(w.Writer)
}
type file struct {
s *Storage
storage.File
}
func (f *file) pack() uint64 {
return packFile(f.Num(), f.Type())
}
func (f *file) assertOpen() {
ExpectWithOffset(2, f.s.opens).NotTo(HaveKey(f.pack()), "File open, num=%d type=%v writer=%v", f.Num(), f.Type(), f.s.opens[f.pack()])
}
func (f *file) doClose(closer io.Closer) (err error) {
err = f.s.emulateError(ModeClose, f.Type())
if err == nil {
f.s.stall(ModeClose, f.Type())
}
f.s.mu.Lock()
defer f.s.mu.Unlock()
if err == nil {
ExpectWithOffset(2, f.s.opens).To(HaveKey(f.pack()), "File closed, num=%d type=%v", f.Num(), f.Type())
err = closer.Close()
}
f.s.countNB(ModeClose, f.Type(), 0)
writer := f.s.opens[f.pack()]
if err != nil {
f.s.logISkip(1, "file close failed, num=%d type=%v writer=%v err=%v", f.Num(), f.Type(), writer, err)
} else {
f.s.logISkip(1, "file closed, num=%d type=%v writer=%v", f.Num(), f.Type(), writer)
delete(f.s.opens, f.pack())
}
return
}
func (f *file) Open() (r storage.Reader, err error) {
err = f.s.emulateError(ModeOpen, f.Type())
if err == nil {
f.s.stall(ModeOpen, f.Type())
}
f.s.mu.Lock()
defer f.s.mu.Unlock()
if err == nil {
f.assertOpen()
f.s.countNB(ModeOpen, f.Type(), 0)
r, err = f.File.Open()
}
if err != nil {
f.s.logI("file open failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
} else {
f.s.logI("file opened, num=%d type=%v", f.Num(), f.Type())
f.s.opens[f.pack()] = false
r = &reader{f, r}
}
return
}
func (f *file) Create() (w storage.Writer, err error) {
err = f.s.emulateError(ModeCreate, f.Type())
if err == nil {
f.s.stall(ModeCreate, f.Type())
}
f.s.mu.Lock()
defer f.s.mu.Unlock()
if err == nil {
f.assertOpen()
f.s.countNB(ModeCreate, f.Type(), 0)
w, err = f.File.Create()
}
if err != nil {
f.s.logI("file create failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
} else {
f.s.logI("file created, num=%d type=%v", f.Num(), f.Type())
f.s.opens[f.pack()] = true
w = &writer{f, w}
}
return
}
func (f *file) Remove() (err error) {
err = f.s.emulateError(ModeRemove, f.Type())
if err == nil {
f.s.stall(ModeRemove, f.Type())
}
f.s.mu.Lock()
defer f.s.mu.Unlock()
if err == nil {
f.assertOpen()
f.s.countNB(ModeRemove, f.Type(), 0)
err = f.File.Remove()
}
if err != nil {
f.s.logI("file remove failed, num=%d type=%v err=%v", f.Num(), f.Type(), err)
} else {
f.s.logI("file removed, num=%d type=%v", f.Num(), f.Type())
}
return
}
type Storage struct {
storage.Storage
closeFn func() error
lmu sync.Mutex
lb bytes.Buffer
mu sync.Mutex
// Open files, true=writer, false=reader
opens map[uint64]bool
counters [flattenCount]int
bytesCounter [flattenCount]int64
emulatedError [flattenCount]error
stallCond sync.Cond
stalled [flattenCount]bool
}
func (s *Storage) log(skip int, str string) {
s.lmu.Lock()
defer s.lmu.Unlock()
_, file, line, ok := runtime.Caller(skip + 2)
if ok {
// Truncate file name at last file name separator.
if index := strings.LastIndex(file, "/"); index >= 0 {
file = file[index+1:]
} else if index = strings.LastIndex(file, "\\"); index >= 0 {
file = file[index+1:]
}
} else {
file = "???"
line = 1
}
fmt.Fprintf(&s.lb, "%s:%d: ", file, line)
lines := strings.Split(str, "\n")
if l := len(lines); l > 1 && lines[l-1] == "" {
lines = lines[:l-1]
}
for i, line := range lines {
if i > 0 {
s.lb.WriteString("\n\t")
}
s.lb.WriteString(line)
}
s.lb.WriteByte('\n')
}
func (s *Storage) logISkip(skip int, format string, args ...interface{}) {
pc, _, _, ok := runtime.Caller(skip + 1)
if ok {
if f := runtime.FuncForPC(pc); f != nil {
fname := f.Name()
if index := strings.LastIndex(fname, "."); index >= 0 {
fname = fname[index+1:]
}
format = fname + ": " + format
}
}
s.log(skip+1, fmt.Sprintf(format, args...))
}
func (s *Storage) logI(format string, args ...interface{}) {
s.logISkip(1, format, args...)
}
func (s *Storage) Log(str string) {
s.log(1, "Log: "+str)
s.Storage.Log(str)
}
func (s *Storage) Lock() (r util.Releaser, err error) {
r, err = s.Storage.Lock()
if err != nil {
s.logI("storage locking failed, err=%v", err)
} else {
s.logI("storage locked")
r = storageLock{s, r}
}
return
}
func (s *Storage) GetFile(num uint64, t storage.FileType) storage.File {
return &file{s, s.Storage.GetFile(num, t)}
}
func (s *Storage) GetFiles(t storage.FileType) (files []storage.File, err error) {
rfiles, err := s.Storage.GetFiles(t)
if err != nil {
s.logI("get files failed, err=%v", err)
return
}
files = make([]storage.File, len(rfiles))
for i, f := range rfiles {
files[i] = &file{s, f}
}
s.logI("get files, type=0x%x count=%d", int(t), len(files))
return
}
func (s *Storage) GetManifest() (f storage.File, err error) {
manifest, err := s.Storage.GetManifest()
if err != nil {
if !os.IsNotExist(err) {
s.logI("get manifest failed, err=%v", err)
}
return
}
s.logI("get manifest, num=%d", manifest.Num())
return &file{s, manifest}, nil
}
func (s *Storage) SetManifest(f storage.File) error {
f_, ok := f.(*file)
ExpectWithOffset(1, ok).To(BeTrue())
ExpectWithOffset(1, f_.Type()).To(Equal(storage.TypeManifest))
err := s.Storage.SetManifest(f_.File)
if err != nil {
s.logI("set manifest failed, err=%v", err)
} else {
s.logI("set manifest, num=%d", f_.Num())
}
return err
}
func (s *Storage) openFiles() string {
out := "Open files:"
for x, writer := range s.opens {
num, t := unpackFile(x)
out += fmt.Sprintf("\n · num=%d type=%v writer=%v", num, t, writer)
}
return out
}
func (s *Storage) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
ExpectWithOffset(1, s.opens).To(BeEmpty(), s.openFiles())
err := s.Storage.Close()
if err != nil {
s.logI("storage closing failed, err=%v", err)
} else {
s.logI("storage closed")
}
if s.closeFn != nil {
if err1 := s.closeFn(); err1 != nil {
s.logI("close func error, err=%v", err1)
}
}
return err
}
func (s *Storage) countNB(m StorageMode, t storage.FileType, n int) {
s.counters[flattenType(m, t)]++
s.bytesCounter[flattenType(m, t)] += int64(n)
}
func (s *Storage) count(m StorageMode, t storage.FileType, n int) {
s.mu.Lock()
defer s.mu.Unlock()
s.countNB(m, t, n)
}
func (s *Storage) ResetCounter(m StorageMode, t storage.FileType) {
for _, x := range listFlattenType(m, t) {
s.counters[x] = 0
s.bytesCounter[x] = 0
}
}
func (s *Storage) Counter(m StorageMode, t storage.FileType) (count int, bytes int64) {
for _, x := range listFlattenType(m, t) {
count += s.counters[x]
bytes += s.bytesCounter[x]
}
return
}
func (s *Storage) emulateError(m StorageMode, t storage.FileType) error {
s.mu.Lock()
defer s.mu.Unlock()
err := s.emulatedError[flattenType(m, t)]
if err != nil {
return emulatedError{err}
}
return nil
}
func (s *Storage) EmulateError(m StorageMode, t storage.FileType, err error) {
s.mu.Lock()
defer s.mu.Unlock()
for _, x := range listFlattenType(m, t) {
s.emulatedError[x] = err
}
}
func (s *Storage) stall(m StorageMode, t storage.FileType) {
x := flattenType(m, t)
s.mu.Lock()
defer s.mu.Unlock()
for s.stalled[x] {
s.stallCond.Wait()
}
}
func (s *Storage) Stall(m StorageMode, t storage.FileType) {
s.mu.Lock()
defer s.mu.Unlock()
for _, x := range listFlattenType(m, t) {
s.stalled[x] = true
}
}
func (s *Storage) Release(m StorageMode, t storage.FileType) {
s.mu.Lock()
defer s.mu.Unlock()
for _, x := range listFlattenType(m, t) {
s.stalled[x] = false
}
s.stallCond.Broadcast()
}
func NewStorage() *Storage {
var stor storage.Storage
var closeFn func() error
if storageUseFS {
for {
storageMu.Lock()
num := storageNum
storageNum++
storageMu.Unlock()
path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
if _, err := os.Stat(path); os.IsNotExist(err) {
stor, err = storage.OpenFile(path)
ExpectWithOffset(1, err).NotTo(HaveOccurred(), "creating storage at %s", path)
closeFn = func() error {
if storageKeepFS {
return nil
}
return os.RemoveAll(path)
}
break
}
}
} else {
stor = storage.NewMemStorage()
}
s := &Storage{
Storage: stor,
closeFn: closeFn,
opens: make(map[uint64]bool),
}
s.stallCond.L = &s.mu
return s
}


@@ -1,7 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- tip


@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -109,10 +109,10 @@ func main() {
build(pkg, tags)
case "test":
test("./...")
test("./lib/...", "./cmd/...")
case "bench":
bench("./...")
bench("./lib/...", "./cmd/...")
case "assets":
assets()
@ -126,9 +126,6 @@ func main() {
case "transifex":
transifex()
case "deps":
deps()
case "tar":
buildTar()
@ -180,13 +177,13 @@ func setup() {
runPrint("go", "get", "-v", "golang.org/x/tools/cmd/cover")
runPrint("go", "get", "-v", "golang.org/x/tools/cmd/vet")
runPrint("go", "get", "-v", "golang.org/x/net/html")
runPrint("go", "get", "-v", "github.com/tools/godep")
runPrint("go", "get", "-v", "github.com/FiloSottile/gvt")
runPrint("go", "get", "-v", "github.com/axw/gocov/gocov")
runPrint("go", "get", "-v", "github.com/AlekSi/gocov-xml")
runPrint("go", "get", "-v", "bitbucket.org/tebeka/go2xunit")
}
func test(pkg string) {
func test(pkgs ...string) {
setBuildEnv()
useRace := runtime.GOARCH == "amd64"
switch runtime.GOOS {
@ -196,15 +193,15 @@ func test(pkg string) {
}
if useRace {
runPrint("go", "test", "-short", "-race", "-timeout", "60s", pkg)
runPrint("go", append([]string{"test", "-short", "-race", "-timeout", "60s"}, pkgs...)...)
} else {
runPrint("go", "test", "-short", "-timeout", "60s", pkg)
runPrint("go", append([]string{"test", "-short", "-timeout", "60s"}, pkgs...)...)
}
}
func bench(pkg string) {
func bench(pkgs ...string) {
setBuildEnv()
runPrint("go", "test", "-run", "NONE", "-bench", ".", pkg)
runPrint("go", append([]string{"test", "-run", "NONE", "-bench", "."}, pkgs...)...)
}
func install(pkg string, tags []string) {
@ -405,13 +402,7 @@ func listFiles(dir string) []string {
func setBuildEnv() {
os.Setenv("GOOS", goos)
os.Setenv("GOARCH", goarch)
wd, err := os.Getwd()
if err != nil {
log.Println("Warning: can't determine current dir:", err)
log.Println("Build might not work as expected")
}
os.Setenv("GOPATH", fmt.Sprintf("%s%c%s", filepath.Join(wd, "Godeps", "_workspace"), os.PathListSeparator, os.Getenv("GOPATH")))
log.Println("GOPATH=" + os.Getenv("GOPATH"))
os.Setenv("GO15VENDOREXPERIMENT", "1")
}
func assets() {
@ -441,13 +432,8 @@ func transifex() {
assets()
}
func deps() {
rmr("Godeps")
runPrint("godep", "save", "./cmd/...")
}
func clean() {
rmr("bin", "Godeps/_workspace/pkg", "Godeps/_workspace/bin")
rmr("bin")
rmr(filepath.Join(os.Getenv("GOPATH"), fmt.Sprintf("pkg/%s_%s/github.com/syncthing", goos, goarch)))
}

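A note on the refactor above: test and bench now take variadic package patterns, and the full argument vector is built by splicing those patterns onto the fixed flags with append. A minimal standalone sketch of the same pattern, with a runPrint stand-in for the real helper in build.go:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// runPrint is a stand-in for build.go's helper: print the command, then run it.
func runPrint(cmd string, args ...string) {
	fmt.Println(cmd, args)
	c := exec.Command(cmd, args...)
	c.Stdout = os.Stdout
	c.Stderr = os.Stderr
	if err := c.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

// test mirrors the refactored function: fixed flags first, then the
// caller-supplied package patterns spliced on via append and expanded
// with the trailing ellipsis.
func test(pkgs ...string) {
	os.Setenv("GO15VENDOREXPERIMENT", "1") // as setBuildEnv now does
	runPrint("go", append([]string{"test", "-short", "-timeout", "60s"}, pkgs...)...)
}

func main() {
	test("./lib/...", "./cmd/...") // explicit patterns keep vendor/ out of the test run
}
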
View File

@ -27,10 +27,6 @@ case "${1:-default}" in
build "$@"
;;
deps)
build "$@"
;;
assets)
build "$@"
;;
@ -106,7 +102,7 @@ case "${1:-default}" in
fail=0
# For every package in the repo
for dir in $(go list ./...) ; do
for dir in $(go list ./lib/... ./cmd/...) ; do
# run the tests
GOPATH="$(pwd)/Godeps/_workspace:$GOPATH" go test -race -coverprofile=profile.out $dir
if [ -f profile.out ] ; then

63
vendor/github.com/bkaradzic/go-lz4/lz4_test.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
package lz4
import (
"bytes"
"io/ioutil"
"testing"
)
var testfile, _ = ioutil.ReadFile("testdata/pg1661.txt")
func roundtrip(t *testing.T, input []byte) {
dst, err := Encode(nil, input)
if err != nil {
t.Errorf("got error during compression: %s", err)
}
output, err := Decode(nil, dst)
if err != nil {
t.Errorf("got error during decompress: %s", err)
}
if !bytes.Equal(output, input) {
t.Errorf("roundtrip failed")
}
}
func TestEmpty(t *testing.T) {
roundtrip(t, nil)
}
func TestLengths(t *testing.T) {
for i := 0; i < 1024; i++ {
roundtrip(t, testfile[:i])
}
for i := 1024; i < 4096; i += 23 {
roundtrip(t, testfile[:i])
}
}
func TestWords(t *testing.T) {
roundtrip(t, testfile)
}
func BenchmarkLZ4Encode(b *testing.B) {
for i := 0; i < b.N; i++ {
Encode(nil, testfile)
}
}
func BenchmarkLZ4Decode(b *testing.B) {
var compressed, _ = Encode(nil, testfile)
b.ResetTimer()
for i := 0; i < b.N; i++ {
Decode(nil, compressed)
}
}

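For reference, the API this test exercises is just Encode and Decode; a minimal roundtrip sketch, assuming the vendored import path:

package main

import (
	"bytes"
	"fmt"

	lz4 "github.com/bkaradzic/go-lz4"
)

func main() {
	input := []byte("hello, hello, hello, hello, hello")
	compressed, err := lz4.Encode(nil, input) // nil dst: Encode allocates
	if err != nil {
		panic(err)
	}
	output, err := lz4.Decode(nil, compressed)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(input, output)) // true
}
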
13052
vendor/github.com/bkaradzic/go-lz4/testdata/pg1661.txt generated vendored Normal file

File diff suppressed because it is too large

59
vendor/github.com/calmh/luhn/luhn_test.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright (C) 2014 Jakob Borg
package luhn_test
import (
"testing"
"github.com/calmh/luhn"
)
func TestGenerate(t *testing.T) {
// Base 6 Luhn
a := luhn.Alphabet("abcdef")
c, err := a.Generate("abcdef")
if err != nil {
t.Fatal(err)
}
if c != 'e' {
t.Errorf("Incorrect check digit %c != e", c)
}
// Base 10 Luhn
a = luhn.Alphabet("0123456789")
c, err = a.Generate("7992739871")
if err != nil {
t.Fatal(err)
}
if c != '3' {
t.Errorf("Incorrect check digit %c != 3", c)
}
}
func TestInvalidString(t *testing.T) {
a := luhn.Alphabet("ABC")
_, err := a.Generate("7992739871")
t.Log(err)
if err == nil {
t.Error("Unexpected nil error")
}
}
func TestBadAlphabet(t *testing.T) {
a := luhn.Alphabet("01234566789")
_, err := a.Generate("7992739871")
t.Log(err)
if err == nil {
t.Error("Unexpected nil error")
}
}
func TestValidate(t *testing.T) {
a := luhn.Alphabet("abcdef")
if !a.Validate("abcdefe") {
t.Errorf("Incorrect validation response for abcdefe")
}
if a.Validate("abcdefd") {
t.Errorf("Incorrect validation response for abcdefd")
}
}

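As the tests show, the package computes Luhn check characters over arbitrary alphabets, not just base 10. A short usage sketch built from the same calls:

package main

import (
	"fmt"

	"github.com/calmh/luhn"
)

func main() {
	a := luhn.Alphabet("0123456789")
	c, err := a.Generate("7992739871")
	if err != nil {
		panic(err)
	}
	fmt.Printf("check digit: %c\n", c)     // '3', per TestGenerate above
	fmt.Println(a.Validate("79927398713")) // true: the input plus its check digit
}
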
52
vendor/github.com/calmh/xdr/bench_test.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr_test
import "testing"
type XDRBenchStruct struct {
I1 uint64
I2 uint32
I3 uint16
I4 uint8
Bs0 []byte // max:128
Bs1 []byte
Is0 []int32
S0 string // max:128
S1 string
}
var res []byte // not to be optimized away
var s = XDRBenchStruct{
I1: 42,
I2: 43,
I3: 44,
I4: 45,
Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
Is0: []int32{23, 43},
S0: "Hello World! String one.",
S1: "Hello World! String two.",
}
func BenchmarkThisMarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
res, _ = s.MarshalXDR()
}
b.ReportAllocs()
}
func BenchmarkThisUnmarshal(b *testing.B) {
bs := s.MustMarshalXDR()
var t XDRBenchStruct
for i := 0; i < b.N; i++ {
err := t.UnmarshalXDR(bs)
if err != nil {
b.Fatal(err)
}
}
b.ReportAllocs()
}

140
vendor/github.com/calmh/xdr/bench_xdr_test.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package xdr_test
import (
"github.com/calmh/xdr"
)
/*
XDRBenchStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ I1 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| I2 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 16 zero bits | I3 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 24 zero bits | I4 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Bs0 (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Bs1 (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of Is0 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
| Is0 (n items) |
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S0 (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S1 (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct XDRBenchStruct {
unsigned hyper I1;
unsigned int I2;
unsigned int I3;
unsigned int I4;
opaque Bs0<128>;
opaque Bs1<>;
int Is0<>;
string S0<128>;
string S1<>;
}
*/
func (o XDRBenchStruct) XDRSize() int {
return 8 + 4 + 4 + 4 +
4 + len(o.Bs0) + xdr.Padding(len(o.Bs0)) +
4 + len(o.Bs1) + xdr.Padding(len(o.Bs1)) +
4 + len(o.Is0)*4 +
4 + len(o.S0) + xdr.Padding(len(o.S0)) +
4 + len(o.S1) + xdr.Padding(len(o.S1))
}
func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o XDRBenchStruct) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o XDRBenchStruct) MarshalXDRInto(m *xdr.Marshaller) error {
m.MarshalUint64(o.I1)
m.MarshalUint32(o.I2)
m.MarshalUint16(o.I3)
m.MarshalUint8(o.I4)
if l := len(o.Bs0); l > 128 {
return xdr.ElementSizeExceeded("Bs0", l, 128)
}
m.MarshalBytes(o.Bs0)
m.MarshalBytes(o.Bs1)
m.MarshalUint32(uint32(len(o.Is0)))
for i := range o.Is0 {
m.MarshalUint32(uint32(o.Is0[i]))
}
if l := len(o.S0); l > 128 {
return xdr.ElementSizeExceeded("S0", l, 128)
}
m.MarshalString(o.S0)
m.MarshalString(o.S1)
return m.Error
}
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *XDRBenchStruct) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.I1 = u.UnmarshalUint64()
o.I2 = u.UnmarshalUint32()
o.I3 = u.UnmarshalUint16()
o.I4 = u.UnmarshalUint8()
o.Bs0 = u.UnmarshalBytesMax(128)
o.Bs1 = u.UnmarshalBytes()
_Is0Size := int(u.UnmarshalUint32())
if _Is0Size < 0 {
return xdr.ElementSizeExceeded("Is0", _Is0Size, 0)
} else if _Is0Size == 0 {
o.Is0 = nil
} else {
if _Is0Size <= len(o.Is0) {
o.Is0 = o.Is0[:_Is0Size]
} else {
o.Is0 = make([]int32, _Is0Size)
}
for i := range o.Is0 {
o.Is0[i] = int32(u.UnmarshalUint32())
}
}
o.S0 = u.UnmarshalStringMax(128)
o.S1 = u.UnmarshalString()
return u.Error
}

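The generated marshalling code above is composed entirely from the package's low-level Marshaller and Unmarshaller primitives. A hand-written sketch of the same mechanics, sizing the buffer the way the generated XDRSize does (fixed-width words plus length prefix and padded data for strings):

package main

import (
	"fmt"

	"github.com/calmh/xdr"
)

func main() {
	s := "hello"
	// uint32 (4) + string length prefix (4) + data + padding to a
	// four-byte boundary, mirroring the XDRSize arithmetic above.
	buf := make([]byte, 4+4+len(s)+xdr.Padding(len(s)))

	m := &xdr.Marshaller{Data: buf}
	m.MarshalUint32(42)
	m.MarshalString(s)
	if m.Error != nil {
		panic(m.Error)
	}

	u := &xdr.Unmarshaller{Data: buf}
	fmt.Println(u.UnmarshalUint32()) // 42
	fmt.Println(u.UnmarshalString()) // hello
}
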
241
vendor/github.com/calmh/xdr/encdec_test.go generated vendored Normal file
View File

@ -0,0 +1,241 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package xdr_test
import (
"bytes"
"io"
"log"
"math/rand"
"reflect"
"testing"
"testing/quick"
"github.com/calmh/xdr"
)
// Contains all supported types
type TestStruct struct {
B bool
I int
I8 int8
UI8 uint8
I16 int16
UI16 uint16
I32 int32
UI32 uint32
I64 int64
UI64 uint64
BS []byte // max:1024
S string // max:1024
C Opaque
SS []string // max:1024
ES EmptyStruct
OS OtherStruct
OSs []OtherStruct
}
func (s1 TestStruct) TestEquals(s2 TestStruct) bool {
if s1.B != s2.B {
log.Printf("B differ; %v != %v", s1.B, s2.B)
return false
}
if s1.I != s2.I {
log.Printf("I differ; %d != %d", s1.I, s2.I)
return false
}
if s1.I8 != s2.I8 {
log.Printf("I8 differ; %d != %d", s1.I8, s2.I8)
return false
}
if s1.UI8 != s2.UI8 {
log.Printf("UI8 differ; %d != %d", s1.UI8, s2.UI8)
return false
}
if s1.I16 != s2.I16 {
log.Printf("I16 differ; %d != %d", s1.I16, s2.I16)
return false
}
if s1.UI16 != s2.UI16 {
log.Printf("UI16 differ; %d != %d", s1.UI16, s2.UI16)
return false
}
if s1.I32 != s2.I32 {
log.Printf("I32 differ; %d != %d", s1.I32, s2.I32)
return false
}
if s1.UI32 != s2.UI32 {
log.Printf("UI32 differ; %d != %d", s1.UI32, s2.UI32)
return false
}
if s1.I64 != s2.I64 {
log.Printf("I64 differ; %d != %d", s1.I64, s2.I64)
return false
}
if s1.UI64 != s2.UI64 {
log.Printf("UI64 differ; %d != %d", s1.UI64, s2.UI64)
return false
}
if !bytes.Equal(s1.BS, s2.BS) {
log.Println("BS differ")
return false
}
if s1.S != s2.S {
log.Printf("S differ; %q != %q", s1.S, s2.S)
return false
}
if s1.C != s2.C {
log.Printf("C differ; %q != %q", s1.C, s2.C)
return false
}
if len(s1.SS) != len(s2.SS) {
log.Printf("len(SS) differ; %q != %q", len(s1.SS), len(s2.SS))
return false
}
for i := range s1.SS {
if s1.SS[i] != s2.SS[i] {
log.Printf("SS[%d] differ; %q != %q", i, s1.SS[i], s2.SS[i])
return false
}
}
if s1.OS != s2.OS {
log.Printf("OS differ; %q != %q", s1.OS, s2.OS)
return false
}
if len(s1.OSs) != len(s2.OSs) {
log.Printf("len(OSs) differ; %q != %q", len(s1.OSs), len(s2.OSs))
return false
}
for i := range s1.OSs {
if s1.OSs[i] != s2.OSs[i] {
log.Printf("OSs[%d] differ; %q != %q", i, s1.OSs[i], s2.OSs[i])
return false
}
}
return true
}
type EmptyStruct struct {
}
type OtherStruct struct {
F1 uint32
F2 string
}
type Opaque [32]byte
func (u *Opaque) XDRSize() int {
return 32
}
func (u *Opaque) MarshalXDRInto(m *xdr.Marshaller) error {
m.MarshalRaw(u[:])
return m.Error
}
func (o *Opaque) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
copy((*o)[:], u.UnmarshalRaw(32))
return u.Error
}
func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
var u Opaque
for i := range u[:] {
u[i] = byte(rand.Int())
}
return reflect.ValueOf(u)
}
func TestEncDec(t *testing.T) {
fn := func(t0 TestStruct) bool {
bs, err := t0.MarshalXDR()
if err != nil {
t.Fatal(err)
}
var t1 TestStruct
err = t1.UnmarshalXDR(bs)
if err != nil {
t.Fatal(err)
}
return t0.TestEquals(t1)
}
if err := quick.Check(fn, nil); err != nil {
t.Error(err)
}
}
func TestMarshalShortBuffer(t *testing.T) {
var s TestStruct
buf := make([]byte, s.XDRSize())
if err := s.MarshalXDRInto(&xdr.Marshaller{Data: buf}); err != nil {
t.Fatal("Unexpected error", err)
}
if err := s.MarshalXDRInto(&xdr.Marshaller{Data: buf[1:]}); err != io.ErrShortBuffer {
t.Fatal("Expected io.ErrShortBuffer, got", err)
}
}
func TestUnmarshalUnexpectedEOF(t *testing.T) {
var s TestStruct
buf := make([]byte, s.XDRSize())
if err := s.MarshalXDRInto(&xdr.Marshaller{Data: buf}); err != nil {
t.Fatal("Unexpected error", err)
}
if err := s.UnmarshalXDR(buf[:len(buf)-1]); err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u := &xdr.Unmarshaller{Data: buf[:3]}
u.UnmarshalRaw(4)
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u = &xdr.Unmarshaller{Data: buf[:3]}
u.UnmarshalString()
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u = &xdr.Unmarshaller{Data: buf[:3]}
u.UnmarshalBytes()
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u = &xdr.Unmarshaller{Data: buf[:3]}
u.UnmarshalBool()
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u = &xdr.Unmarshaller{Data: buf[:3]}
u.UnmarshalUint8()
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u = &xdr.Unmarshaller{Data: buf[:3]}
u.UnmarshalUint16()
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u = &xdr.Unmarshaller{Data: buf[:3]}
u.UnmarshalUint32()
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
u = &xdr.Unmarshaller{Data: buf[:7]}
u.UnmarshalUint64()
if err := u.Error; err != io.ErrUnexpectedEOF {
t.Fatal("Expected io.ErrUnexpectedEOF, got", err)
}
}

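The Opaque type above is the template for plugging a custom type into this package: implement XDRSize, MarshalXDRInto, and UnmarshalXDRFrom. A minimal sketch with a hypothetical fixed-size Token type:

package main

import (
	"fmt"

	"github.com/calmh/xdr"
)

// Token is a hypothetical eight-byte opaque value, modeled on Opaque above.
type Token [8]byte

func (t *Token) XDRSize() int { return 8 }

func (t *Token) MarshalXDRInto(m *xdr.Marshaller) error {
	m.MarshalRaw(t[:])
	return m.Error
}

func (t *Token) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
	copy(t[:], u.UnmarshalRaw(8))
	return u.Error
}

func main() {
	t := Token{1, 2, 3, 4, 5, 6, 7, 8}
	buf := make([]byte, t.XDRSize())
	if err := t.MarshalXDRInto(&xdr.Marshaller{Data: buf}); err != nil {
		panic(err)
	}
	var t2 Token
	if err := t2.UnmarshalXDRFrom(&xdr.Unmarshaller{Data: buf}); err != nil {
		panic(err)
	}
	fmt.Println(t == t2) // true
}
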
319
vendor/github.com/calmh/xdr/encdec_xdr_test.go generated vendored Normal file
View File

@ -0,0 +1,319 @@
// ************************************************************
// This file is automatically generated by genxdr. Do not edit.
// ************************************************************
package xdr_test
import (
"github.com/calmh/xdr"
)
/*
TestStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| B (V=0 or 1) |V|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ int Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 24 zero bits | I8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 24 zero bits | UI8 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 16 zero bits | I16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| 16 zero bits | UI16 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| I32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| UI32 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ I64 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ UI64 (64 bits) +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ BS (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ S (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Opaque Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of SS |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
/ /
\ SS (length + padded data) \
/ /
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ EmptyStruct Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ OtherStruct Structure \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Number of OSs |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ Zero or more OtherStruct Structures \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct TestStruct {
bool B;
int I;
int I8;
unsigned int UI8;
int I16;
unsigned int UI16;
int I32;
unsigned int UI32;
hyper I64;
unsigned hyper UI64;
opaque BS<1024>;
string S<1024>;
Opaque C;
string SS<1024>;
EmptyStruct ES;
OtherStruct OS;
OtherStruct OSs<>;
}
*/
func (o TestStruct) XDRSize() int {
return 4 + 8 + 4 + 4 + 4 + 4 + 4 + 4 + 8 + 8 +
4 + len(o.BS) + xdr.Padding(len(o.BS)) +
4 + len(o.S) + xdr.Padding(len(o.S)) +
o.C.XDRSize() +
4 + xdr.SizeOfSlice(o.SS) +
o.ES.XDRSize() +
o.OS.XDRSize() +
4 + xdr.SizeOfSlice(o.OSs)
}
func (o TestStruct) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o TestStruct) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o TestStruct) MarshalXDRInto(m *xdr.Marshaller) error {
m.MarshalBool(o.B)
m.MarshalUint64(uint64(o.I))
m.MarshalUint8(uint8(o.I8))
m.MarshalUint8(o.UI8)
m.MarshalUint16(uint16(o.I16))
m.MarshalUint16(o.UI16)
m.MarshalUint32(uint32(o.I32))
m.MarshalUint32(o.UI32)
m.MarshalUint64(uint64(o.I64))
m.MarshalUint64(o.UI64)
if l := len(o.BS); l > 1024 {
return xdr.ElementSizeExceeded("BS", l, 1024)
}
m.MarshalBytes(o.BS)
if l := len(o.S); l > 1024 {
return xdr.ElementSizeExceeded("S", l, 1024)
}
m.MarshalString(o.S)
if err := o.C.MarshalXDRInto(m); err != nil {
return err
}
if l := len(o.SS); l > 1024 {
return xdr.ElementSizeExceeded("SS", l, 1024)
}
m.MarshalUint32(uint32(len(o.SS)))
for i := range o.SS {
m.MarshalString(o.SS[i])
}
if err := o.ES.MarshalXDRInto(m); err != nil {
return err
}
if err := o.OS.MarshalXDRInto(m); err != nil {
return err
}
m.MarshalUint32(uint32(len(o.OSs)))
for i := range o.OSs {
if err := o.OSs[i].MarshalXDRInto(m); err != nil {
return err
}
}
return m.Error
}
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *TestStruct) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.B = u.UnmarshalBool()
o.I = int(u.UnmarshalUint64())
o.I8 = int8(u.UnmarshalUint8())
o.UI8 = u.UnmarshalUint8()
o.I16 = int16(u.UnmarshalUint16())
o.UI16 = u.UnmarshalUint16()
o.I32 = int32(u.UnmarshalUint32())
o.UI32 = u.UnmarshalUint32()
o.I64 = int64(u.UnmarshalUint64())
o.UI64 = u.UnmarshalUint64()
o.BS = u.UnmarshalBytesMax(1024)
o.S = u.UnmarshalStringMax(1024)
(&o.C).UnmarshalXDRFrom(u)
_SSSize := int(u.UnmarshalUint32())
if _SSSize < 0 {
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
} else if _SSSize == 0 {
o.SS = nil
} else {
if _SSSize > 1024 {
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
}
if _SSSize <= len(o.SS) {
for i := _SSSize; i < len(o.SS); i++ {
o.SS[i] = ""
}
o.SS = o.SS[:_SSSize]
} else {
o.SS = make([]string, _SSSize)
}
for i := range o.SS {
o.SS[i] = u.UnmarshalString()
}
}
(&o.ES).UnmarshalXDRFrom(u)
(&o.OS).UnmarshalXDRFrom(u)
_OSsSize := int(u.UnmarshalUint32())
if _OSsSize < 0 {
return xdr.ElementSizeExceeded("OSs", _OSsSize, 0)
} else if _OSsSize == 0 {
o.OSs = nil
} else {
if _OSsSize <= len(o.OSs) {
o.OSs = o.OSs[:_OSsSize]
} else {
o.OSs = make([]OtherStruct, _OSsSize)
}
for i := range o.OSs {
(&o.OSs[i]).UnmarshalXDRFrom(u)
}
}
return u.Error
}
/*
EmptyStruct Structure:
(contains no fields)
struct EmptyStruct {
}
*/
func (o EmptyStruct) XDRSize() int {
return 0
}
func (o EmptyStruct) MarshalXDR() ([]byte, error) {
return nil, nil
}
func (o EmptyStruct) MustMarshalXDR() []byte {
return nil
}
func (o EmptyStruct) MarshalXDRInto(m *xdr.Marshaller) error {
return nil
}
func (o *EmptyStruct) UnmarshalXDR(bs []byte) error {
return nil
}
func (o *EmptyStruct) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
return nil
}
/*
OtherStruct Structure:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| F1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/ /
\ F2 (length + padded data) \
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
struct OtherStruct {
unsigned int F1;
string F2<>;
}
*/
func (o OtherStruct) XDRSize() int {
return 4 +
4 + len(o.F2) + xdr.Padding(len(o.F2))
}
func (o OtherStruct) MarshalXDR() ([]byte, error) {
buf := make([]byte, o.XDRSize())
m := &xdr.Marshaller{Data: buf}
return buf, o.MarshalXDRInto(m)
}
func (o OtherStruct) MustMarshalXDR() []byte {
bs, err := o.MarshalXDR()
if err != nil {
panic(err)
}
return bs
}
func (o OtherStruct) MarshalXDRInto(m *xdr.Marshaller) error {
m.MarshalUint32(o.F1)
m.MarshalString(o.F2)
return m.Error
}
func (o *OtherStruct) UnmarshalXDR(bs []byte) error {
u := &xdr.Unmarshaller{Data: bs}
return o.UnmarshalXDRFrom(u)
}
func (o *OtherStruct) UnmarshalXDRFrom(u *xdr.Unmarshaller) error {
o.F1 = u.UnmarshalUint32()
o.F2 = u.UnmarshalString()
return u.Error
}

View File

@ -11,4 +11,5 @@
Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

View File

@ -32,5 +32,6 @@ Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>

View File

@ -0,0 +1,74 @@
/*
To build the snappytool binary:
g++ main.cpp /usr/lib/libsnappy.a -o snappytool
*/
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "snappy.h"
#define N 1000000
char dst[N];
char src[N];
int main(int argc, char** argv) {
// Parse args.
if (argc != 2) {
fprintf(stderr, "exactly one of -d or -e must be given\n");
return 1;
}
bool decode = strcmp(argv[1], "-d") == 0;
bool encode = strcmp(argv[1], "-e") == 0;
if (decode == encode) {
fprintf(stderr, "exactly one of -d or -e must be given\n");
return 1;
}
// Read all of stdin into src[:s].
size_t s = 0;
while (1) {
if (s == N) {
fprintf(stderr, "input too large\n");
return 1;
}
ssize_t n = read(0, src+s, N-s);
if (n == 0) {
break;
}
if (n < 0) {
fprintf(stderr, "read error: %s\n", strerror(errno));
// TODO: handle EAGAIN, EINTR?
return 1;
}
s += n;
}
// Encode or decode src[:s] to dst[:d], and write to stdout.
size_t d = 0;
if (encode) {
if (N < snappy::MaxCompressedLength(s)) {
fprintf(stderr, "input too large after encoding\n");
return 1;
}
snappy::RawCompress(src, s, dst, &d);
} else {
if (!snappy::GetUncompressedLength(src, s, &d)) {
fprintf(stderr, "could not get uncompressed length\n");
return 1;
}
if (N < d) {
fprintf(stderr, "input too large after decoding\n");
return 1;
}
if (!snappy::RawUncompress(src, s, dst)) {
fprintf(stderr, "input was not valid Snappy-compressed data\n");
return 1;
}
}
write(1, dst, d);
return 0;
}

View File

@ -17,6 +17,9 @@ var (
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
errUnsupportedCopy4Tag = errors.New("snappy: unsupported COPY_4 tag")
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// DecodedLen returns the length of the decoded block.
@ -40,96 +43,36 @@ func decodedLen(src []byte) (blockLen, headerLen int, err error) {
return int(v), n, nil
}
const (
decodeErrCodeCorrupt = 1
decodeErrCodeUnsupportedLiteralLength = 2
decodeErrCodeUnsupportedCopy4Tag = 3
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
if dLen <= len(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
var d, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if s > len(src) {
switch decode(dst, src[s:]) {
case 0:
return dst, nil
case decodeErrCodeUnsupportedLiteralLength:
return nil, errUnsupportedLiteralLength
case decodeErrCodeUnsupportedCopy4Tag:
return nil, errUnsupportedCopy4Tag
}
return nil, ErrCorrupt
}
x = uint(src[s-1])
case x == 61:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-2]) | uint(src[s-1])<<8
case x == 62:
s += 4
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
case x == 63:
s += 5
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
}
length = int(x + 1)
if length <= 0 {
return nil, errors.New("snappy: unsupported literal length")
}
if length > len(dst)-d || length > len(src)-s {
return nil, ErrCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return nil, errors.New("snappy: unsupported COPY_4 tag")
}
end := d + length
if offset > d || end > len(dst) {
return nil, ErrCorrupt
}
for ; d < end; d++ {
dst[d] = dst[d-offset]
}
}
if d != dLen {
return nil, ErrCorrupt
}
return dst[:d], nil
}
// NewReader returns a new Reader that decompresses from r, using the framing
@ -138,12 +81,12 @@ func Decode(dst, src []byte) ([]byte, error) {
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxUncompressedChunkLen),
buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
decoded: make([]byte, maxBlockSize),
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
}
}
// Reader is an io.Reader than can read Snappy-compressed bytes.
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error

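Block-format usage of the package is a single call each way: Encode allocates when dst is nil, and Decode returns an error for corrupt or unsupported input. A roundtrip sketch against the vendored package:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("Snappy, Snappy, Snappy, Snappy, Snappy")
	enc := snappy.Encode(nil, src) // block format, not the framed stream format
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, dec)) // true
}
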
10
vendor/github.com/golang/snappy/decode_amd64.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int

472
vendor/github.com/golang/snappy/decode_amd64.s generated vendored Normal file
View File

@ -0,0 +1,472 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// func decode(dst, src []byte) int
//
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(src[s-2]) | int(src[s-1])<<8
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA errUC4T
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together mean that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET
errUC4T:
// return decodeErrCodeUnsupportedCopy4Tag
MOVQ $3, ret+48(FP)
RET

96
vendor/github.com/golang/snappy/decode_other.go generated vendored Normal file
View File

@ -0,0 +1,96 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64
package snappy
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
var d, s, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length <= 0 {
return decodeErrCodeUnsupportedLiteralLength
}
if length > len(dst)-d || length > len(src)-s {
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return decodeErrCodeUnsupportedCopy4Tag
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
// the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
for end := d + length; d != end; d++ {
dst[d] = dst[d-offset]
}
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}

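The byte-by-byte loop above is the crux of copy-tag decoding: when offset is smaller than length, source and destination overlap, and running strictly forwards deliberately replicates the pattern rather than corrupting it. A tiny standalone demonstration:

package main

import "fmt"

func main() {
	// One literal byte 'a' already decoded at dst[0]; a copy with
	// offset 1 and length 4 then replicates it, as in the loop above.
	dst := []byte{'a', 0, 0, 0, 0}
	d, offset, length := 1, 1, 4
	for end := d + length; d != end; d++ {
		dst[d] = dst[d-offset]
	}
	fmt.Println(string(dst)) // "aaaaa"
}
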
403
vendor/github.com/golang/snappy/encode.go generated vendored Normal file
View File

@ -0,0 +1,403 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// maxOffset limits how far copy back-references can go, the same as the C++
// code.
const maxOffset = 1 << 15
// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
case n < 1<<16:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
case n < 1<<24:
dst[0] = 62<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
i = 4
case int64(n) < 1<<32:
dst[0] = 63<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
dst[4] = uint8(n >> 24)
i = 5
default:
panic("snappy: source buffer is too long")
}
if copy(dst[i:], lit) != len(lit) {
panic("snappy: destination buffer is too short")
}
return i + len(lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int32) int {
i := 0
for length > 0 {
x := length - 4
if 0 <= x && x < 1<<3 && offset < 1<<11 {
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
dst[i+1] = uint8(offset)
i += 2
break
}
x = length
if x > 1<<6 {
x = 1 << 6
}
dst[i+0] = uint8(x-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= x
}
return i
}
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
d += encodeBlock(dst[d:], p)
}
return dst[:d]
}
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// 0 < len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Return early if src is short.
if len(src) <= 4 {
return emitLiteral(dst, src)
}
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
const maxTableSize = 1 << 14
shift, tableSize := uint(32-8), 1<<8
for tableSize < maxTableSize && tableSize < len(src) {
shift--
tableSize *= 2
}
var table [maxTableSize]int32
// Iterate over the source bytes.
var (
s int32 // The iterator position.
t int32 // The last position with the same hash as s.
lit int32 // The start position of any pending literal bytes.
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned, look at every third byte, etc.. When a match is found,
// immediately go back to looking at every byte. This is a small loss
// (~5% performance, ~0.1% density) for compressible data due to more
// bookkeeping, but for non-compressible data (such as JPEG) it's a
// huge win since the compressor quickly "realizes" the data is
// incompressible and doesn't bother looking for matches everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip uint32 = 32
)
for uint32(s+3) < uint32(len(src)) { // The uint32 conversions catch overflow from the +3.
// Update the hash table.
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
p := &table[(h*0x1e35a7bd)>>shift]
// We need to store values in [-1, inf) in table. To save
// some initialization time, (re)use the table's zero value
// and shift the values against this zero: add 1 on writes,
// subtract 1 on reads.
t, *p = *p-1, s+1
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
s += int32(skip >> 5)
skip++
continue
}
skip = 32
// Otherwise, we have a match. First, emit any pending literal bytes.
if lit != s {
d += emitLiteral(dst[d:], src[lit:s])
}
// Extend the match to be as long as possible.
s0 := s
s, t = s+4, t+4
for int(s) < len(src) && src[s] == src[t] {
s++
t++
}
// Emit the copied bytes.
d += emitCopy(dst[d:], s-t, s-s0)
lit = s
}
// Emit any final pending literal bytes and return.
if int(lit) != len(src) {
d += emitLiteral(dst[d:], src[lit:])
}
return d
}
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
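As a sanity check on the estimate above, plugging maxBlockSize (65536, defined later in this diff) into n = 32 + n + n/6 with integer division reproduces the hard-coded maxEncodedLenOfMaxBlockSize constant of 76490, as TestMaxEncodedLenOfMaxBlockSize below confirms:

```go
package main

import "fmt"

func main() {
	const maxBlockSize = 65536
	// MaxEncodedLen's worst-case estimate, with Go's integer division.
	fmt.Println(32 + maxBlockSize + maxBlockSize/6) // 76490
}
```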
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
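A minimal usage sketch for the buffered variant; the Close call matters, since buffered data is only guaranteed to reach the underlying io.Writer after Close (or Flush):

```go
package main

import (
	"bytes"
	"log"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("hello, snappy")); err != nil {
		log.Fatal(err)
	}
	// Close flushes any buffered data into buf.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("%d framed bytes written", buf.Len())
}
```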
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
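The 12.5% rule above boils down to a single comparison; a small sketch (storeUncompressed is a hypothetical helper name, not part of the package):

```go
package main

import "fmt"

// storeUncompressed mirrors the chunk-type decision in write:
// compression must save at least one eighth (12.5%) of the input,
// otherwise the raw bytes are framed as an uncompressed chunk.
func storeUncompressed(compressedLen, uncompressedLen int) bool {
	return compressedLen >= uncompressedLen-uncompressedLen/8
}

func main() {
	fmt.Println(storeUncompressed(900, 1000)) // true: only 10% saved
	fmt.Println(storeUncompressed(870, 1000)) // false: 13% saved, keep compressed
}
```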
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}


@ -6,7 +6,7 @@
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy
package snappy // import "github.com/golang/snappy"
import (
"hash/crc32"
@ -46,9 +46,25 @@ const (
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
// part of the wire format per se, but some parts of the encoder assume
// that an offset fits into a uint16.
//
// Also, for the framing format (Writer type instead of Encode function),
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
maxUncompressedChunkLen = 65536
// that "the uncompressed data in a chunk must be no longer than 65536
// bytes".
maxBlockSize = 65536
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
// hard coded to be a const instead of a variable, so that obufLen can also
// be a const. Their equivalence is confirmed by
// TestMaxEncodedLenOfMaxBlockSize.
maxEncodedLenOfMaxBlockSize = 76490
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
const (

973
vendor/github.com/golang/snappy/snappy_test.go generated vendored Normal file

@ -0,0 +1,973 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"bytes"
"encoding/binary"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
)
var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) {
got := maxEncodedLenOfMaxBlockSize
want := MaxEncodedLen(maxBlockSize)
if got != want {
t.Fatalf("got %d, want %d", got, want)
}
}
func cmp(a, b []byte) error {
if bytes.Equal(a, b) {
return nil
}
if len(a) != len(b) {
return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
}
for i := range a {
if a[i] != b[i] {
return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
}
}
return nil
}
func roundtrip(b, ebuf, dbuf []byte) error {
d, err := Decode(dbuf, Encode(ebuf, b))
if err != nil {
return fmt.Errorf("decoding error: %v", err)
}
if err := cmp(d, b); err != nil {
return fmt.Errorf("roundtrip mismatch: %v", err)
}
return nil
}
func TestEmpty(t *testing.T) {
if err := roundtrip(nil, nil, nil); err != nil {
t.Fatal(err)
}
}
func TestSmallCopy(t *testing.T) {
for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
for i := 0; i < 32; i++ {
s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
}
}
}
}
}
func TestSmallRand(t *testing.T) {
rng := rand.New(rand.NewSource(1))
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
b[i] = uint8(rng.Intn(256))
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
}
}
}
func TestSmallRegular(t *testing.T) {
for n := 1; n < 20000; n += 23 {
b := make([]byte, n)
for i := range b {
b[i] = uint8(i%10 + 'a')
}
if err := roundtrip(b, nil, nil); err != nil {
t.Fatal(err)
}
}
}
func TestInvalidVarint(t *testing.T) {
testCases := []struct {
desc string
input string
}{{
"invalid varint, final byte has continuation bit set",
"\xff",
}, {
"invalid varint, value overflows uint64",
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00",
}, {
// https://github.com/google/snappy/blob/master/format_description.txt
// says that "the stream starts with the uncompressed length [as a
// varint] (up to a maximum of 2^32 - 1)".
"valid varint (as uint64), but value overflows uint32",
"\x80\x80\x80\x80\x10",
}}
for _, tc := range testCases {
input := []byte(tc.input)
if _, err := DecodedLen(input); err != ErrCorrupt {
t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err)
}
if _, err := Decode(nil, input); err != ErrCorrupt {
t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err)
}
}
}
func TestDecode(t *testing.T) {
lit40Bytes := make([]byte, 40)
for i := range lit40Bytes {
lit40Bytes[i] = byte(i)
}
lit40 := string(lit40Bytes)
testCases := []struct {
desc string
input string
want string
wantErr error
}{{
`decodedLen=0; valid input`,
"\x00",
"",
nil,
}, {
`decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`,
"\x03" + "\x08\xff\xff\xff",
"\xff\xff\xff",
nil,
}, {
`decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`,
"\x02" + "\x08\xff\xff\xff",
"",
ErrCorrupt,
}, {
`decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`,
"\x03" + "\x08\xff\xff",
"",
ErrCorrupt,
}, {
`decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`,
"\x28" + "\x9c" + lit40,
lit40,
nil,
}, {
`decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`,
"\x01" + "\xf0",
"",
ErrCorrupt,
}, {
`decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`,
"\x03" + "\xf0\x02\xff\xff\xff",
"\xff\xff\xff",
nil,
}, {
`decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`,
"\x01" + "\xf4\x00",
"",
ErrCorrupt,
}, {
`decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`,
"\x03" + "\xf4\x02\x00\xff\xff\xff",
"\xff\xff\xff",
nil,
}, {
`decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`,
"\x01" + "\xf8\x00\x00",
"",
ErrCorrupt,
}, {
`decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`,
"\x03" + "\xf8\x02\x00\x00\xff\xff\xff",
"\xff\xff\xff",
nil,
}, {
`decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`,
"\x01" + "\xfc\x00\x00\x00",
"",
ErrCorrupt,
}, {
`decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`,
"\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
"",
ErrCorrupt,
}, {
`decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`,
"\x04" + "\xfc\x02\x00\x00\x00\xff",
"",
ErrCorrupt,
}, {
`decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`,
"\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
"\xff\xff\xff",
nil,
}, {
`decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`,
"\x04" + "\x01",
"",
ErrCorrupt,
}, {
`decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`,
"\x04" + "\x02\x00",
"",
ErrCorrupt,
}, {
`decodedLen=4; tagCopy4; unsupported COPY_4 tag`,
"\x04" + "\x03\x00\x00\x00\x00",
"",
errUnsupportedCopy4Tag,
}, {
`decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`,
"\x04" + "\x0cabcd",
"abcd",
nil,
}, {
`decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`,
"\x0d" + "\x0cabcd" + "\x15\x04",
"abcdabcdabcda",
nil,
}, {
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`,
"\x08" + "\x0cabcd" + "\x01\x04",
"abcdabcd",
nil,
}, {
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`,
"\x08" + "\x0cabcd" + "\x01\x02",
"abcdcdcd",
nil,
}, {
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`,
"\x08" + "\x0cabcd" + "\x01\x01",
"abcddddd",
nil,
}, {
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`,
"\x08" + "\x0cabcd" + "\x01\x00",
"",
ErrCorrupt,
}, {
`decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`,
"\x09" + "\x0cabcd" + "\x01\x04",
"",
ErrCorrupt,
}, {
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`,
"\x08" + "\x0cabcd" + "\x01\x05",
"",
ErrCorrupt,
}, {
`decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`,
"\x07" + "\x0cabcd" + "\x01\x04",
"",
ErrCorrupt,
}, {
`decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`,
"\x06" + "\x0cabcd" + "\x06\x03\x00",
"abcdbc",
nil,
}}
const (
// notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
// not present in either the input or the output. It is written to dBuf
// to check that Decode does not write bytes past the end of
// dBuf[:dLen].
//
// The magic number 37 was chosen because it is prime. A more 'natural'
// number like 32 might lead to a false negative if, for example, a
// byte was incorrectly copied 4*8 bytes later.
notPresentBase = 0xa0
notPresentLen = 37
)
var dBuf [100]byte
loop:
for i, tc := range testCases {
input := []byte(tc.input)
for _, x := range input {
if notPresentBase <= x && x < notPresentBase+notPresentLen {
t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input)
continue loop
}
}
dLen, n := binary.Uvarint(input)
if n <= 0 {
t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc)
continue
}
if dLen > uint64(len(dBuf)) {
t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen)
continue
}
for j := range dBuf {
dBuf[j] = byte(notPresentBase + j%notPresentLen)
}
g, gotErr := Decode(dBuf[:], input)
if got := string(g); got != tc.want || gotErr != tc.wantErr {
t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v",
i, tc.desc, got, gotErr, tc.want, tc.wantErr)
continue
}
for j, x := range dBuf {
if uint64(j) < dLen {
continue
}
if w := byte(notPresentBase + j%notPresentLen); x != w {
t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x",
i, tc.desc, j, x, w, dBuf)
continue loop
}
}
}
}
// TestDecodeLengthOffset tests decoding an encoding of the form literal +
// copy-length-offset + literal. For example: "abcdefghijkl" + "efghij" + "AB".
func TestDecodeLengthOffset(t *testing.T) {
const (
prefix = "abcdefghijklmnopqr"
suffix = "ABCDEFGHIJKLMNOPQR"
// notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
// not present in either the input or the output. It is written to
// gotBuf to check that Decode does not write bytes past the end of
// gotBuf[:totalLen].
//
// The magic number 37 was chosen because it is prime. A more 'natural'
// number like 32 might lead to a false negative if, for example, a
// byte was incorrectly copied 4*8 bytes later.
notPresentBase = 0xa0
notPresentLen = 37
)
var gotBuf, wantBuf, inputBuf [128]byte
for length := 1; length <= 18; length++ {
for offset := 1; offset <= 18; offset++ {
loop:
for suffixLen := 0; suffixLen <= 18; suffixLen++ {
totalLen := len(prefix) + length + suffixLen
inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen))
inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1)
inputLen++
inputLen += copy(inputBuf[inputLen:], prefix)
inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1)
inputBuf[inputLen+1] = byte(offset)
inputBuf[inputLen+2] = 0x00
inputLen += 3
if suffixLen > 0 {
inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1)
inputLen++
inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen])
}
input := inputBuf[:inputLen]
for i := range gotBuf {
gotBuf[i] = byte(notPresentBase + i%notPresentLen)
}
got, err := Decode(gotBuf[:], input)
if err != nil {
t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err)
continue
}
wantLen := 0
wantLen += copy(wantBuf[wantLen:], prefix)
for i := 0; i < length; i++ {
wantBuf[wantLen] = wantBuf[wantLen-offset]
wantLen++
}
wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen])
want := wantBuf[:wantLen]
for _, x := range input {
if notPresentBase <= x && x < notPresentBase+notPresentLen {
t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x",
length, offset, suffixLen, x, input)
continue loop
}
}
for i, x := range gotBuf {
if i < totalLen {
continue
}
if w := byte(notPresentBase + i%notPresentLen); x != w {
t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+
"Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x",
length, offset, suffixLen, totalLen, i, x, w, gotBuf)
continue loop
}
}
for _, x := range want {
if notPresentBase <= x && x < notPresentBase+notPresentLen {
t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x",
length, offset, suffixLen, x, want)
continue loop
}
}
if !bytes.Equal(got, want) {
t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x",
length, offset, suffixLen, input, got, want)
continue
}
}
}
}
}
func TestDecodeGoldenInput(t *testing.T) {
src, err := ioutil.ReadFile("testdata/pi.txt.rawsnappy")
if err != nil {
t.Fatalf("ReadFile: %v", err)
}
got, err := Decode(nil, src)
if err != nil {
t.Fatalf("Decode: %v", err)
}
want, err := ioutil.ReadFile("testdata/pi.txt")
if err != nil {
t.Fatalf("ReadFile: %v", err)
}
if err := cmp(got, want); err != nil {
t.Fatal(err)
}
}
// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm
// described in decode_amd64.s and its claim of a 10 byte overrun worst case.
func TestSlowForwardCopyOverrun(t *testing.T) {
const base = 100
for length := 1; length < 18; length++ {
for offset := 1; offset < 18; offset++ {
highWaterMark := base
d := base
l := length
o := offset
// makeOffsetAtLeast8
for o < 8 {
if end := d + 8; highWaterMark < end {
highWaterMark = end
}
l -= o
d += o
o += o
}
// fixUpSlowForwardCopy
a := d
d += l
// finishSlowForwardCopy
for l > 0 {
if end := a + 8; highWaterMark < end {
highWaterMark = end
}
a += 8
l -= 8
}
dWant := base + length
overrun := highWaterMark - dWant
if d != dWant || overrun < 0 || 10 < overrun {
t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])",
length, offset, d, overrun, dWant)
}
}
}
}
// TestEncodeNoiseThenRepeats encodes input for which the first half is very
// incompressible and the second half is very compressible. The encoded form's
// length should be closer to 50% of the original length than 100%.
func TestEncodeNoiseThenRepeats(t *testing.T) {
for _, origLen := range []int{32 * 1024, 256 * 1024, 2048 * 1024} {
src := make([]byte, origLen)
rng := rand.New(rand.NewSource(1))
firstHalf, secondHalf := src[:origLen/2], src[origLen/2:]
for i := range firstHalf {
firstHalf[i] = uint8(rng.Intn(256))
}
for i := range secondHalf {
secondHalf[i] = uint8(i >> 8)
}
dst := Encode(nil, src)
if got, want := len(dst), origLen*3/4; got >= want {
t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want)
}
}
}
func TestFramingFormat(t *testing.T) {
// src is comprised of alternating 1e5-sized sequences of random
// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
// because it is larger than maxBlockSize (64k).
src := make([]byte, 1e6)
rng := rand.New(rand.NewSource(1))
for i := 0; i < 10; i++ {
if i%2 == 0 {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(rng.Intn(256))
}
} else {
for j := 0; j < 1e5; j++ {
src[1e5*i+j] = uint8(i)
}
}
}
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(src); err != nil {
t.Fatalf("Write: encoding: %v", err)
}
dst, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Fatalf("ReadAll: decoding: %v", err)
}
if err := cmp(dst, src); err != nil {
t.Fatal(err)
}
}
func TestWriterGoldenOutput(t *testing.T) {
buf := new(bytes.Buffer)
w := NewBufferedWriter(buf)
defer w.Close()
w.Write([]byte("abcd")) // Not compressible.
w.Flush()
w.Write(bytes.Repeat([]byte{'A'}, 100)) // Compressible.
w.Flush()
got := buf.String()
want := strings.Join([]string{
magicChunk,
"\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum).
"\x68\x10\xe6\xb6", // Checksum.
"\x61\x62\x63\x64", // Uncompressed payload: "abcd".
"\x00\x0d\x00\x00", // Compressed chunk, 13 bytes long (including 4 byte checksum).
"\x37\xcb\xbc\x9d", // Checksum.
"\x64", // Compressed payload: Uncompressed length (varint encoded): 100.
"\x00\x41", // Compressed payload: tagLiteral, length=1, "A".
"\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1.
"\x8a\x01\x00", // Compressed payload: tagCopy2, length=35, offset=1.
}, "")
if got != want {
t.Fatalf("\ngot: % x\nwant: % x", got, want)
}
}
func TestNewBufferedWriter(t *testing.T) {
// Test all 32 possible sub-sequences of these 5 input slices.
//
// Their lengths sum to 400,000, which is over 6 times the Writer ibuf
// capacity: 6 * maxBlockSize is 393,216.
inputs := [][]byte{
bytes.Repeat([]byte{'a'}, 40000),
bytes.Repeat([]byte{'b'}, 150000),
bytes.Repeat([]byte{'c'}, 60000),
bytes.Repeat([]byte{'d'}, 120000),
bytes.Repeat([]byte{'e'}, 30000),
}
loop:
for i := 0; i < 1<<uint(len(inputs)); i++ {
var want []byte
buf := new(bytes.Buffer)
w := NewBufferedWriter(buf)
for j, input := range inputs {
if i&(1<<uint(j)) == 0 {
continue
}
if _, err := w.Write(input); err != nil {
t.Errorf("i=%#02x: j=%d: Write: %v", i, j, err)
continue loop
}
want = append(want, input...)
}
if err := w.Close(); err != nil {
t.Errorf("i=%#02x: Close: %v", i, err)
continue
}
got, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Errorf("i=%#02x: ReadAll: %v", i, err)
continue
}
if err := cmp(got, want); err != nil {
t.Errorf("i=%#02x: %v", i, err)
continue
}
}
}
func TestFlush(t *testing.T) {
buf := new(bytes.Buffer)
w := NewBufferedWriter(buf)
defer w.Close()
if _, err := w.Write(bytes.Repeat([]byte{'x'}, 20)); err != nil {
t.Fatalf("Write: %v", err)
}
if n := buf.Len(); n != 0 {
t.Fatalf("before Flush: %d bytes were written to the underlying io.Writer, want 0", n)
}
if err := w.Flush(); err != nil {
t.Fatalf("Flush: %v", err)
}
if n := buf.Len(); n == 0 {
t.Fatalf("after Flush: %d bytes were written to the underlying io.Writer, want non-0", n)
}
}
func TestReaderReset(t *testing.T) {
gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
buf := new(bytes.Buffer)
if _, err := NewWriter(buf).Write(gold); err != nil {
t.Fatalf("Write: %v", err)
}
encoded, invalid, partial := buf.String(), "invalid", "partial"
r := NewReader(nil)
for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
if s == partial {
r.Reset(strings.NewReader(encoded))
if _, err := r.Read(make([]byte, 101)); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
continue
}
r.Reset(strings.NewReader(s))
got, err := ioutil.ReadAll(r)
switch s {
case encoded:
if err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
if err := cmp(got, gold); err != nil {
t.Errorf("#%d: %v", i, err)
continue
}
case invalid:
if err == nil {
t.Errorf("#%d: got nil error, want non-nil", i)
continue
}
}
}
}
func TestWriterReset(t *testing.T) {
gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
const n = 20
for _, buffered := range []bool{false, true} {
var w *Writer
if buffered {
w = NewBufferedWriter(nil)
defer w.Close()
} else {
w = NewWriter(nil)
}
var gots, wants [][]byte
failed := false
for i := 0; i <= n; i++ {
buf := new(bytes.Buffer)
w.Reset(buf)
want := gold[:len(gold)*i/n]
if _, err := w.Write(want); err != nil {
t.Errorf("#%d: Write: %v", i, err)
failed = true
continue
}
if buffered {
if err := w.Flush(); err != nil {
t.Errorf("#%d: Flush: %v", i, err)
failed = true
continue
}
}
got, err := ioutil.ReadAll(NewReader(buf))
if err != nil {
t.Errorf("#%d: ReadAll: %v", i, err)
failed = true
continue
}
gots = append(gots, got)
wants = append(wants, want)
}
if failed {
continue
}
for i := range gots {
if err := cmp(gots[i], wants[i]); err != nil {
t.Errorf("#%d: %v", i, err)
}
}
}
}
func TestWriterResetWithoutFlush(t *testing.T) {
buf0 := new(bytes.Buffer)
buf1 := new(bytes.Buffer)
w := NewBufferedWriter(buf0)
if _, err := w.Write([]byte("xxx")); err != nil {
t.Fatalf("Write #0: %v", err)
}
// Note that we don't Flush the Writer before calling Reset.
w.Reset(buf1)
if _, err := w.Write([]byte("yyy")); err != nil {
t.Fatalf("Write #1: %v", err)
}
if err := w.Flush(); err != nil {
t.Fatalf("Flush: %v", err)
}
got, err := ioutil.ReadAll(NewReader(buf1))
if err != nil {
t.Fatalf("ReadAll: %v", err)
}
if err := cmp(got, []byte("yyy")); err != nil {
t.Fatal(err)
}
}
type writeCounter int
func (c *writeCounter) Write(p []byte) (int, error) {
*c++
return len(p), nil
}
// TestNumUnderlyingWrites tests that each Writer flush only makes one or two
// Write calls on its underlying io.Writer, depending on whether or not the
// flushed buffer was compressible.
func TestNumUnderlyingWrites(t *testing.T) {
testCases := []struct {
input []byte
want int
}{
{bytes.Repeat([]byte{'x'}, 100), 1},
{bytes.Repeat([]byte{'y'}, 100), 1},
{[]byte("ABCDEFGHIJKLMNOPQRST"), 2},
}
var c writeCounter
w := NewBufferedWriter(&c)
defer w.Close()
for i, tc := range testCases {
c = 0
if _, err := w.Write(tc.input); err != nil {
t.Errorf("#%d: Write: %v", i, err)
continue
}
if err := w.Flush(); err != nil {
t.Errorf("#%d: Flush: %v", i, err)
continue
}
if int(c) != tc.want {
t.Errorf("#%d: got %d underlying writes, want %d", i, c, tc.want)
continue
}
}
}
func benchDecode(b *testing.B, src []byte) {
encoded := Encode(nil, src)
// Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
Decode(src, encoded)
}
}
func benchEncode(b *testing.B, src []byte) {
// Bandwidth is in amount of uncompressed data.
b.SetBytes(int64(len(src)))
dst := make([]byte, MaxEncodedLen(len(src)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
Encode(dst, src)
}
}
func readFile(b testing.TB, filename string) []byte {
src, err := ioutil.ReadFile(filename)
if err != nil {
b.Skipf("skipping benchmark: %v", err)
}
if len(src) == 0 {
b.Fatalf("%s has zero length", filename)
}
return src
}
// expand returns a slice of length n containing repeated copies of src.
func expand(src []byte, n int) []byte {
dst := make([]byte, n)
for x := dst; len(x) > 0; {
i := copy(x, src)
x = x[i:]
}
return dst
}
func benchWords(b *testing.B, n int, decode bool) {
// Note: the file is OS-language dependent so the resulting values are not
// directly comparable for non-US-English OS installations.
data := expand(readFile(b, "/usr/share/dict/words"), n)
if decode {
benchDecode(b, data)
} else {
benchEncode(b, data)
}
}
func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) }
func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) }
func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) }
func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) }
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
func BenchmarkRandomEncode(b *testing.B) {
rng := rand.New(rand.NewSource(1))
data := make([]byte, 1<<20)
for i := range data {
data[i] = uint8(rng.Intn(256))
}
benchEncode(b, data)
}
// testFiles' values are copied directly from
// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
// The label field is unused in snappy-go.
var testFiles = []struct {
label string
filename string
sizeLimit int
}{
{"html", "html", 0},
{"urls", "urls.10K", 0},
{"jpg", "fireworks.jpeg", 0},
{"jpg_200", "fireworks.jpeg", 200},
{"pdf", "paper-100k.pdf", 0},
{"html4", "html_x_4", 0},
{"txt1", "alice29.txt", 0},
{"txt2", "asyoulik.txt", 0},
{"txt3", "lcet10.txt", 0},
{"txt4", "plrabn12.txt", 0},
{"pb", "geo.protodata", 0},
{"gaviota", "kppkn.gtb", 0},
}
const (
// The benchmark data files are at this canonical URL.
benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
// They are copied to this local directory.
benchDir = "testdata/bench"
)
func downloadBenchmarkFiles(b *testing.B, basename string) (errRet error) {
filename := filepath.Join(benchDir, basename)
if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
return nil
}
if !*download {
b.Skipf("test data not found; skipping benchmark without the -download flag")
}
// Download the official snappy C++ implementation reference test data
// files for benchmarking.
if err := os.MkdirAll(benchDir, 0777); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create %s: %s", benchDir, err)
}
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to create %s: %s", filename, err)
}
defer f.Close()
defer func() {
if errRet != nil {
os.Remove(filename)
}
}()
url := benchURL + basename
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("failed to download %s: %s", url, err)
}
defer resp.Body.Close()
if s := resp.StatusCode; s != http.StatusOK {
return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
}
_, err = io.Copy(f, resp.Body)
if err != nil {
return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
}
return nil
}
func benchFile(b *testing.B, n int, decode bool) {
if err := downloadBenchmarkFiles(b, testFiles[n].filename); err != nil {
b.Fatalf("failed to download testdata: %s", err)
}
data := readFile(b, filepath.Join(benchDir, testFiles[n].filename))
if n := testFiles[n].sizeLimit; 0 < n && n < len(data) {
data = data[:n]
}
if decode {
benchDecode(b, data)
} else {
benchEncode(b, data)
}
}
// Naming convention is kept similar to what snappy's C++ implementation uses.
func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }

1
vendor/github.com/golang/snappy/testdata/pi.txt generated vendored Normal file

File diff suppressed because one or more lines are too long

Binary file not shown.


@ -8,10 +8,10 @@
package ratelimit
import (
"math"
"strconv"
"sync"
"time"
"math"
)
// Bucket represents a token bucket that fills at a predetermined rate.
@ -171,6 +171,30 @@ func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
return count
}
// Available returns the number of available tokens. It will be negative
// when there are consumers waiting for tokens. Note that if this
// returns greater than zero, it does not guarantee that calls that take
// tokens from the buffer will succeed, as the number of available
// tokens could have changed in the meantime. This method is intended
// primarily for metrics reporting and debugging.
func (tb *Bucket) Available() int64 {
return tb.available(time.Now())
}
// available is the internal version of available - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) available(now time.Time) int64 {
tb.mu.Lock()
defer tb.mu.Unlock()
tb.adjust(now)
return tb.avail
}
// Capacity returns the capacity that the bucket was created with.
func (tb *Bucket) Capacity() int64 {
return tb.capacity
}
// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
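The Rate formula is exercised by the package's own tests further down; for instance, a bucket that adds a quantum of 5 tokens every 100ms works out to 50 tokens per second:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// Rate() = 1e9 * quantum / fillInterval(ns) = 1e9 * 5 / 1e8 = 50.
	tb := ratelimit.NewBucketWithQuantum(100*time.Millisecond, 1, 5)
	fmt.Println(tb.Rate()) // 50
}
```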

389
vendor/github.com/juju/ratelimit/ratelimit_test.go generated vendored Normal file

@ -0,0 +1,389 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
package ratelimit
import (
"math"
"testing"
"time"
gc "gopkg.in/check.v1"
)
func TestPackage(t *testing.T) {
gc.TestingT(t)
}
type rateLimitSuite struct{}
var _ = gc.Suite(rateLimitSuite{})
type takeReq struct {
time time.Duration
count int64
expectWait time.Duration
}
var takeTests = []struct {
about string
fillInterval time.Duration
capacity int64
reqs []takeReq
}{{
about: "serial requests",
fillInterval: 250 * time.Millisecond,
capacity: 10,
reqs: []takeReq{{
time: 0,
count: 0,
expectWait: 0,
}, {
time: 0,
count: 10,
expectWait: 0,
}, {
time: 0,
count: 1,
expectWait: 250 * time.Millisecond,
}, {
time: 250 * time.Millisecond,
count: 1,
expectWait: 250 * time.Millisecond,
}},
}, {
about: "concurrent requests",
fillInterval: 250 * time.Millisecond,
capacity: 10,
reqs: []takeReq{{
time: 0,
count: 10,
expectWait: 0,
}, {
time: 0,
count: 2,
expectWait: 500 * time.Millisecond,
}, {
time: 0,
count: 2,
expectWait: 1000 * time.Millisecond,
}, {
time: 0,
count: 1,
expectWait: 1250 * time.Millisecond,
}},
}, {
about: "more than capacity",
fillInterval: 1 * time.Millisecond,
capacity: 10,
reqs: []takeReq{{
time: 0,
count: 10,
expectWait: 0,
}, {
time: 20 * time.Millisecond,
count: 15,
expectWait: 5 * time.Millisecond,
}},
}, {
about: "sub-quantum time",
fillInterval: 10 * time.Millisecond,
capacity: 10,
reqs: []takeReq{{
time: 0,
count: 10,
expectWait: 0,
}, {
time: 7 * time.Millisecond,
count: 1,
expectWait: 3 * time.Millisecond,
}, {
time: 8 * time.Millisecond,
count: 1,
expectWait: 12 * time.Millisecond,
}},
}, {
about: "within capacity",
fillInterval: 10 * time.Millisecond,
capacity: 5,
reqs: []takeReq{{
time: 0,
count: 5,
expectWait: 0,
}, {
time: 60 * time.Millisecond,
count: 5,
expectWait: 0,
}, {
time: 60 * time.Millisecond,
count: 1,
expectWait: 10 * time.Millisecond,
}, {
time: 80 * time.Millisecond,
count: 2,
expectWait: 10 * time.Millisecond,
}},
}}
var availTests = []struct {
about string
capacity int64
fillInterval time.Duration
take int64
sleep time.Duration
expectCountAfterTake int64
expectCountAfterSleep int64
}{{
about: "should fill tokens after interval",
capacity: 5,
fillInterval: time.Second,
take: 5,
sleep: time.Second,
expectCountAfterTake: 0,
expectCountAfterSleep: 1,
}, {
about: "should fill tokens plus existing count",
capacity: 2,
fillInterval: time.Second,
take: 1,
sleep: time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 2,
}, {
about: "shouldn't fill before interval",
capacity: 2,
fillInterval: 2 * time.Second,
take: 1,
sleep: time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 1,
}, {
about: "should fill only once after 1*interval before 2*interval",
capacity: 2,
fillInterval: 2 * time.Second,
take: 1,
sleep: 3 * time.Second,
expectCountAfterTake: 1,
expectCountAfterSleep: 2,
}}
func (rateLimitSuite) TestTake(c *gc.C) {
for i, test := range takeTests {
tb := NewBucket(test.fillInterval, test.capacity)
for j, req := range test.reqs {
d, ok := tb.take(tb.startTime.Add(req.time), req.count, infinityDuration)
c.Assert(ok, gc.Equals, true)
if d != req.expectWait {
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
}
}
}
}
func (rateLimitSuite) TestTakeMaxDuration(c *gc.C) {
for i, test := range takeTests {
tb := NewBucket(test.fillInterval, test.capacity)
for j, req := range test.reqs {
if req.expectWait > 0 {
d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait-1)
c.Assert(ok, gc.Equals, false)
c.Assert(d, gc.Equals, time.Duration(0))
}
d, ok := tb.take(tb.startTime.Add(req.time), req.count, req.expectWait)
c.Assert(ok, gc.Equals, true)
if d != req.expectWait {
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expectWait)
}
}
}
}
type takeAvailableReq struct {
time time.Duration
count int64
expect int64
}
var takeAvailableTests = []struct {
about string
fillInterval time.Duration
capacity int64
reqs []takeAvailableReq
}{{
about: "serial requests",
fillInterval: 250 * time.Millisecond,
capacity: 10,
reqs: []takeAvailableReq{{
time: 0,
count: 0,
expect: 0,
}, {
time: 0,
count: 10,
expect: 10,
}, {
time: 0,
count: 1,
expect: 0,
}, {
time: 250 * time.Millisecond,
count: 1,
expect: 1,
}},
}, {
about: "concurrent requests",
fillInterval: 250 * time.Millisecond,
capacity: 10,
reqs: []takeAvailableReq{{
time: 0,
count: 5,
expect: 5,
}, {
time: 0,
count: 2,
expect: 2,
}, {
time: 0,
count: 5,
expect: 3,
}, {
time: 0,
count: 1,
expect: 0,
}},
}, {
about: "more than capacity",
fillInterval: 1 * time.Millisecond,
capacity: 10,
reqs: []takeAvailableReq{{
time: 0,
count: 10,
expect: 10,
}, {
time: 20 * time.Millisecond,
count: 15,
expect: 10,
}},
}, {
about: "within capacity",
fillInterval: 10 * time.Millisecond,
capacity: 5,
reqs: []takeAvailableReq{{
time: 0,
count: 5,
expect: 5,
}, {
time: 60 * time.Millisecond,
count: 5,
expect: 5,
}, {
time: 70 * time.Millisecond,
count: 1,
expect: 1,
}},
}}
func (rateLimitSuite) TestTakeAvailable(c *gc.C) {
for i, test := range takeAvailableTests {
tb := NewBucket(test.fillInterval, test.capacity)
for j, req := range test.reqs {
d := tb.takeAvailable(tb.startTime.Add(req.time), req.count)
if d != req.expect {
c.Fatalf("test %d.%d, %s, got %v want %v", i, j, test.about, d, req.expect)
}
}
}
}
func (rateLimitSuite) TestPanics(c *gc.C) {
c.Assert(func() { NewBucket(0, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
c.Assert(func() { NewBucket(-2, 1) }, gc.PanicMatches, "token bucket fill interval is not > 0")
c.Assert(func() { NewBucket(1, 0) }, gc.PanicMatches, "token bucket capacity is not > 0")
c.Assert(func() { NewBucket(1, -2) }, gc.PanicMatches, "token bucket capacity is not > 0")
}
func isCloseTo(x, y, tolerance float64) bool {
return math.Abs(x-y)/y < tolerance
}
func (rateLimitSuite) TestRate(c *gc.C) {
tb := NewBucket(1, 1)
if !isCloseTo(tb.Rate(), 1e9, 0.00001) {
c.Fatalf("got %v want 1e9", tb.Rate())
}
tb = NewBucket(2*time.Second, 1)
if !isCloseTo(tb.Rate(), 0.5, 0.00001) {
c.Fatalf("got %v want 0.5", tb.Rate())
}
tb = NewBucketWithQuantum(100*time.Millisecond, 1, 5)
if !isCloseTo(tb.Rate(), 50, 0.00001) {
c.Fatalf("got %v want 50", tb.Rate())
}
}
func checkRate(c *gc.C, rate float64) {
tb := NewBucketWithRate(rate, 1<<62)
if !isCloseTo(tb.Rate(), rate, rateMargin) {
c.Fatalf("got %g want %v", tb.Rate(), rate)
}
d, ok := tb.take(tb.startTime, 1<<62, infinityDuration)
c.Assert(ok, gc.Equals, true)
c.Assert(d, gc.Equals, time.Duration(0))
// Check that the actual rate is as expected by
// asking for a not-quite multiple of the bucket's
// quantum and checking that the wait time is
// correct.
d, ok = tb.take(tb.startTime, tb.quantum*2-tb.quantum/2, infinityDuration)
c.Assert(ok, gc.Equals, true)
expectTime := 1e9 * float64(tb.quantum) * 2 / rate
if !isCloseTo(float64(d), expectTime, rateMargin) {
c.Fatalf("rate %g: got %g want %v", rate, float64(d), expectTime)
}
}
func (rateLimitSuite) TestNewWithRate(c *gc.C) {
for rate := float64(1); rate < 1e6; rate += 7 {
checkRate(c, rate)
}
for _, rate := range []float64{
1024 * 1024 * 1024,
1e-5,
0.9e-5,
0.5,
0.9,
0.9e8,
3e12,
4e18,
} {
checkRate(c, rate)
checkRate(c, rate/3)
checkRate(c, rate*1.3)
}
}
func TestAvailable(t *testing.T) {
for i, tt := range availTests {
tb := NewBucket(tt.fillInterval, tt.capacity)
if c := tb.takeAvailable(tb.startTime, tt.take); c != tt.take {
t.Fatalf("#%d: %s, take = %d, want = %d", i, tt.about, c, tt.take)
}
if c := tb.available(tb.startTime); c != tt.expectCountAfterTake {
t.Fatalf("#%d: %s, after take, available = %d, want = %d", i, tt.about, c, tt.expectCountAfterTake)
}
if c := tb.available(tb.startTime.Add(tt.sleep)); c != tt.expectCountAfterSleep {
t.Fatalf("#%d: %s, after some time it should fill in new tokens, available = %d, want = %d",
i, tt.about, c, tt.expectCountAfterSleep)
}
}
}
func BenchmarkWait(b *testing.B) {
tb := NewBucket(1, 16*1024)
for i := b.N - 1; i >= 0; i-- {
tb.Wait(1)
}
}


@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Extensions to the standard "os" package.
package osext
package osext // import "github.com/kardianos/osext"
import "path/filepath"

203
vendor/github.com/kardianos/osext/osext_test.go generated vendored Normal file

@ -0,0 +1,203 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin linux freebsd netbsd windows openbsd
package osext
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
const (
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
executableEnvValueMatch = "match"
executableEnvValueDelete = "delete"
)
func TestPrintExecutable(t *testing.T) {
ef, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
t.Log("Executable:", ef)
}
func TestPrintExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
t.Log("Executable Folder:", ef)
}
func TestExecutableFolder(t *testing.T) {
ef, err := ExecutableFolder()
if err != nil {
t.Fatalf("ExecutableFolder failed: %v", err)
}
if ef[len(ef)-1] == filepath.Separator {
t.Fatal("ExecutableFolder ends with a trailing slash.")
}
}
func TestExecutableMatch(t *testing.T) {
ep, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
// fullpath to be of the form "dir/prog".
dir := filepath.Dir(filepath.Dir(ep))
fullpath, err := filepath.Rel(dir, ep)
if err != nil {
t.Fatalf("filepath.Rel: %v", err)
}
// Make child start with a relative program path.
// Alter argv[0] for child to verify getting real path without argv[0].
cmd := &exec.Cmd{
Dir: dir,
Path: fullpath,
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("exec(self) failed: %v", err)
}
outs := string(out)
if !filepath.IsAbs(outs) {
t.Fatalf("Child returned %q, want an absolute path", out)
}
if !sameFile(outs, ep) {
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
}
}
func TestExecutableDelete(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip()
}
fpath, err := Executable()
if err != nil {
t.Fatalf("Executable failed: %v", err)
}
r, w := io.Pipe()
stderrBuff := &bytes.Buffer{}
stdoutBuff := &bytes.Buffer{}
cmd := &exec.Cmd{
Path: fpath,
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
Stdin: r,
Stderr: stderrBuff,
Stdout: stdoutBuff,
}
err = cmd.Start()
if err != nil {
t.Fatalf("exec(self) start failed: %v", err)
}
tempPath := fpath + "_copy"
_ = os.Remove(tempPath)
err = copyFile(tempPath, fpath)
if err != nil {
t.Fatalf("copy file failed: %v", err)
}
err = os.Remove(fpath)
if err != nil {
t.Fatalf("remove running test file failed: %v", err)
}
err = os.Rename(tempPath, fpath)
if err != nil {
t.Fatalf("rename copy to previous name failed: %v", err)
}
w.Write([]byte{0})
w.Close()
err = cmd.Wait()
if err != nil {
t.Fatalf("exec wait failed: %v", err)
}
childPath := stderrBuff.String()
if !filepath.IsAbs(childPath) {
t.Fatalf("Child returned %q, want an absolute path", childPath)
}
if !sameFile(childPath, fpath) {
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
}
}
func sameFile(fn1, fn2 string) bool {
fi1, err := os.Stat(fn1)
if err != nil {
return false
}
fi2, err := os.Stat(fn2)
if err != nil {
return false
}
return os.SameFile(fi1, fi2)
}
func copyFile(dest, src string) error {
df, err := os.Create(dest)
if err != nil {
return err
}
defer df.Close()
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
_, err = io.Copy(df, sf)
return err
}
func TestMain(m *testing.M) {
env := os.Getenv(executableEnvVar)
switch env {
case "":
os.Exit(m.Run())
case executableEnvValueMatch:
// First chdir to another path.
dir := "/"
if runtime.GOOS == "windows" {
dir = filepath.VolumeName(".")
}
os.Chdir(dir)
if ep, err := Executable(); err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
} else {
fmt.Fprint(os.Stderr, ep)
}
case executableEnvValueDelete:
bb := make([]byte, 1)
var err error
n, err := os.Stdin.Read(bb)
if err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
os.Exit(2)
}
if n != 1 {
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
os.Exit(2)
}
if ep, err := Executable(); err != nil {
fmt.Fprint(os.Stderr, "ERROR: ", err)
} else {
fmt.Fprint(os.Stderr, ep)
}
}
os.Exit(0)
}

136
vendor/github.com/onsi/ginkgo/CHANGELOG.md generated vendored Normal file

@ -0,0 +1,136 @@
## HEAD
Improvements:
- `Skip(message)` can be used to skip the current test.
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
Bug Fixes:
- Ginkgo tests now fail when you `panic(nil)` (#167)
## 1.2.0 5/31/2015
Improvements
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
## 1.2.0-beta
Ginkgo now requires Go 1.4+
Improvements:
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
- Improved focus behavior. Now, this:
```golang
FDescribe("Some describe", func() {
It("A", func() {})
FIt("B", func() {})
})
```
will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
- Improved output when an error occurs in a setup or teardown block.
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
- Add support for precompiled tests:
- `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
- The compiled `package.test` file can be run directly. This runs the tests in series.
- To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
- `ginkgo -notify` now works on Linux
Bug Fixes:
- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
- Fix tempfile leak when running in parallel
- Fix incorrect failure message when a panic occurs during a parallel test run
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
- Be more consistent about handling SIGTERM as well as SIGINT
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
## 1.1.0 (8/2/2014)
No changes, just dropping the beta.
## 1.1.0-beta (7/22/2014)
New Features:
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
Improvements:
- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
- `ginkgo --untilItFails` no longer recompiles between attempts.
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
Bug Fixes:
- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
## 1.0.0 (5/24/2014)
New Features:
- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
Improvements:
- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
Bug Fixes:
- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
- Fix all remaining race conditions in Ginkgo's test suite.
## 1.0.0-beta (4/14/2014)
Breaking changes:
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
- Modified the Reporter interface
- `watch` is now a subcommand, not a flag.
DSL changes:
- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
CLI changes:
- `watch` is now a subcommand, not a flag
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
- Additional arguments can be passed to specs. Pass them after the `--` separator
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
Misc:
- Start using semantic versioning
- Start maintaining changelog
Major refactor:
- Pull out Ginkgo's internal to `internal`
- Rename `example` everywhere to `spec`
- Much more!

20
vendor/github.com/onsi/ginkgo/LICENSE generated vendored Normal file

@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

115
vendor/github.com/onsi/ginkgo/README.md generated vendored Normal file

@ -0,0 +1,115 @@
![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
## Feature List
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
- Structure your BDD-style tests expressively:
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
- A comprehensive test runner that lets you:
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in real time
- `ginkgo -cover` runs your tests using Golang's code coverage tool
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- `ginkgo -r` runs all test suites under the current directory
- `ginkgo -v` prints out identifying information for each test just before it runs
And much more: run `ginkgo help` for details!
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other arbitrary bits of numerical information about your code.
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
- A modular architecture that lets you easily:
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
## Set Me Up!
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
```bash
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get github.com/onsi/gomega # fetches the matcher library
cd path/to/package/you/want/to/test
ginkgo bootstrap # set up a new ginkgo suite
ginkgo generate # creates a sample test file; edit it, add your tests, then...
go test # to run your tests
ginkgo # also runs your tests
```
## I'm new to Go: What are my testing options?
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
With that said, it's great to know what your options are :)
### What Golang gives you out of the box
Testing is a first-class citizen in Golang; however, Go's built-in testing primitives are somewhat limited: the [testing](http://golang.org/pkg/testing) package provides basic XUnit-style tests and no assertion library.
### Matcher libraries for Golang's XUnit style tests
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
- [testify](https://github.com/stretchr/testify)
- [gocheck](http://labix.org/gocheck)
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
### BDD style testing frameworks
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
- [Goblin](https://github.com/franela/goblin)
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
Go explore!
## License
Ginkgo is MIT-Licensed

vendor/github.com/onsi/ginkgo/config/config.go generated vendored Normal file

@ -0,0 +1,170 @@
/*
Ginkgo accepts a number of configuration options.
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
You can also learn more via
ginkgo help
or (I kid you not):
go test -asdf
*/
package config
import (
"flag"
"fmt"
"time"
)
const VERSION = "1.2.0"
type GinkgoConfigType struct {
RandomSeed int64
RandomizeAllSpecs bool
FocusString string
SkipString string
SkipMeasurements bool
FailOnPending bool
FailFast bool
EmitSpecProgress bool
DryRun bool
ParallelNode int
ParallelTotal int
SyncHost string
StreamHost string
}
var GinkgoConfig = GinkgoConfigType{}
type DefaultReporterConfigType struct {
NoColor bool
SlowSpecThreshold float64
NoisyPendings bool
Succinct bool
Verbose bool
FullTrace bool
}
var DefaultReporterConfig = DefaultReporterConfigType{}
func processPrefix(prefix string) string {
if prefix != "" {
prefix = prefix + "."
}
return prefix
}
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
prefix = processPrefix(prefix)
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
if includeParallelFlags {
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
}
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
}
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
prefix = processPrefix(prefix)
result := make([]string, 0)
if ginkgo.RandomSeed > 0 {
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
}
if ginkgo.RandomizeAllSpecs {
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
}
if ginkgo.SkipMeasurements {
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
}
if ginkgo.FailOnPending {
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
}
if ginkgo.FailFast {
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
}
if ginkgo.DryRun {
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
}
if ginkgo.FocusString != "" {
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
}
if ginkgo.SkipString != "" {
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
}
if ginkgo.EmitSpecProgress {
result = append(result, fmt.Sprintf("--%sprogress", prefix))
}
if ginkgo.ParallelNode != 0 {
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
}
if ginkgo.ParallelTotal != 0 {
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
}
if ginkgo.StreamHost != "" {
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
}
if ginkgo.SyncHost != "" {
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
}
if reporter.NoColor {
result = append(result, fmt.Sprintf("--%snoColor", prefix))
}
if reporter.SlowSpecThreshold > 0 {
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
}
if !reporter.NoisyPendings {
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
}
if reporter.Verbose {
result = append(result, fmt.Sprintf("--%sv", prefix))
}
if reporter.Succinct {
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
}
if reporter.FullTrace {
result = append(result, fmt.Sprintf("--%strace", prefix))
}
return result
}
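
Taken together, `Flags` and `BuildFlagArgs` form a round trip: the former registers every option on a `flag.FlagSet` under an optional prefix, and the latter serializes the populated config structs back into an equivalent argument list. Below is a minimal sketch of that round trip; it uses only the identifiers defined in the file above, and the flag values themselves are invented for illustration.

```go
package main

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	// Registers --ginkgo.seed, --ginkgo.focus, --ginkgo.parallel.node, etc.
	config.Flags(fs, "ginkgo", true)
	if err := fs.Parse([]string{"--ginkgo.focus=integration", "--ginkgo.failFast"}); err != nil {
		panic(err)
	}

	// Serialize the populated package-level structs back into flag form.
	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	fmt.Println(args)
}
```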


@ -0,0 +1,98 @@
/*
Table provides a simple DSL for Ginkgo-native Table-Driven Tests
The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
*/
package table
import (
"fmt"
"reflect"
"github.com/onsi/ginkgo"
)
/*
DescribeTable describes a table-driven test.
For example:
DescribeTable("a simple table",
func(x int, y int, expected bool) {
Ω(x > y).Should(Equal(expected))
},
Entry("x > y", 1, 0, true),
Entry("x == y", 0, 0, false),
Entry("x < y", 0, 1, false),
)
The first argument to `DescribeTable` is a string description.
The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It.
The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors.
The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function.
Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`.
It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
*/
func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, false, false)
return true
}
/*
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, false, true)
return true
}
/*
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, true, false)
return true
}
/*
You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
*/
func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
describeTable(description, itBody, entries, true, false)
return true
}
func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
itBodyValue := reflect.ValueOf(itBody)
if itBodyValue.Kind() != reflect.Func {
panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
}
if pending {
ginkgo.PDescribe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
} else if focused {
ginkgo.FDescribe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
} else {
ginkgo.Describe(description, func() {
for _, entry := range entries {
entry.generateIt(itBodyValue)
}
})
}
}
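
Since every `Entry` simply becomes an `It` inside a generated `Describe`, the three-entry table from the doc comment above is conceptually equivalent to writing the specs out by hand. A sketch of that expansion follows, assuming it lives in its own test-only file (the file, package, and suite names are invented; the suite bootstrap is the standard boilerplate from the README).

```go
package tableexample

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestTableExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Table Example Suite")
}

// What DescribeTable("a simple table", ...) from the doc comment above
// conceptually expands to: one It per Entry, inside a single Describe.
var _ = Describe("a simple table", func() {
	It("x > y", func() { Ω(1 > 0).Should(Equal(true)) })
	It("x == y", func() { Ω(0 > 0).Should(Equal(false)) })
	It("x < y", func() { Ω(0 > 1).Should(Equal(false)) })
})
```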


@ -0,0 +1,81 @@
package table
import (
"reflect"
"github.com/onsi/ginkgo"
)
/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
Description string
Parameters []interface{}
Pending bool
Focused bool
}
func (t TableEntry) generateIt(itBody reflect.Value) {
if t.Pending {
ginkgo.PIt(t.Description)
return
}
values := []reflect.Value{}
for i, param := range t.Parameters {
var value reflect.Value
if param == nil {
inType := itBody.Type().In(i)
value = reflect.Zero(inType)
} else {
value = reflect.ValueOf(param)
}
values = append(values, value)
}
body := func() {
itBody.Call(values)
}
if t.Focused {
ginkgo.FIt(t.Description, body)
} else {
ginkgo.It(t.Description, body)
}
}
/*
Entry constructs a TableEntry.
The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
Each Entry ends up generating an individual Ginkgo It.
*/
func Entry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, false}
}
/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
func FEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, true}
}
/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
func PEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
}
/*
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
*/
func XEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
}
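
The nil-parameter branch in `generateIt` above is the subtle part: an untyped nil parameter is replaced with the zero `reflect.Value` of the callback's corresponding input type, which is how a typed callback can receive nil. A standalone sketch of just that mechanism (all names here are invented, not Ginkgo's):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	body := func(x *int, s string) { fmt.Println(x, s) }
	bodyValue := reflect.ValueOf(body)

	params := []interface{}{nil, "hello"}
	values := make([]reflect.Value, 0, len(params))
	for i, p := range params {
		if p == nil {
			// Same trick as generateIt: substitute the zero Value of the
			// callback's i-th input type for an untyped nil parameter.
			values = append(values, reflect.Zero(bodyValue.Type().In(i)))
		} else {
			values = append(values, reflect.ValueOf(p))
		}
	}
	bodyValue.Call(values) // prints "<nil> hello"
}
```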


@ -0,0 +1,13 @@
package table_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
func TestTable(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Table Suite")
}


@ -0,0 +1,64 @@
package table_test
import (
"strings"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Table", func() {
DescribeTable("a simple table",
func(x int, y int, expected bool) {
Ω(x > y).Should(Equal(expected))
},
Entry("x > y", 1, 0, true),
Entry("x == y", 0, 0, false),
Entry("x < y", 0, 1, false),
)
type ComplicatedThings struct {
Superstructure string
Substructure string
Count int
}
DescribeTable("a more complicated table",
func(c ComplicatedThings) {
Ω(strings.Count(c.Superstructure, c.Substructure)).Should(BeNumerically("==", c.Count))
},
Entry("with no matching substructures", ComplicatedThings{
Superstructure: "the sixth sheikh's sixth sheep's sick",
Substructure: "emir",
Count: 0,
}),
Entry("with one matching substructure", ComplicatedThings{
Superstructure: "the sixth sheikh's sixth sheep's sick",
Substructure: "sheep",
Count: 1,
}),
Entry("with many matching substructures", ComplicatedThings{
Superstructure: "the sixth sheikh's sixth sheep's sick",
Substructure: "si",
Count: 3,
}),
)
PDescribeTable("a failure",
func(value bool) {
Ω(value).Should(BeFalse())
},
Entry("when true", true),
Entry("when false", false),
Entry("when malformed", 2),
)
DescribeTable("an untyped nil as an entry",
func(x interface{}) {
Expect(x).To(BeNil())
},
Entry("nil", nil),
)
})


@ -0,0 +1,182 @@
package main
import (
"bytes"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"text/template"
"go/build"
"github.com/onsi/ginkgo/ginkgo/nodot"
)
func BuildBootstrapCommand() *Command {
var agouti, noDot bool
flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
return &Command{
Name: "bootstrap",
FlagSet: flagSet,
UsageCommand: "ginkgo bootstrap <FLAGS>",
Usage: []string{
"Bootstrap a test suite for the current package",
"Accepts the following flags:",
},
Command: func(args []string, additionalArgs []string) {
generateBootstrap(agouti, noDot)
},
}
}
var bootstrapText = `package {{.Package}}_test
import (
{{.GinkgoImport}}
{{.GomegaImport}}
"testing"
)
func Test{{.FormattedName}}(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "{{.FormattedName}} Suite")
}
`
var agoutiBootstrapText = `package {{.Package}}_test
import (
{{.GinkgoImport}}
{{.GomegaImport}}
"github.com/sclevine/agouti"
"testing"
)
func Test{{.FormattedName}}(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "{{.FormattedName}} Suite")
}
var agoutiDriver *agouti.WebDriver
var _ = BeforeSuite(func() {
// Choose a WebDriver:
agoutiDriver = agouti.PhantomJS()
// agoutiDriver = agouti.Selenium()
// agoutiDriver = agouti.ChromeDriver()
Expect(agoutiDriver.Start()).To(Succeed())
})
var _ = AfterSuite(func() {
Expect(agoutiDriver.Stop()).To(Succeed())
})
`
type bootstrapData struct {
Package string
FormattedName string
GinkgoImport string
GomegaImport string
}
func getPackageAndFormattedName() (string, string, string) {
path, err := os.Getwd()
if err != nil {
complainAndQuit("Could not get current working directory: \n" + err.Error())
}
dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
dirName = strings.Replace(dirName, " ", "_", -1)
pkg, err := build.ImportDir(path, 0)
packageName := dirName
if err == nil {
packageName = pkg.Name
}
formattedName := prettifyPackageName(filepath.Base(path))
return packageName, dirName, formattedName
}
func prettifyPackageName(name string) string {
name = strings.Replace(name, "-", " ", -1)
name = strings.Replace(name, "_", " ", -1)
name = strings.Title(name)
name = strings.Replace(name, " ", "", -1)
return name
}
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
func generateBootstrap(agouti bool, noDot bool) {
packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
data := bootstrapData{
Package: packageName,
FormattedName: formattedName,
GinkgoImport: `. "github.com/onsi/ginkgo"`,
GomegaImport: `. "github.com/onsi/gomega"`,
}
if noDot {
data.GinkgoImport = `"github.com/onsi/ginkgo"`
data.GomegaImport = `"github.com/onsi/gomega"`
}
targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
if fileExists(targetFile) {
fmt.Printf("%s already exists.\n\n", targetFile)
os.Exit(1)
} else {
fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
}
f, err := os.Create(targetFile)
if err != nil {
complainAndQuit("Could not create file: " + err.Error())
panic(err.Error())
}
defer f.Close()
var templateText string
if agouti {
templateText = agoutiBootstrapText
} else {
templateText = bootstrapText
}
bootstrapTemplate, err := template.New("bootstrap").Parse(templateText)
if err != nil {
panic(err.Error())
}
buf := &bytes.Buffer{}
bootstrapTemplate.Execute(buf, data)
if noDot {
contents, err := nodot.ApplyNoDot(buf.Bytes())
if err != nil {
complainAndQuit("Failed to import nodot declarations: " + err.Error())
}
fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
buf = bytes.NewBuffer(contents)
}
buf.WriteTo(f)
goFmt(targetFile)
}
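
`prettifyPackageName` above produces the CamelCased suite name interpolated into the `Test{{.FormattedName}}` template. Since the function is unexported, here is a standalone copy of the same transformation for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// prettify mirrors the unexported prettifyPackageName above: "-" and "_"
// become word breaks, each word is title-cased, and the spaces are removed.
func prettify(name string) string {
	name = strings.Replace(name, "-", " ", -1)
	name = strings.Replace(name, "_", " ", -1)
	name = strings.Title(name)
	return strings.Replace(name, " ", "", -1)
}

func main() {
	fmt.Println(prettify("my-cool_package")) // MyCoolPackage
}
```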

vendor/github.com/onsi/ginkgo/ginkgo/build_command.go generated vendored Normal file

@ -0,0 +1,68 @@
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
"github.com/onsi/ginkgo/ginkgo/testrunner"
)
func BuildBuildCommand() *Command {
commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
interruptHandler := interrupthandler.NewInterruptHandler()
builder := &SpecBuilder{
commandFlags: commandFlags,
interruptHandler: interruptHandler,
}
return &Command{
Name: "build",
FlagSet: commandFlags.FlagSet,
UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
Usage: []string{
"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
"Accepts the following flags:",
},
Command: builder.BuildSpecs,
}
}
type SpecBuilder struct {
commandFlags *RunWatchAndBuildCommandFlags
interruptHandler *interrupthandler.InterruptHandler
}
func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
r.commandFlags.computeNodes()
suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
if len(suites) == 0 {
complainAndQuit("Found no test suites")
}
passed := true
for _, suite := range suites {
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
fmt.Printf("Compiling %s...\n", suite.PackageName)
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
err := runner.CompileTo(path)
if err != nil {
fmt.Println(err.Error())
passed = false
} else {
fmt.Printf(" compiled %s.test\n", suite.PackageName)
}
runner.CleanUp()
}
if passed {
os.Exit(0)
}
os.Exit(1)
}


@ -0,0 +1,123 @@
package convert
import (
"fmt"
"go/ast"
"strings"
"unicode"
)
/*
* Creates a `var _ = ...` value spec node
*/
func createVarUnderscoreBlock() *ast.ValueSpec {
valueSpec := &ast.ValueSpec{}
object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0} // Kind 4 == ast.Var
ident := &ast.Ident{Name: "_", Obj: object}
valueSpec.Names = append(valueSpec.Names, ident)
return valueSpec
}
/*
* Creates a Describe("Testing with ginkgo", func() { }) node
*/
func createDescribeBlock() *ast.CallExpr {
blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
fieldList := &ast.FieldList{}
funcType := &ast.FuncType{Params: fieldList}
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""} // Kind 9 == token.STRING
describeIdent := &ast.Ident{Name: "Describe"}
return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
}
/*
* Convenience function to return the name of the *testing.T param
* for a Test function that will be rewritten. This is useful because
* we will want to replace the usage of this named *testing.T inside the
* body of the function with a GinkgoT.
*/
func namedTestingTArg(node *ast.FuncDecl) string {
return node.Type.Params.List[0].Names[0].Name // *exhale*
}
/*
* Convenience function to return the block statement node for a Describe statement
*/
func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
var funcLit *ast.FuncLit
var found = false
for _, arg := range desc.Args {
if fl, ok := arg.(*ast.FuncLit); ok {
funcLit = fl
found = true
break
}
}
if !found {
panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
}
return funcLit.Body
}
/* convenience function for creating an It("TestNameHere")
* with all the body of the test function inside the anonymous
* func passed to It()
*/
func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
fieldList := &ast.FieldList{}
funcType := &ast.FuncType{Params: fieldList}
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
testName := rewriteTestName(testFunc.Name.Name)
basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)} // Kind 9 == token.STRING
itBlockIdent := &ast.Ident{Name: "It"}
callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
return &ast.ExprStmt{X: callExpr}
}
/*
* rewrite test names to be human readable
* eg: rewrites "TestSomethingAmazing" as "something amazing"
*/
func rewriteTestName(testName string) string {
nameComponents := []string{}
currentString := ""
indexOfTest := strings.Index(testName, "Test")
if indexOfTest != 0 {
return testName
}
testName = strings.Replace(testName, "Test", "", 1)
first, rest := testName[0], testName[1:]
testName = string(unicode.ToLower(rune(first))) + rest
for _, r := range testName {
if unicode.IsUpper(r) {
nameComponents = append(nameComponents, currentString)
currentString = string(unicode.ToLower(r))
} else {
currentString += string(r)
}
}
return strings.Join(append(nameComponents, currentString), " ")
}
func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
return &ast.CallExpr{
Lparen: ident.NamePos + 1,
Rparen: ident.NamePos + 2,
Fun: &ast.Ident{Name: "GinkgoT"},
}
}
func newGinkgoTInterface() *ast.Ident {
return &ast.Ident{Name: "GinkgoTInterface"}
}
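
The `rewriteTestName` transformation above is easiest to see running. This standalone sketch mirrors its logic, with an extra length guard for a bare `Test` name (a case the caller's regexp already excludes):

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// humanize mirrors rewriteTestName above: strip the leading "Test",
// then split the remaining CamelCase into lower-cased words.
func humanize(testName string) string {
	if !strings.HasPrefix(testName, "Test") || len(testName) <= len("Test") {
		return testName
	}
	name := testName[len("Test"):]
	name = string(unicode.ToLower(rune(name[0]))) + name[1:]
	var words []string
	current := ""
	for _, r := range name {
		if unicode.IsUpper(r) {
			words = append(words, current)
			current = string(unicode.ToLower(r))
		} else {
			current += string(r)
		}
	}
	return strings.Join(append(words, current), " ")
}

func main() {
	fmt.Println(humanize("TestSomethingAmazing")) // "something amazing"
}
```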

vendor/github.com/onsi/ginkgo/ginkgo/convert/import.go generated vendored Normal file

@ -0,0 +1,91 @@
package convert
import (
"fmt"
"go/ast"
)
/*
* Given the root node of an AST, returns the node containing the
* import statements for the file.
*/
func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
for _, declaration := range rootNode.Decls {
decl, ok := declaration.(*ast.GenDecl)
if !ok || len(decl.Specs) == 0 {
continue
}
_, ok = decl.Specs[0].(*ast.ImportSpec)
if ok {
imports = decl
return
}
}
err = fmt.Errorf("Could not find imports for root node:\n\t%#v\n", rootNode)
return
}
/*
* Removes "testing" import, if present
*/
func removeTestingImport(rootNode *ast.File) {
importDecl, err := importsForRootNode(rootNode)
if err != nil {
panic(err.Error())
}
index := -1
for i, spec := range importDecl.Specs {
importSpec, ok := spec.(*ast.ImportSpec)
if ok && importSpec.Path.Value == "\"testing\"" {
index = i
break
}
}
if index == -1 {
return // no "testing" import present
}
importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
}
/*
* Adds import statements for onsi/ginkgo, if missing
*/
func addGinkgoImports(rootNode *ast.File) {
importDecl, err := importsForRootNode(rootNode)
if err != nil {
panic(err.Error())
}
if len(importDecl.Specs) == 0 {
// TODO: might need to create an import decl here
panic("unimplemented : expected to find an imports block")
}
needsGinkgo := true
for _, importSpec := range importDecl.Specs {
importSpec, ok := importSpec.(*ast.ImportSpec)
if !ok {
continue
}
if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
needsGinkgo = false
}
}
if needsGinkgo {
importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
}
}
/*
* convenience function to create an import statement
*/
func createImport(name, path string) *ast.ImportSpec {
return &ast.ImportSpec{
Name: &ast.Ident{Name: name},
Path: &ast.BasicLit{Kind: 9, Value: path}, // Kind 9 == token.STRING
}
}
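
`importsForRootNode` above assumes an already-parsed AST. For context, here is a minimal standalone sketch that parses a file in memory and locates the import declaration; it keys off `GenDecl.Tok` rather than inspecting `Specs[0]`, which is a slightly more direct version of the same test:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package demo

import (
	"fmt"
	"testing"
)
`

func main() {
	fset := token.NewFileSet()
	root, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, decl := range root.Decls {
		gen, ok := decl.(*ast.GenDecl)
		if !ok || gen.Tok != token.IMPORT {
			continue
		}
		for _, spec := range gen.Specs {
			fmt.Println(spec.(*ast.ImportSpec).Path.Value)
		}
	}
}
```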


@ -0,0 +1,127 @@
package convert
import (
"fmt"
"go/build"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
)
/*
* RewritePackage takes a name (eg: my-package/tools), finds its test files using
* Go's build package, and then rewrites them. A ginkgo test suite file will
* also be added for this package, and all of its child packages.
*/
func RewritePackage(packageName string) {
pkg, err := packageWithName(packageName)
if err != nil {
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
}
for _, filename := range findTestsInPackage(pkg) {
rewriteTestsInFile(filename)
}
return
}
/*
* Given a package, findTestsInPackage reads the test files in the directory,
* and then recurses on each child package, returning a slice of all test files
* found in this process.
*/
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
}
dirFiles, err := ioutil.ReadDir(pkg.Dir)
if err != nil {
panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
}
re := regexp.MustCompile(`^[._]`)
for _, file := range dirFiles {
if !file.IsDir() {
continue
}
if re.MatchString(file.Name()) {
continue
}
packageName := filepath.Join(pkg.ImportPath, file.Name())
subPackage, err := packageWithName(packageName)
if err != nil {
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
}
testfiles = append(testfiles, findTestsInPackage(subPackage)...)
}
addGinkgoSuiteForPackage(pkg)
goFmtPackage(pkg)
return
}
/*
* Shells out to `ginkgo bootstrap` to create a test suite file
*/
func addGinkgoSuiteForPackage(pkg *build.Package) {
originalDir, err := os.Getwd()
if err != nil {
panic(err)
}
suiteTestFile := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
_, err = os.Stat(suiteTestFile)
if err == nil {
return // test file already exists, this should be a no-op
}
err = os.Chdir(pkg.Dir)
if err != nil {
panic(err)
}
output, err := exec.Command("ginkgo", "bootstrap").Output()
if err != nil {
panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
}
err = os.Chdir(originalDir)
if err != nil {
panic(err)
}
}
/*
* Shells out to `go fmt` to format the package
*/
func goFmtPackage(pkg *build.Package) {
output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
if err != nil {
fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
}
}
/*
* Attempts to return a package with its test files already read.
* The ImportMode arg to build.Import controls whether Go reads the buildable
* Go files inside the package; that call fails if the package has no Go files,
* so we fall back to build.FindOnly to at least locate the directory.
*/
func packageWithName(name string) (pkg *build.Package, err error) {
pkg, err = build.Default.Import(name, ".", 0)
if err == nil {
return
}
pkg, err = build.Default.Import(name, ".", build.FindOnly)
return
}
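
`RewritePackage` is the exported entry point behind `ginkgo convert`. A minimal caller sketch follows; the import path is hypothetical, and note that, per the implementation above, failures panic rather than returning an error.

```go
package main

import "github.com/onsi/ginkgo/ginkgo/convert"

func main() {
	// Hypothetical package; it must be resolvable in GOPATH.
	// RewritePackage panics on failure, per the implementation above.
	convert.RewritePackage("github.com/example/mypkg")
}
```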


@ -0,0 +1,56 @@
package convert
import (
"go/ast"
"regexp"
)
/*
* Given a root node, walks its top-level statements and returns
* pointers to the function nodes to rewrite as It statements.
* These functions, according to Go testing convention, must be named
* TestWithCamelCasedName and receive a single *testing.T argument.
*/
func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
ast.Inspect(rootNode, func(node ast.Node) bool {
if node == nil {
return false
}
switch node := node.(type) {
case *ast.FuncDecl:
matches := testNameRegexp.MatchString(node.Name.Name)
if matches && receivesTestingT(node) {
testsToRewrite = append(testsToRewrite, node)
}
}
return true
})
return
}
/*
* convenience function that looks at args to a function and determines if its
* params include an argument of type *testing.T
*/
func receivesTestingT(node *ast.FuncDecl) bool {
if len(node.Type.Params.List) != 1 {
return false
}
base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
if !ok {
return false
}
intermediate, ok := base.X.(*ast.SelectorExpr)
if !ok {
return false
}
pkgIdent, ok := intermediate.X.(*ast.Ident)
isTestingPackage := ok && pkgIdent.Name == "testing"
isTestingT := intermediate.Sel.Name == "T"
return isTestingPackage && isTestingT
}

Some files were not shown because too many files have changed in this diff.