mirror of
https://github.com/octoleo/syncthing.git
synced 2024-11-09 23:00:58 +00:00
Use vendored dependencies, new relay/client location
This commit is contained in:
parent
3b2adc9a3e
commit
969d7c802d
93
Godeps/Godeps.json
generated
Normal file
93
Godeps/Godeps.json
generated
Normal file
@ -0,0 +1,93 @@
|
||||
{
|
||||
"ImportPath": "github.com/syncthing/discosrv",
|
||||
"GoVersion": "go1.5.1",
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/bkaradzic/go-lz4",
|
||||
"Rev": "74ddf82598bc4745b965729e9c6a463bedd33049"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/calmh/logger",
|
||||
"Rev": "c96f6a1a8c7b6bf2f4860c667867d90174799eb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/calmh/luhn",
|
||||
"Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/calmh/xdr",
|
||||
"Rev": "5f7208e86762911861c94f1849eddbfc0a60cbf0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/camlistore/lock",
|
||||
"Rev": "ae27720f340952636b826119b58130b9c1a847a0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/b",
|
||||
"Rev": "e2e747ce049fb910cff6b1fd7ad8faf3900939d5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/bufs",
|
||||
"Rev": "3dcccbd7064a1689f9c093a988ea11ac00e21f51"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/exp/lldb",
|
||||
"Rev": "36265f1914ea00990ff0b73f72350edf9b1850df"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/fileutil",
|
||||
"Rev": "1c9c88fbf552b3737c7b97e1f243860359687976"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/mathutil",
|
||||
"Rev": "a804f0f2d8521e22d6adabf02cbec61dc1f9dbd2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/ql",
|
||||
"Rev": "9c77931b60c6317f94de402268bbc3e8334b71f4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/sortutil",
|
||||
"Rev": "4c7342852e65c2088c981288f2c5610d10b9f7f4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/strutil",
|
||||
"Rev": "1eb03e3cc9d345307a45ec82bd3016cde4bd4464"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cznic/zappy",
|
||||
"Rev": "47331054e4f96186e3ff772877c0443909368a45"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/groupcache/lru",
|
||||
"Rev": "604ed5785183e59ae2789449d89e73f3a2a77987"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/juju/ratelimit",
|
||||
"Rev": "772f5c38e468398c4511514f4f6aa9a4185bc0a0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/lib/pq",
|
||||
"Comment": "go1.0-cutoff-47-g93e9980",
|
||||
"Rev": "93e9980741c9e593411b94e07d5bad8cfb4809db"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syncthing/syncthing/lib/protocol",
|
||||
"Comment": "v0.12.0-beta1-119-g24c499d",
|
||||
"Rev": "24c499d2822ae891c95406066456872e8d6c8164"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/thejerf/suture",
|
||||
"Comment": "v1.0.1",
|
||||
"Rev": "99c1f2d613756768fc4299acd9dc621e11ed3fd7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/transform",
|
||||
"Rev": "723492b65e225eafcba054e76ba18bb9c5ac1ea2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||
"Rev": "723492b65e225eafcba054e76ba18bb9c5ac1ea2"
|
||||
}
|
||||
]
|
||||
}
|
5
Godeps/Readme
generated
Normal file
5
Godeps/Readme
generated
Normal file
@ -0,0 +1,5 @@
|
||||
This directory tree is generated automatically by godep.
|
||||
|
||||
Please do not edit.
|
||||
|
||||
See https://github.com/tools/godep for more information.
|
2
Godeps/_workspace/.gitignore
generated
vendored
Normal file
2
Godeps/_workspace/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
/pkg
|
||||
/bin
|
1
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.gitignore
generated
vendored
Normal file
1
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
/lz4-example/lz4-example
|
9
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml
generated
vendored
Normal file
9
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.1
|
||||
- 1.2
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- tip
|
24
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/LICENSE
generated
vendored
Normal file
24
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/LICENSE
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
71
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/README.md
generated
vendored
Normal file
71
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/README.md
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
go-lz4
|
||||
======
|
||||
|
||||
go-lz4 is port of LZ4 lossless compression algorithm to Go. The original C code
|
||||
is located at:
|
||||
|
||||
https://github.com/Cyan4973/lz4
|
||||
|
||||
Status
|
||||
------
|
||||
[![Build Status](https://secure.travis-ci.org/bkaradzic/go-lz4.png)](http://travis-ci.org/bkaradzic/go-lz4)
|
||||
[![GoDoc](https://godoc.org/github.com/bkaradzic/go-lz4?status.png)](https://godoc.org/github.com/bkaradzic/go-lz4)
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
go get github.com/bkaradzic/go-lz4
|
||||
|
||||
import "github.com/bkaradzic/go-lz4"
|
||||
|
||||
The package name is `lz4`
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
* go-lz4 saves a uint32 with the original uncompressed length at the beginning
|
||||
of the encoded buffer. They may get in the way of interoperability with
|
||||
other implementations.
|
||||
|
||||
Contributors
|
||||
------------
|
||||
|
||||
Damian Gryski ([@dgryski](https://github.com/dgryski))
|
||||
Dustin Sallings ([@dustin](https://github.com/dustin))
|
||||
|
||||
Contact
|
||||
-------
|
||||
|
||||
[@bkaradzic](https://twitter.com/bkaradzic)
|
||||
http://www.stuckingeometry.com
|
||||
|
||||
Project page
|
||||
https://github.com/bkaradzic/go-lz4
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
Copyright 2013 Damian Gryski. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
23
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzz.go
generated
vendored
Normal file
23
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzz.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
// +build gofuzz
|
||||
|
||||
package lz4
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
|
||||
if len(data) < 4 {
|
||||
return 0
|
||||
}
|
||||
|
||||
ln := binary.LittleEndian.Uint32(data)
|
||||
if ln > (1 << 21) {
|
||||
return 0
|
||||
}
|
||||
|
||||
if _, err := Decode(nil, data); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return 1
|
||||
}
|
74
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzzer/main.go
generated
vendored
Normal file
74
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/fuzzer/main.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
|
||||
"github.com/bkaradzic/go-lz4"
|
||||
|
||||
// lz4's API matches snappy's, so we can easily see how it performs
|
||||
// lz4 "code.google.com/p/snappy-go/snappy"
|
||||
)
|
||||
|
||||
var input = `
|
||||
ADVENTURE I. A SCANDAL IN BOHEMIA
|
||||
|
||||
I.
|
||||
|
||||
To Sherlock Holmes she is always THE woman. I have seldom heard
|
||||
him mention her under any other name. In his eyes she eclipses
|
||||
and predominates the whole of her sex. It was not that he felt
|
||||
any emotion akin to love for Irene Adler. All emotions, and that
|
||||
one particularly, were abhorrent to his cold, precise but
|
||||
admirably balanced mind. He was, I take it, the most perfect
|
||||
reasoning and observing machine that the world has seen, but as a
|
||||
lover he would have placed himself in a false position. He never
|
||||
spoke of the softer passions, save with a gibe and a sneer. They
|
||||
were admirable things for the observer--excellent for drawing the
|
||||
veil from men's motives and actions. But for the trained reasoner
|
||||
to admit such intrusions into his own delicate and finely
|
||||
adjusted temperament was to introduce a distracting factor which
|
||||
might throw a doubt upon all his mental results. Grit in a
|
||||
sensitive instrument, or a crack in one of his own high-power
|
||||
lenses, would not be more disturbing than a strong emotion in a
|
||||
nature such as his. And yet there was but one woman to him, and
|
||||
that woman was the late Irene Adler, of dubious and questionable
|
||||
memory.
|
||||
|
||||
I had seen little of Holmes lately. My marriage had drifted us
|
||||
away from each other. My own complete happiness, and the
|
||||
home-centred interests which rise up around the man who first
|
||||
finds himself master of his own establishment, were sufficient to
|
||||
absorb all my attention, while Holmes, who loathed every form of
|
||||
society with his whole Bohemian soul, remained in our lodgings in
|
||||
Baker Street, buried among his old books, and alternating from
|
||||
week to week between cocaine and ambition, the drowsiness of the
|
||||
drug, and the fierce energy of his own keen nature. He was still,
|
||||
as ever, deeply attracted by the study of crime, and occupied his
|
||||
immense faculties and extraordinary powers of observation in
|
||||
following out those clues, and clearing up those mysteries which
|
||||
had been abandoned as hopeless by the official police. From time
|
||||
to time I heard some vague account of his doings: of his summons
|
||||
to Odessa in the case of the Trepoff murder, of his clearing up
|
||||
of the singular tragedy of the Atkinson brothers at Trincomalee,
|
||||
and finally of the mission which he had accomplished so
|
||||
delicately and successfully for the reigning family of Holland.
|
||||
Beyond these signs of his activity, however, which I merely
|
||||
shared with all the readers of the daily press, I knew little of
|
||||
my former friend and companion.
|
||||
`
|
||||
|
||||
func main() {
|
||||
|
||||
compressed, _ := lz4.Encode(nil, []byte(input))
|
||||
|
||||
modified := make([]byte, len(compressed))
|
||||
|
||||
for {
|
||||
copy(modified, compressed)
|
||||
for i := 0; i < 100; i++ {
|
||||
modified[rand.Intn(len(compressed)-4)+4] = byte(rand.Intn(256))
|
||||
}
|
||||
lz4.Decode(nil, modified)
|
||||
}
|
||||
|
||||
}
|
94
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4-example/main.go
generated
vendored
Normal file
94
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4-example/main.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright 2011 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
|
||||
lz4 "github.com/bkaradzic/go-lz4"
|
||||
)
|
||||
|
||||
var (
|
||||
decompress = flag.Bool("d", false, "decompress")
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
var optCPUProfile = flag.String("cpuprofile", "", "profile")
|
||||
flag.Parse()
|
||||
|
||||
if *optCPUProfile != "" {
|
||||
f, err := os.Create(*optCPUProfile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
pprof.StartCPUProfile(f)
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
|
||||
var data []byte
|
||||
|
||||
if len(args) < 2 {
|
||||
fmt.Print("Usage: lz4 [-d] <input> <output>\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
input, err := os.OpenFile(args[0], os.O_RDONLY, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to open input file %s\n", args[0])
|
||||
os.Exit(1)
|
||||
}
|
||||
defer input.Close()
|
||||
|
||||
if *decompress {
|
||||
data, _ = ioutil.ReadAll(input)
|
||||
data, err = lz4.Decode(nil, data)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to decode:", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
data, _ = ioutil.ReadAll(input)
|
||||
data, err = lz4.Encode(nil, data)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to encode:", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(args[1], data, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to open output file %s\n", args[1])
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
63
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4_test.go
generated
vendored
Normal file
63
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/lz4_test.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testfile, _ = ioutil.ReadFile("testdata/pg1661.txt")
|
||||
|
||||
func roundtrip(t *testing.T, input []byte) {
|
||||
|
||||
dst, err := Encode(nil, input)
|
||||
if err != nil {
|
||||
t.Errorf("got error during compression: %s", err)
|
||||
}
|
||||
|
||||
output, err := Decode(nil, dst)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("got error during decompress: %s", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(output, input) {
|
||||
t.Errorf("roundtrip failed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
roundtrip(t, nil)
|
||||
}
|
||||
|
||||
func TestLengths(t *testing.T) {
|
||||
|
||||
for i := 0; i < 1024; i++ {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
|
||||
for i := 1024; i < 4096; i += 23 {
|
||||
roundtrip(t, testfile[:i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestWords(t *testing.T) {
|
||||
roundtrip(t, testfile)
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Encode(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Encode(nil, testfile)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLZ4Decode(b *testing.B) {
|
||||
|
||||
var compressed, _ = Encode(nil, testfile)
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
Decode(nil, compressed)
|
||||
}
|
||||
}
|
199
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/reader.go
generated
vendored
Normal file
199
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/reader.go
generated
vendored
Normal file
@ -0,0 +1,199 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt indicates the input was corrupt
|
||||
ErrCorrupt = errors.New("corrupt input")
|
||||
)
|
||||
|
||||
const (
|
||||
mlBits = 4
|
||||
mlMask = (1 << mlBits) - 1
|
||||
runBits = 8 - mlBits
|
||||
runMask = (1 << runBits) - 1
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
spos uint32
|
||||
dpos uint32
|
||||
ref uint32
|
||||
}
|
||||
|
||||
func (d *decoder) readByte() (uint8, error) {
|
||||
if int(d.spos) == len(d.src) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
b := d.src[d.spos]
|
||||
d.spos++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) getLen() (uint32, error) {
|
||||
|
||||
length := uint32(0)
|
||||
ln, err := d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
for ln == 255 {
|
||||
length += 255
|
||||
ln, err = d.readByte()
|
||||
if err != nil {
|
||||
return 0, ErrCorrupt
|
||||
}
|
||||
}
|
||||
length += uint32(ln)
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
func (d *decoder) cp(length, decr uint32) {
|
||||
|
||||
if int(d.ref+length) < int(d.dpos) {
|
||||
copy(d.dst[d.dpos:], d.dst[d.ref:d.ref+length])
|
||||
} else {
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.dst[d.ref+ii]
|
||||
}
|
||||
}
|
||||
d.dpos += length
|
||||
d.ref += length - decr
|
||||
}
|
||||
|
||||
func (d *decoder) finish(err error) error {
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a
|
||||
// subslice of dst if it was large enough to hold the entire decoded block.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) < 4 {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
uncompressedLen := binary.LittleEndian.Uint32(src)
|
||||
|
||||
if uncompressedLen == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if uncompressedLen > MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if dst == nil || len(dst) < int(uncompressedLen) {
|
||||
dst = make([]byte, uncompressedLen)
|
||||
}
|
||||
|
||||
d := decoder{src: src, dst: dst[:uncompressedLen], spos: 4}
|
||||
|
||||
decr := []uint32{0, 3, 2, 3}
|
||||
|
||||
for {
|
||||
code, err := d.readByte()
|
||||
if err != nil {
|
||||
return d.dst, d.finish(err)
|
||||
}
|
||||
|
||||
length := uint32(code >> mlBits)
|
||||
if length == runMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
if int(d.spos+length) > len(d.src) || int(d.dpos+length) > len(d.dst) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
d.dst[d.dpos+ii] = d.src[d.spos+ii]
|
||||
}
|
||||
|
||||
d.spos += length
|
||||
d.dpos += length
|
||||
|
||||
if int(d.spos) == len(d.src) {
|
||||
return d.dst, nil
|
||||
}
|
||||
|
||||
if int(d.spos+2) >= len(d.src) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
back := uint32(d.src[d.spos]) | uint32(d.src[d.spos+1])<<8
|
||||
|
||||
if back > d.dpos {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.spos += 2
|
||||
d.ref = d.dpos - back
|
||||
|
||||
length = uint32(code & mlMask)
|
||||
if length == mlMask {
|
||||
ln, err := d.getLen()
|
||||
if err != nil {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
length += ln
|
||||
}
|
||||
|
||||
literal := d.dpos - d.ref
|
||||
|
||||
if literal < 4 {
|
||||
if int(d.dpos+4) > len(d.dst) {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(4, decr[literal])
|
||||
} else {
|
||||
length += 4
|
||||
}
|
||||
|
||||
if d.dpos+length > uncompressedLen {
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
d.cp(length, 0)
|
||||
}
|
||||
}
|
13052
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/testdata/pg1661.txt
generated
vendored
Normal file
13052
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/testdata/pg1661.txt
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
190
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/writer.go
generated
vendored
Normal file
190
Godeps/_workspace/src/github.com/bkaradzic/go-lz4/writer.go
generated
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
/*
|
||||
* Copyright 2011-2012 Branimir Karadzic. All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without modification,
|
||||
* are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice, this
|
||||
* list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
|
||||
* and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
package lz4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
const (
|
||||
minMatch = 4
|
||||
hashLog = 17
|
||||
hashTableSize = 1 << hashLog
|
||||
hashShift = (minMatch * 8) - hashLog
|
||||
incompressible uint32 = 128
|
||||
uninitHash = 0x88888888
|
||||
|
||||
// MaxInputSize is the largest buffer than can be compressed in a single block
|
||||
MaxInputSize = 0x7E000000
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooLarge indicates the input buffer was too large
|
||||
ErrTooLarge = errors.New("input too large")
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
src []byte
|
||||
dst []byte
|
||||
hashTable []uint32
|
||||
pos uint32
|
||||
anchor uint32
|
||||
dpos uint32
|
||||
}
|
||||
|
||||
// CompressBound returns the maximum length of a lz4 block, given it's uncompressed length
|
||||
func CompressBound(isize int) int {
|
||||
if isize > MaxInputSize {
|
||||
return 0
|
||||
}
|
||||
return isize + ((isize) / 255) + 16 + 4
|
||||
}
|
||||
|
||||
func (e *encoder) writeLiterals(length, mlLen, pos uint32) {
|
||||
|
||||
ln := length
|
||||
|
||||
var code byte
|
||||
if ln > runMask-1 {
|
||||
code = runMask
|
||||
} else {
|
||||
code = byte(ln)
|
||||
}
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlMask)
|
||||
} else {
|
||||
e.dst[e.dpos] = (code << mlBits) + byte(mlLen)
|
||||
}
|
||||
e.dpos++
|
||||
|
||||
if code == runMask {
|
||||
ln -= runMask
|
||||
for ; ln > 254; ln -= 255 {
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(ln)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
for ii := uint32(0); ii < length; ii++ {
|
||||
e.dst[e.dpos+ii] = e.src[pos+ii]
|
||||
}
|
||||
|
||||
e.dpos += length
|
||||
}
|
||||
|
||||
// Encode returns the encoded form of src. The returned array may be a
|
||||
// sub-slice of dst if it was large enough to hold the entire output.
|
||||
func Encode(dst, src []byte) ([]byte, error) {
|
||||
|
||||
if len(src) >= MaxInputSize {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
if n := CompressBound(len(src)); len(dst) < n {
|
||||
dst = make([]byte, n)
|
||||
}
|
||||
|
||||
e := encoder{src: src, dst: dst, hashTable: make([]uint32, hashTableSize)}
|
||||
|
||||
binary.LittleEndian.PutUint32(dst, uint32(len(src)))
|
||||
e.dpos = 4
|
||||
|
||||
var (
|
||||
step uint32 = 1
|
||||
limit = incompressible
|
||||
)
|
||||
|
||||
for {
|
||||
if int(e.pos)+12 >= len(e.src) {
|
||||
e.writeLiterals(uint32(len(e.src))-e.anchor, 0, e.anchor)
|
||||
return e.dst[:e.dpos], nil
|
||||
}
|
||||
|
||||
sequence := uint32(e.src[e.pos+3])<<24 | uint32(e.src[e.pos+2])<<16 | uint32(e.src[e.pos+1])<<8 | uint32(e.src[e.pos+0])
|
||||
|
||||
hash := (sequence * 2654435761) >> hashShift
|
||||
ref := e.hashTable[hash] + uninitHash
|
||||
e.hashTable[hash] = e.pos - uninitHash
|
||||
|
||||
if ((e.pos-ref)>>16) != 0 || uint32(e.src[ref+3])<<24|uint32(e.src[ref+2])<<16|uint32(e.src[ref+1])<<8|uint32(e.src[ref+0]) != sequence {
|
||||
if e.pos-e.anchor > limit {
|
||||
limit <<= 1
|
||||
step += 1 + (step >> 2)
|
||||
}
|
||||
e.pos += step
|
||||
continue
|
||||
}
|
||||
|
||||
if step > 1 {
|
||||
e.hashTable[hash] = ref - uninitHash
|
||||
e.pos -= step - 1
|
||||
step = 1
|
||||
continue
|
||||
}
|
||||
limit = incompressible
|
||||
|
||||
ln := e.pos - e.anchor
|
||||
back := e.pos - ref
|
||||
|
||||
anchor := e.anchor
|
||||
|
||||
e.pos += minMatch
|
||||
ref += minMatch
|
||||
e.anchor = e.pos
|
||||
|
||||
for int(e.pos) < len(e.src)-5 && e.src[e.pos] == e.src[ref] {
|
||||
e.pos++
|
||||
ref++
|
||||
}
|
||||
|
||||
mlLen := e.pos - e.anchor
|
||||
|
||||
e.writeLiterals(ln, mlLen, anchor)
|
||||
e.dst[e.dpos] = uint8(back)
|
||||
e.dst[e.dpos+1] = uint8(back >> 8)
|
||||
e.dpos += 2
|
||||
|
||||
if mlLen > mlMask-1 {
|
||||
mlLen -= mlMask
|
||||
for mlLen > 254 {
|
||||
mlLen -= 255
|
||||
|
||||
e.dst[e.dpos] = 255
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.dst[e.dpos] = byte(mlLen)
|
||||
e.dpos++
|
||||
}
|
||||
|
||||
e.anchor = e.pos
|
||||
}
|
||||
}
|
19
Godeps/_workspace/src/github.com/calmh/logger/LICENSE
generated
vendored
Normal file
19
Godeps/_workspace/src/github.com/calmh/logger/LICENSE
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
Copyright (C) 2013 Jakob Borg
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
15
Godeps/_workspace/src/github.com/calmh/logger/README.md
generated
vendored
Normal file
15
Godeps/_workspace/src/github.com/calmh/logger/README.md
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
logger
|
||||
======
|
||||
|
||||
A small wrapper around `log` to provide log levels.
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
http://godoc.org/github.com/calmh/logger
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
MIT
|
||||
|
187
Godeps/_workspace/src/github.com/calmh/logger/logger.go
generated
vendored
Normal file
187
Godeps/_workspace/src/github.com/calmh/logger/logger.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package logger implements a standardized logger with callback functionality
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type LogLevel int
|
||||
|
||||
const (
|
||||
LevelDebug LogLevel = iota
|
||||
LevelVerbose
|
||||
LevelInfo
|
||||
LevelOK
|
||||
LevelWarn
|
||||
LevelFatal
|
||||
NumLevels
|
||||
)
|
||||
|
||||
// A MessageHandler is called with the log level and message text.
|
||||
type MessageHandler func(l LogLevel, msg string)
|
||||
|
||||
type Logger struct {
|
||||
logger *log.Logger
|
||||
handlers [NumLevels][]MessageHandler
|
||||
mut sync.Mutex
|
||||
}
|
||||
|
||||
// The default logger logs to standard output with a time prefix.
|
||||
var DefaultLogger = New()
|
||||
|
||||
func New() *Logger {
|
||||
if os.Getenv("LOGGER_DISCARD") != "" {
|
||||
// Hack to completely disable logging, for example when running benchmarks.
|
||||
return &Logger{
|
||||
logger: log.New(ioutil.Discard, "", 0),
|
||||
}
|
||||
}
|
||||
|
||||
return &Logger{
|
||||
logger: log.New(os.Stdout, "", log.Ltime),
|
||||
}
|
||||
}
|
||||
|
||||
// AddHandler registers a new MessageHandler to receive messages with the
|
||||
// specified log level or above.
|
||||
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
l.handlers[level] = append(l.handlers[level], h)
|
||||
}
|
||||
|
||||
// See log.SetFlags
|
||||
func (l *Logger) SetFlags(flag int) {
|
||||
l.logger.SetFlags(flag)
|
||||
}
|
||||
|
||||
// See log.SetPrefix
|
||||
func (l *Logger) SetPrefix(prefix string) {
|
||||
l.logger.SetPrefix(prefix)
|
||||
}
|
||||
|
||||
func (l *Logger) callHandlers(level LogLevel, s string) {
|
||||
for _, h := range l.handlers[level] {
|
||||
h(level, strings.TrimSpace(s))
|
||||
}
|
||||
}
|
||||
|
||||
// Debugln logs a line with a DEBUG prefix.
|
||||
func (l *Logger) Debugln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "DEBUG: "+s)
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Debugf logs a formatted line with a DEBUG prefix.
|
||||
func (l *Logger) Debugf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "DEBUG: "+s)
|
||||
l.callHandlers(LevelDebug, s)
|
||||
}
|
||||
|
||||
// Infoln logs a line with a VERBOSE prefix.
|
||||
func (l *Logger) Verboseln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "VERBOSE: "+s)
|
||||
l.callHandlers(LevelVerbose, s)
|
||||
}
|
||||
|
||||
// Infof logs a formatted line with a VERBOSE prefix.
|
||||
func (l *Logger) Verbosef(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "VERBOSE: "+s)
|
||||
l.callHandlers(LevelVerbose, s)
|
||||
}
|
||||
|
||||
// Infoln logs a line with an INFO prefix.
|
||||
func (l *Logger) Infoln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "INFO: "+s)
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Infof logs a formatted line with an INFO prefix.
|
||||
func (l *Logger) Infof(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "INFO: "+s)
|
||||
l.callHandlers(LevelInfo, s)
|
||||
}
|
||||
|
||||
// Okln logs a line with an OK prefix.
|
||||
func (l *Logger) Okln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "OK: "+s)
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Okf logs a formatted line with an OK prefix.
|
||||
func (l *Logger) Okf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "OK: "+s)
|
||||
l.callHandlers(LevelOK, s)
|
||||
}
|
||||
|
||||
// Warnln logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "WARNING: "+s)
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Warnf logs a formatted line with a WARNING prefix.
|
||||
func (l *Logger) Warnf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "WARNING: "+s)
|
||||
l.callHandlers(LevelWarn, s)
|
||||
}
|
||||
|
||||
// Fatalln logs a line with a FATAL prefix and exits the process with exit
|
||||
// code 1.
|
||||
func (l *Logger) Fatalln(vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintln(vals...)
|
||||
l.logger.Output(2, "FATAL: "+s)
|
||||
l.callHandlers(LevelFatal, s)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalf logs a formatted line with a FATAL prefix and exits the process with
|
||||
// exit code 1.
|
||||
func (l *Logger) Fatalf(format string, vals ...interface{}) {
|
||||
l.mut.Lock()
|
||||
defer l.mut.Unlock()
|
||||
s := fmt.Sprintf(format, vals...)
|
||||
l.logger.Output(2, "FATAL: "+s)
|
||||
l.callHandlers(LevelFatal, s)
|
||||
os.Exit(1)
|
||||
}
|
58
Godeps/_workspace/src/github.com/calmh/logger/logger_test.go
generated
vendored
Normal file
58
Godeps/_workspace/src/github.com/calmh/logger/logger_test.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package logger
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAPI(t *testing.T) {
|
||||
l := New()
|
||||
l.SetFlags(0)
|
||||
l.SetPrefix("testing")
|
||||
|
||||
debug := 0
|
||||
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug))
|
||||
info := 0
|
||||
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info))
|
||||
warn := 0
|
||||
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
|
||||
ok := 0
|
||||
l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok))
|
||||
|
||||
l.Debugf("test %d", 0)
|
||||
l.Debugln("test", 0)
|
||||
l.Infof("test %d", 1)
|
||||
l.Infoln("test", 1)
|
||||
l.Warnf("test %d", 2)
|
||||
l.Warnln("test", 2)
|
||||
l.Okf("test %d", 3)
|
||||
l.Okln("test", 3)
|
||||
|
||||
if debug != 2 {
|
||||
t.Errorf("Debug handler called %d != 2 times", debug)
|
||||
}
|
||||
if info != 2 {
|
||||
t.Errorf("Info handler called %d != 2 times", info)
|
||||
}
|
||||
if warn != 2 {
|
||||
t.Errorf("Warn handler called %d != 2 times", warn)
|
||||
}
|
||||
if ok != 2 {
|
||||
t.Errorf("Ok handler called %d != 2 times", ok)
|
||||
}
|
||||
}
|
||||
|
||||
func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) {
|
||||
return func(l LogLevel, msg string) {
|
||||
*counter++
|
||||
if l != expectl {
|
||||
t.Errorf("Incorrect message level %d != %d", l, expectl)
|
||||
}
|
||||
if !strings.HasSuffix(msg, expectmsg) {
|
||||
t.Errorf("%q does not end with %q", msg, expectmsg)
|
||||
}
|
||||
}
|
||||
}
|
19
Godeps/_workspace/src/github.com/calmh/luhn/LICENSE
generated
vendored
Normal file
19
Godeps/_workspace/src/github.com/calmh/luhn/LICENSE
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014 Jakob Borg
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
70
Godeps/_workspace/src/github.com/calmh/luhn/luhn.go
generated
vendored
Normal file
70
Godeps/_workspace/src/github.com/calmh/luhn/luhn.go
generated
vendored
Normal file
@ -0,0 +1,70 @@
|
||||
// Copyright (C) 2014 Jakob Borg
|
||||
|
||||
// Package luhn generates and validates Luhn mod N check digits.
|
||||
package luhn
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// An alphabet is a string of N characters, representing the digits of a given
|
||||
// base N.
|
||||
type Alphabet string
|
||||
|
||||
var (
|
||||
Base32 Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
|
||||
)
|
||||
|
||||
// Generate returns a check digit for the string s, which should be composed
|
||||
// of characters from the Alphabet a.
|
||||
func (a Alphabet) Generate(s string) (rune, error) {
|
||||
if err := a.check(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
factor := 1
|
||||
sum := 0
|
||||
n := len(a)
|
||||
|
||||
for i := range s {
|
||||
codepoint := strings.IndexByte(string(a), s[i])
|
||||
if codepoint == -1 {
|
||||
return 0, fmt.Errorf("Digit %q not valid in alphabet %q", s[i], a)
|
||||
}
|
||||
addend := factor * codepoint
|
||||
if factor == 2 {
|
||||
factor = 1
|
||||
} else {
|
||||
factor = 2
|
||||
}
|
||||
addend = (addend / n) + (addend % n)
|
||||
sum += addend
|
||||
}
|
||||
remainder := sum % n
|
||||
checkCodepoint := (n - remainder) % n
|
||||
return rune(a[checkCodepoint]), nil
|
||||
}
|
||||
|
||||
// Validate returns true if the last character of the string s is correct, for
|
||||
// a string s composed of characters in the alphabet a.
|
||||
func (a Alphabet) Validate(s string) bool {
|
||||
t := s[:len(s)-1]
|
||||
c, err := a.Generate(t)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return rune(s[len(s)-1]) == c
|
||||
}
|
||||
|
||||
// check returns an error if the given alphabet does not consist of unique characters
|
||||
func (a Alphabet) check() error {
|
||||
cm := make(map[byte]bool, len(a))
|
||||
for i := range a {
|
||||
if cm[a[i]] {
|
||||
return fmt.Errorf("Digit %q non-unique in alphabet %q", a[i], a)
|
||||
}
|
||||
cm[a[i]] = true
|
||||
}
|
||||
return nil
|
||||
}
|
59
Godeps/_workspace/src/github.com/calmh/luhn/luhn_test.go
generated
vendored
Normal file
59
Godeps/_workspace/src/github.com/calmh/luhn/luhn_test.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright (C) 2014 Jakob Borg
|
||||
|
||||
package luhn_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/luhn"
|
||||
)
|
||||
|
||||
func TestGenerate(t *testing.T) {
|
||||
// Base 6 Luhn
|
||||
a := luhn.Alphabet("abcdef")
|
||||
c, err := a.Generate("abcdef")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c != 'e' {
|
||||
t.Errorf("Incorrect check digit %c != e", c)
|
||||
}
|
||||
|
||||
// Base 10 Luhn
|
||||
a = luhn.Alphabet("0123456789")
|
||||
c, err = a.Generate("7992739871")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c != '3' {
|
||||
t.Errorf("Incorrect check digit %c != 3", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidString(t *testing.T) {
|
||||
a := luhn.Alphabet("ABC")
|
||||
_, err := a.Generate("7992739871")
|
||||
t.Log(err)
|
||||
if err == nil {
|
||||
t.Error("Unexpected nil error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBadAlphabet(t *testing.T) {
|
||||
a := luhn.Alphabet("01234566789")
|
||||
_, err := a.Generate("7992739871")
|
||||
t.Log(err)
|
||||
if err == nil {
|
||||
t.Error("Unexpected nil error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
a := luhn.Alphabet("abcdef")
|
||||
if !a.Validate("abcdefe") {
|
||||
t.Errorf("Incorrect validation response for abcdefe")
|
||||
}
|
||||
if a.Validate("abcdefd") {
|
||||
t.Errorf("Incorrect validation response for abcdefd")
|
||||
}
|
||||
}
|
1
Godeps/_workspace/src/github.com/calmh/xdr/.gitignore
generated
vendored
Normal file
1
Godeps/_workspace/src/github.com/calmh/xdr/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
coverage.out
|
19
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml
generated
vendored
Normal file
19
Godeps/_workspace/src/github.com/calmh/xdr/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
language: go
|
||||
go:
|
||||
- tip
|
||||
|
||||
install:
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
- go get code.google.com/p/go.tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- ./generate.sh
|
||||
- go test -coverprofile=coverage.out
|
||||
|
||||
after_success:
|
||||
- goveralls -coverprofile=coverage.out -service=travis-ci -package=calmh/xdr -repotoken="$COVERALLS_TOKEN"
|
||||
|
||||
env:
|
||||
global:
|
||||
secure: SmgnrGfp2zLrA44ChRMpjPeujubt9veZ8Fx/OseMWECmacyV5N/TuDhzIbwo6QwV4xB0sBacoPzvxQbJRVjNKsPiSu72UbcQmQ7flN4Tf7nW09tSh1iW8NgrpBCq/3UYLoBu2iPBEBKm93IK0aGNAKs6oEkB0fU27iTVBwiTXOY=
|
19
Godeps/_workspace/src/github.com/calmh/xdr/LICENSE
generated
vendored
Normal file
19
Godeps/_workspace/src/github.com/calmh/xdr/LICENSE
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014 Jakob Borg.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to
|
||||
deal in the Software without restriction, including without limitation the
|
||||
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
sell copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
- The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
IN THE SOFTWARE.
|
12
Godeps/_workspace/src/github.com/calmh/xdr/README.md
generated
vendored
Normal file
12
Godeps/_workspace/src/github.com/calmh/xdr/README.md
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
xdr
|
||||
===
|
||||
|
||||
[![Build Status](https://img.shields.io/travis/calmh/xdr.svg?style=flat)](https://travis-ci.org/calmh/xdr)
|
||||
[![Coverage Status](https://img.shields.io/coveralls/calmh/xdr.svg?style=flat)](https://coveralls.io/r/calmh/xdr?branch=master)
|
||||
[![API Documentation](http://img.shields.io/badge/api-Godoc-blue.svg?style=flat)](http://godoc.org/github.com/calmh/xdr)
|
||||
[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](http://opensource.org/licenses/MIT)
|
||||
|
||||
This is an XDR encoding/decoding library. It uses code generation and
|
||||
not reflection. It supports the IPDR bastardized XDR format when built
|
||||
with `-tags ipdr`.
|
||||
|
117
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
Normal file
117
Godeps/_workspace/src/github.com/calmh/xdr/bench_test.go
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
type XDRBenchStruct struct {
|
||||
I1 uint64
|
||||
I2 uint32
|
||||
I3 uint16
|
||||
I4 uint8
|
||||
Bs0 []byte // max:128
|
||||
Bs1 []byte
|
||||
S0 string // max:128
|
||||
S1 string
|
||||
}
|
||||
|
||||
var res []byte // no to be optimized away
|
||||
var s = XDRBenchStruct{
|
||||
I1: 42,
|
||||
I2: 43,
|
||||
I3: 44,
|
||||
I4: 45,
|
||||
Bs0: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
|
||||
Bs1: []byte{11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
|
||||
S0: "Hello World! String one.",
|
||||
S1: "Hello World! String two.",
|
||||
}
|
||||
var e []byte
|
||||
|
||||
func init() {
|
||||
e, _ = s.MarshalXDR()
|
||||
}
|
||||
|
||||
func BenchmarkThisMarshal(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
res, _ = s.MarshalXDR()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisUnmarshal(b *testing.B) {
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.UnmarshalXDR(e)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisEncode(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.EncodeXDR(ioutil.Discard)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisEncoder(b *testing.B) {
|
||||
w := xdr.NewWriter(ioutil.Discard)
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.EncodeXDRInto(w)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type repeatReader struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func (r *repeatReader) Read(bs []byte) (n int, err error) {
|
||||
if len(bs) > len(r.data) {
|
||||
err = io.EOF
|
||||
}
|
||||
n = copy(bs, r.data)
|
||||
r.data = r.data[n:]
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *repeatReader) Reset(bs []byte) {
|
||||
r.data = bs
|
||||
}
|
||||
|
||||
func BenchmarkThisDecode(b *testing.B) {
|
||||
rr := &repeatReader{e}
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.DecodeXDR(rr)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkThisDecoder(b *testing.B) {
|
||||
rr := &repeatReader{e}
|
||||
r := xdr.NewReader(rr)
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := t.DecodeXDRFrom(r)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
rr.Reset(e)
|
||||
}
|
||||
}
|
201
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
Normal file
201
Godeps/_workspace/src/github.com/calmh/xdr/bench_xdr_test.go
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
XDRBenchStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I1 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| I2 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | I3 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ uint8 Structure \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of Bs1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Bs1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S0 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S0 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S1 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S1 (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct XDRBenchStruct {
|
||||
unsigned hyper I1;
|
||||
unsigned int I2;
|
||||
unsigned int I3;
|
||||
uint8 I4;
|
||||
opaque Bs0<128>;
|
||||
opaque Bs1<>;
|
||||
string S0<128>;
|
||||
string S1<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o XDRBenchStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o XDRBenchStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(o.I1)
|
||||
xw.WriteUint32(o.I2)
|
||||
xw.WriteUint16(o.I3)
|
||||
xw.WriteUint8(o.I4)
|
||||
if l := len(o.Bs0); l > 128 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Bs0", l, 128)
|
||||
}
|
||||
xw.WriteBytes(o.Bs0)
|
||||
xw.WriteBytes(o.Bs1)
|
||||
if l := len(o.S0); l > 128 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("S0", l, 128)
|
||||
}
|
||||
xw.WriteString(o.S0)
|
||||
xw.WriteString(o.S1)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *XDRBenchStruct) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.I1 = xr.ReadUint64()
|
||||
o.I2 = xr.ReadUint32()
|
||||
o.I3 = xr.ReadUint16()
|
||||
o.I4 = xr.ReadUint8()
|
||||
o.Bs0 = xr.ReadBytesMax(128)
|
||||
o.Bs1 = xr.ReadBytes()
|
||||
o.S0 = xr.ReadStringMax(128)
|
||||
o.S1 = xr.ReadString()
|
||||
return xr.Error()
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
repeatReader Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of data |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ data (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct repeatReader {
|
||||
opaque data<>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o repeatReader) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o repeatReader) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o repeatReader) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o repeatReader) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o repeatReader) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteBytes(o.data)
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *repeatReader) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *repeatReader) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.data = xr.ReadBytes()
|
||||
return xr.Error()
|
||||
}
|
467
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
Normal file
467
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go
generated
vendored
Normal file
@ -0,0 +1,467 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type fieldInfo struct {
|
||||
Name string
|
||||
IsBasic bool // handled by one the native Read/WriteUint64 etc functions
|
||||
IsSlice bool // field is a slice of FieldType
|
||||
FieldType string // original type of field, i.e. "int"
|
||||
Encoder string // the encoder name, i.e. "Uint64" for Read/WriteUint64
|
||||
Convert string // what to convert to when encoding, i.e. "uint64"
|
||||
Max int // max size for slices and strings
|
||||
}
|
||||
|
||||
type structInfo struct {
|
||||
Name string
|
||||
Fields []fieldInfo
|
||||
}
|
||||
|
||||
var headerTpl = template.Must(template.New("header").Parse(`// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package {{.Package}}
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
`))
|
||||
|
||||
var encodeTpl = template.Must(template.New("encoder").Parse(`
|
||||
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}.EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
{{else}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if l := len(o.{{$fieldInfo.Name}}); l > {{$fieldInfo.Max}} {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", l, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
xw.WriteUint32(uint32(len(o.{{$fieldInfo.Name}})))
|
||||
for i := range o.{{$fieldInfo.Name}} {
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
xw.Write{{$fieldInfo.Encoder}}({{$fieldInfo.Convert}}(o.{{$fieldInfo.Name}}[i]))
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
return xw.Tot(), xw.Error()
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
o.{{$fieldInfo.Name}} = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}Max({{$fieldInfo.Max}})
|
||||
{{else}}
|
||||
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{end}}
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}).DecodeXDRFrom(xr)
|
||||
{{end}}
|
||||
{{else}}
|
||||
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
|
||||
if _{{$fieldInfo.Name}}Size < 0 {
|
||||
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{if ge $fieldInfo.Max 1}}
|
||||
if _{{$fieldInfo.Name}}Size > {{$fieldInfo.Max}} {
|
||||
return xdr.ElementSizeExceeded("{{$fieldInfo.Name}}", _{{$fieldInfo.Name}}Size, {{$fieldInfo.Max}})
|
||||
}
|
||||
{{end}}
|
||||
o.{{$fieldInfo.Name}} = make([]{{$fieldInfo.FieldType}}, _{{$fieldInfo.Name}}Size)
|
||||
for i := range o.{{$fieldInfo.Name}} {
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
o.{{$fieldInfo.Name}}[i] = {{$fieldInfo.FieldType}}(xr.Read{{$fieldInfo.Encoder}}())
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}[i]).DecodeXDRFrom(xr)
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
{{end}}
|
||||
return xr.Error()
|
||||
}`))
|
||||
|
||||
var maxRe = regexp.MustCompile(`\Wmax:(\d+)`)
|
||||
|
||||
type typeSet struct {
|
||||
Type string
|
||||
Encoder string
|
||||
}
|
||||
|
||||
var xdrEncoders = map[string]typeSet{
|
||||
"int8": typeSet{"uint8", "Uint8"},
|
||||
"uint8": typeSet{"", "Uint8"},
|
||||
"int16": typeSet{"uint16", "Uint16"},
|
||||
"uint16": typeSet{"", "Uint16"},
|
||||
"int32": typeSet{"uint32", "Uint32"},
|
||||
"uint32": typeSet{"", "Uint32"},
|
||||
"int64": typeSet{"uint64", "Uint64"},
|
||||
"uint64": typeSet{"", "Uint64"},
|
||||
"int": typeSet{"uint64", "Uint64"},
|
||||
"string": typeSet{"", "String"},
|
||||
"[]byte": typeSet{"", "Bytes"},
|
||||
"bool": typeSet{"", "Bool"},
|
||||
}
|
||||
|
||||
func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
var fs []fieldInfo
|
||||
|
||||
for _, sf := range t.Fields.List {
|
||||
if len(sf.Names) == 0 {
|
||||
// We don't handle anonymous fields
|
||||
continue
|
||||
}
|
||||
|
||||
fn := sf.Names[0].Name
|
||||
var max = 0
|
||||
if sf.Comment != nil {
|
||||
c := sf.Comment.List[0].Text
|
||||
if m := maxRe.FindStringSubmatch(c); m != nil {
|
||||
max, _ = strconv.Atoi(m[1])
|
||||
}
|
||||
if strings.Contains(c, "noencode") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var f fieldInfo
|
||||
switch ft := sf.Type.(type) {
|
||||
case *ast.Ident:
|
||||
tn := ft.Name
|
||||
if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
FieldType: tn,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
case *ast.ArrayType:
|
||||
if ft.Len != nil {
|
||||
// We don't handle arrays
|
||||
continue
|
||||
}
|
||||
|
||||
tn := ft.Elt.(*ast.Ident).Name
|
||||
if enc, ok := xdrEncoders["[]"+tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else if enc, ok := xdrEncoders[tn]; ok {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: true,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Encoder: enc.Encoder,
|
||||
Convert: enc.Type,
|
||||
Max: max,
|
||||
}
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
FieldType: ft.Sel.Name,
|
||||
Max: max,
|
||||
}
|
||||
}
|
||||
|
||||
fs = append(fs, f)
|
||||
}
|
||||
|
||||
return fs
|
||||
}
|
||||
|
||||
func generateCode(output io.Writer, s structInfo) {
|
||||
name := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
var buf bytes.Buffer
|
||||
err := encodeTpl.Execute(&buf, map[string]interface{}{"TypeName": name, "Fields": fs})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
bs := regexp.MustCompile(`(\s*\n)+`).ReplaceAll(buf.Bytes(), []byte("\n"))
|
||||
bs = bytes.Replace(bs, []byte("//+n"), []byte("\n"), -1)
|
||||
|
||||
bs, err = format.Source(bs)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Fprintln(output, string(bs))
|
||||
}
|
||||
|
||||
func uncamelize(s string) string {
|
||||
return regexp.MustCompile("[a-z][A-Z]").ReplaceAllStringFunc(s, func(camel string) string {
|
||||
return camel[:1] + " " + camel[1:]
|
||||
})
|
||||
}
|
||||
|
||||
func generateDiagram(output io.Writer, s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Fprintln(output, sn+" Structure:")
|
||||
fmt.Fprintln(output)
|
||||
fmt.Fprintln(output, " 0 1 2 3")
|
||||
fmt.Fprintln(output, " 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1")
|
||||
line := "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+"
|
||||
fmt.Fprintln(output, line)
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
name := uncamelize(f.Name)
|
||||
|
||||
if f.IsSlice {
|
||||
fmt.Fprintf(output, "| %s |\n", center("Number of "+name, 61))
|
||||
fmt.Fprintln(output, line)
|
||||
}
|
||||
switch tn {
|
||||
case "bool":
|
||||
fmt.Fprintf(output, "| %s |V|\n", center(name+" (V=0 or 1)", 59))
|
||||
fmt.Fprintln(output, line)
|
||||
case "int16", "uint16":
|
||||
fmt.Fprintf(output, "| %s | %s |\n", center("0x0000", 29), center(name, 29))
|
||||
fmt.Fprintln(output, line)
|
||||
case "int32", "uint32":
|
||||
fmt.Fprintf(output, "| %s |\n", center(name, 61))
|
||||
fmt.Fprintln(output, line)
|
||||
case "int64", "uint64":
|
||||
fmt.Fprintf(output, "| %-61s |\n", "")
|
||||
fmt.Fprintf(output, "+ %s +\n", center(name+" (64 bits)", 61))
|
||||
fmt.Fprintf(output, "| %-61s |\n", "")
|
||||
fmt.Fprintln(output, line)
|
||||
case "string", "byte": // XXX We assume slice of byte!
|
||||
fmt.Fprintf(output, "| %s |\n", center("Length of "+name, 61))
|
||||
fmt.Fprintln(output, line)
|
||||
fmt.Fprintf(output, "/ %61s /\n", "")
|
||||
fmt.Fprintf(output, "\\ %s \\\n", center(name+" (variable length)", 61))
|
||||
fmt.Fprintf(output, "/ %61s /\n", "")
|
||||
fmt.Fprintln(output, line)
|
||||
default:
|
||||
if f.IsSlice {
|
||||
tn = "Zero or more " + tn + " Structures"
|
||||
fmt.Fprintf(output, "/ %s /\n", center("", 61))
|
||||
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
|
||||
fmt.Fprintf(output, "/ %s /\n", center("", 61))
|
||||
} else {
|
||||
tn = tn + " Structure"
|
||||
fmt.Fprintf(output, "/ %s /\n", center("", 61))
|
||||
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
|
||||
fmt.Fprintf(output, "/ %s /\n", center("", 61))
|
||||
}
|
||||
fmt.Fprintln(output, line)
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(output)
|
||||
fmt.Fprintln(output)
|
||||
}
|
||||
|
||||
func generateXdr(output io.Writer, s structInfo) {
|
||||
sn := s.Name
|
||||
fs := s.Fields
|
||||
|
||||
fmt.Fprintf(output, "struct %s {\n", sn)
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
fn := f.Name
|
||||
suf := ""
|
||||
l := ""
|
||||
if f.Max > 0 {
|
||||
l = strconv.Itoa(f.Max)
|
||||
}
|
||||
if f.IsSlice {
|
||||
suf = "<" + l + ">"
|
||||
}
|
||||
|
||||
switch tn {
|
||||
case "int16", "int32":
|
||||
fmt.Fprintf(output, "\tint %s%s;\n", fn, suf)
|
||||
case "uint16", "uint32":
|
||||
fmt.Fprintf(output, "\tunsigned int %s%s;\n", fn, suf)
|
||||
case "int64":
|
||||
fmt.Fprintf(output, "\thyper %s%s;\n", fn, suf)
|
||||
case "uint64":
|
||||
fmt.Fprintf(output, "\tunsigned hyper %s%s;\n", fn, suf)
|
||||
case "string":
|
||||
fmt.Fprintf(output, "\tstring %s<%s>;\n", fn, l)
|
||||
case "byte":
|
||||
fmt.Fprintf(output, "\topaque %s<%s>;\n", fn, l)
|
||||
default:
|
||||
fmt.Fprintf(output, "\t%s %s%s;\n", tn, fn, suf)
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(output, "}")
|
||||
fmt.Fprintln(output)
|
||||
}
|
||||
|
||||
func center(s string, w int) string {
|
||||
w -= len(s)
|
||||
l := w / 2
|
||||
r := l
|
||||
if l+r < w {
|
||||
r++
|
||||
}
|
||||
return strings.Repeat(" ", l) + s + strings.Repeat(" ", r)
|
||||
}
|
||||
|
||||
func inspector(structs *[]structInfo) func(ast.Node) bool {
|
||||
return func(n ast.Node) bool {
|
||||
switch n := n.(type) {
|
||||
case *ast.TypeSpec:
|
||||
switch t := n.Type.(type) {
|
||||
case *ast.StructType:
|
||||
name := n.Name.Name
|
||||
fs := handleStruct(t)
|
||||
*structs = append(*structs, structInfo{name, fs})
|
||||
}
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
outputFile := flag.String("o", "", "Output file, blank for stdout")
|
||||
flag.Parse()
|
||||
fname := flag.Arg(0)
|
||||
|
||||
fset := token.NewFileSet()
|
||||
f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var structs []structInfo
|
||||
i := inspector(&structs)
|
||||
ast.Inspect(f, i)
|
||||
|
||||
var output io.Writer = os.Stdout
|
||||
if *outputFile != "" {
|
||||
fd, err := os.Create(*outputFile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
output = fd
|
||||
}
|
||||
|
||||
headerTpl.Execute(output, map[string]string{"Package": f.Name.Name})
|
||||
for _, s := range structs {
|
||||
fmt.Fprintf(output, "\n/*\n\n")
|
||||
generateDiagram(output, s)
|
||||
generateXdr(output, s)
|
||||
fmt.Fprintf(output, "*/\n")
|
||||
generateCode(output, s)
|
||||
}
|
||||
}
|
16
Godeps/_workspace/src/github.com/calmh/xdr/debug.go
generated
vendored
Normal file
16
Godeps/_workspace/src/github.com/calmh/xdr/debug.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
|
||||
debug = len(os.Getenv("XDRTRACE")) > 0
|
||||
dl = log.New(os.Stdout, "xdr: ", log.Lshortfile|log.Ltime|log.Lmicroseconds)
|
||||
)
|
||||
|
||||
const maxDebugBytes = 32
|
5
Godeps/_workspace/src/github.com/calmh/xdr/doc.go
generated
vendored
Normal file
5
Godeps/_workspace/src/github.com/calmh/xdr/doc.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// Package xdr implements an XDR (RFC 4506) encoder/decoder.
|
||||
package xdr
|
79
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
Normal file
79
Godeps/_workspace/src/github.com/calmh/xdr/encdec_test.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
// Contains all supported types
|
||||
type TestStruct struct {
|
||||
I int
|
||||
I8 int8
|
||||
UI8 uint8
|
||||
I16 int16
|
||||
UI16 uint16
|
||||
I32 int32
|
||||
UI32 uint32
|
||||
I64 int64
|
||||
UI64 uint64
|
||||
BS []byte // max:1024
|
||||
S string // max:1024
|
||||
C Opaque
|
||||
SS []string // max:1024
|
||||
}
|
||||
|
||||
type Opaque [32]byte
|
||||
|
||||
func (u *Opaque) EncodeXDRInto(w *xdr.Writer) (int, error) {
|
||||
return w.WriteRaw(u[:])
|
||||
}
|
||||
|
||||
func (u *Opaque) DecodeXDRFrom(r *xdr.Reader) (int, error) {
|
||||
return r.ReadRaw(u[:])
|
||||
}
|
||||
|
||||
func (Opaque) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||
var u Opaque
|
||||
for i := range u[:] {
|
||||
u[i] = byte(rand.Int())
|
||||
}
|
||||
return reflect.ValueOf(u)
|
||||
}
|
||||
|
||||
func TestEncDec(t *testing.T) {
|
||||
fn := func(t0 TestStruct) bool {
|
||||
bs, err := t0.MarshalXDR()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var t1 TestStruct
|
||||
err = t1.UnmarshalXDR(bs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Not comparing with DeepEqual since we'll unmarshal nil slices as empty
|
||||
if t0.I != t1.I ||
|
||||
t0.I16 != t1.I16 || t0.UI16 != t1.UI16 ||
|
||||
t0.I32 != t1.I32 || t0.UI32 != t1.UI32 ||
|
||||
t0.I64 != t1.I64 || t0.UI64 != t1.UI64 ||
|
||||
bytes.Compare(t0.BS, t1.BS) != 0 ||
|
||||
t0.S != t1.S || t0.C != t1.C {
|
||||
t.Logf("%#v", t0)
|
||||
t.Logf("%#v", t1)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
185
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
Normal file
185
Godeps/_workspace/src/github.com/calmh/xdr/encdec_xdr_test.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
// ************************************************************
|
||||
// This file is automatically generated by genxdr. Do not edit.
|
||||
// ************************************************************
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"github.com/calmh/xdr"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
TestStruct Structure:
|
||||
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ int Structure \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ int8 Structure \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ uint8 Structure \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | I16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| 0x0000 | UI16 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| I32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| UI32 |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ I64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ UI64 (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of BS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ BS (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of S |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ S (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ Opaque Structure \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Number of SS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| Length of SS |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
/ /
|
||||
\ SS (variable length) \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|
||||
|
||||
struct TestStruct {
|
||||
int I;
|
||||
int8 I8;
|
||||
uint8 UI8;
|
||||
int I16;
|
||||
unsigned int UI16;
|
||||
int I32;
|
||||
unsigned int UI32;
|
||||
hyper I64;
|
||||
unsigned hyper UI64;
|
||||
opaque BS<1024>;
|
||||
string S<1024>;
|
||||
Opaque C;
|
||||
string SS<1024>;
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
func (o TestStruct) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o TestStruct) MarshalXDR() ([]byte, error) {
|
||||
return o.AppendXDR(make([]byte, 0, 128))
|
||||
}
|
||||
|
||||
func (o TestStruct) MustMarshalXDR() []byte {
|
||||
bs, err := o.MarshalXDR()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bs
|
||||
}
|
||||
|
||||
func (o TestStruct) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o TestStruct) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint64(uint64(o.I))
|
||||
xw.WriteUint8(uint8(o.I8))
|
||||
xw.WriteUint8(o.UI8)
|
||||
xw.WriteUint16(uint16(o.I16))
|
||||
xw.WriteUint16(o.UI16)
|
||||
xw.WriteUint32(uint32(o.I32))
|
||||
xw.WriteUint32(o.UI32)
|
||||
xw.WriteUint64(uint64(o.I64))
|
||||
xw.WriteUint64(o.UI64)
|
||||
if l := len(o.BS); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("BS", l, 1024)
|
||||
}
|
||||
xw.WriteBytes(o.BS)
|
||||
if l := len(o.S); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("S", l, 1024)
|
||||
}
|
||||
xw.WriteString(o.S)
|
||||
_, err := o.C.EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
if l := len(o.SS); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("SS", l, 1024)
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.SS)))
|
||||
for i := range o.SS {
|
||||
xw.WriteString(o.SS[i])
|
||||
}
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *TestStruct) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *TestStruct) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.I = int(xr.ReadUint64())
|
||||
o.I8 = int8(xr.ReadUint8())
|
||||
o.UI8 = xr.ReadUint8()
|
||||
o.I16 = int16(xr.ReadUint16())
|
||||
o.UI16 = xr.ReadUint16()
|
||||
o.I32 = int32(xr.ReadUint32())
|
||||
o.UI32 = xr.ReadUint32()
|
||||
o.I64 = int64(xr.ReadUint64())
|
||||
o.UI64 = xr.ReadUint64()
|
||||
o.BS = xr.ReadBytesMax(1024)
|
||||
o.S = xr.ReadStringMax(1024)
|
||||
(&o.C).DecodeXDRFrom(xr)
|
||||
_SSSize := int(xr.ReadUint32())
|
||||
if _SSSize < 0 {
|
||||
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
|
||||
}
|
||||
if _SSSize > 1024 {
|
||||
return xdr.ElementSizeExceeded("SS", _SSSize, 1024)
|
||||
}
|
||||
o.SS = make([]string, _SSSize)
|
||||
for i := range o.SS {
|
||||
o.SS[i] = xr.ReadString()
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
4
Godeps/_workspace/src/github.com/calmh/xdr/generate.sh
generated
vendored
Normal file
4
Godeps/_workspace/src/github.com/calmh/xdr/generate.sh
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
#!/bin/sh
|
||||
|
||||
go run cmd/genxdr/main.go -- bench_test.go > bench_xdr_test.go
|
||||
go run cmd/genxdr/main.go -- encdec_test.go > encdec_xdr_test.go
|
10
Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go
generated
vendored
Normal file
10
Godeps/_workspace/src/github.com/calmh/xdr/pad_ipdr.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func pad(l int) int {
|
||||
return 0
|
||||
}
|
14
Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go
generated
vendored
Normal file
14
Godeps/_workspace/src/github.com/calmh/xdr/pad_xdr.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func pad(l int) int {
|
||||
d := l % 4
|
||||
if d == 0 {
|
||||
return 0
|
||||
}
|
||||
return 4 - d
|
||||
}
|
171
Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
Normal file
171
Godeps/_workspace/src/github.com/calmh/xdr/reader.go
generated
vendored
Normal file
@ -0,0 +1,171 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Reader) ReadRaw(bs []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, bs)
|
||||
return n, r.err
|
||||
}
|
||||
|
||||
func (r *Reader) ReadString() string {
|
||||
return r.ReadStringMax(0)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadStringMax(max int) string {
|
||||
buf := r.ReadBytesMaxInto(max, nil)
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh := reflect.StringHeader{
|
||||
Data: bh.Data,
|
||||
Len: bh.Len,
|
||||
}
|
||||
return *((*string)(unsafe.Pointer(&sh)))
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytes() []byte {
|
||||
return r.ReadBytesInto(nil)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesMax(max int) []byte {
|
||||
return r.ReadBytesMaxInto(max, nil)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesInto(dst []byte) []byte {
|
||||
return r.ReadBytesMaxInto(0, dst)
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBytesMaxInto(max int, dst []byte) []byte {
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
l := int(r.ReadUint32())
|
||||
if r.err != nil {
|
||||
return nil
|
||||
}
|
||||
if l < 0 || max > 0 && l > max {
|
||||
// l may be negative on 32 bit builds
|
||||
r.err = ElementSizeExceeded("bytes field", l, max)
|
||||
return nil
|
||||
}
|
||||
|
||||
if fullLen := l + pad(l); fullLen > len(dst) {
|
||||
dst = make([]byte, fullLen)
|
||||
} else {
|
||||
dst = dst[:fullLen]
|
||||
}
|
||||
|
||||
var n int
|
||||
n, r.err = io.ReadFull(r.r, dst)
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd bytes (%d): %v", len(dst), r.err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if debug {
|
||||
if n > maxDebugBytes {
|
||||
dl.Printf("rd bytes (%d): %x...", len(dst), dst[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Printf("rd bytes (%d): %x", len(dst), dst)
|
||||
}
|
||||
}
|
||||
return dst[:l]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadBool() bool {
|
||||
return r.ReadUint8() != 0
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint32() uint32 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:4])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint32: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint32(r.b[3]) | uint32(r.b[2])<<8 | uint32(r.b[1])<<16 | uint32(r.b[0])<<24
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint32=%d (0x%08x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint64() uint64 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:8])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint64: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint64(r.b[7]) | uint64(r.b[6])<<8 | uint64(r.b[5])<<16 | uint64(r.b[4])<<24 |
|
||||
uint64(r.b[3])<<32 | uint64(r.b[2])<<40 | uint64(r.b[1])<<48 | uint64(r.b[0])<<56
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint64=%d (0x%016x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
type XDRError struct {
|
||||
op string
|
||||
err error
|
||||
}
|
||||
|
||||
func (e XDRError) Error() string {
|
||||
return "xdr " + e.op + ": " + e.err.Error()
|
||||
}
|
||||
|
||||
func (e XDRError) IsEOF() bool {
|
||||
return e.err == io.EOF
|
||||
}
|
||||
|
||||
func (r *Reader) Error() error {
|
||||
if r.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"read", r.err}
|
||||
}
|
||||
|
||||
func ElementSizeExceeded(field string, size, limit int) error {
|
||||
return fmt.Errorf("%s exceeds size limit; %d > %d", field, size, limit)
|
||||
}
|
49
Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go
generated
vendored
Normal file
49
Godeps/_workspace/src/github.com/calmh/xdr/reader_ipdr.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
import "io"
|
||||
|
||||
func (r *Reader) ReadUint8() uint8 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:1])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint8: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint8=%d (0x%02x)", r.b[0], r.b[0])
|
||||
}
|
||||
return r.b[0]
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
if r.err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
_, r.err = io.ReadFull(r.r, r.b[:2])
|
||||
if r.err != nil {
|
||||
if debug {
|
||||
dl.Printf("rd uint16: %v", r.err)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
v := uint16(r.b[1]) | uint16(r.b[0])<<8
|
||||
|
||||
if debug {
|
||||
dl.Printf("rd uint16=%d (0x%04x)", v, v)
|
||||
}
|
||||
return v
|
||||
}
|
15
Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go
generated
vendored
Normal file
15
Godeps/_workspace/src/github.com/calmh/xdr/reader_xdr.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
|
||||
// All rights reserved. Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (r *Reader) ReadUint8() uint8 {
|
||||
return uint8(r.ReadUint32())
|
||||
}
|
||||
|
||||
func (r *Reader) ReadUint16() uint16 {
|
||||
return uint16(r.ReadUint32())
|
||||
}
|
44
Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go
generated
vendored
Normal file
44
Godeps/_workspace/src/github.com/calmh/xdr/refl_test.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build refl
|
||||
|
||||
package xdr_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
refl "github.com/davecgh/go-xdr/xdr"
|
||||
)
|
||||
|
||||
func TestCompareMarshals(t *testing.T) {
|
||||
e0 := s.MarshalXDR()
|
||||
e1, err := refl.Marshal(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes.Compare(e0, e1) != 0 {
|
||||
t.Fatalf("Encoding mismatch;\n\t%x (this)\n\t%x (refl)", e0, e1)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReflMarshal(b *testing.B) {
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
res, err = refl.Marshal(s)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReflUnmarshal(b *testing.B) {
|
||||
var t XDRBenchStruct
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := refl.Unmarshal(e, &t)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
146
Godeps/_workspace/src/github.com/calmh/xdr/writer.go
generated
vendored
Normal file
146
Godeps/_workspace/src/github.com/calmh/xdr/writer.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var padBytes = []byte{0, 0, 0}
|
||||
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
tot int
|
||||
err error
|
||||
b [8]byte
|
||||
}
|
||||
|
||||
type AppendWriter []byte
|
||||
|
||||
func (w *AppendWriter) Write(bs []byte) (int, error) {
|
||||
*w = append(*w, bs...)
|
||||
return len(bs), nil
|
||||
}
|
||||
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{
|
||||
w: w,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) WriteRaw(bs []byte) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
var n int
|
||||
n, w.err = w.w.Write(bs)
|
||||
return n, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteString(s string) (int, error) {
|
||||
sh := *((*reflect.StringHeader)(unsafe.Pointer(&s)))
|
||||
bh := reflect.SliceHeader{
|
||||
Data: sh.Data,
|
||||
Len: sh.Len,
|
||||
Cap: sh.Len,
|
||||
}
|
||||
return w.WriteBytes(*(*[]byte)(unsafe.Pointer(&bh)))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteBytes(bs []byte) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
w.WriteUint32(uint32(len(bs)))
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
if len(bs) > maxDebugBytes {
|
||||
dl.Printf("wr bytes (%d): %x...", len(bs), bs[:maxDebugBytes])
|
||||
} else {
|
||||
dl.Printf("wr bytes (%d): %x", len(bs), bs)
|
||||
}
|
||||
}
|
||||
|
||||
var l, n int
|
||||
n, w.err = w.w.Write(bs)
|
||||
l += n
|
||||
|
||||
if p := pad(len(bs)); w.err == nil && p > 0 {
|
||||
n, w.err = w.w.Write(padBytes[:p])
|
||||
l += n
|
||||
}
|
||||
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteBool(v bool) (int, error) {
|
||||
if v {
|
||||
return w.WriteUint8(1)
|
||||
} else {
|
||||
return w.WriteUint8(0)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint32(v uint32) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint32=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 24)
|
||||
w.b[1] = byte(v >> 16)
|
||||
w.b[2] = byte(v >> 8)
|
||||
w.b[3] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:4])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint64(v uint64) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint64=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 56)
|
||||
w.b[1] = byte(v >> 48)
|
||||
w.b[2] = byte(v >> 40)
|
||||
w.b[3] = byte(v >> 32)
|
||||
w.b[4] = byte(v >> 24)
|
||||
w.b[5] = byte(v >> 16)
|
||||
w.b[6] = byte(v >> 8)
|
||||
w.b[7] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:8])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) Tot() int {
|
||||
return w.tot
|
||||
}
|
||||
|
||||
func (w *Writer) Error() error {
|
||||
if w.err == nil {
|
||||
return nil
|
||||
}
|
||||
return XDRError{"write", w.err}
|
||||
}
|
41
Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go
generated
vendored
Normal file
41
Godeps/_workspace/src/github.com/calmh/xdr/writer_ipdr.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (w *Writer) WriteUint8(v uint8) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:1])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
if debug {
|
||||
dl.Printf("wr uint8=%d", v)
|
||||
}
|
||||
|
||||
w.b[0] = byte(v >> 8)
|
||||
w.b[1] = byte(v)
|
||||
|
||||
var l int
|
||||
l, w.err = w.w.Write(w.b[:2])
|
||||
w.tot += l
|
||||
return l, w.err
|
||||
}
|
14
Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go
generated
vendored
Normal file
14
Godeps/_workspace/src/github.com/calmh/xdr/writer_xdr.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
// +build !ipdr
|
||||
|
||||
package xdr
|
||||
|
||||
func (w *Writer) WriteUint8(v uint8) (int, error) {
|
||||
return w.WriteUint32(uint32(v))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteUint16(v uint16) (int, error) {
|
||||
return w.WriteUint32(uint32(v))
|
||||
}
|
93
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
Normal file
93
Godeps/_workspace/src/github.com/calmh/xdr/xdr_test.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
|
||||
// is governed by an MIT-style license that can be found in the LICENSE file.
|
||||
|
||||
package xdr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
)
|
||||
|
||||
func TestBytesNil(t *testing.T) {
|
||||
fn := func(bs []byte) bool {
|
||||
var b = new(bytes.Buffer)
|
||||
var w = NewWriter(b)
|
||||
var r = NewReader(b)
|
||||
w.WriteBytes(bs)
|
||||
w.WriteBytes(bs)
|
||||
r.ReadBytes()
|
||||
res := r.ReadBytes()
|
||||
return bytes.Compare(bs, res) == 0
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBytesGiven(t *testing.T) {
|
||||
fn := func(bs []byte) bool {
|
||||
var b = new(bytes.Buffer)
|
||||
var w = NewWriter(b)
|
||||
var r = NewReader(b)
|
||||
w.WriteBytes(bs)
|
||||
w.WriteBytes(bs)
|
||||
res := make([]byte, 12)
|
||||
res = r.ReadBytesInto(res)
|
||||
res = r.ReadBytesInto(res)
|
||||
return bytes.Compare(bs, res) == 0
|
||||
}
|
||||
if err := quick.Check(fn, nil); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadBytesMaxInto(t *testing.T) {
|
||||
var max = 64
|
||||
for tot := 32; tot < 128; tot++ {
|
||||
for diff := -32; diff <= 32; diff++ {
|
||||
var b = new(bytes.Buffer)
|
||||
var r = NewReader(b)
|
||||
var w = NewWriter(b)
|
||||
|
||||
var toWrite = make([]byte, tot)
|
||||
w.WriteBytes(toWrite)
|
||||
|
||||
var buf = make([]byte, tot+diff)
|
||||
var bs = r.ReadBytesMaxInto(max, buf)
|
||||
|
||||
if tot <= max {
|
||||
if read := len(bs); read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, buf=%d, max=%d, read=%d", tot, tot+diff, max, read)
|
||||
}
|
||||
} else if !strings.Contains(r.err.Error(), "exceeds size") {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d: %v", tot, max, r.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadStringMax(t *testing.T) {
|
||||
for tot := 42; tot < 72; tot++ {
|
||||
for max := 0; max < 128; max++ {
|
||||
var b = new(bytes.Buffer)
|
||||
var r = NewReader(b)
|
||||
var w = NewWriter(b)
|
||||
|
||||
var toWrite = make([]byte, tot)
|
||||
w.WriteBytes(toWrite)
|
||||
|
||||
var str = r.ReadStringMax(max)
|
||||
var read = len(str)
|
||||
|
||||
if max == 0 || tot <= max {
|
||||
if read != tot {
|
||||
t.Errorf("Incorrect read bytes, wrote=%d, max=%d, read=%d", tot, max, read)
|
||||
}
|
||||
} else if !strings.Contains(r.err.Error(), "exceeds size") {
|
||||
t.Errorf("Unexpected non-ErrElementSizeExceeded error for wrote=%d, max=%d, read=%d: %v", tot, max, read, r.err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
1
Godeps/_workspace/src/github.com/camlistore/lock/.gitignore
generated
vendored
Normal file
1
Godeps/_workspace/src/github.com/camlistore/lock/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
*~
|
202
Godeps/_workspace/src/github.com/camlistore/lock/COPYING
generated
vendored
Normal file
202
Godeps/_workspace/src/github.com/camlistore/lock/COPYING
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
3
Godeps/_workspace/src/github.com/camlistore/lock/README.txt
generated
vendored
Normal file
3
Godeps/_workspace/src/github.com/camlistore/lock/README.txt
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
File locking library.
|
||||
|
||||
See http://godoc.org/github.com/camlistore/lock
|
158
Godeps/_workspace/src/github.com/camlistore/lock/lock.go
generated
vendored
Normal file
158
Godeps/_workspace/src/github.com/camlistore/lock/lock.go
generated
vendored
Normal file
@ -0,0 +1,158 @@
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Lock locks the given file, creating the file if necessary. If the
|
||||
// file already exists, it must have zero size or an error is returned.
|
||||
// The lock is an exclusive lock (a write lock), but locked files
|
||||
// should neither be read from nor written to. Such files should have
|
||||
// zero size and only exist to co-ordinate ownership across processes.
|
||||
//
|
||||
// A nil Closer is returned if an error occurred. Otherwise, close that
|
||||
// Closer to release the lock.
|
||||
//
|
||||
// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s
|
||||
// advisory locks. In particular, closing any other file descriptor for the
|
||||
// same file will release the lock prematurely.
|
||||
//
|
||||
// Attempting to lock a file that is already locked by the current process
|
||||
// has undefined behavior.
|
||||
//
|
||||
// On other operating systems, lock will fallback to using the presence and
|
||||
// content of a file named name + '.lock' to implement locking behavior.
|
||||
func Lock(name string) (io.Closer, error) {
|
||||
return lockFn(name)
|
||||
}
|
||||
|
||||
var lockFn = lockPortable
|
||||
|
||||
// Portable version not using fcntl. Doesn't handle crashes as gracefully,
|
||||
// since it can leave stale lock files.
|
||||
// TODO: write pid of owner to lock file and on race see if pid is
|
||||
// still alive?
|
||||
func lockPortable(name string) (io.Closer, error) {
|
||||
absName, err := filepath.Abs(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't Lock file %q: can't find abs path: %v", name, err)
|
||||
}
|
||||
fi, err := os.Stat(absName)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
if isStaleLock(absName) {
|
||||
os.Remove(absName)
|
||||
} else {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
}
|
||||
f, err := os.OpenFile(absName, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create lock file %s %v", absName, err)
|
||||
}
|
||||
if err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &lockCloser{f: f, abs: absName}, nil
|
||||
}
|
||||
|
||||
type pidLockMeta struct {
|
||||
OwnerPID int
|
||||
}
|
||||
|
||||
func isStaleLock(path string) bool {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer f.Close()
|
||||
var meta pidLockMeta
|
||||
if json.NewDecoder(f).Decode(&meta) != nil {
|
||||
return false
|
||||
}
|
||||
if meta.OwnerPID == 0 {
|
||||
return false
|
||||
}
|
||||
p, err := os.FindProcess(meta.OwnerPID)
|
||||
if err != nil {
|
||||
// e.g. on Windows
|
||||
return true
|
||||
}
|
||||
// On unix, os.FindProcess always is true, so we have to send
|
||||
// it a signal to see if it's alive.
|
||||
if signalZero != nil {
|
||||
if p.Signal(signalZero) != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var signalZero os.Signal // nil or set by lock_sigzero.go
|
||||
|
||||
type lockCloser struct {
|
||||
f *os.File
|
||||
abs string
|
||||
once sync.Once
|
||||
err error
|
||||
}
|
||||
|
||||
func (lc *lockCloser) Close() error {
|
||||
lc.once.Do(lc.close)
|
||||
return lc.err
|
||||
}
|
||||
|
||||
func (lc *lockCloser) close() {
|
||||
if err := lc.f.Close(); err != nil {
|
||||
lc.err = err
|
||||
}
|
||||
if err := os.Remove(lc.abs); err != nil {
|
||||
lc.err = err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
lockmu sync.Mutex
|
||||
locked = map[string]bool{} // abs path -> true
|
||||
)
|
||||
|
||||
// unlocker is used by the darwin and linux implementations with fcntl
|
||||
// advisory locks.
|
||||
type unlocker struct {
|
||||
f *os.File
|
||||
abs string
|
||||
}
|
||||
|
||||
func (u *unlocker) Close() error {
|
||||
lockmu.Lock()
|
||||
// Remove is not necessary but it's nice for us to clean up.
|
||||
// If we do do this, though, it needs to be before the
|
||||
// u.f.Close below.
|
||||
os.Remove(u.abs)
|
||||
if err := u.f.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(locked, u.abs)
|
||||
lockmu.Unlock()
|
||||
return nil
|
||||
}
|
32
Godeps/_workspace/src/github.com/camlistore/lock/lock_appengine.go
generated
vendored
Normal file
32
Godeps/_workspace/src/github.com/camlistore/lock/lock_appengine.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
// +build appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockAppEngine
|
||||
}
|
||||
|
||||
func lockAppEngine(name string) (io.Closer, error) {
|
||||
return nil, errors.New("Lock not available on App Engine")
|
||||
}
|
80
Godeps/_workspace/src/github.com/camlistore/lock/lock_darwin_amd64.go
generated
vendored
Normal file
80
Godeps/_workspace/src/github.com/camlistore/lock/lock_darwin_amd64.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
// +build darwin,amd64
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
abs, err := filepath.Abs(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockmu.Lock()
|
||||
if locked[abs] {
|
||||
lockmu.Unlock()
|
||||
return nil, fmt.Errorf("file %q already locked", abs)
|
||||
}
|
||||
locked[abs] = true
|
||||
lockmu.Unlock()
|
||||
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Lock Create of %s (abs: %s) failed: %v", name, abs, err)
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/sys/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Start uint64 // sizeof(off_t): 8
|
||||
Len uint64 // sizeof(off_t): 8
|
||||
Pid uint32 // sizeof(pid_t): 4
|
||||
Type uint16 // sizeof(short): 2
|
||||
Whence uint16 // sizeof(short): 2
|
||||
}{
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: uint16(os.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: uint32(os.Getpid()),
|
||||
}
|
||||
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f, abs}, nil
|
||||
}
|
79
Godeps/_workspace/src/github.com/camlistore/lock/lock_freebsd.go
generated
vendored
Normal file
79
Godeps/_workspace/src/github.com/camlistore/lock/lock_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
abs, err := filepath.Abs(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockmu.Lock()
|
||||
if locked[abs] {
|
||||
lockmu.Unlock()
|
||||
return nil, fmt.Errorf("file %q already locked", abs)
|
||||
}
|
||||
locked[abs] = true
|
||||
lockmu.Unlock()
|
||||
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Start int64 /* off_t starting offset */
|
||||
Len int64 /* off_t len = 0 means until end of file */
|
||||
Pid int32 /* pid_t lock owner */
|
||||
Type int16 /* short lock type: read/write, etc. */
|
||||
Whence int16 /* short type of l_start */
|
||||
Sysid int32 /* int remote system id or zero for local */
|
||||
}{
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: int32(os.Getpid()),
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: int16(os.SEEK_SET),
|
||||
Sysid: 0,
|
||||
}
|
||||
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f, abs}, nil
|
||||
}
|
80
Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_amd64.go
generated
vendored
Normal file
80
Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_amd64.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
// +build linux,amd64
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
abs, err := filepath.Abs(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockmu.Lock()
|
||||
if locked[abs] {
|
||||
lockmu.Unlock()
|
||||
return nil, fmt.Errorf("file %q already locked", abs)
|
||||
}
|
||||
locked[abs] = true
|
||||
lockmu.Unlock()
|
||||
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Type uint32
|
||||
Whence uint32
|
||||
Start uint64
|
||||
Len uint64
|
||||
Pid uint32
|
||||
}{
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: uint32(os.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: uint32(os.Getpid()),
|
||||
}
|
||||
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f, abs}, nil
|
||||
}
|
81
Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_arm.go
generated
vendored
Normal file
81
Godeps/_workspace/src/github.com/camlistore/lock/lock_linux_arm.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
// +build linux,arm
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockFcntl
|
||||
}
|
||||
|
||||
func lockFcntl(name string) (io.Closer, error) {
|
||||
abs, err := filepath.Abs(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockmu.Lock()
|
||||
if locked[abs] {
|
||||
lockmu.Unlock()
|
||||
return nil, fmt.Errorf("file %q already locked", abs)
|
||||
}
|
||||
locked[abs] = true
|
||||
lockmu.Unlock()
|
||||
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
|
||||
// TODO: move this into the standard syscall package.
|
||||
k := struct {
|
||||
Type uint16
|
||||
Whence uint16
|
||||
Start uint32
|
||||
Len uint32
|
||||
Pid uint32
|
||||
}{
|
||||
Type: syscall.F_WRLCK,
|
||||
Whence: uint16(os.SEEK_SET),
|
||||
Start: 0,
|
||||
Len: 0, // 0 means to lock the entire file.
|
||||
Pid: uint32(os.Getpid()),
|
||||
}
|
||||
|
||||
const F_SETLK = 6 // actual value. syscall package is wrong: golang.org/issue/7059
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(F_SETLK), uintptr(unsafe.Pointer(&k)))
|
||||
if errno != 0 {
|
||||
f.Close()
|
||||
return nil, errno
|
||||
}
|
||||
return &unlocker{f, abs}, nil
|
||||
}
|
55
Godeps/_workspace/src/github.com/camlistore/lock/lock_plan9.go
generated
vendored
Normal file
55
Godeps/_workspace/src/github.com/camlistore/lock/lock_plan9.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func init() {
|
||||
lockFn = lockPlan9
|
||||
}
|
||||
|
||||
func lockPlan9(name string) (io.Closer, error) {
|
||||
var f *os.File
|
||||
abs, err := filepath.Abs(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockmu.Lock()
|
||||
if locked[abs] {
|
||||
lockmu.Unlock()
|
||||
return nil, fmt.Errorf("file %q already locked", abs)
|
||||
}
|
||||
locked[abs] = true
|
||||
lockmu.Unlock()
|
||||
|
||||
fi, err := os.Stat(name)
|
||||
if err == nil && fi.Size() > 0 {
|
||||
return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
|
||||
}
|
||||
|
||||
f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Lock Create of %s (abs: %s) failed: %v", name, abs, err)
|
||||
}
|
||||
|
||||
return &unlocker{f, abs}, nil
|
||||
}
|
26
Godeps/_workspace/src/github.com/camlistore/lock/lock_sigzero.go
generated
vendored
Normal file
26
Godeps/_workspace/src/github.com/camlistore/lock/lock_sigzero.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
// +build !appengine
|
||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import "syscall"
|
||||
|
||||
func init() {
|
||||
signalZero = syscall.Signal(0)
|
||||
}
|
131
Godeps/_workspace/src/github.com/camlistore/lock/lock_test.go
generated
vendored
Normal file
131
Godeps/_workspace/src/github.com/camlistore/lock/lock_test.go
generated
vendored
Normal file
@ -0,0 +1,131 @@
|
||||
/*
|
||||
Copyright 2013 The Go Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLock(t *testing.T) {
|
||||
testLock(t, false)
|
||||
}
|
||||
|
||||
func TestLockPortable(t *testing.T) {
|
||||
testLock(t, true)
|
||||
}
|
||||
|
||||
func TestLockInChild(t *testing.T) {
|
||||
f := os.Getenv("TEST_LOCK_FILE")
|
||||
if f == "" {
|
||||
// not child
|
||||
return
|
||||
}
|
||||
lock := Lock
|
||||
if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_PORTABLE")); v {
|
||||
lock = lockPortable
|
||||
}
|
||||
|
||||
lk, err := lock(f)
|
||||
if err != nil {
|
||||
log.Fatalf("Lock failed: %v", err)
|
||||
}
|
||||
|
||||
if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_CRASH")); v {
|
||||
// Simulate a crash, or at least not unlocking the
|
||||
// lock. We still exit 0 just to simplify the parent
|
||||
// process exec code.
|
||||
os.Exit(0)
|
||||
}
|
||||
lk.Close()
|
||||
}
|
||||
|
||||
func testLock(t *testing.T, portable bool) {
|
||||
lock := Lock
|
||||
if portable {
|
||||
lock = lockPortable
|
||||
}
|
||||
|
||||
td, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
|
||||
path := filepath.Join(td, "foo.lock")
|
||||
|
||||
childLock := func(crash bool) error {
|
||||
cmd := exec.Command(os.Args[0], "-test.run=LockInChild$")
|
||||
cmd.Env = []string{"TEST_LOCK_FILE=" + path}
|
||||
if portable {
|
||||
cmd.Env = append(cmd.Env, "TEST_LOCK_PORTABLE=1")
|
||||
}
|
||||
if crash {
|
||||
cmd.Env = append(cmd.Env, "TEST_LOCK_CRASH=1")
|
||||
}
|
||||
out, err := cmd.CombinedOutput()
|
||||
t.Logf("Child output: %q (err %v)", out, err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Child Process lock of %s failed: %v %s", path, err, out)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
t.Logf("Locking in crashing child...")
|
||||
if err := childLock(true); err != nil {
|
||||
t.Fatalf("first lock in child process: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Locking+unlocking in child...")
|
||||
if err := childLock(false); err != nil {
|
||||
t.Fatalf("lock in child process after crashing child: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Locking in parent...")
|
||||
lk1, err := lock(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Logf("Again in parent...")
|
||||
_, err = lock(path)
|
||||
if err == nil {
|
||||
t.Fatal("expected second lock to fail")
|
||||
}
|
||||
|
||||
t.Logf("Locking in child...")
|
||||
if childLock(false) == nil {
|
||||
t.Fatalf("expected lock in child process to fail")
|
||||
}
|
||||
|
||||
t.Logf("Unlocking lock in parent")
|
||||
if err := lk1.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
lk3, err := lock(path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
lk3.Close()
|
||||
}
|
11
Godeps/_workspace/src/github.com/cznic/b/AUTHORS
generated
vendored
Normal file
11
Godeps/_workspace/src/github.com/cznic/b/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
# This file lists authors for copyright purposes. This file is distinct from
|
||||
# the CONTRIBUTORS files. See the latter for an explanation.
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Jan Mercl <0xjnml@gmail.com>
|
11
Godeps/_workspace/src/github.com/cznic/b/CONTRIBUTORS
generated
vendored
Normal file
11
Godeps/_workspace/src/github.com/cznic/b/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
# This file lists people who contributed code to this repository. The AUTHORS
|
||||
# file lists the copyright holders; this file lists people.
|
||||
#
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Brian Fallik <bfallik@gmail.com>
|
||||
Dan Kortschak <dan.kortschak@adelaide.edu.au>
|
||||
Jan Mercl <0xjnml@gmail.com>
|
27
Godeps/_workspace/src/github.com/cznic/b/LICENSE
generated
vendored
Normal file
27
Godeps/_workspace/src/github.com/cznic/b/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014 The b Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
53
Godeps/_workspace/src/github.com/cznic/b/Makefile
generated
vendored
Normal file
53
Godeps/_workspace/src/github.com/cznic/b/Makefile
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
# Copyright 2014 The b Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
.PHONY: all todo clean cover generic mem nuke cpu
|
||||
|
||||
testbin=b.test
|
||||
|
||||
all: editor
|
||||
go build
|
||||
go vet
|
||||
golint .
|
||||
go install
|
||||
make todo
|
||||
|
||||
editor:
|
||||
gofmt -l -s -w .
|
||||
go test -i
|
||||
go test
|
||||
|
||||
clean:
|
||||
@go clean
|
||||
rm -f *~ *.out $(testbin)
|
||||
|
||||
cover:
|
||||
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
|
||||
|
||||
cpu:
|
||||
go test -c
|
||||
./$(testbin) -test.cpuprofile cpu.out
|
||||
go tool pprof --lines $(testbin) cpu.out
|
||||
|
||||
generic:
|
||||
@# writes to stdout a version where the type of key is KEY and the type
|
||||
@# of value is VALUE.
|
||||
@#
|
||||
@# Intended use is to replace all textual occurrences of KEY or VALUE in
|
||||
@# the output with your desired types.
|
||||
@sed -e 's|interface{}[^{]*/\*K\*/|KEY|g' -e 's|interface{}[^{]*/\*V\*/|VALUE|g' btree.go
|
||||
|
||||
mem:
|
||||
go test -c
|
||||
./$(testbin) -test.bench . -test.memprofile mem.out -test.memprofilerate 1
|
||||
go tool pprof --lines --web --alloc_space $(testbin) mem.out
|
||||
|
||||
nuke: clean
|
||||
rm -f *.test *.out
|
||||
|
||||
todo:
|
||||
@grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* *.go || true
|
||||
@grep -n TODO *.go || true
|
||||
@grep -n BUG *.go || true
|
||||
@grep -n println *.go || true
|
10
Godeps/_workspace/src/github.com/cznic/b/README.md
generated
vendored
Normal file
10
Godeps/_workspace/src/github.com/cznic/b/README.md
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
b
|
||||
=
|
||||
|
||||
Package b implements a B+tree.
|
||||
|
||||
Installation:
|
||||
|
||||
$ go get github.com/cznic/b
|
||||
|
||||
Documentation: [godoc.org/github.com/cznic/b](http://godoc.org/github.com/cznic/b)
|
1300
Godeps/_workspace/src/github.com/cznic/b/all_test.go
generated
vendored
Normal file
1300
Godeps/_workspace/src/github.com/cznic/b/all_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
929
Godeps/_workspace/src/github.com/cznic/b/btree.go
generated
vendored
Normal file
929
Godeps/_workspace/src/github.com/cznic/b/btree.go
generated
vendored
Normal file
@ -0,0 +1,929 @@
|
||||
// Copyright 2014 The b Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package b
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
kx = 32 //TODO benchmark tune this number if using custom key/value type(s).
|
||||
kd = 32 //TODO benchmark tune this number if using custom key/value type(s).
|
||||
)
|
||||
|
||||
func init() {
|
||||
if kd < 1 {
|
||||
panic(fmt.Errorf("kd %d: out of range", kd))
|
||||
}
|
||||
|
||||
if kx < 2 {
|
||||
panic(fmt.Errorf("kx %d: out of range", kx))
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
btDPool = sync.Pool{New: func() interface{} { return &d{} }}
|
||||
btEPool = btEpool{sync.Pool{New: func() interface{} { return &Enumerator{} }}}
|
||||
btTPool = btTpool{sync.Pool{New: func() interface{} { return &Tree{} }}}
|
||||
btXPool = sync.Pool{New: func() interface{} { return &x{} }}
|
||||
)
|
||||
|
||||
type btTpool struct{ sync.Pool }
|
||||
|
||||
func (p *btTpool) get(cmp Cmp) *Tree {
|
||||
x := p.Get().(*Tree)
|
||||
x.cmp = cmp
|
||||
return x
|
||||
}
|
||||
|
||||
type btEpool struct{ sync.Pool }
|
||||
|
||||
func (p *btEpool) get(err error, hit bool, i int, k interface{} /*K*/, q *d, t *Tree, ver int64) *Enumerator {
|
||||
x := p.Get().(*Enumerator)
|
||||
x.err, x.hit, x.i, x.k, x.q, x.t, x.ver = err, hit, i, k, q, t, ver
|
||||
return x
|
||||
}
|
||||
|
||||
type (
|
||||
// Cmp compares a and b. Return value is:
|
||||
//
|
||||
// < 0 if a < b
|
||||
// 0 if a == b
|
||||
// > 0 if a > b
|
||||
//
|
||||
Cmp func(a, b interface{} /*K*/) int
|
||||
|
||||
d struct { // data page
|
||||
c int
|
||||
d [2*kd + 1]de
|
||||
n *d
|
||||
p *d
|
||||
}
|
||||
|
||||
de struct { // d element
|
||||
k interface{} /*K*/
|
||||
v interface{} /*V*/
|
||||
}
|
||||
|
||||
// Enumerator captures the state of enumerating a tree. It is returned
|
||||
// from the Seek* methods. The enumerator is aware of any mutations
|
||||
// made to the tree in the process of enumerating it and automatically
|
||||
// resumes the enumeration at the proper key, if possible.
|
||||
//
|
||||
// However, once an Enumerator returns io.EOF to signal "no more
|
||||
// items", it does no more attempt to "resync" on tree mutation(s). In
|
||||
// other words, io.EOF from an Enumaretor is "sticky" (idempotent).
|
||||
Enumerator struct {
|
||||
err error
|
||||
hit bool
|
||||
i int
|
||||
k interface{} /*K*/
|
||||
q *d
|
||||
t *Tree
|
||||
ver int64
|
||||
}
|
||||
|
||||
// Tree is a B+tree.
|
||||
Tree struct {
|
||||
c int
|
||||
cmp Cmp
|
||||
first *d
|
||||
last *d
|
||||
r interface{}
|
||||
ver int64
|
||||
}
|
||||
|
||||
xe struct { // x element
|
||||
ch interface{}
|
||||
k interface{} /*K*/
|
||||
}
|
||||
|
||||
x struct { // index page
|
||||
c int
|
||||
x [2*kx + 2]xe
|
||||
}
|
||||
)
|
||||
|
||||
var ( // R/O zero values
|
||||
zd d
|
||||
zde de
|
||||
ze Enumerator
|
||||
zk interface{} /*K*/
|
||||
zt Tree
|
||||
zx x
|
||||
zxe xe
|
||||
)
|
||||
|
||||
func clr(q interface{}) {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
for i := 0; i <= x.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
|
||||
clr(x.x[i].ch)
|
||||
}
|
||||
*x = zx
|
||||
btXPool.Put(x)
|
||||
case *d:
|
||||
*x = zd
|
||||
btDPool.Put(x)
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------- x
|
||||
|
||||
func newX(ch0 interface{}) *x {
|
||||
r := btXPool.Get().(*x)
|
||||
r.x[0].ch = ch0
|
||||
return r
|
||||
}
|
||||
|
||||
func (q *x) extract(i int) {
|
||||
q.c--
|
||||
if i < q.c {
|
||||
copy(q.x[i:], q.x[i+1:q.c+1])
|
||||
q.x[q.c].ch = q.x[q.c+1].ch
|
||||
q.x[q.c].k = zk // GC
|
||||
q.x[q.c+1] = zxe // GC
|
||||
}
|
||||
}
|
||||
|
||||
func (q *x) insert(i int, k interface{} /*K*/, ch interface{}) *x {
|
||||
c := q.c
|
||||
if i < c {
|
||||
q.x[c+1].ch = q.x[c].ch
|
||||
copy(q.x[i+2:], q.x[i+1:c])
|
||||
q.x[i+1].k = q.x[i].k
|
||||
}
|
||||
c++
|
||||
q.c = c
|
||||
q.x[i].k = k
|
||||
q.x[i+1].ch = ch
|
||||
return q
|
||||
}
|
||||
|
||||
func (q *x) siblings(i int) (l, r *d) {
|
||||
if i >= 0 {
|
||||
if i > 0 {
|
||||
l = q.x[i-1].ch.(*d)
|
||||
}
|
||||
if i < q.c {
|
||||
r = q.x[i+1].ch.(*d)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------- d
|
||||
|
||||
func (l *d) mvL(r *d, c int) {
|
||||
copy(l.d[l.c:], r.d[:c])
|
||||
copy(r.d[:], r.d[c:r.c])
|
||||
l.c += c
|
||||
r.c -= c
|
||||
}
|
||||
|
||||
func (l *d) mvR(r *d, c int) {
|
||||
copy(r.d[c:], r.d[:r.c])
|
||||
copy(r.d[:c], l.d[l.c-c:])
|
||||
r.c += c
|
||||
l.c -= c
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------- Tree
|
||||
|
||||
// TreeNew returns a newly created, empty Tree. The compare function is used
|
||||
// for key collation.
|
||||
func TreeNew(cmp Cmp) *Tree {
|
||||
return btTPool.get(cmp)
|
||||
}
|
||||
|
||||
// Clear removes all K/V pairs from the tree.
|
||||
func (t *Tree) Clear() {
|
||||
if t.r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
clr(t.r)
|
||||
t.c, t.first, t.last, t.r = 0, nil, nil, nil
|
||||
t.ver++
|
||||
}
|
||||
|
||||
// Close performs Clear and recycles t to a pool for possible later reuse. No
|
||||
// references to t should exist or such references must not be used afterwards.
|
||||
func (t *Tree) Close() {
|
||||
t.Clear()
|
||||
*t = zt
|
||||
btTPool.Put(t)
|
||||
}
|
||||
|
||||
func (t *Tree) cat(p *x, q, r *d, pi int) {
|
||||
t.ver++
|
||||
q.mvL(r, r.c)
|
||||
if r.n != nil {
|
||||
r.n.p = q
|
||||
} else {
|
||||
t.last = q
|
||||
}
|
||||
q.n = r.n
|
||||
*r = zd
|
||||
btDPool.Put(r)
|
||||
if p.c > 1 {
|
||||
p.extract(pi)
|
||||
p.x[pi].ch = q
|
||||
return
|
||||
}
|
||||
|
||||
switch x := t.r.(type) {
|
||||
case *x:
|
||||
*x = zx
|
||||
btXPool.Put(x)
|
||||
case *d:
|
||||
*x = zd
|
||||
btDPool.Put(x)
|
||||
}
|
||||
t.r = q
|
||||
}
|
||||
|
||||
func (t *Tree) catX(p, q, r *x, pi int) {
|
||||
t.ver++
|
||||
q.x[q.c].k = p.x[pi].k
|
||||
copy(q.x[q.c+1:], r.x[:r.c])
|
||||
q.c += r.c + 1
|
||||
q.x[q.c].ch = r.x[r.c].ch
|
||||
*r = zx
|
||||
btXPool.Put(r)
|
||||
if p.c > 1 {
|
||||
p.c--
|
||||
pc := p.c
|
||||
if pi < pc {
|
||||
p.x[pi].k = p.x[pi+1].k
|
||||
copy(p.x[pi+1:], p.x[pi+2:pc+1])
|
||||
p.x[pc].ch = p.x[pc+1].ch
|
||||
p.x[pc].k = zk // GC
|
||||
p.x[pc+1].ch = nil // GC
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch x := t.r.(type) {
|
||||
case *x:
|
||||
*x = zx
|
||||
btXPool.Put(x)
|
||||
case *d:
|
||||
*x = zd
|
||||
btDPool.Put(x)
|
||||
}
|
||||
t.r = q
|
||||
}
|
||||
|
||||
// Delete removes the k's KV pair, if it exists, in which case Delete returns
|
||||
// true.
|
||||
func (t *Tree) Delete(k interface{} /*K*/) (ok bool) {
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
if q == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
i, ok = t.find(q, k)
|
||||
if ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c < kx && q != t.r {
|
||||
x, i = t.underflowX(p, x, pi, i)
|
||||
}
|
||||
pi = i + 1
|
||||
p = x
|
||||
q = x.x[pi].ch
|
||||
ok = false
|
||||
continue
|
||||
case *d:
|
||||
t.extract(x, i)
|
||||
if x.c >= kd {
|
||||
return true
|
||||
}
|
||||
|
||||
if q != t.r {
|
||||
t.underflow(p, x, pi)
|
||||
} else if t.c == 0 {
|
||||
t.Clear()
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c < kx && q != t.r {
|
||||
x, i = t.underflowX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
case *d:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) extract(q *d, i int) { // (r interface{} /*V*/) {
|
||||
t.ver++
|
||||
//r = q.d[i].v // prepared for Extract
|
||||
q.c--
|
||||
if i < q.c {
|
||||
copy(q.d[i:], q.d[i+1:q.c+1])
|
||||
}
|
||||
q.d[q.c] = zde // GC
|
||||
t.c--
|
||||
return
|
||||
}
|
||||
|
||||
func (t *Tree) find(q interface{}, k interface{} /*K*/) (i int, ok bool) {
|
||||
var mk interface{} /*K*/
|
||||
l := 0
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
h := x.c - 1
|
||||
for l <= h {
|
||||
m := (l + h) >> 1
|
||||
mk = x.x[m].k
|
||||
switch cmp := t.cmp(k, mk); {
|
||||
case cmp > 0:
|
||||
l = m + 1
|
||||
case cmp == 0:
|
||||
return m, true
|
||||
default:
|
||||
h = m - 1
|
||||
}
|
||||
}
|
||||
case *d:
|
||||
h := x.c - 1
|
||||
for l <= h {
|
||||
m := (l + h) >> 1
|
||||
mk = x.d[m].k
|
||||
switch cmp := t.cmp(k, mk); {
|
||||
case cmp > 0:
|
||||
l = m + 1
|
||||
case cmp == 0:
|
||||
return m, true
|
||||
default:
|
||||
h = m - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
return l, false
|
||||
}
|
||||
|
||||
// First returns the first item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) First() (k interface{} /*K*/, v interface{} /*V*/) {
|
||||
if q := t.first; q != nil {
|
||||
q := &q.d[0]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Get returns the value associated with k and true if it exists. Otherwise Get
|
||||
// returns (zero-value, false).
|
||||
func (t *Tree) Get(k interface{} /*K*/) (v interface{} /*V*/, ok bool) {
|
||||
q := t.r
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
if i, ok = t.find(q, k); ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i+1].ch
|
||||
continue
|
||||
case *d:
|
||||
return x.d[i].v, true
|
||||
}
|
||||
}
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i].ch
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) insert(q *d, i int, k interface{} /*K*/, v interface{} /*V*/) *d {
|
||||
t.ver++
|
||||
c := q.c
|
||||
if i < c {
|
||||
copy(q.d[i+1:], q.d[i:c])
|
||||
}
|
||||
c++
|
||||
q.c = c
|
||||
q.d[i].k, q.d[i].v = k, v
|
||||
t.c++
|
||||
return q
|
||||
}
|
||||
|
||||
// Last returns the last item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) Last() (k interface{} /*K*/, v interface{} /*V*/) {
|
||||
if q := t.last; q != nil {
|
||||
q := &q.d[q.c-1]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of items in the tree.
|
||||
func (t *Tree) Len() int {
|
||||
return t.c
|
||||
}
|
||||
|
||||
func (t *Tree) overflow(p *x, q *d, pi, i int, k interface{} /*K*/, v interface{} /*V*/) {
|
||||
t.ver++
|
||||
l, r := p.siblings(pi)
|
||||
|
||||
if l != nil && l.c < 2*kd {
|
||||
l.mvL(q, 1)
|
||||
t.insert(q, i-1, k, v)
|
||||
p.x[pi-1].k = q.d[0].k
|
||||
return
|
||||
}
|
||||
|
||||
if r != nil && r.c < 2*kd {
|
||||
if i < 2*kd {
|
||||
q.mvR(r, 1)
|
||||
t.insert(q, i, k, v)
|
||||
p.x[pi].k = r.d[0].k
|
||||
return
|
||||
}
|
||||
|
||||
t.insert(r, 0, k, v)
|
||||
p.x[pi].k = k
|
||||
return
|
||||
}
|
||||
|
||||
t.split(p, q, pi, i, k, v)
|
||||
}
|
||||
|
||||
// Seek returns an Enumerator positioned on a an item such that k >= item's
|
||||
// key. ok reports if k == item.key The Enumerator's position is possibly
|
||||
// after the last item in the tree.
|
||||
func (t *Tree) Seek(k interface{} /*K*/) (e *Enumerator, ok bool) {
|
||||
q := t.r
|
||||
if q == nil {
|
||||
e = btEPool.get(nil, false, 0, k, nil, t, t.ver)
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
var i int
|
||||
if i, ok = t.find(q, k); ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i+1].ch
|
||||
continue
|
||||
case *d:
|
||||
return btEPool.get(nil, ok, i, k, x, t, t.ver), true
|
||||
}
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
q = x.x[i].ch
|
||||
case *d:
|
||||
return btEPool.get(nil, ok, i, k, x, t, t.ver), false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekFirst() (e *Enumerator, err error) {
|
||||
q := t.first
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, 0, q.d[0].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// SeekLast returns an enumerator positioned on the last KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekLast() (e *Enumerator, err error) {
|
||||
q := t.last
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, q.c-1, q.d[q.c-1].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// Set sets the value associated with k.
|
||||
func (t *Tree) Set(k interface{} /*K*/, v interface{} /*V*/) {
|
||||
//dbg("--- PRE Set(%v, %v)\n%s", k, v, t.dump())
|
||||
//defer func() {
|
||||
// dbg("--- POST\n%s\n====\n", t.dump())
|
||||
//}()
|
||||
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
if q == nil {
|
||||
z := t.insert(btDPool.Get().(*d), 0, k, v)
|
||||
t.r, t.first, t.last = z, z, z
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
i, ok := t.find(q, k)
|
||||
if ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i + 1
|
||||
p = x
|
||||
q = x.x[i+1].ch
|
||||
continue
|
||||
case *d:
|
||||
x.d[i].v = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
case *d:
|
||||
switch {
|
||||
case x.c < 2*kd:
|
||||
t.insert(x, i, k, v)
|
||||
default:
|
||||
t.overflow(p, x, pi, i, k, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Put combines Get and Set in a more efficient way where the tree is walked
|
||||
// only once. The upd(ater) receives (old-value, true) if a KV pair for k
|
||||
// exists or (zero-value, false) otherwise. It can then return a (new-value,
|
||||
// true) to create or overwrite the existing value in the KV pair, or
|
||||
// (whatever, false) if it decides not to create or not to update the value of
|
||||
// the KV pair.
|
||||
//
|
||||
// tree.Set(k, v) call conceptually equals calling
|
||||
//
|
||||
// tree.Put(k, func(interface{} /*K*/, bool){ return v, true })
|
||||
//
|
||||
// modulo the differing return values.
|
||||
func (t *Tree) Put(k interface{} /*K*/, upd func(oldV interface{} /*V*/, exists bool) (newV interface{} /*V*/, write bool)) (oldV interface{} /*V*/, written bool) {
|
||||
pi := -1
|
||||
var p *x
|
||||
q := t.r
|
||||
var newV interface{} /*V*/
|
||||
if q == nil {
|
||||
// new KV pair in empty tree
|
||||
newV, written = upd(newV, false)
|
||||
if !written {
|
||||
return
|
||||
}
|
||||
|
||||
z := t.insert(btDPool.Get().(*d), 0, k, newV)
|
||||
t.r, t.first, t.last = z, z, z
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
i, ok := t.find(q, k)
|
||||
if ok {
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i + 1
|
||||
p = x
|
||||
q = x.x[i+1].ch
|
||||
continue
|
||||
case *d:
|
||||
oldV = x.d[i].v
|
||||
newV, written = upd(oldV, true)
|
||||
if !written {
|
||||
return
|
||||
}
|
||||
|
||||
x.d[i].v = newV
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
switch x := q.(type) {
|
||||
case *x:
|
||||
if x.c > 2*kx {
|
||||
x, i = t.splitX(p, x, pi, i)
|
||||
}
|
||||
pi = i
|
||||
p = x
|
||||
q = x.x[i].ch
|
||||
case *d: // new KV pair
|
||||
newV, written = upd(newV, false)
|
||||
if !written {
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case x.c < 2*kd:
|
||||
t.insert(x, i, k, newV)
|
||||
default:
|
||||
t.overflow(p, x, pi, i, k, newV)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) split(p *x, q *d, pi, i int, k interface{} /*K*/, v interface{} /*V*/) {
|
||||
t.ver++
|
||||
r := btDPool.Get().(*d)
|
||||
if q.n != nil {
|
||||
r.n = q.n
|
||||
r.n.p = r
|
||||
} else {
|
||||
t.last = r
|
||||
}
|
||||
q.n = r
|
||||
r.p = q
|
||||
|
||||
copy(r.d[:], q.d[kd:2*kd])
|
||||
for i := range q.d[kd:] {
|
||||
q.d[kd+i] = zde
|
||||
}
|
||||
q.c = kd
|
||||
r.c = kd
|
||||
var done bool
|
||||
if i > kd {
|
||||
done = true
|
||||
t.insert(r, i-kd, k, v)
|
||||
}
|
||||
if pi >= 0 {
|
||||
p.insert(pi, r.d[0].k, r)
|
||||
} else {
|
||||
t.r = newX(q).insert(0, r.d[0].k, r)
|
||||
}
|
||||
if done {
|
||||
return
|
||||
}
|
||||
|
||||
t.insert(q, i, k, v)
|
||||
}
|
||||
|
||||
func (t *Tree) splitX(p *x, q *x, pi int, i int) (*x, int) {
|
||||
t.ver++
|
||||
r := btXPool.Get().(*x)
|
||||
copy(r.x[:], q.x[kx+1:])
|
||||
q.c = kx
|
||||
r.c = kx
|
||||
if pi >= 0 {
|
||||
p.insert(pi, q.x[kx].k, r)
|
||||
q.x[kx].k = zk
|
||||
for i := range q.x[kx+1:] {
|
||||
q.x[kx+i+1] = zxe
|
||||
}
|
||||
|
||||
switch {
|
||||
case i < kx:
|
||||
return q, i
|
||||
case i == kx:
|
||||
return p, pi
|
||||
default: // i > kx
|
||||
return r, i - kx - 1
|
||||
}
|
||||
}
|
||||
|
||||
nr := newX(q).insert(0, q.x[kx].k, r)
|
||||
t.r = nr
|
||||
q.x[kx].k = zk
|
||||
for i := range q.x[kx+1:] {
|
||||
q.x[kx+i+1] = zxe
|
||||
}
|
||||
|
||||
switch {
|
||||
case i < kx:
|
||||
return q, i
|
||||
case i == kx:
|
||||
return nr, 0
|
||||
default: // i > kx
|
||||
return r, i - kx - 1
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) underflow(p *x, q *d, pi int) {
|
||||
t.ver++
|
||||
l, r := p.siblings(pi)
|
||||
|
||||
if l != nil && l.c+q.c >= 2*kd {
|
||||
l.mvR(q, 1)
|
||||
p.x[pi-1].k = q.d[0].k
|
||||
return
|
||||
}
|
||||
|
||||
if r != nil && q.c+r.c >= 2*kd {
|
||||
q.mvL(r, 1)
|
||||
p.x[pi].k = r.d[0].k
|
||||
r.d[r.c] = zde // GC
|
||||
return
|
||||
}
|
||||
|
||||
if l != nil {
|
||||
t.cat(p, l, q, pi-1)
|
||||
return
|
||||
}
|
||||
|
||||
t.cat(p, q, r, pi)
|
||||
}
|
||||
|
||||
func (t *Tree) underflowX(p *x, q *x, pi int, i int) (*x, int) {
|
||||
t.ver++
|
||||
var l, r *x
|
||||
|
||||
if pi >= 0 {
|
||||
if pi > 0 {
|
||||
l = p.x[pi-1].ch.(*x)
|
||||
}
|
||||
if pi < p.c {
|
||||
r = p.x[pi+1].ch.(*x)
|
||||
}
|
||||
}
|
||||
|
||||
if l != nil && l.c > kx {
|
||||
q.x[q.c+1].ch = q.x[q.c].ch
|
||||
copy(q.x[1:], q.x[:q.c])
|
||||
q.x[0].ch = l.x[l.c].ch
|
||||
q.x[0].k = p.x[pi-1].k
|
||||
q.c++
|
||||
i++
|
||||
l.c--
|
||||
p.x[pi-1].k = l.x[l.c].k
|
||||
return q, i
|
||||
}
|
||||
|
||||
if r != nil && r.c > kx {
|
||||
q.x[q.c].k = p.x[pi].k
|
||||
q.c++
|
||||
q.x[q.c].ch = r.x[0].ch
|
||||
p.x[pi].k = r.x[0].k
|
||||
copy(r.x[:], r.x[1:r.c])
|
||||
r.c--
|
||||
rc := r.c
|
||||
r.x[rc].ch = r.x[rc+1].ch
|
||||
r.x[rc].k = zk
|
||||
r.x[rc+1].ch = nil
|
||||
return q, i
|
||||
}
|
||||
|
||||
if l != nil {
|
||||
i += l.c + 1
|
||||
t.catX(p, l, q, pi-1)
|
||||
q = l
|
||||
return q, i
|
||||
}
|
||||
|
||||
t.catX(p, q, r, pi)
|
||||
return q, i
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------- Enumerator
|
||||
|
||||
// Close recycles e to a pool for possible later reuse. No references to e
|
||||
// should exist or such references must not be used afterwards.
|
||||
func (e *Enumerator) Close() {
|
||||
*e = ze
|
||||
btEPool.Put(e)
|
||||
}
|
||||
|
||||
// Next returns the currently enumerated item, if it exists and moves to the
|
||||
// next item in the key collation order. If there is no item to return, err ==
|
||||
// io.EOF is returned.
|
||||
func (e *Enumerator) Next() (k interface{} /*K*/, v interface{} /*V*/, err error) {
|
||||
if err = e.err; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if e.ver != e.t.ver {
|
||||
f, hit := e.t.Seek(e.k)
|
||||
if !e.hit && hit {
|
||||
if err = f.next(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
*e = *f
|
||||
f.Close()
|
||||
}
|
||||
if e.q == nil {
|
||||
e.err, err = io.EOF, io.EOF
|
||||
return
|
||||
}
|
||||
|
||||
if e.i >= e.q.c {
|
||||
if err = e.next(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
i := e.q.d[e.i]
|
||||
k, v = i.k, i.v
|
||||
e.k, e.hit = k, false
|
||||
e.next()
|
||||
return
|
||||
}
|
||||
|
||||
func (e *Enumerator) next() error {
|
||||
if e.q == nil {
|
||||
e.err = io.EOF
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
switch {
|
||||
case e.i < e.q.c-1:
|
||||
e.i++
|
||||
default:
|
||||
if e.q, e.i = e.q.n, 0; e.q == nil {
|
||||
e.err = io.EOF
|
||||
}
|
||||
}
|
||||
return e.err
|
||||
}
|
||||
|
||||
// Prev returns the currently enumerated item, if it exists and moves to the
// previous item in the key collation order. If there is no item to return, err
// == io.EOF is returned.
func (e *Enumerator) Prev() (k interface{} /*K*/, v interface{} /*V*/, err error) {
    // io.EOF is sticky: once set it is returned forever.
    if err = e.err; err != nil {
        return
    }

    // Resync after a tree mutation, mirroring Next but stepping backwards
    // when the re-seek landed exactly on the remembered key.
    if e.ver != e.t.ver {
        f, hit := e.t.Seek(e.k)
        if !e.hit && hit {
            if err = f.prev(); err != nil {
                return
            }
        }

        *e = *f
        f.Close()
    }
    if e.q == nil {
        e.err, err = io.EOF, io.EOF
        return
    }

    // NOTE(review): this clamp calls e.next(), not e.prev(), when the index
    // points past the page's items — presumably to first land on a valid
    // item before stepping back; confirm against upstream cznic/b intent.
    if e.i >= e.q.c {
        if err = e.next(); err != nil {
            return
        }
    }

    i := e.q.d[e.i]
    k, v = i.k, i.v
    e.k, e.hit = k, false
    // Best-effort retreat; a failure surfaces as io.EOF on the NEXT call.
    e.prev()
    return
}
|
||||
|
||||
// prev retreats the enumerator by one position, following the data page's
// backward link (e.q.p) when the start of the current page is reached.
// Returns and latches io.EOF when there is nothing further.
func (e *Enumerator) prev() error {
    if e.q == nil {
        e.err = io.EOF
        return io.EOF
    }

    switch {
    case e.i > 0:
        e.i--
    default:
        // Hop to the previous data page; nil means we ran off the left edge.
        if e.q = e.q.p; e.q == nil {
            e.err = io.EOF
            break
        }

        e.i = e.q.c - 1
    }
    return e.err
}
|
53
Godeps/_workspace/src/github.com/cznic/b/doc.go
generated
vendored
Normal file
53
Godeps/_workspace/src/github.com/cznic/b/doc.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// Copyright 2014 The b Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package b implements the B+tree flavor of a BTree.
|
||||
//
|
||||
// Changelog
|
||||
//
|
||||
// 2014-06-26: Lower GC pressure by recycling things.
|
||||
//
|
||||
// 2014-04-18: Added new method Put.
|
||||
//
|
||||
// Generic types
|
||||
//
|
||||
// Keys and their associated values are interface{} typed, similar to all of
|
||||
// the containers in the standard library.
|
||||
//
|
||||
// Semiautomatic production of a type specific variant of this package is
|
||||
// supported via
|
||||
//
|
||||
// $ make generic
|
||||
//
|
||||
// This command will write to stdout a version of the btree.go file where every
|
||||
// key type occurrence is replaced by the word 'KEY' and every value type
|
||||
// occurrence is replaced by the word 'VALUE'. Then you have to replace these
|
||||
// tokens with your desired type(s), using any technique you're comfortable
|
||||
// with.
|
||||
//
|
||||
// This is how, for example, 'example/int.go' was created:
|
||||
//
|
||||
// $ mkdir example
|
||||
// $ make generic | sed -e 's/KEY/int/g' -e 's/VALUE/int/g' > example/int.go
|
||||
//
|
||||
// No other changes to int.go are necessary, it compiles just fine.
|
||||
//
|
||||
// Running the benchmarks for 1000 keys on a machine with Intel i5-4670 CPU @
|
||||
// 3.4GHz, Go release 1.4.2.
|
||||
//
|
||||
// $ go test -bench 1e3 example/all_test.go example/int.go
|
||||
// PASS
|
||||
// BenchmarkSetSeq1e3 10000 151620 ns/op
|
||||
// BenchmarkGetSeq1e3 10000 115354 ns/op
|
||||
// BenchmarkSetRnd1e3 5000 255865 ns/op
|
||||
// BenchmarkGetRnd1e3 10000 140466 ns/op
|
||||
// BenchmarkDelSeq1e3 10000 143860 ns/op
|
||||
// BenchmarkDelRnd1e3 10000 188228 ns/op
|
||||
// BenchmarkSeekSeq1e3 10000 156448 ns/op
|
||||
// BenchmarkSeekRnd1e3 10000 190587 ns/op
|
||||
// BenchmarkNext1e3 200000 9407 ns/op
|
||||
// BenchmarkPrev1e3 200000 9306 ns/op
|
||||
// ok command-line-arguments 26.369s
|
||||
// $
|
||||
package b
|
35
Godeps/_workspace/src/github.com/cznic/b/example/Makefile
generated
vendored
Normal file
35
Godeps/_workspace/src/github.com/cznic/b/example/Makefile
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
# Copyright 2014 The b Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
.PHONY: all todo clean cover mem
|
||||
|
||||
testbin=b.test
|
||||
|
||||
all: editor
|
||||
go build
|
||||
go vet
|
||||
make todo
|
||||
|
||||
editor:
|
||||
go fmt
|
||||
go test -i
|
||||
go test
|
||||
|
||||
mem:
|
||||
go test -c
|
||||
./$(testbin) -test.bench . -test.memprofile mem.out -test.memprofilerate 1
|
||||
go tool pprof --lines --web --alloc_space $(testbin) mem.out
|
||||
|
||||
todo:
|
||||
@grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* *.go || true
|
||||
@grep -n TODO *.go || true
|
||||
@grep -n BUG *.go || true
|
||||
@grep -n println *.go || true
|
||||
|
||||
clean:
|
||||
@go clean
|
||||
rm -f *~
|
||||
|
||||
cover:
|
||||
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
|
1126
Godeps/_workspace/src/github.com/cznic/b/example/all_test.go
generated
vendored
Normal file
1126
Godeps/_workspace/src/github.com/cznic/b/example/all_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
929
Godeps/_workspace/src/github.com/cznic/b/example/int.go
generated
vendored
Normal file
929
Godeps/_workspace/src/github.com/cznic/b/example/int.go
generated
vendored
Normal file
@ -0,0 +1,929 @@
|
||||
// Copyright 2014 The b Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package b
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// kx and kd size the index and data pages respectively: an index page holds
// up to 2*kx+1 keys, a data page up to 2*kd items.
const (
    kx = 32 //TODO benchmark tune this number if using custom key/value type(s).
    kd = 32 //TODO benchmark tune this number if using custom key/value type(s).
)

// init enforces the minimal page sizes the algorithms assume; violating them
// is a build-configuration bug, hence panic.
func init() {
    if kd < 1 {
        panic(fmt.Errorf("kd %d: out of range", kd))
    }

    if kx < 2 {
        panic(fmt.Errorf("kx %d: out of range", kx))
    }
}
|
||||
|
||||
// Free-lists for the four heap-heavy object kinds. Recycling pages, trees
// and enumerators through sync.Pool keeps GC pressure low under churn.
var (
    btDPool = sync.Pool{New: func() interface{} { return &d{} }}
    btEPool = btEpool{sync.Pool{New: func() interface{} { return &Enumerator{} }}}
    btTPool = btTpool{sync.Pool{New: func() interface{} { return &Tree{} }}}
    btXPool = sync.Pool{New: func() interface{} { return &x{} }}
)
|
||||
|
||||
type btTpool struct{ sync.Pool }
|
||||
|
||||
func (p *btTpool) get(cmp Cmp) *Tree {
|
||||
x := p.Get().(*Tree)
|
||||
x.cmp = cmp
|
||||
return x
|
||||
}
|
||||
|
||||
type btEpool struct{ sync.Pool }
|
||||
|
||||
func (p *btEpool) get(err error, hit bool, i int, k int, q *d, t *Tree, ver int64) *Enumerator {
|
||||
x := p.Get().(*Enumerator)
|
||||
x.err, x.hit, x.i, x.k, x.q, x.t, x.ver = err, hit, i, k, q, t, ver
|
||||
return x
|
||||
}
|
||||
|
||||
type (
    // Cmp compares a and b. Return value is:
    //
    //	< 0 if a <  b
    //	  0 if a == b
    //	> 0 if a >  b
    //
    Cmp func(a, b int) int

    d struct { // data page (B+tree leaf)
        c int           // number of items currently in d
        d [2*kd + 1]de  // items; one slot of slack for overflow handling
        n *d            // next data page (key order); nil at the right edge
        p *d            // previous data page; nil at the left edge
    }

    de struct { // d element: one key/value item
        k int
        v int
    }

    // Enumerator captures the state of enumerating a tree. It is returned
    // from the Seek* methods. The enumerator is aware of any mutations
    // made to the tree in the process of enumerating it and automatically
    // resumes the enumeration at the proper key, if possible.
    //
    // However, once an Enumerator returns io.EOF to signal "no more
    // items", it does no more attempt to "resync" on tree mutation(s). In
    // other words, io.EOF from an Enumerator is "sticky" (idempotent).
    Enumerator struct {
        err error // latched error (sticky io.EOF)
        hit bool  // whether the last Seek landed exactly on k
        i   int   // index within the current data page
        k   int   // key to re-seek to after a tree mutation
        q   *d    // current data page; nil == exhausted
        t   *Tree
        ver int64 // tree version this state was taken at
    }

    // Tree is a B+tree.
    Tree struct {
        c     int         // total number of items
        cmp   Cmp         // key collation function
        first *d          // leftmost data page
        last  *d          // rightmost data page
        r     interface{} // root: *x, *d, or nil for an empty tree
        ver   int64       // bumped on every mutation; invalidates enumerators
    }

    xe struct { // x element: separator key plus the child left of it
        ch interface{}
        k  int
    }

    x struct { // index page (B+tree interior node)
        c int            // number of separator keys currently in x
        x [2*kx + 2]xe   // children/keys; slack slots for overflow handling
    }
)
|
||||
|
||||
// Read-only zero values used to wipe recycled objects before pooling and to
// clear slots for the garbage collector. Never written to.
var ( // R/O zero values
    zd  d
    zde de
    ze  Enumerator
    zk  int
    zt  Tree
    zx  x
    zxe xe
)
|
||||
|
||||
// clr recursively zeroes and returns to the pools every page reachable from
// q. Note the type switch binding deliberately shadows the type name x/d.
func clr(q interface{}) {
    switch x := q.(type) {
    case *x:
        for i := 0; i <= x.c; i++ { // Ch0 Sep0 ... Chn-1 Sepn-1 Chn
            clr(x.x[i].ch)
        }
        *x = zx // wipe before pooling so no child pointers are retained
        btXPool.Put(x)
    case *d:
        *x = zd
        btDPool.Put(x)
    }
}
|
||||
|
||||
// -------------------------------------------------------------------------- x
|
||||
|
||||
func newX(ch0 interface{}) *x {
|
||||
r := btXPool.Get().(*x)
|
||||
r.x[0].ch = ch0
|
||||
return r
|
||||
}
|
||||
|
||||
// extract removes separator key i (and compacts the page), keeping the child
// to its left. Vacated tail slots are zeroed so the GC can reclaim referents.
func (q *x) extract(i int) {
    q.c--
    if i < q.c {
        copy(q.x[i:], q.x[i+1:q.c+1])
        q.x[q.c].ch = q.x[q.c+1].ch
        q.x[q.c].k = zk  // GC
        q.x[q.c+1] = zxe // GC
    }
}
|
||||
|
||||
// insert places separator key k at position i with ch as the child to its
// right, shifting later entries one slot rightwards. The page must have
// room (caller splits first). Returns q for chaining.
func (q *x) insert(i int, k int, ch interface{}) *x {
    c := q.c
    if i < c {
        // Shift: move the trailing child, then the packed xe entries, then
        // the key adjacent to the insertion point. Order matters because
        // the ranges overlap.
        q.x[c+1].ch = q.x[c].ch
        copy(q.x[i+2:], q.x[i+1:c])
        q.x[i+1].k = q.x[i].k
    }
    c++
    q.c = c
    q.x[i].k = k
    q.x[i+1].ch = ch
    return q
}
|
||||
|
||||
func (q *x) siblings(i int) (l, r *d) {
|
||||
if i >= 0 {
|
||||
if i > 0 {
|
||||
l = q.x[i-1].ch.(*d)
|
||||
}
|
||||
if i < q.c {
|
||||
r = q.x[i+1].ch.(*d)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------- d
|
||||
|
||||
// mvL moves the first c items of r onto the end of l (left rotation between
// neighbouring data pages). The second copy compacts r; it may overlap, which
// copy handles correctly.
func (l *d) mvL(r *d, c int) {
    copy(l.d[l.c:], r.d[:c])
    copy(r.d[:], r.d[c:r.c])
    l.c += c
    r.c -= c
}
|
||||
|
||||
// mvR moves the last c items of l onto the front of r (right rotation between
// neighbouring data pages). r's items are shifted right first; the overlap is
// safe with copy.
func (l *d) mvR(r *d, c int) {
    copy(r.d[c:], r.d[:r.c])
    copy(r.d[:c], l.d[l.c-c:])
    r.c += c
    l.c -= c
}
|
||||
|
||||
// ----------------------------------------------------------------------- Tree
|
||||
|
||||
// TreeNew returns a newly created, empty Tree. The compare function is used
|
||||
// for key collation.
|
||||
func TreeNew(cmp Cmp) *Tree {
|
||||
return btTPool.get(cmp)
|
||||
}
|
||||
|
||||
// Clear removes all K/V pairs from the tree.
|
||||
func (t *Tree) Clear() {
|
||||
if t.r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
clr(t.r)
|
||||
t.c, t.first, t.last, t.r = 0, nil, nil, nil
|
||||
t.ver++
|
||||
}
|
||||
|
||||
// Close performs Clear and recycles t to a pool for possible later reuse. No
// references to t should exist or such references must not be used afterwards.
func (t *Tree) Close() {
    t.Clear()
    *t = zt // wipe cmp and counters before pooling
    btTPool.Put(t)
}
|
||||
|
||||
// cat concatenates data page r into its left sibling q (children of index
// page p separated by key pi), recycles r, and removes the separator. If p
// would be left with a single child, the root collapses onto q.
func (t *Tree) cat(p *x, q, r *d, pi int) {
    t.ver++
    q.mvL(r, r.c) // move everything from r into q
    if r.n != nil {
        r.n.p = q
    } else {
        t.last = q // r was the rightmost data page
    }
    q.n = r.n
    *r = zd
    btDPool.Put(r)
    if p.c > 1 {
        p.extract(pi)
        p.x[pi].ch = q
        return
    }

    // p had a single separator: drop the old root page and make q the root.
    switch x := t.r.(type) {
    case *x:
        *x = zx
        btXPool.Put(x)
    case *d:
        *x = zd
        btDPool.Put(x)
    }
    t.r = q
}
|
||||
|
||||
// catX concatenates index page r into its left sibling q, pulling the
// separator key pi down from parent p, then recycles r. If p would be left
// with a single child, the root collapses onto q.
func (t *Tree) catX(p, q, r *x, pi int) {
    t.ver++
    q.x[q.c].k = p.x[pi].k // separator comes down between q's and r's keys
    copy(q.x[q.c+1:], r.x[:r.c])
    q.c += r.c + 1
    q.x[q.c].ch = r.x[r.c].ch
    *r = zx
    btXPool.Put(r)
    if p.c > 1 {
        // Remove separator pi from p and compact; zero vacated slots for GC.
        p.c--
        pc := p.c
        if pi < pc {
            p.x[pi].k = p.x[pi+1].k
            copy(p.x[pi+1:], p.x[pi+2:pc+1])
            p.x[pc].ch = p.x[pc+1].ch
            p.x[pc].k = zk      // GC
            p.x[pc+1].ch = nil  // GC
        }
        return
    }

    // p had a single separator: drop the old root page and make q the root.
    switch x := t.r.(type) {
    case *x:
        *x = zx
        btXPool.Put(x)
    case *d:
        *x = zd
        btDPool.Put(x)
    }
    t.r = q
}
|
||||
|
||||
// Delete removes the k's KV pair, if it exists, in which case Delete returns
// true.
//
// Walks from the root, proactively refilling any underfull index page on the
// way down (so a later merge never propagates back up), then extracts the
// item from its data page and rebalances that page if needed.
func (t *Tree) Delete(k int) (ok bool) {
    pi := -1 // index of the separator in p above q; -1 at the root
    var p *x
    q := t.r
    if q == nil {
        return false
    }

    for {
        var i int
        i, ok = t.find(q, k)
        if ok {
            switch x := q.(type) {
            case *x:
                // Key found as a separator: the item itself lives in the
                // right subtree; keep descending.
                if x.c < kx && q != t.r {
                    x, i = t.underflowX(p, x, pi, i)
                }
                pi = i + 1
                p = x
                q = x.x[pi].ch
                ok = false
                continue
            case *d:
                t.extract(x, i)
                if x.c >= kd {
                    return true // page still at least half full
                }

                if q != t.r {
                    t.underflow(p, x, pi)
                } else if t.c == 0 {
                    t.Clear() // root data page emptied: tree is now empty
                }
                return true
            }
        }

        switch x := q.(type) {
        case *x:
            if x.c < kx && q != t.r {
                x, i = t.underflowX(p, x, pi, i)
            }
            pi = i
            p = x
            q = x.x[i].ch
        case *d:
            return false // reached a leaf without finding k
        }
    }
}
|
||||
|
||||
// extract removes item i from data page q, compacting the page and zeroing
// the vacated slot for the GC, and decrements the tree's item count.
func (t *Tree) extract(q *d, i int) { // (r int) {
    t.ver++
    //r = q.d[i].v // prepared for Extract
    q.c--
    if i < q.c {
        copy(q.d[i:], q.d[i+1:q.c+1])
    }
    q.d[q.c] = zde // GC
    t.c--
    return
}
|
||||
|
||||
// find binary-searches page q (index or data) for key k. On an exact match
// it returns (position, true); otherwise (insertion point, false). The two
// near-identical search bodies are deliberate — this generated code avoids an
// indirection/closure on the hottest path in the package.
func (t *Tree) find(q interface{}, k int) (i int, ok bool) {
    var mk int
    l := 0
    switch x := q.(type) {
    case *x:
        h := x.c - 1
        for l <= h {
            m := (l + h) >> 1
            mk = x.x[m].k
            switch cmp := t.cmp(k, mk); {
            case cmp > 0:
                l = m + 1
            case cmp == 0:
                return m, true
            default:
                h = m - 1
            }
        }
    case *d:
        h := x.c - 1
        for l <= h {
            m := (l + h) >> 1
            mk = x.d[m].k
            switch cmp := t.cmp(k, mk); {
            case cmp > 0:
                l = m + 1
            case cmp == 0:
                return m, true
            default:
                h = m - 1
            }
        }
    }
    return l, false
}
|
||||
|
||||
// First returns the first item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) First() (k int, v int) {
|
||||
if q := t.first; q != nil {
|
||||
q := &q.d[0]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Get returns the value associated with k and true if it exists. Otherwise Get
// returns (zero-value, false).
func (t *Tree) Get(k int) (v int, ok bool) {
    q := t.r
    if q == nil {
        return
    }

    for {
        var i int
        if i, ok = t.find(q, k); ok {
            switch x := q.(type) {
            case *x:
                // Exact separator hit: the item lives in the right subtree.
                q = x.x[i+1].ch
                continue
            case *d:
                return x.d[i].v, true
            }
        }
        switch x := q.(type) {
        case *x:
            q = x.x[i].ch // descend towards the insertion point
        default:
            return // data page without the key: not present
        }
    }
}
|
||||
|
||||
// insert places item (k, v) at position i of data page q, shifting later
// items right. The page must have room (caller handles overflow). Returns q
// for chaining.
func (t *Tree) insert(q *d, i int, k int, v int) *d {
    t.ver++
    c := q.c
    if i < c {
        copy(q.d[i+1:], q.d[i:c])
    }
    c++
    q.c = c
    q.d[i].k, q.d[i].v = k, v
    t.c++
    return q
}
|
||||
|
||||
// Last returns the last item of the tree in the key collating order, or
|
||||
// (zero-value, zero-value) if the tree is empty.
|
||||
func (t *Tree) Last() (k int, v int) {
|
||||
if q := t.last; q != nil {
|
||||
q := &q.d[q.c-1]
|
||||
k, v = q.k, q.v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Len returns the number of items in the tree.
// O(1): the count is maintained incrementally by insert/extract.
func (t *Tree) Len() int {
    return t.c
}
|
||||
|
||||
// overflow handles inserting (k, v) at position i of a full data page q
// (child pi of p): rotate one item into a non-full left or right sibling if
// possible, otherwise split q.
func (t *Tree) overflow(p *x, q *d, pi, i int, k int, v int) {
    t.ver++
    l, r := p.siblings(pi)

    if l != nil && l.c < 2*kd {
        l.mvL(q, 1)
        t.insert(q, i-1, k, v)
        p.x[pi-1].k = q.d[0].k // q's first key changed; fix separator
        return
    }

    if r != nil && r.c < 2*kd {
        if i < 2*kd {
            q.mvR(r, 1)
            t.insert(q, i, k, v)
            p.x[pi].k = r.d[0].k // r's first key changed; fix separator
            return
        }

        // New item would be q's last slot: put it straight at r's front.
        t.insert(r, 0, k, v)
        p.x[pi].k = k
        return
    }

    t.split(p, q, pi, i, k, v)
}
|
||||
|
||||
// Seek returns an Enumerator positioned on an item such that k >= item's
// key. ok reports if k == item.key. The Enumerator's position is possibly
// after the last item in the tree.
func (t *Tree) Seek(k int) (e *Enumerator, ok bool) {
    q := t.r
    if q == nil {
        // Empty tree: a parked enumerator that will resync if the tree grows.
        e = btEPool.get(nil, false, 0, k, nil, t, t.ver)
        return
    }

    for {
        var i int
        if i, ok = t.find(q, k); ok {
            switch x := q.(type) {
            case *x:
                // Exact separator hit: the item lives in the right subtree.
                q = x.x[i+1].ch
                continue
            case *d:
                return btEPool.get(nil, ok, i, k, x, t, t.ver), true
            }
        }

        switch x := q.(type) {
        case *x:
            q = x.x[i].ch
        case *d:
            // Not found: position at the insertion point (hit == false).
            return btEPool.get(nil, ok, i, k, x, t, t.ver), false
        }
    }
}
|
||||
|
||||
// SeekFirst returns an enumerator positioned on the first KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekFirst() (e *Enumerator, err error) {
|
||||
q := t.first
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, 0, q.d[0].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// SeekLast returns an enumerator positioned on the last KV pair in the tree,
|
||||
// if any. For an empty tree, err == io.EOF is returned and e will be nil.
|
||||
func (t *Tree) SeekLast() (e *Enumerator, err error) {
|
||||
q := t.last
|
||||
if q == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return btEPool.get(nil, true, q.c-1, q.d[q.c-1].k, q, t, t.ver), nil
|
||||
}
|
||||
|
||||
// Set sets the value associated with k.
//
// Descends from the root, proactively splitting any full index page on the
// way down (so a later split never propagates back up), then either updates
// the existing item in place or inserts into the target data page.
func (t *Tree) Set(k int, v int) {
    //dbg("--- PRE Set(%v, %v)\n%s", k, v, t.dump())
    //defer func() {
    //	dbg("--- POST\n%s\n====\n", t.dump())
    //}()

    pi := -1 // index of the separator in p above q; -1 at the root
    var p *x
    q := t.r
    if q == nil {
        // Empty tree: the new root is a single data page with one item.
        z := t.insert(btDPool.Get().(*d), 0, k, v)
        t.r, t.first, t.last = z, z, z
        return
    }

    for {
        i, ok := t.find(q, k)
        if ok {
            switch x := q.(type) {
            case *x:
                // Exact separator hit: the item lives in the right subtree.
                if x.c > 2*kx {
                    x, i = t.splitX(p, x, pi, i)
                }
                pi = i + 1
                p = x
                q = x.x[i+1].ch
                continue
            case *d:
                x.d[i].v = v // in-place update
            }
            return
        }

        switch x := q.(type) {
        case *x:
            if x.c > 2*kx {
                x, i = t.splitX(p, x, pi, i)
            }
            pi = i
            p = x
            q = x.x[i].ch
        case *d:
            switch {
            case x.c < 2*kd:
                t.insert(x, i, k, v)
            default:
                t.overflow(p, x, pi, i, k, v)
            }
            return
        }
    }
}
|
||||
|
||||
// Put combines Get and Set in a more efficient way where the tree is walked
// only once. The upd(ater) receives (old-value, true) if a KV pair for k
// exists or (zero-value, false) otherwise. It can then return a (new-value,
// true) to create or overwrite the existing value in the KV pair, or
// (whatever, false) if it decides not to create or not to update the value of
// the KV pair.
//
//	tree.Set(k, v) call conceptually equals calling
//
//	tree.Put(k, func(int, bool){ return v, true })
//
// modulo the differing return values.
func (t *Tree) Put(k int, upd func(oldV int, exists bool) (newV int, write bool)) (oldV int, written bool) {
    pi := -1 // index of the separator in p above q; -1 at the root
    var p *x
    q := t.r
    var newV int
    if q == nil {
        // new KV pair in empty tree
        newV, written = upd(newV, false)
        if !written {
            return
        }

        z := t.insert(btDPool.Get().(*d), 0, k, newV)
        t.r, t.first, t.last = z, z, z
        return
    }

    for {
        i, ok := t.find(q, k)
        if ok {
            switch x := q.(type) {
            case *x:
                // Exact separator hit: the item lives in the right subtree.
                // Split full index pages on the way down (see Set).
                if x.c > 2*kx {
                    x, i = t.splitX(p, x, pi, i)
                }
                pi = i + 1
                p = x
                q = x.x[i+1].ch
                continue
            case *d:
                oldV = x.d[i].v
                newV, written = upd(oldV, true)
                if !written {
                    return
                }

                x.d[i].v = newV
            }
            return
        }

        switch x := q.(type) {
        case *x:
            if x.c > 2*kx {
                x, i = t.splitX(p, x, pi, i)
            }
            pi = i
            p = x
            q = x.x[i].ch
        case *d: // new KV pair
            newV, written = upd(newV, false)
            if !written {
                return
            }

            switch {
            case x.c < 2*kd:
                t.insert(x, i, k, newV)
            default:
                t.overflow(p, x, pi, i, k, newV)
            }
            return
        }
    }
}
|
||||
|
||||
// split splits full data page q (child pi of p, or the root when pi < 0)
// into q and a new right sibling r, linking r into the leaf chain, then
// inserts (k, v) at logical position i into whichever half it belongs to.
func (t *Tree) split(p *x, q *d, pi, i int, k int, v int) {
    t.ver++
    r := btDPool.Get().(*d)
    if q.n != nil {
        r.n = q.n
        r.n.p = r
    } else {
        t.last = r // q was the rightmost data page
    }
    q.n = r
    r.p = q

    // Move q's upper half into r; zero the vacated slots for the GC.
    copy(r.d[:], q.d[kd:2*kd])
    for i := range q.d[kd:] {
        q.d[kd+i] = zde
    }
    q.c = kd
    r.c = kd
    var done bool
    if i > kd {
        done = true
        t.insert(r, i-kd, k, v) // new item belongs in the right half
    }
    if pi >= 0 {
        p.insert(pi, r.d[0].k, r)
    } else {
        // q was the root: grow the tree by one level.
        t.r = newX(q).insert(0, r.d[0].k, r)
    }
    if done {
        return
    }

    t.insert(q, i, k, v) // new item belongs in the left half
}
|
||||
|
||||
// splitX splits full index page q (child pi of p, or the root when pi < 0)
// into q and a new right sibling r, promoting q's middle key into p (or a
// new root). Returns the page and index where the descent at position i
// should continue.
func (t *Tree) splitX(p *x, q *x, pi int, i int) (*x, int) {
    t.ver++
    r := btXPool.Get().(*x)
    copy(r.x[:], q.x[kx+1:])
    q.c = kx
    r.c = kx
    if pi >= 0 {
        p.insert(pi, q.x[kx].k, r) // promote the middle key
        q.x[kx].k = zk
        for i := range q.x[kx+1:] {
            q.x[kx+i+1] = zxe // GC: clear moved-out entries
        }

        switch {
        case i < kx:
            return q, i
        case i == kx:
            return p, pi // descent continues at the promoted separator
        default: // i > kx
            return r, i - kx - 1
        }
    }

    // q was the root: grow the tree by one level.
    nr := newX(q).insert(0, q.x[kx].k, r)
    t.r = nr
    q.x[kx].k = zk
    for i := range q.x[kx+1:] {
        q.x[kx+i+1] = zxe // GC: clear moved-out entries
    }

    switch {
    case i < kx:
        return q, i
    case i == kx:
        return nr, 0
    default: // i > kx
        return r, i - kx - 1
    }
}
|
||||
|
||||
// underflow refills data page q (child pi of p) that dropped below kd items:
// borrow one item from a sibling that can spare it, otherwise merge with a
// sibling via cat.
func (t *Tree) underflow(p *x, q *d, pi int) {
    t.ver++
    l, r := p.siblings(pi)

    if l != nil && l.c+q.c >= 2*kd {
        l.mvR(q, 1)
        p.x[pi-1].k = q.d[0].k // q's first key changed; fix separator
        return
    }

    if r != nil && q.c+r.c >= 2*kd {
        q.mvL(r, 1)
        p.x[pi].k = r.d[0].k // r's first key changed; fix separator
        r.d[r.c] = zde       // GC
        return
    }

    if l != nil {
        t.cat(p, l, q, pi-1)
        return
    }

    t.cat(p, q, r, pi)
}
|
||||
|
||||
// underflowX refills index page q (child pi of p) that dropped below kx
// separators: rotate one key through the parent from a sibling that can
// spare it, otherwise merge with a sibling via catX. Returns the page and
// adjusted index where the descent at position i should continue.
func (t *Tree) underflowX(p *x, q *x, pi int, i int) (*x, int) {
    t.ver++
    var l, r *x

    if pi >= 0 {
        if pi > 0 {
            l = p.x[pi-1].ch.(*x)
        }
        if pi < p.c {
            r = p.x[pi+1].ch.(*x)
        }
    }

    if l != nil && l.c > kx {
        // Rotate right: l's last child + parent separator move into q.
        q.x[q.c+1].ch = q.x[q.c].ch
        copy(q.x[1:], q.x[:q.c])
        q.x[0].ch = l.x[l.c].ch
        q.x[0].k = p.x[pi-1].k
        q.c++
        i++ // everything in q shifted one slot right
        l.c--
        p.x[pi-1].k = l.x[l.c].k
        return q, i
    }

    if r != nil && r.c > kx {
        // Rotate left: parent separator + r's first child move into q.
        q.x[q.c].k = p.x[pi].k
        q.c++
        q.x[q.c].ch = r.x[0].ch
        p.x[pi].k = r.x[0].k
        copy(r.x[:], r.x[1:r.c])
        r.c--
        rc := r.c
        r.x[rc].ch = r.x[rc+1].ch
        r.x[rc].k = zk      // GC
        r.x[rc+1].ch = nil  // GC
        return q, i
    }

    if l != nil {
        i += l.c + 1 // q's entries now sit after l's in the merged page
        t.catX(p, l, q, pi-1)
        q = l
        return q, i
    }

    t.catX(p, q, r, pi)
    return q, i
}
|
||||
|
||||
// ----------------------------------------------------------------- Enumerator
|
||||
|
||||
// Close recycles e to a pool for possible later reuse. No references to e
// should exist or such references must not be used afterwards.
func (e *Enumerator) Close() {
    // Zero the whole struct first so a recycled Enumerator cannot leak the
    // previous enumeration's tree/page pointers to the next user.
    *e = ze
    btEPool.Put(e)
}
|
||||
|
||||
// Next returns the currently enumerated item, if it exists and moves to the
// next item in the key collation order. If there is no item to return, err ==
// io.EOF is returned.
func (e *Enumerator) Next() (k int, v int, err error) {
    // io.EOF is sticky: once set it is returned forever.
    if err = e.err; err != nil {
        return
    }

    // The tree was mutated since this enumerator was created: re-seek to the
    // remembered key and adopt the fresh enumerator's state.
    if e.ver != e.t.ver {
        f, hit := e.t.Seek(e.k)
        // If we were positioned between keys (!e.hit) and the re-seek landed
        // exactly on e.k (hit), step once so we don't return e.k itself.
        if !e.hit && hit {
            if err = f.next(); err != nil {
                return
            }
        }

        *e = *f
        f.Close()
    }
    if e.q == nil {
        e.err, err = io.EOF, io.EOF
        return
    }

    // Position may point past the last item of the page; advance to the next
    // data page first.
    if e.i >= e.q.c {
        if err = e.next(); err != nil {
            return
        }
    }

    i := e.q.d[e.i]
    k, v = i.k, i.v
    e.k, e.hit = k, false
    // Best-effort advance; a failure here surfaces as io.EOF on the NEXT call.
    e.next()
    return
}
|
||||
|
||||
// next advances the enumerator by one position, following the data page's
// forward link (e.q.n) when the current page is exhausted. Returns and
// latches io.EOF when there is nothing further.
func (e *Enumerator) next() error {
    if e.q == nil {
        e.err = io.EOF
        return io.EOF
    }

    switch {
    case e.i < e.q.c-1:
        e.i++
    default:
        // Hop to the next data page; nil means we ran off the right edge.
        if e.q, e.i = e.q.n, 0; e.q == nil {
            e.err = io.EOF
        }
    }
    return e.err
}
|
||||
|
||||
// Prev returns the currently enumerated item, if it exists and moves to the
// previous item in the key collation order. If there is no item to return, err
// == io.EOF is returned.
func (e *Enumerator) Prev() (k int, v int, err error) {
    // io.EOF is sticky: once set it is returned forever.
    if err = e.err; err != nil {
        return
    }

    // Resync after a tree mutation, mirroring Next but stepping backwards
    // when the re-seek landed exactly on the remembered key.
    if e.ver != e.t.ver {
        f, hit := e.t.Seek(e.k)
        if !e.hit && hit {
            if err = f.prev(); err != nil {
                return
            }
        }

        *e = *f
        f.Close()
    }
    if e.q == nil {
        e.err, err = io.EOF, io.EOF
        return
    }

    // NOTE(review): this clamp calls e.next(), not e.prev(), when the index
    // points past the page's items — presumably to first land on a valid
    // item before stepping back; confirm against upstream cznic/b intent.
    if e.i >= e.q.c {
        if err = e.next(); err != nil {
            return
        }
    }

    i := e.q.d[e.i]
    k, v = i.k, i.v
    e.k, e.hit = k, false
    // Best-effort retreat; a failure surfaces as io.EOF on the NEXT call.
    e.prev()
    return
}
|
||||
|
||||
// prev retreats the enumerator by one position, following the data page's
// backward link (e.q.p) when the start of the current page is reached.
// Returns and latches io.EOF when there is nothing further.
func (e *Enumerator) prev() error {
    if e.q == nil {
        e.err = io.EOF
        return io.EOF
    }

    switch {
    case e.i > 0:
        e.i--
    default:
        // Hop to the previous data page; nil means we ran off the left edge.
        if e.q = e.q.p; e.q == nil {
            e.err = io.EOF
            break
        }

        e.i = e.q.c - 1
    }
    return e.err
}
|
11
Godeps/_workspace/src/github.com/cznic/bufs/AUTHORS
generated
vendored
Normal file
11
Godeps/_workspace/src/github.com/cznic/bufs/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
# This file lists authors for copyright purposes. This file is distinct from
|
||||
# the CONTRIBUTORS files. See the latter for an explanation.
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Jan Mercl <0xjnml@gmail.com>
|
9
Godeps/_workspace/src/github.com/cznic/bufs/CONTRIBUTORS
generated
vendored
Normal file
9
Godeps/_workspace/src/github.com/cznic/bufs/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
# This file lists people who contributed code to this repository. The AUTHORS
|
||||
# file lists the copyright holders; this file lists people.
|
||||
#
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Jan Mercl <0xjnml@gmail.com>
|
27
Godeps/_workspace/src/github.com/cznic/bufs/LICENSE
generated
vendored
Normal file
27
Godeps/_workspace/src/github.com/cznic/bufs/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014 The bufs Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
31
Godeps/_workspace/src/github.com/cznic/bufs/Makefile
generated
vendored
Normal file
31
Godeps/_workspace/src/github.com/cznic/bufs/Makefile
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
# Copyright 2014 The bufs Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
all: clean
|
||||
go fmt
|
||||
go test -i
|
||||
go test
|
||||
go build
|
||||
go vet
|
||||
golint .
|
||||
go install
|
||||
make todo
|
||||
|
||||
todo:
|
||||
@grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alnum:]] *.go || true
|
||||
@grep -n TODO *.go || true
|
||||
@grep -n FIXME *.go || true
|
||||
@grep -n BUG *.go || true
|
||||
|
||||
clean:
|
||||
rm -f bufs.test mem.out *~
|
||||
|
||||
demo:
|
||||
go test -bench . -benchmem
|
||||
go test -c
|
||||
./bufs.test -test.v -test.run Foo -test.memprofile mem.out \
|
||||
-test.memprofilerate 1
|
||||
go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 \
|
||||
--edgefraction 0 -web
|
||||
@echo "Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB."
|
8
Godeps/_workspace/src/github.com/cznic/bufs/README.md
generated
vendored
Normal file
8
Godeps/_workspace/src/github.com/cznic/bufs/README.md
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
bufs
|
||||
====
|
||||
|
||||
Package bufs implements a simple buffer cache.
|
||||
|
||||
installation: go get github.com/cznic/bufs
|
||||
|
||||
documentation: http://godoc.org/github.com/cznic/bufs
|
391
Godeps/_workspace/src/github.com/cznic/bufs/bufs.go
generated
vendored
Normal file
391
Godeps/_workspace/src/github.com/cznic/bufs/bufs.go
generated
vendored
Normal file
@ -0,0 +1,391 @@
|
||||
// Copyright 2014 The bufs Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bufs implements a simple buffer cache.
|
||||
//
|
||||
// The intended use scheme is like:
|
||||
//
|
||||
// type Foo struct {
|
||||
// buffers bufs.Buffers
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// // Bar can call Qux, but not the other way around (in this example).
|
||||
// const maxFooDepth = 2
|
||||
//
|
||||
// func NewFoo() *Foo {
|
||||
// return &Foo{buffers: bufs.New(maxFooDepth), ...}
|
||||
// }
|
||||
//
|
||||
// func (f *Foo) Bar(n int) {
|
||||
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
|
||||
// defer f.buffers.Free()
|
||||
// ...
|
||||
// f.Qux(whatever)
|
||||
// }
|
||||
//
|
||||
// func (f *Foo) Qux(n int) {
|
||||
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
|
||||
// defer f.buffers.Free()
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The whole idea behind 'bufs' is that when calling e.g. Foo.Bar N times, then
|
||||
// normally, without using 'bufs', there will be 2*N (in this example) []byte
|
||||
// buffers allocated. While using 'bufs', only 2 buffers (in this example)
|
||||
// will ever be created. For large N it can be a substantial difference.
|
||||
//
|
||||
// It's not a good idea to use Buffers to cache too big buffers. The cost of
|
||||
// having a cached buffer is that the buffer is naturally not eligible for
|
||||
// garbage collection. Of course, that holds only while the Foo instance is
|
||||
// reachable, in the above example.
|
||||
//
|
||||
// The buffer count limit is intentionally "hard" (read panicking), although
|
||||
// configurable in New(). The rationale is to prevent recursive calls, using
|
||||
// Alloc, to cause excessive, "static" memory consumption. Tune the limit
|
||||
// carefully or do not use Buffers from within [mutually] recursive functions
|
||||
// where the nesting depth is not realistically bounded to some rather small
|
||||
// number.
|
||||
//
|
||||
// Buffers cannot guarantee improvements to you program performance. There may
|
||||
// be a gain in case where they fit well. Firm grasp on what your code is
|
||||
// actually doing, when and in what order is essential to proper use of
|
||||
// Buffers. It's _highly_ recommended to first do profiling and memory
|
||||
// profiling before even thinking about using 'bufs'. The real world example,
|
||||
// and cause for this package, was a first correct, yet no optimizations done
|
||||
// version of a program; producing few MB of useful data while allocating 20+GB
|
||||
// of memory. Of course the garbage collector properly kicked in, yet the
|
||||
// memory abuse caused ~80+% of run time to be spent memory management. The
|
||||
// program _was_ expected to be slow in its still development phase, but the
|
||||
// bottleneck was guessed to be in I/O. Actually the hard disk was waiting for
|
||||
// the billions bytes being allocated and zeroed. Garbage collect on low
|
||||
// memory, rinse and repeat.
|
||||
//
|
||||
// In the provided tests, TestFoo and TestFooBufs do the same simulated work,
|
||||
// except the later uses Buffers while the former does not. Suggested test runs
|
||||
// which show the differences:
|
||||
//
|
||||
// $ go test -bench . -benchmem
|
||||
//
|
||||
// or
|
||||
//
|
||||
// $ go test -c
|
||||
// $ ./bufs.test -test.v -test.run Foo -test.memprofile mem.out -test.memprofilerate 1
|
||||
// $ go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 --edgefraction 0 -web
|
||||
// $ # Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB.
|
||||
//
|
||||
// or
|
||||
//
|
||||
// $ make demo # same as all of the above
|
||||
//
|
||||
//
|
||||
// NOTE: Alloc/Free calls must be properly nested in the same way as in for
|
||||
// example BeginTransaction/EndTransaction pairs. If your code can panic then
|
||||
// the pairing should be enforced by deferred calls.
|
||||
//
|
||||
// NOTE: Buffers objects do not allocate any space until requested by Alloc,
|
||||
// the mechanism works on demand only.
|
||||
//
|
||||
// FAQ: Why the 'bufs' package name?
|
||||
//
|
||||
// Package name 'bufs' was intentionally chosen instead of the perhaps more
|
||||
// conventional 'buf'. There are already too many 'buf' named things in the
|
||||
// code out there and that'll be a source of a lot of trouble. It's a bit
|
||||
// similar situation as in the case of package "strings" (not "string").
|
||||
package bufs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Buffers type represents a buffer ([]byte) cache.
|
||||
//
|
||||
// NOTE: Do not modify Buffers directly, use only its methods. Do not create
|
||||
// additional values (copies) of Buffers, that'll break its functionality. Use
|
||||
// a pointer instead to refer to a single instance from different
|
||||
// places/scopes.
|
||||
type Buffers [][]byte
|
||||
|
||||
// New returns a newly created instance of Buffers with a maximum capacity of n
|
||||
// buffers.
|
||||
//
|
||||
// NOTE: 'bufs.New(n)' is the same as 'make(bufs.Buffers, n)'.
|
||||
func New(n int) Buffers {
|
||||
return make(Buffers, n)
|
||||
}
|
||||
|
||||
// Alloc will return a buffer such that len(r) == n. It will firstly try to
|
||||
// find an existing and unused buffer of big enough size. Only when there is no
|
||||
// such, then one of the buffer slots is reallocated to a bigger size.
|
||||
//
|
||||
// It's okay to use append with buffers returned by Alloc. But it can cause
|
||||
// allocation in that case and will again be producing load for the garbage
|
||||
// collector. The best use of Alloc is for I/O buffers where the needed size of
|
||||
// the buffer is figured out at some point of the code path in a 'final size'
|
||||
// sense. Another real world example are compression/decompression buffers.
|
||||
//
|
||||
// NOTE: The buffer returned by Alloc _is not_ zeroed. That's okay for e.g.
|
||||
// passing a buffer to io.Reader. If you need a zeroed buffer use Calloc.
|
||||
//
|
||||
// NOTE: Buffers returned from Alloc _must not_ be exposed/returned to your
|
||||
// clients. Those buffers are intended to be used strictly internally, within
|
||||
// the methods of some "object".
|
||||
//
|
||||
// NOTE: Alloc will panic if there are no buffers (buffer slots) left.
|
||||
func (p *Buffers) Alloc(n int) (r []byte) {
|
||||
b := *p
|
||||
if len(b) == 0 {
|
||||
panic(errors.New("Buffers.Alloc: out of buffers"))
|
||||
}
|
||||
|
||||
biggest, best, biggestI, bestI := -1, -1, -1, -1
|
||||
for i, v := range b {
|
||||
//ln := len(v)
|
||||
// The above was correct, buts it's just confusing. It worked
|
||||
// because not the buffers, but slices of them are returned in
|
||||
// the 'if best >= n' code path.
|
||||
ln := cap(v)
|
||||
|
||||
if ln >= biggest {
|
||||
biggest, biggestI = ln, i
|
||||
}
|
||||
|
||||
if ln >= n && (bestI < 0 || best > ln) {
|
||||
best, bestI = ln, i
|
||||
if ln == n {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
last := len(b) - 1
|
||||
if best >= n {
|
||||
r = b[bestI]
|
||||
b[last], b[bestI] = b[bestI], b[last]
|
||||
*p = b[:last]
|
||||
return r[:n]
|
||||
}
|
||||
|
||||
r = make([]byte, n, overCommit(n))
|
||||
b[biggestI] = r
|
||||
b[last], b[biggestI] = b[biggestI], b[last]
|
||||
*p = b[:last]
|
||||
return
|
||||
}
|
||||
|
||||
// Calloc will acquire a buffer using Alloc and then clears it to zeros. The
|
||||
// zeroing goes up to n, not cap(r).
|
||||
func (p *Buffers) Calloc(n int) (r []byte) {
|
||||
r = p.Alloc(n)
|
||||
for i := range r {
|
||||
r[i] = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Free makes the lastly allocated by Alloc buffer free (available) again for
|
||||
// Alloc.
|
||||
//
|
||||
// NOTE: Improper Free invocations, like in the sequence {New, Alloc, Free,
|
||||
// Free}, will panic.
|
||||
func (p *Buffers) Free() {
|
||||
b := *p
|
||||
b = b[:len(b)+1]
|
||||
*p = b
|
||||
}
|
||||
|
||||
// Stats reports memory consumed by Buffers, without accounting for some
|
||||
// (smallish) additional overhead.
|
||||
func (p *Buffers) Stats() (bytes int) {
|
||||
b := *p
|
||||
b = b[:cap(b)]
|
||||
for _, v := range b {
|
||||
bytes += cap(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Cache caches buffers ([]byte). A zero value of Cache is ready for use.
|
||||
//
|
||||
// NOTE: Do not modify a Cache directly, use only its methods. Do not create
|
||||
// additional values (copies) of a Cache, that'll break its functionality. Use
|
||||
// a pointer instead to refer to a single instance from different
|
||||
// places/scopes.
|
||||
type Cache [][]byte
|
||||
|
||||
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
|
||||
// a biggest cached buffer is resized to have length n and returned. If there
|
||||
// are no cached items at all, Get returns a newly allocated buffer.
|
||||
//
|
||||
// In other words the cache policy is:
|
||||
//
|
||||
// - If the cache is empty, the buffer must be newly created and returned.
|
||||
// Cache remains empty.
|
||||
//
|
||||
// - If a buffer of sufficient size is found in the cache, remove it from the
|
||||
// cache and return it.
|
||||
//
|
||||
// - Otherwise the cache is non empty, but no cached buffer is big enough.
|
||||
// Enlarge the biggest cached buffer, remove it from the cache and return it.
|
||||
// This provide cached buffers size adjustment based on demand.
|
||||
//
|
||||
// In short, if the cache is not empty, Get guarantees to make it always one
|
||||
// item less. This rules prevent uncontrolled cache grow in some scenarios.
|
||||
// The older policy was not preventing that. Another advantage is better cached
|
||||
// buffers sizes "auto tuning", although not in every possible use case.
|
||||
//
|
||||
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
|
||||
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
|
||||
// use Cget.
|
||||
func (c *Cache) Get(n int) []byte {
|
||||
r, _ := c.get(n)
|
||||
return r
|
||||
}
|
||||
|
||||
func (c *Cache) get(n int) (r []byte, isZeroed bool) {
|
||||
s := *c
|
||||
lens := len(s)
|
||||
if lens == 0 {
|
||||
r, isZeroed = make([]byte, n, overCommit(n)), true
|
||||
return
|
||||
}
|
||||
|
||||
i := sort.Search(lens, func(x int) bool { return len(s[x]) >= n })
|
||||
if i == lens {
|
||||
i--
|
||||
s[i] = make([]byte, n, overCommit(n))
|
||||
}
|
||||
r = s[i][:n]
|
||||
copy(s[i:], s[i+1:])
|
||||
s[lens-1] = nil
|
||||
s = s[:lens-1]
|
||||
*c = s
|
||||
return r, false
|
||||
}
|
||||
|
||||
// Cget will acquire a buffer using Get and then clears it to zeros. The
|
||||
// zeroing goes up to n, not cap(r).
|
||||
func (c *Cache) Cget(n int) (r []byte) {
|
||||
r, ok := c.get(n)
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
|
||||
for i := range r {
|
||||
r[i] = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Put caches b for possible later reuse (via Get). No other references to b's
|
||||
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
|
||||
func (c *Cache) Put(b []byte) {
|
||||
b = b[:cap(b)]
|
||||
lenb := len(b)
|
||||
if lenb == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
s := *c
|
||||
lens := len(s)
|
||||
i := sort.Search(lens, func(x int) bool { return len(s[x]) >= lenb })
|
||||
s = append(s, nil)
|
||||
copy(s[i+1:], s[i:])
|
||||
s[i] = b
|
||||
*c = s
|
||||
return
|
||||
}
|
||||
|
||||
// Stats reports memory consumed by a Cache, without accounting for some
|
||||
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
|
||||
// is their combined capacity.
|
||||
func (c Cache) Stats() (n, bytes int) {
|
||||
n = len(c)
|
||||
for _, v := range c {
|
||||
bytes += cap(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CCache is a Cache which is safe for concurrent use by multiple goroutines.
|
||||
type CCache struct {
|
||||
c Cache
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
|
||||
// a biggest cached buffer is resized to have length n and returned. If there
|
||||
// are no cached items at all, Get returns a newly allocated buffer.
|
||||
//
|
||||
// In other words the cache policy is:
|
||||
//
|
||||
// - If the cache is empty, the buffer must be newly created and returned.
|
||||
// Cache remains empty.
|
||||
//
|
||||
// - If a buffer of sufficient size is found in the cache, remove it from the
|
||||
// cache and return it.
|
||||
//
|
||||
// - Otherwise the cache is non empty, but no cached buffer is big enough.
|
||||
// Enlarge the biggest cached buffer, remove it from the cache and return it.
|
||||
// This provide cached buffers size adjustment based on demand.
|
||||
//
|
||||
// In short, if the cache is not empty, Get guarantees to make it always one
|
||||
// item less. This rules prevent uncontrolled cache grow in some scenarios.
|
||||
// The older policy was not preventing that. Another advantage is better cached
|
||||
// buffers sizes "auto tuning", although not in every possible use case.
|
||||
//
|
||||
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
|
||||
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
|
||||
// use Cget.
|
||||
func (c *CCache) Get(n int) []byte {
|
||||
c.mu.Lock()
|
||||
r, _ := c.c.get(n)
|
||||
c.mu.Unlock()
|
||||
return r
|
||||
}
|
||||
|
||||
// Cget will acquire a buffer using Get and then clears it to zeros. The
|
||||
// zeroing goes up to n, not cap(r).
|
||||
func (c *CCache) Cget(n int) (r []byte) {
|
||||
c.mu.Lock()
|
||||
r = c.c.Cget(n)
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Put caches b for possible later reuse (via Get). No other references to b's
|
||||
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
|
||||
func (c *CCache) Put(b []byte) {
|
||||
c.mu.Lock()
|
||||
c.c.Put(b)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Stats reports memory consumed by a Cache, without accounting for some
|
||||
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
|
||||
// is their combined capacity.
|
||||
func (c *CCache) Stats() (n, bytes int) {
|
||||
c.mu.Lock()
|
||||
n, bytes = c.c.Stats()
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// GCache is a ready to use global instance of a CCache.
|
||||
var GCache CCache
|
||||
|
||||
func overCommit(n int) int {
|
||||
switch {
|
||||
case n < 8:
|
||||
return 8
|
||||
case n < 1e5:
|
||||
return 2 * n
|
||||
case n < 1e6:
|
||||
return 3 * n / 2
|
||||
default:
|
||||
return n
|
||||
}
|
||||
}
|
174
Godeps/_workspace/src/github.com/cznic/bufs/bufs_test.go
generated
vendored
Normal file
174
Godeps/_workspace/src/github.com/cznic/bufs/bufs_test.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
// Copyright 2014 The bufs Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bufs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var dbg = func(s string, va ...interface{}) {
|
||||
_, fn, fl, _ := runtime.Caller(1)
|
||||
fmt.Printf("%s:%d: ", path.Base(fn), fl)
|
||||
fmt.Printf(s, va...)
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
func Test0(t *testing.T) {
|
||||
b := New(0)
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
|
||||
b.Alloc(1)
|
||||
t.Fatal("unexpected success")
|
||||
}
|
||||
|
||||
func Test1(t *testing.T) {
|
||||
b := New(1)
|
||||
expected := false
|
||||
defer func() {
|
||||
if e := recover(); e != nil && !expected {
|
||||
t.Fatal(fmt.Errorf("%v", e))
|
||||
}
|
||||
}()
|
||||
|
||||
b.Alloc(1)
|
||||
expected = true
|
||||
b.Alloc(1)
|
||||
t.Fatal("unexpected success")
|
||||
}
|
||||
|
||||
func Test2(t *testing.T) {
|
||||
b := New(1)
|
||||
expected := false
|
||||
defer func() {
|
||||
if e := recover(); e != nil && !expected {
|
||||
t.Fatal(fmt.Errorf("%v", e))
|
||||
}
|
||||
}()
|
||||
|
||||
b.Alloc(1)
|
||||
b.Free()
|
||||
b.Alloc(1)
|
||||
expected = true
|
||||
b.Alloc(1)
|
||||
t.Fatal("unexpected success")
|
||||
}
|
||||
|
||||
func Test3(t *testing.T) {
|
||||
b := New(1)
|
||||
expected := false
|
||||
defer func() {
|
||||
if e := recover(); e != nil && !expected {
|
||||
t.Fatal(fmt.Errorf("%v", e))
|
||||
}
|
||||
}()
|
||||
|
||||
b.Alloc(1)
|
||||
b.Free()
|
||||
expected = true
|
||||
b.Free()
|
||||
t.Fatal("unexpected success")
|
||||
}
|
||||
|
||||
const (
|
||||
N = 1e5
|
||||
bufSize = 1 << 12
|
||||
)
|
||||
|
||||
type Foo struct {
|
||||
result []byte
|
||||
}
|
||||
|
||||
func NewFoo() *Foo {
|
||||
return &Foo{}
|
||||
}
|
||||
|
||||
func (f *Foo) Bar(n int) {
|
||||
buf := make([]byte, n)
|
||||
sum := 0
|
||||
for _, v := range buf {
|
||||
sum += int(v)
|
||||
}
|
||||
f.result = append(f.result, byte(sum))
|
||||
f.Qux(n)
|
||||
}
|
||||
|
||||
func (f *Foo) Qux(n int) {
|
||||
buf := make([]byte, n)
|
||||
sum := 0
|
||||
for _, v := range buf {
|
||||
sum += int(v)
|
||||
}
|
||||
f.result = append(f.result, byte(sum))
|
||||
}
|
||||
|
||||
type FooBufs struct {
|
||||
buffers Buffers
|
||||
result []byte
|
||||
}
|
||||
|
||||
const maxFooDepth = 2
|
||||
|
||||
func NewFooBufs() *FooBufs {
|
||||
return &FooBufs{buffers: New(maxFooDepth)}
|
||||
}
|
||||
|
||||
func (f *FooBufs) Bar(n int) {
|
||||
buf := f.buffers.Alloc(n)
|
||||
defer f.buffers.Free()
|
||||
|
||||
sum := 0
|
||||
for _, v := range buf {
|
||||
sum += int(v)
|
||||
}
|
||||
f.result = append(f.result, byte(sum))
|
||||
f.Qux(n)
|
||||
}
|
||||
|
||||
func (f *FooBufs) Qux(n int) {
|
||||
buf := f.buffers.Alloc(n)
|
||||
defer f.buffers.Free()
|
||||
|
||||
sum := 0
|
||||
for _, v := range buf {
|
||||
sum += int(v)
|
||||
}
|
||||
f.result = append(f.result, byte(sum))
|
||||
}
|
||||
|
||||
func TestFoo(t *testing.T) {
|
||||
foo := NewFoo()
|
||||
for i := 0; i < N; i++ {
|
||||
foo.Bar(bufSize)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFooBufs(t *testing.T) {
|
||||
foo := NewFooBufs()
|
||||
for i := 0; i < N; i++ {
|
||||
foo.Bar(bufSize)
|
||||
}
|
||||
t.Log("buffers.Stats()", foo.buffers.Stats())
|
||||
}
|
||||
|
||||
func BenchmarkFoo(b *testing.B) {
|
||||
b.SetBytes(2 * bufSize)
|
||||
foo := NewFoo()
|
||||
for i := 0; i < b.N; i++ {
|
||||
foo.Bar(bufSize)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkFooBufs(b *testing.B) {
|
||||
b.SetBytes(2 * bufSize)
|
||||
foo := NewFooBufs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
foo.Bar(bufSize)
|
||||
}
|
||||
}
|
324
Godeps/_workspace/src/github.com/cznic/exp/lldb/2pc.go
generated
vendored
Normal file
324
Godeps/_workspace/src/github.com/cznic/exp/lldb/2pc.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Two Phase Commit & Structural ACID
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = &ACIDFiler0{} // Ensure ACIDFiler0 is a Filer
|
||||
|
||||
type acidWrite struct {
|
||||
b []byte
|
||||
off int64
|
||||
}
|
||||
|
||||
type acidWriter0 ACIDFiler0
|
||||
|
||||
func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
f := (*ACIDFiler0)(a)
|
||||
if f.bwal == nil { // new epoch
|
||||
f.data = f.data[:0]
|
||||
f.bwal = bufio.NewWriter(f.wal)
|
||||
if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = a.writePacket([]interface{}{wpt00WriteData, b, off}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f.data = append(f.data, acidWrite{b, off})
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (a *acidWriter0) writePacket(items []interface{}) (err error) {
|
||||
f := (*ACIDFiler0)(a)
|
||||
b, err := EncodeScalars(items...)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var b4 [4]byte
|
||||
binary.BigEndian.PutUint32(b4[:], uint32(len(b)))
|
||||
if _, err = f.bwal.Write(b4[:]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = f.bwal.Write(b); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m := (4 + len(b)) % 16; m != 0 {
|
||||
var pad [15]byte
|
||||
_, err = f.bwal.Write(pad[:16-m])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WAL Packet Tags
|
||||
const (
|
||||
wpt00Header = iota
|
||||
wpt00WriteData
|
||||
wpt00Checkpoint
|
||||
)
|
||||
|
||||
const (
|
||||
walTypeACIDFiler0 = iota
|
||||
)
|
||||
|
||||
// ACIDFiler0 is a very simple, synchronous implementation of 2PC. It uses a
|
||||
// single write ahead log file to provide the structural atomicity
|
||||
// (BeginUpdate/EndUpdate/Rollback) and durability (DB can be recovered from
|
||||
// WAL if a crash occurred).
|
||||
//
|
||||
// ACIDFiler0 is a Filer.
|
||||
//
|
||||
// NOTE: Durable synchronous 2PC involves three fsyncs in this implementation
|
||||
// (WAL, DB, zero truncated WAL). Where possible, it's recommended to collect
|
||||
// transactions for, say one second before performing the two phase commit as
|
||||
// the typical performance for rotational hard disks is about few tens of
|
||||
// fsyncs per second atmost. For an example of such collective transaction
|
||||
// approach please see the colecting FSM STT in Dbm's documentation[1].
|
||||
//
|
||||
// [1]: http://godoc.org/github.com/cznic/exp/dbm
|
||||
type ACIDFiler0 struct {
|
||||
*RollbackFiler
|
||||
wal *os.File
|
||||
bwal *bufio.Writer
|
||||
data []acidWrite
|
||||
testHook bool // keeps WAL untruncated (once)
|
||||
peakWal int64 // tracks WAL maximum used size
|
||||
peakBitFilerPages int // track maximum transaction memory
|
||||
}
|
||||
|
||||
// NewACIDFiler0 returns a newly created ACIDFiler0 with WAL in wal.
|
||||
//
|
||||
// If the WAL is zero sized then a previous clean shutdown of db is taken for
|
||||
// granted and no recovery procedure is taken.
|
||||
//
|
||||
// If the WAL is of non zero size then it is checked for having a
|
||||
// commited/fully finished transaction not yet been reflected in db. If such
|
||||
// transaction exists it's committed to db. If the recovery process finishes
|
||||
// successfully, the WAL is truncated to zero size and fsync'ed prior to return
|
||||
// from NewACIDFiler0.
|
||||
func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
|
||||
fi, err := wal.Stat()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r = &ACIDFiler0{wal: wal}
|
||||
|
||||
if fi.Size() != 0 {
|
||||
if err = r.recoverDb(db); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
acidWriter := (*acidWriter0)(r)
|
||||
|
||||
if r.RollbackFiler, err = NewRollbackFiler(
|
||||
db,
|
||||
func(sz int64) (err error) {
|
||||
// Checkpoint
|
||||
if err = acidWriter.writePacket([]interface{}{wpt00Checkpoint, sz}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.bwal.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.bwal = nil
|
||||
|
||||
if err = r.wal.Sync(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
wfi, err := r.wal.Stat()
|
||||
switch err != nil {
|
||||
case true:
|
||||
// unexpected, but ignored
|
||||
case false:
|
||||
r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
|
||||
}
|
||||
|
||||
// Phase 1 commit complete
|
||||
|
||||
for _, v := range r.data {
|
||||
if _, err := db.WriteAt(v.b, v.off); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = db.Truncate(sz); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = db.Sync(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 2 commit complete
|
||||
|
||||
if !r.testHook {
|
||||
if err = r.wal.Truncate(0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = r.wal.Seek(0, 0); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.testHook = false
|
||||
return r.wal.Sync()
|
||||
|
||||
},
|
||||
acidWriter,
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// PeakWALSize reports the maximum size WAL has ever used.
|
||||
func (a ACIDFiler0) PeakWALSize() int64 {
|
||||
return a.peakWal
|
||||
}
|
||||
|
||||
func (a *ACIDFiler0) readPacket(f *bufio.Reader) (items []interface{}, err error) {
|
||||
var b4 [4]byte
|
||||
n, err := io.ReadAtLeast(f, b4[:], 4)
|
||||
if n != 4 {
|
||||
return
|
||||
}
|
||||
|
||||
ln := int(binary.BigEndian.Uint32(b4[:]))
|
||||
m := (4 + ln) % 16
|
||||
padd := (16 - m) % 16
|
||||
b := make([]byte, ln+padd)
|
||||
if n, err = io.ReadAtLeast(f, b, len(b)); n != len(b) {
|
||||
return
|
||||
}
|
||||
|
||||
return DecodeScalars(b[:ln])
|
||||
}
|
||||
|
||||
func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
|
||||
fi, err := a.wal.Stat()
|
||||
if err != nil {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: err}
|
||||
}
|
||||
|
||||
if sz := fi.Size(); sz%16 != 0 {
|
||||
return &ErrILSEQ{Type: ErrFileSize, Name: a.wal.Name(), Arg: sz}
|
||||
}
|
||||
|
||||
f := bufio.NewReader(a.wal)
|
||||
items, err := a.readPacket(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
|
||||
}
|
||||
|
||||
tr := NewBTree(nil)
|
||||
|
||||
for {
|
||||
items, err = a.readPacket(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(items) < 2 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("too few packet items %#v", items)}
|
||||
}
|
||||
|
||||
switch items[0] {
|
||||
case int64(wpt00WriteData):
|
||||
if len(items) != 3 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid data packet items %#v", items)}
|
||||
}
|
||||
|
||||
b, off := items[1].([]byte), items[2].(int64)
|
||||
var key [8]byte
|
||||
binary.BigEndian.PutUint64(key[:], uint64(off))
|
||||
if err = tr.Set(key[:], b); err != nil {
|
||||
return
|
||||
}
|
||||
case int64(wpt00Checkpoint):
|
||||
var b1 [1]byte
|
||||
if n, err := f.Read(b1[:]); n != 0 || err == nil {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint n %d, err %v", n, err)}
|
||||
}
|
||||
|
||||
if len(items) != 2 {
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint packet invalid items %#v", items)}
|
||||
}
|
||||
|
||||
sz := items[1].(int64)
|
||||
enum, err := tr.seekFirst()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
k, v, err := enum.current()
|
||||
if err != nil {
|
||||
if fileutil.IsEOF(err) {
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = db.WriteAt(v, int64(binary.BigEndian.Uint64(k))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = enum.next(); err != nil {
|
||||
if fileutil.IsEOF(err) {
|
||||
break
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = db.Truncate(sz); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = db.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Recovery complete
|
||||
|
||||
if err = a.wal.Truncate(0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return a.wal.Sync()
|
||||
default:
|
||||
return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("packet tag %v", items[0])}
|
||||
}
|
||||
}
|
||||
}
|
44
Godeps/_workspace/src/github.com/cznic/exp/lldb/2pc_docs.go
generated
vendored
Normal file
44
Godeps/_workspace/src/github.com/cznic/exp/lldb/2pc_docs.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
|
||||
Anatomy of a WAL file
|
||||
|
||||
WAL file
|
||||
A sequence of packets
|
||||
|
||||
WAL packet, parts in slice notation
|
||||
[0:4], 4 bytes: N uint32 // network byte order
|
||||
[4:4+N], N bytes: payload []byte // gb encoded scalars
|
||||
|
||||
Packets, including the 4 byte 'size' prefix, MUST BE padded to size == 0 (mod
|
||||
16). The values of the padding bytes MUST BE zero.
|
||||
|
||||
Encoded scalars first item is a packet type number (packet tag). The meaning of
|
||||
any other item(s) of the payload depends on the packet tag.
|
||||
|
||||
Packet definitions
|
||||
|
||||
{wpt00Header int, typ int, s string}
|
||||
typ: Must be zero (ACIDFiler0 file).
|
||||
s: Any comment string, empty string is okay.
|
||||
|
||||
This packet must be present only once - as the first packet of
|
||||
a WAL file.
|
||||
|
||||
{wpt00WriteData int, b []byte, off int64}
|
||||
Write data (WriteAt(b, off)).
|
||||
|
||||
{wpt00Checkpoint int, sz int64}
|
||||
Checkpoint (Truncate(sz)).
|
||||
|
||||
This packet must be present only once - as the last packet of
|
||||
a WAL file.
|
||||
|
||||
*/
|
||||
|
||||
package lldb
|
||||
|
||||
//TODO optimize bitfiler/wal/2pc data above final size
|
285
Godeps/_workspace/src/github.com/cznic/exp/lldb/2pc_test.go
generated
vendored
Normal file
285
Godeps/_workspace/src/github.com/cznic/exp/lldb/2pc_test.go
generated
vendored
Normal file
@ -0,0 +1,285 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Two Phase Commit & Structural ACID
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = &truncFiler{}
|
||||
|
||||
type truncFiler struct {
|
||||
f Filer
|
||||
fake *MemFiler
|
||||
totalWritten int // Including silently dropped
|
||||
realWritten int
|
||||
limit int // -1: unlimited, n: silently stop writing after limit bytes
|
||||
}
|
||||
|
||||
func NewTruncFiler(f Filer, limit int) *truncFiler {
|
||||
return &truncFiler{f: f, fake: NewMemFiler(), limit: limit}
|
||||
}
|
||||
|
||||
func (f *truncFiler) BeginUpdate() error { panic("internal error") }
|
||||
func (f *truncFiler) Close() error { return f.f.Close() }
|
||||
func (f *truncFiler) EndUpdate() error { panic("internal error") }
|
||||
func (f *truncFiler) Name() string { return f.f.Name() }
|
||||
func (f *truncFiler) PunchHole(off, sz int64) error { panic("internal error") }
|
||||
func (f *truncFiler) ReadAt(b []byte, off int64) (int, error) { return f.fake.ReadAt(b, off) }
|
||||
func (f *truncFiler) Rollback() error { panic("internal error") }
|
||||
func (f *truncFiler) Size() (int64, error) { return f.fake.Size() }
|
||||
func (f *truncFiler) Sync() error { return f.f.Sync() }
|
||||
|
||||
func (f *truncFiler) Truncate(sz int64) error {
|
||||
f.fake.Truncate(sz)
|
||||
return f.f.Truncate(sz)
|
||||
}
|
||||
|
||||
func (f *truncFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
rq := len(b)
|
||||
n = f.totalWritten
|
||||
if lim := f.limit; lim >= 0 && n+rq > lim {
|
||||
over := n + rq - lim
|
||||
rq -= over
|
||||
rq = mathutil.Max(rq, 0)
|
||||
}
|
||||
|
||||
if n, err = f.fake.WriteAt(b, off); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
f.totalWritten += n
|
||||
if rq != 0 {
|
||||
n, err := f.f.WriteAt(b[:rq], off)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
f.realWritten += n
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Verify memory BTrees don't have maxRq limits.
|
||||
func TestACID0MemBTreeCaps(t *testing.T) {
|
||||
rng := rand.New(rand.NewSource(42))
|
||||
tr := NewBTree(nil)
|
||||
b := make([]byte, 2*maxRq)
|
||||
for i := range b {
|
||||
b[i] = byte(rng.Int())
|
||||
}
|
||||
|
||||
if err := tr.Set(nil, b); err != nil {
|
||||
t.Fatal(len(b), err)
|
||||
}
|
||||
|
||||
g, err := tr.Get(nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(g, b) {
|
||||
t.Fatal("data mismatach")
|
||||
}
|
||||
}
|
||||
|
||||
func TestACIDFiler0(t *testing.T) {
|
||||
const SZ = 1 << 17
|
||||
|
||||
// Phase 1: Create a DB, fill with it with data.
|
||||
|
||||
wal, err := ioutil.TempFile("", "test-acidfiler0-wal-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !*oKeep {
|
||||
defer os.Remove(wal.Name())
|
||||
}
|
||||
|
||||
db, err := ioutil.TempFile("", "test-acidfiler0-db-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dbName := db.Name()
|
||||
if !*oKeep {
|
||||
defer os.Remove(db.Name())
|
||||
}
|
||||
|
||||
realFiler := NewSimpleFileFiler(db)
|
||||
truncFiler := NewTruncFiler(realFiler, -1)
|
||||
acidFiler, err := NewACIDFiler(truncFiler, wal)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = acidFiler.BeginUpdate(); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
a, err := NewAllocator(acidFiler, &Options{})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
a.Compress = true
|
||||
|
||||
tr, h, err := CreateBTree(a, nil)
|
||||
if h != 1 || err != nil {
|
||||
t.Error(h, err)
|
||||
return
|
||||
}
|
||||
|
||||
rng := rand.New(rand.NewSource(42))
|
||||
var key, val [8]byte
|
||||
ref := map[int64]int64{}
|
||||
|
||||
for {
|
||||
sz, err := acidFiler.Size()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if sz > SZ {
|
||||
break
|
||||
}
|
||||
|
||||
k, v := rng.Int63(), rng.Int63()
|
||||
ref[k] = v
|
||||
binary.BigEndian.PutUint64(key[:], uint64(k))
|
||||
binary.BigEndian.PutUint64(val[:], uint64(v))
|
||||
if err := tr.Set(key[:], val[:]); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
acidFiler.testHook = true // keep WAL
|
||||
|
||||
if err := acidFiler.EndUpdate(); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := acidFiler.Close(); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := wal.Sync(); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = wal.Seek(0, 0); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 2: Reopen and verify structure and data.
|
||||
db, err = os.OpenFile(dbName, os.O_RDWR, 0666)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
filer := NewSimpleFileFiler(db)
|
||||
a, err = NewAllocator(filer, &Options{})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = a.Verify(NewMemFiler(), nil, nil); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
tr, err = OpenBTree(a, nil, 1)
|
||||
for k, v := range ref {
|
||||
binary.BigEndian.PutUint64(key[:], uint64(k))
|
||||
binary.BigEndian.PutUint64(val[:], uint64(v))
|
||||
var b []byte
|
||||
b, err = tr.Get(b, key[:])
|
||||
if err != nil || b == nil || !bytes.Equal(b, val[:]) {
|
||||
t.Error(err, b, val[:])
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
okImage, err := ioutil.ReadFile(dbName)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 3: Simulate a crash
|
||||
sz, err := filer.Size()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
sz /= 2
|
||||
if err := db.Truncate(sz); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
z := make([]byte, sz/3)
|
||||
n, err := db.WriteAt(z, sz/3)
|
||||
if n != len(z) {
|
||||
t.Error(n, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := db.Sync(); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 4: Open the corrupted DB
|
||||
filer = NewSimpleFileFiler(db)
|
||||
acidFiler, err = NewACIDFiler(filer, wal)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = acidFiler.Sync(); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = acidFiler.Close(); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Phase 5: Verify DB was recovered.
|
||||
newImage, err := ioutil.ReadFile(dbName)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.Equal(okImage, newImage) {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
}
|
11
Godeps/_workspace/src/github.com/cznic/exp/lldb/AUTHORS
generated
vendored
Normal file
11
Godeps/_workspace/src/github.com/cznic/exp/lldb/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
# This file lists authors for copyright purposes. This file is distinct from
|
||||
# the CONTRIBUTORS files. See the latter for an explanation.
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Jan Mercl <0xjnml@gmail.com>
|
9
Godeps/_workspace/src/github.com/cznic/exp/lldb/CONTRIBUTORS
generated
vendored
Normal file
9
Godeps/_workspace/src/github.com/cznic/exp/lldb/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
# This file lists people who contributed code to this repository. The AUTHORS
|
||||
# file lists the copyright holders; this file lists people.
|
||||
#
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Jan Mercl <0xjnml@gmail.com>
|
27
Godeps/_workspace/src/github.com/cznic/exp/lldb/LICENSE
generated
vendored
Normal file
27
Godeps/_workspace/src/github.com/cznic/exp/lldb/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2014 The lldb Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the names of the authors nor the names of the
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
45
Godeps/_workspace/src/github.com/cznic/exp/lldb/Makefile
generated
vendored
Normal file
45
Godeps/_workspace/src/github.com/cznic/exp/lldb/Makefile
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
# Copyright 2014 The lldb Authors. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
.PHONY: all editor clean cover nuke
|
||||
|
||||
testbin=lldb.test
|
||||
grep=--include=*.go
|
||||
|
||||
all: editor
|
||||
go build
|
||||
go vet
|
||||
golint .
|
||||
go install
|
||||
make todo
|
||||
|
||||
clean:
|
||||
go clean
|
||||
rm -f *~ cov cov.html bad-dump good-dump lldb.test old.txt new.txt \
|
||||
test-acidfiler0-* _test.db _wal
|
||||
|
||||
cover:
|
||||
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
|
||||
|
||||
editor:
|
||||
go fmt
|
||||
go test -i
|
||||
go test -timeout 1h
|
||||
|
||||
mem:
|
||||
go test -c
|
||||
./$(testbin) -test.bench . -test.memprofile mem.out -test.memprofilerate 1 -test.timeout 24h
|
||||
go tool pprof --lines --web --alloc_space $(testbin) mem.out
|
||||
|
||||
nuke: clean
|
||||
go clean -i
|
||||
|
||||
todo:
|
||||
@grep -nr $(grep) BUG * || true
|
||||
@grep -nr $(grep) LATER * || true
|
||||
@grep -nr $(grep) MAYBE * || true
|
||||
@grep -nr $(grep) TODO * || true
|
||||
@grep -nr $(grep) FIXME * || true
|
||||
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
|
||||
@grep -nr $(grep) println * || true
|
8
Godeps/_workspace/src/github.com/cznic/exp/lldb/README.md
generated
vendored
Normal file
8
Godeps/_workspace/src/github.com/cznic/exp/lldb/README.md
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
lldb
|
||||
====
|
||||
|
||||
Package lldb (WIP) implements a low level database engine.
|
||||
|
||||
Installation: $ go get github.com/cznic/exp/lldb
|
||||
|
||||
Documentation: [godoc.org/github.com/cznic/exp/lldb](http://godoc.org/github.com/cznic/exp/lldb)
|
43
Godeps/_workspace/src/github.com/cznic/exp/lldb/all_test.go
generated
vendored
Normal file
43
Godeps/_workspace/src/github.com/cznic/exp/lldb/all_test.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
testDbName = "_test.db"
|
||||
walName = "_wal"
|
||||
)
|
||||
|
||||
func now() time.Time { return time.Now() }
|
||||
|
||||
func hdump(b []byte) string {
|
||||
return hex.Dump(b)
|
||||
}
|
||||
|
||||
func die() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func stack() string {
|
||||
buf := make([]byte, 1<<16)
|
||||
return string(buf[:runtime.Stack(buf, false)])
|
||||
}
|
||||
|
||||
func temp() (dir, name string) {
|
||||
dir, err := ioutil.TempDir("", "test-lldb-")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return dir, filepath.Join(dir, "test.tmp")
|
||||
}
|
2297
Godeps/_workspace/src/github.com/cznic/exp/lldb/btree.go
generated
vendored
Normal file
2297
Godeps/_workspace/src/github.com/cznic/exp/lldb/btree.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1887
Godeps/_workspace/src/github.com/cznic/exp/lldb/btree_test.go
generated
vendored
Normal file
1887
Godeps/_workspace/src/github.com/cznic/exp/lldb/btree_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
170
Godeps/_workspace/src/github.com/cznic/exp/lldb/errors.go
generated
vendored
Normal file
170
Godeps/_workspace/src/github.com/cznic/exp/lldb/errors.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Some errors returned by this package.
|
||||
//
|
||||
// Note that this package can return more errors than declared here, for
|
||||
// example io.EOF from Filer.ReadAt().
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrDecodeScalars is possibly returned from DecodeScalars
|
||||
type ErrDecodeScalars struct {
|
||||
B []byte // Data being decoded
|
||||
I int // offending offset
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrDecodeScalars) Error() string {
|
||||
return fmt.Sprintf("DecodeScalars: corrupted data @ %d/%d", e.I, len(e.B))
|
||||
}
|
||||
|
||||
// ErrINVAL reports invalid values passed as parameters, for example negative
|
||||
// offsets where only non-negative ones are allowed or read from the DB.
|
||||
type ErrINVAL struct {
|
||||
Src string
|
||||
Val interface{}
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrINVAL) Error() string {
|
||||
return fmt.Sprintf("%s: %+v", e.Src, e.Val)
|
||||
}
|
||||
|
||||
// ErrPERM is for example reported when a Filer is closed while BeginUpdate(s)
|
||||
// are not balanced with EndUpdate(s)/Rollback(s) or when EndUpdate or Rollback
|
||||
// is invoked which is not paired with a BeginUpdate.
|
||||
type ErrPERM struct {
|
||||
Src string
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrPERM) Error() string {
|
||||
return fmt.Sprintf("%s: Operation not permitted", string(e.Src))
|
||||
}
|
||||
|
||||
// ErrTag represents an ErrILSEQ kind.
|
||||
type ErrType int
|
||||
|
||||
// ErrILSEQ types
|
||||
const (
|
||||
ErrOther ErrType = iota
|
||||
|
||||
ErrAdjacentFree // Adjacent free blocks (.Off and .Arg)
|
||||
ErrDecompress // Used compressed block: corrupted compression
|
||||
ErrExpFreeTag // Expected a free block tag, got .Arg
|
||||
ErrExpUsedTag // Expected a used block tag, got .Arg
|
||||
ErrFLT // Free block is invalid or referenced multiple times
|
||||
ErrFLTLoad // FLT truncated to .Off, need size >= .Arg
|
||||
ErrFLTSize // Free block size (.Arg) doesn't belong to its list min size: .Arg2
|
||||
ErrFileSize // File .Name size (.Arg) != 0 (mod 16)
|
||||
ErrFreeChaining // Free block, .prev.next doesn't point back to this block
|
||||
ErrFreeTailBlock // Last block is free
|
||||
ErrHead // Head of a free block list has non zero Prev (.Arg)
|
||||
ErrInvalidRelocTarget // Reloc doesn't target (.Arg) a short or long used block
|
||||
ErrInvalidWAL // Corrupted write ahead log. .Name: file name, .More: more
|
||||
ErrLongFreeBlkTooLong // Long free block spans beyond EOF, size .Arg
|
||||
ErrLongFreeBlkTooShort // Long free block must have at least 2 atoms, got only .Arg
|
||||
ErrLongFreeNextBeyondEOF // Long free block .Next (.Arg) spans beyond EOF
|
||||
ErrLongFreePrevBeyondEOF // Long free block .Prev (.Arg) spans beyond EOF
|
||||
ErrLongFreeTailTag // Expected a long free block tail tag, got .Arg
|
||||
ErrLostFreeBlock // Free block is not in any FLT list
|
||||
ErrNullReloc // Used reloc block with nil target
|
||||
ErrRelocBeyondEOF // Used reloc points (.Arg) beyond EOF
|
||||
ErrShortFreeTailTag // Expected a short free block tail tag, got .Arg
|
||||
ErrSmall // Request for a free block (.Arg) returned a too small one (.Arg2) at .Off
|
||||
ErrTailTag // Block at .Off has invalid tail CC (compression code) tag, got .Arg
|
||||
ErrUnexpReloc // Unexpected reloc block referred to from reloc block .Arg
|
||||
ErrVerifyPadding // Used block has nonzero padding
|
||||
ErrVerifyTailSize // Long free block size .Arg but tail size .Arg2
|
||||
ErrVerifyUsedSpan // Used block size (.Arg) spans beyond EOF
|
||||
)
|
||||
|
||||
// ErrILSEQ reports a corrupted file format. Details in fields according to Type.
|
||||
type ErrILSEQ struct {
|
||||
Type ErrType
|
||||
Off int64
|
||||
Arg int64
|
||||
Arg2 int64
|
||||
Arg3 int64
|
||||
Name string
|
||||
More interface{}
|
||||
}
|
||||
|
||||
// Error implements the built in error type.
|
||||
func (e *ErrILSEQ) Error() string {
|
||||
switch e.Type {
|
||||
case ErrAdjacentFree:
|
||||
return fmt.Sprintf("Adjacent free blocks at offset %#x and %#x", e.Off, e.Arg)
|
||||
case ErrDecompress:
|
||||
return fmt.Sprintf("Compressed block at offset %#x: Corrupted compressed content", e.Off)
|
||||
case ErrExpFreeTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected a free block tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrExpUsedTag:
|
||||
return fmt.Sprintf("Block at ofset %#x: Expected a used block tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrFLT:
|
||||
return fmt.Sprintf("Free block at offset %#x is invalid or referenced multiple times", e.Off)
|
||||
case ErrFLTLoad:
|
||||
return fmt.Sprintf("FLT truncated to size %d, expected at least %d", e.Off, e.Arg)
|
||||
case ErrFLTSize:
|
||||
return fmt.Sprintf("Free block at offset %#x has size (%#x) should be at least (%#x)", e.Off, e.Arg, e.Arg2)
|
||||
case ErrFileSize:
|
||||
return fmt.Sprintf("File %q size (%#x) != 0 (mod 16)", e.Name, e.Arg)
|
||||
case ErrFreeChaining:
|
||||
return fmt.Sprintf("Free block at offset %#x: .prev.next doesn point back here.", e.Off)
|
||||
case ErrFreeTailBlock:
|
||||
return fmt.Sprintf("Free block at offset %#x: Cannot be last file block", e.Off)
|
||||
case ErrHead:
|
||||
return fmt.Sprintf("Block at offset %#x: Head of free block list has non zero .prev %#x", e.Off, e.Arg)
|
||||
case ErrInvalidRelocTarget:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Target (%#x) is not a short or long used block", e.Off, e.Arg)
|
||||
case ErrInvalidWAL:
|
||||
return fmt.Sprintf("Corrupted write ahead log file: %q %v", e.Name, e.More)
|
||||
case ErrLongFreeBlkTooLong:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size (%#x) beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreeBlkTooShort:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size (%#x) too small", e.Off, e.Arg)
|
||||
case ErrLongFreeNextBeyondEOF:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Next (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreePrevBeyondEOF:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Prev (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrLongFreeTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected long free tail tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrLostFreeBlock:
|
||||
return fmt.Sprintf("Free block at offset %#x: not in any FLT list", e.Off)
|
||||
case ErrNullReloc:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Nil target", e.Off)
|
||||
case ErrRelocBeyondEOF:
|
||||
return fmt.Sprintf("Used reloc block at offset %#x: Link (%#x) points beyond EOF", e.Off, e.Arg)
|
||||
case ErrShortFreeTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Expected short free tail tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrSmall:
|
||||
return fmt.Sprintf("Request for of free block of size %d returned a too small (%d) one at offset %#x", e.Arg, e.Arg2, e.Off)
|
||||
case ErrTailTag:
|
||||
return fmt.Sprintf("Block at offset %#x: Invalid tail CC tag, got %#2x", e.Off, e.Arg)
|
||||
case ErrUnexpReloc:
|
||||
return fmt.Sprintf("Block at offset %#x: Unexpected reloc block. Referred to from reloc block at offset %#x", e.Off, e.Arg)
|
||||
case ErrVerifyPadding:
|
||||
return fmt.Sprintf("Used block at offset %#x: Nonzero padding", e.Off)
|
||||
case ErrVerifyTailSize:
|
||||
return fmt.Sprintf("Long free block at offset %#x: Size %#x, but tail size %#x", e.Off, e.Arg, e.Arg2)
|
||||
case ErrVerifyUsedSpan:
|
||||
return fmt.Sprintf("Used block at offset %#x: Size %#x spans beyond EOF", e.Off, e.Arg)
|
||||
}
|
||||
|
||||
more := ""
|
||||
if e.More != nil {
|
||||
more = fmt.Sprintf(", %v", e.More)
|
||||
}
|
||||
off := ""
|
||||
if e.Off != 0 {
|
||||
off = fmt.Sprintf(", off: %#x", e.Off)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Error%s%s", off, more)
|
||||
}
|
1981
Godeps/_workspace/src/github.com/cznic/exp/lldb/falloc.go
generated
vendored
Normal file
1981
Godeps/_workspace/src/github.com/cznic/exp/lldb/falloc.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1833
Godeps/_workspace/src/github.com/cznic/exp/lldb/falloc_test.go
generated
vendored
Normal file
1833
Godeps/_workspace/src/github.com/cznic/exp/lldb/falloc_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
192
Godeps/_workspace/src/github.com/cznic/exp/lldb/filer.go
generated
vendored
Normal file
192
Godeps/_workspace/src/github.com/cznic/exp/lldb/filer.go
generated
vendored
Normal file
@ -0,0 +1,192 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// An abstraction of file like (persistent) storage with optional (abstracted)
|
||||
// support for structural integrity.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
func doubleTrouble(first, second error) error {
|
||||
return fmt.Errorf("%q. Additionally, while attempting to recover (rollback): %q", first, second)
|
||||
}
|
||||
|
||||
// A Filer is a []byte-like model of a file or similar entity. It may
|
||||
// optionally implement support for structural transaction safety. In contrast
|
||||
// to a file stream, a Filer is not sequentially accessible. ReadAt and WriteAt
|
||||
// are always "addressed" by an offset and are assumed to perform atomically.
|
||||
// A Filer is not safe for concurrent access, it's designed for consumption by
|
||||
// the other objects in package, which should use a Filer from one goroutine
|
||||
// only or via a mutex. BeginUpdate, EndUpdate and Rollback must be either all
|
||||
// implemented by a Filer for structural integrity - or they should be all
|
||||
// no-ops; where/if that requirement is relaxed.
|
||||
//
|
||||
// If a Filer wraps another Filer implementation, it usually invokes the same
|
||||
// methods on the "inner" one, after some possible argument translations etc.
|
||||
// If a Filer implements the structural transactions handling methods
|
||||
// (BeginUpdate, EndUpdate and Rollback) as no-ops _and_ wraps another Filer:
|
||||
// it then still MUST invoke those methods on the inner Filer. This is
|
||||
// important for the case where a RollbackFiler exists somewhere down the
|
||||
// chain. It's also important for an Allocator - to know when it must
|
||||
// invalidate its FLT cache.
|
||||
type Filer interface {
|
||||
// BeginUpdate increments the "nesting" counter (initially zero). Every
|
||||
// call to BeginUpdate must be eventually "balanced" by exactly one of
|
||||
// EndUpdate or Rollback. Calls to BeginUpdate may nest.
|
||||
BeginUpdate() error
|
||||
|
||||
// Analogous to os.File.Close().
|
||||
Close() error
|
||||
|
||||
// EndUpdate decrements the "nesting" counter. If it's zero after that
|
||||
// then assume the "storage" has reached structural integrity (after a
|
||||
// batch of partial updates). If a Filer implements some support for
|
||||
// that (write ahead log, journal, etc.) then the appropriate actions
|
||||
// are to be taken for nesting == 0. Invocation of an unbalanced
|
||||
// EndUpdate is an error.
|
||||
EndUpdate() error
|
||||
|
||||
// Analogous to os.File.Name().
|
||||
Name() string
|
||||
|
||||
// PunchHole deallocates space inside a "file" in the byte range
|
||||
// starting at off and continuing for size bytes. The actual hole
|
||||
// created by PunchHole may be smaller than requested. The Filer size
|
||||
// (as reported by `Size()` does not change when hole punching, even
|
||||
// when punching the end of a file off. In contrast to the Linux
|
||||
// implementation of FALLOC_FL_PUNCH_HOLE in `fallocate`(2); a Filer is
|
||||
// free not only to ignore `PunchHole()` (implement it as a nop), but
|
||||
// additionally no guarantees about the content of the hole, when
|
||||
// eventually read back, are required, i.e. any data, not only zeros,
|
||||
// can be read from the "hole", including just anything what was left
|
||||
// there - with all of the possible security problems.
|
||||
PunchHole(off, size int64) error
|
||||
|
||||
// As os.File.ReadAt. Note: `off` is an absolute "file pointer"
|
||||
// address and cannot be negative even when a Filer is a InnerFiler.
|
||||
ReadAt(b []byte, off int64) (n int, err error)
|
||||
|
||||
// Rollback cancels and undoes the innermost pending update level.
|
||||
// Rollback decrements the "nesting" counter. If a Filer implements
|
||||
// some support for keeping structural integrity (write ahead log,
|
||||
// journal, etc.) then the appropriate actions are to be taken.
|
||||
// Invocation of an unbalanced Rollback is an error.
|
||||
Rollback() error
|
||||
|
||||
// Analogous to os.File.FileInfo().Size().
|
||||
Size() (int64, error)
|
||||
|
||||
// Analogous to os.Sync().
|
||||
Sync() (err error)
|
||||
|
||||
// Analogous to os.File.Truncate().
|
||||
Truncate(size int64) error
|
||||
|
||||
// Analogous to os.File.WriteAt(). Note: `off` is an absolute "file
|
||||
// pointer" address and cannot be negative even when a Filer is a
|
||||
// InnerFiler.
|
||||
WriteAt(b []byte, off int64) (n int, err error)
|
||||
}
|
||||
|
||||
var _ Filer = &InnerFiler{} // Ensure InnerFiler is a Filer.
|
||||
|
||||
// A InnerFiler is a Filer with added addressing/size translation.
|
||||
type InnerFiler struct {
|
||||
outer Filer
|
||||
off int64
|
||||
}
|
||||
|
||||
// NewInnerFiler returns a new InnerFiler wrapped by `outer` in a way which
|
||||
// adds `off` to every access.
|
||||
//
|
||||
// For example, considering:
|
||||
//
|
||||
// inner := NewInnerFiler(outer, 10)
|
||||
//
|
||||
// then
|
||||
//
|
||||
// inner.WriteAt([]byte{42}, 4)
|
||||
//
|
||||
// translates to
|
||||
//
|
||||
// outer.WriteAt([]byte{42}, 14)
|
||||
//
|
||||
// But an attempt to emulate
|
||||
//
|
||||
// outer.WriteAt([]byte{17}, 9)
|
||||
//
|
||||
// by
|
||||
//
|
||||
// inner.WriteAt([]byte{17}, -1)
|
||||
//
|
||||
// will fail as the `off` parameter can never be < 0. Also note that
|
||||
//
|
||||
// inner.Size() == outer.Size() - off,
|
||||
//
|
||||
// i.e. `inner` pretends no `outer` exists. Finally, after e.g.
|
||||
//
|
||||
// inner.Truncate(7)
|
||||
// outer.Size() == 17
|
||||
//
|
||||
// will be true.
|
||||
func NewInnerFiler(outer Filer, off int64) *InnerFiler { return &InnerFiler{outer, off} }
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *InnerFiler) BeginUpdate() error { return f.outer.BeginUpdate() }
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *InnerFiler) Close() (err error) { return f.outer.Close() }
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *InnerFiler) EndUpdate() error { return f.outer.EndUpdate() }
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *InnerFiler) Name() string { return f.outer.Name() }
|
||||
|
||||
// PunchHole implements Filer. `off`, `size` must be >= 0.
|
||||
func (f *InnerFiler) PunchHole(off, size int64) error { return f.outer.PunchHole(f.off+off, size) }
|
||||
|
||||
// ReadAt implements Filer. `off` must be >= 0.
|
||||
func (f *InnerFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, &ErrINVAL{f.outer.Name() + ":ReadAt invalid off", off}
|
||||
}
|
||||
|
||||
return f.outer.ReadAt(b, f.off+off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *InnerFiler) Rollback() error { return f.outer.Rollback() }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *InnerFiler) Size() (int64, error) {
|
||||
sz, err := f.outer.Size()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return mathutil.MaxInt64(sz-f.off, 0), nil
|
||||
}
|
||||
|
||||
// Sync() implements Filer.
|
||||
func (f *InnerFiler) Sync() (err error) {
|
||||
return f.outer.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *InnerFiler) Truncate(size int64) error { return f.outer.Truncate(size + f.off) }
|
||||
|
||||
// WriteAt implements Filer. `off` must be >= 0.
|
||||
func (f *InnerFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, &ErrINVAL{f.outer.Name() + ":WriteAt invalid off", off}
|
||||
}
|
||||
|
||||
return f.outer.WriteAt(b, f.off+off)
|
||||
}
|
764
Godeps/_workspace/src/github.com/cznic/exp/lldb/filer_test.go
generated
vendored
Normal file
764
Godeps/_workspace/src/github.com/cznic/exp/lldb/filer_test.go
generated
vendored
Normal file
@ -0,0 +1,764 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
)
|
||||
|
||||
// Bench knobs.
|
||||
const (
|
||||
filerTestChunkSize = 32e3
|
||||
filerTotalSize = 10e6
|
||||
)
|
||||
|
||||
type newFunc func() Filer
|
||||
|
||||
type testFileFiler struct {
|
||||
Filer
|
||||
}
|
||||
|
||||
func (t *testFileFiler) Close() (err error) {
|
||||
n := t.Name()
|
||||
err = t.Filer.Close()
|
||||
if errDel := os.Remove(n); errDel != nil && err == nil {
|
||||
err = errDel
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
newFileFiler = func() Filer {
|
||||
file, err := ioutil.TempFile("", "lldb-test-file")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &testFileFiler{NewSimpleFileFiler(file)}
|
||||
}
|
||||
|
||||
newOSFileFiler = func() Filer {
|
||||
file, err := ioutil.TempFile("", "lldb-test-osfile")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &testFileFiler{NewOSFiler(file)}
|
||||
}
|
||||
|
||||
newMemFiler = func() Filer {
|
||||
return NewMemFiler()
|
||||
}
|
||||
|
||||
nwBitFiler = func() Filer {
|
||||
f, err := newBitFiler(NewMemFiler())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
newRollbackFiler = func() Filer {
|
||||
f := NewMemFiler()
|
||||
|
||||
var r Filer
|
||||
|
||||
checkpoint := func(sz int64) (err error) {
|
||||
return f.Truncate(sz)
|
||||
}
|
||||
|
||||
r, err := NewRollbackFiler(f, checkpoint, f)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
)
|
||||
|
||||
func TestFilerNesting(t *testing.T) {
|
||||
testFilerNesting(t, newFileFiler)
|
||||
testFilerNesting(t, newOSFileFiler)
|
||||
testFilerNesting(t, newMemFiler)
|
||||
testFilerNesting(t, newRollbackFiler)
|
||||
}
|
||||
|
||||
func testFilerNesting(t *testing.T, nf newFunc) {
|
||||
// Check {Create, Close} works.
|
||||
f := nf()
|
||||
t.Log(f.Name())
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check {Create, EndUpdate} doesn't work.
|
||||
f = nf()
|
||||
t.Log(f.Name())
|
||||
if err := f.EndUpdate(); err == nil {
|
||||
f.Close()
|
||||
t.Fatal("unexpected success")
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Check {Create, BeginUpdate, Close} doesn't work.
|
||||
f = nf()
|
||||
t.Log(f.Name())
|
||||
f.BeginUpdate()
|
||||
|
||||
if err := f.Close(); err == nil {
|
||||
t.Fatal("unexpected success")
|
||||
}
|
||||
|
||||
// Check {Create, BeginUpdate, EndUpdate, Close} works.
|
||||
f = nf()
|
||||
t.Log(f.Name())
|
||||
f.BeginUpdate()
|
||||
if err := f.EndUpdate(); err != nil {
|
||||
f.Close()
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilerTruncate(t *testing.T) {
|
||||
testFilerTruncate(t, newFileFiler)
|
||||
testFilerTruncate(t, newOSFileFiler)
|
||||
testFilerTruncate(t, newMemFiler)
|
||||
testFilerTruncate(t, nwBitFiler)
|
||||
testFilerTruncate(t, newRollbackFiler)
|
||||
}
|
||||
|
||||
func testFilerTruncate(t *testing.T, nf newFunc) {
|
||||
f := nf()
|
||||
t.Log(f.Name())
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if _, ok := f.(*RollbackFiler); ok {
|
||||
if err := f.BeginUpdate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := f.EndUpdate(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Check Truncate works.
|
||||
sz := int64(1e6)
|
||||
if err := f.Truncate(sz); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
fsz, err := f.Size()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if g, e := fsz, sz; g != e {
|
||||
t.Error(g, e)
|
||||
return
|
||||
}
|
||||
|
||||
sz *= 2
|
||||
if err := f.Truncate(sz); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
fsz, err = f.Size()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if g, e := fsz, sz; g != e {
|
||||
t.Error(g, e)
|
||||
return
|
||||
}
|
||||
|
||||
sz = 0
|
||||
if err := f.Truncate(sz); err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
fsz, err = f.Size()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if g, e := fsz, sz; g != e {
|
||||
t.Error(g, e)
|
||||
return
|
||||
}
|
||||
|
||||
// Check Truncate(-1) doesn't work.
|
||||
sz = -1
|
||||
if err := f.Truncate(sz); err == nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestFilerReadAtWriteAt(t *testing.T) {
|
||||
testFilerReadAtWriteAt(t, newFileFiler)
|
||||
testFilerReadAtWriteAt(t, newOSFileFiler)
|
||||
testFilerReadAtWriteAt(t, newMemFiler)
|
||||
testFilerReadAtWriteAt(t, nwBitFiler)
|
||||
testFilerReadAtWriteAt(t, newRollbackFiler)
|
||||
}
|
||||
|
||||
// testFilerReadAtWriteAt stress-tests ReadAt/WriteAt of the Filer produced
// by nf: it performs M random overlapping writes into a 64 KiB window while
// mirroring them into a shadow buffer e, then verifies Size, random reads
// against the shadow, and (for a MemFiler) the WriteTo/ReadFrom round trip.
func testFilerReadAtWriteAt(t *testing.T, nf newFunc) {
	f := nf()
	t.Log(f.Name())
	defer func() {
		if err := f.Close(); err != nil {
			t.Error(err)
		}
	}()

	// A RollbackFiler requires an open update for mutating calls.
	if _, ok := f.(*RollbackFiler); ok {
		if err := f.BeginUpdate(); err != nil {
			t.Fatal(err)
		}

		defer func() {
			if err := f.EndUpdate(); err != nil {
				t.Error(err)
			}
		}()
	}

	const (
		N = 1 << 16 // size of the test window in bytes
		M = 2e2     // number of random write (and read) operations
	)

	// s is the scratch source data, e the expected ("shadow") file image.
	s := make([]byte, N)
	e := make([]byte, N)
	rnd := rand.New(rand.NewSource(42)) // fixed seed: deterministic test
	for i := range e {
		s[i] = byte(rnd.Intn(256))
	}
	// n2 tracks the highest byte offset ever written, i.e. the expected size.
	n2 := 0
	for i := 0; i < M; i++ {
		// Pick a random non-empty [from, to) range.
		var from, to int
		for {
			from = rnd.Intn(N)
			to = rnd.Intn(N)
			if from != to {
				break
			}
		}
		if from > to {
			from, to = to, from
		}
		for i := range s[from:to] {
			s[from+i] = byte(rnd.Intn(256))
		}
		copy(e[from:to], s[from:to])
		if to > n2 {
			n2 = to
		}
		n, err := f.WriteAt(s[from:to], int64(from))
		if err != nil {
			t.Error(err)
			return
		}

		if g, e := n, to-from; g != e {
			t.Error(g, e)
			return
		}
	}

	fsz, err := f.Size()
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := fsz, int64(n2); g != e {
		t.Error(g, e)
		return
	}

	// Random reads; iteration i == M reads the whole written prefix.
	b := make([]byte, n2)
	for i := 0; i <= M; i++ {
		from := rnd.Intn(n2)
		to := rnd.Intn(n2)
		if from > to {
			from, to = to, from
		}
		if i == M {
			from, to = 0, n2
		}
		n, err := f.ReadAt(b[from:to], int64(from))
		// A short/EOF read with some data returned is tolerated.
		if err != nil && (!fileutil.IsEOF(err) && n != 0) {
			fsz, err = f.Size()
			if err != nil {
				t.Error(err)
				return
			}

			t.Error(fsz, from, to, err)
			return
		}

		if g, e := n, to-from; g != e {
			t.Error(g, e)
			return
		}

		if g, e := b[from:to], e[from:to]; !bytes.Equal(g, e) {
			// On mismatch dump the first MemFiler pages to aid debugging.
			if x, ok := f.(*MemFiler); ok {
				for i := int64(0); i <= 3; i++ {
					t.Logf("pg %d\n----\n%s", i, hex.Dump(x.m[i][:]))
				}
			}
			t.Errorf(
				"i %d from %d to %d len(g) %d len(e) %d\n---- got ----\n%s\n---- exp ----\n%s",
				i, from, to, len(g), len(e), hex.Dump(g), hex.Dump(e),
			)
			return
		}
	}

	// The remaining checks are MemFiler-specific (WriteTo/ReadFrom).
	mf, ok := f.(*MemFiler)
	if !ok {
		return
	}

	buf := &bytes.Buffer{}
	if _, err := mf.WriteTo(buf); err != nil {
		t.Error(err)
		return
	}

	if g, e := buf.Bytes(), e[:n2]; !bytes.Equal(g, e) {
		t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e))
		return
	}

	if err := mf.Truncate(0); err != nil {
		t.Error(err)
		return
	}

	if _, err := mf.ReadFrom(buf); err != nil {
		t.Error(err)
		return
	}

	roundTrip := make([]byte, n2)
	if n, err := mf.ReadAt(roundTrip, 0); err != nil && n == 0 {
		t.Error(err)
		return
	}

	if g, e := roundTrip, e[:n2]; !bytes.Equal(g, e) {
		t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e))
		return
	}
}
|
||||
|
||||
func TestInnerFiler(t *testing.T) {
|
||||
testInnerFiler(t, newFileFiler)
|
||||
testInnerFiler(t, newOSFileFiler)
|
||||
testInnerFiler(t, newMemFiler)
|
||||
testInnerFiler(t, nwBitFiler)
|
||||
testInnerFiler(t, newRollbackFiler)
|
||||
}
|
||||
|
||||
// testInnerFiler verifies that an InnerFiler wrapping the Filer produced by
// nf behaves as a view shifted by HDR_SIZE bytes: negative offsets are
// rejected, writes at inner offset o land at outer offset HDR_SIZE+o, and
// Size/Truncate of the inner view track the outer file minus the header.
func testInnerFiler(t *testing.T, nf newFunc) {
	const (
		HDR_SIZE = 42   // bytes reserved for the "header" before the inner view
		LONG_OFF = 3330 // a far offset used to test sparse writes
	)
	outer := nf()
	t.Log(outer.Name())
	inner := NewInnerFiler(outer, HDR_SIZE)
	defer func() {
		if err := outer.Close(); err != nil {
			t.Error(err)
		}
	}()

	// A RollbackFiler requires an open update for mutating calls.
	if _, ok := outer.(*RollbackFiler); ok {
		if err := outer.BeginUpdate(); err != nil {
			t.Fatal(err)
		}

		defer func() {
			if err := outer.EndUpdate(); err != nil {
				t.Error(err)
			}
		}()
	}

	// Negative offsets must be rejected by the inner view.
	b := []byte{2, 5, 11}
	n, err := inner.WriteAt(b, -1)
	if err == nil {
		t.Error("unexpected success")
		return
	}

	n, err = inner.ReadAt(make([]byte, 10), -1)
	if err == nil {
		t.Error("unexpected success")
		return
	}

	// Write at inner offset 0; must land at outer offset HDR_SIZE.
	n, err = inner.WriteAt(b, 0)
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := n, len(b); g != e {
		t.Error(g, e)
		return
	}

	osz, err := outer.Size()
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := osz, int64(HDR_SIZE+3); g != e {
		t.Error(g, e)
		return
	}

	isz, err := inner.Size()
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := isz, int64(3); g != e {
		t.Error(g, e)
		return
	}

	// The header region of the outer file must still read as zeros.
	rbuf := make([]byte, 3)
	if n, err = outer.ReadAt(rbuf, 0); err != nil && n == 0 {
		t.Error(err)
		return
	}

	if g, e := n, len(rbuf); g != e {
		t.Error(g, e)
		return
	}

	if g, e := rbuf, make([]byte, 3); !bytes.Equal(g, e) {
		t.Error(g, e)
	}

	// The written bytes must be visible at outer offset HDR_SIZE...
	rbuf = make([]byte, 3)
	if n, err = outer.ReadAt(rbuf, HDR_SIZE); err != nil && n == 0 {
		t.Error(err)
		return
	}

	if g, e := n, len(rbuf); g != e {
		t.Error(g, e)
		return
	}

	if g, e := rbuf, []byte{2, 5, 11}; !bytes.Equal(g, e) {
		t.Error(g, e)
	}

	// ...and at inner offset 0.
	rbuf = make([]byte, 3)
	if n, err = inner.ReadAt(rbuf, 0); err != nil && n == 0 {
		t.Error(err)
		return
	}

	if g, e := n, len(rbuf); g != e {
		t.Error(g, e)
		return
	}

	if g, e := rbuf, []byte{2, 5, 11}; !bytes.Equal(g, e) {
		t.Error(g, e)
	}

	// A sparse write far into the inner view grows both sizes accordingly.
	b = []byte{22, 55, 111}
	if n, err = inner.WriteAt(b, LONG_OFF); err != nil {
		t.Error(err)
		return
	}

	if g, e := n, len(b); g != e {
		t.Error(g, e)
		return
	}

	osz, err = outer.Size()
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := osz, int64(HDR_SIZE+LONG_OFF+3); g != e {
		t.Error(g, e)
		return
	}

	isz, err = inner.Size()
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := isz, int64(LONG_OFF+3); g != e {
		t.Error(g, e)
		return
	}

	rbuf = make([]byte, 3)
	if n, err = outer.ReadAt(rbuf, HDR_SIZE+LONG_OFF); err != nil && n == 0 {
		t.Error(err)
		return
	}

	if g, e := n, len(rbuf); g != e {
		t.Error(g, e)
		return
	}

	if g, e := rbuf, []byte{22, 55, 111}; !bytes.Equal(g, e) {
		t.Error(g, e)
	}

	rbuf = make([]byte, 3)
	if n, err = inner.ReadAt(rbuf, LONG_OFF); err != nil && n == 0 {
		t.Error(err)
		return
	}

	if g, e := n, len(rbuf); g != e {
		t.Error(g, e)
		return
	}

	if g, e := rbuf, []byte{22, 55, 111}; !bytes.Equal(g, e) {
		t.Error(g, e)
		return
	}

	// Truncating the inner view must shrink the outer file to header+1.
	if err = inner.Truncate(1); err != nil {
		t.Error(err)
		return
	}

	isz, err = inner.Size()
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := isz, int64(1); g != e {
		t.Error(g, e)
		return
	}

	osz, err = outer.Size()
	if err != nil {
		t.Error(err)
		return
	}

	if g, e := osz, int64(HDR_SIZE+1); g != e {
		t.Error(g, e)
		return
	}
}
|
||||
|
||||
func TestFileReadAtHole(t *testing.T) {
|
||||
testFileReadAtHole(t, newFileFiler)
|
||||
testFileReadAtHole(t, newOSFileFiler)
|
||||
testFileReadAtHole(t, newMemFiler)
|
||||
testFileReadAtHole(t, nwBitFiler)
|
||||
testFileReadAtHole(t, newRollbackFiler)
|
||||
}
|
||||
|
||||
// testFileReadAtHole verifies that reading from a hole (an unwritten region
// before the end of the file) of the Filer produced by nf succeeds and
// returns the requested number of (zero) bytes.
func testFileReadAtHole(t *testing.T, nf newFunc) {
	f := nf()
	t.Log(f.Name())
	defer func() {
		if err := f.Close(); err != nil {
			t.Error(err)
		}
	}()

	// A RollbackFiler requires an open update for mutating calls.
	if _, ok := f.(*RollbackFiler); ok {
		if err := f.BeginUpdate(); err != nil {
			t.Fatal(err)
		}

		defer func() {
			if err := f.EndUpdate(); err != nil {
				t.Error(err)
			}
		}()
	}

	// Write a single byte far into the file, creating a hole before it.
	n, err := f.WriteAt([]byte{1}, 40000)
	if err != nil {
		t.Error(err)
		return
	}

	if n != 1 {
		t.Error(n)
		return
	}

	// Reading entirely inside the hole must succeed with a full count.
	n, err = f.ReadAt(make([]byte, 1000), 20000)
	if err != nil {
		t.Error(err)
		return
	}

	if n != 1000 {
		t.Error(n)
		return
	}
}
|
||||
|
||||
// BenchmarkMemFilerWrSeq measures sequential WriteAt throughput of a
// MemFiler, cycling the write offset through filerTotalSize in
// filerTestChunkSize steps.
func BenchmarkMemFilerWrSeq(b *testing.B) {
	b.StopTimer()
	// Fill the chunk with pseudo-random data outside the timed region.
	buf := make([]byte, filerTestChunkSize)
	for i := range buf {
		buf[i] = byte(rand.Int())
	}
	f := newMemFiler()
	runtime.GC()
	b.StartTimer()
	var ofs int64
	for i := 0; i < b.N; i++ {
		_, err := f.WriteAt(buf, ofs)
		if err != nil {
			b.Fatal(err)
		}

		ofs = (ofs + filerTestChunkSize) % filerTotalSize
	}
}
|
||||
|
||||
// BenchmarkMemFilerRdSeq measures sequential ReadAt throughput of a
// MemFiler. The file is pre-filled (untimed) with the same access pattern
// that is subsequently read back.
func BenchmarkMemFilerRdSeq(b *testing.B) {
	b.StopTimer()
	buf := make([]byte, filerTestChunkSize)
	for i := range buf {
		buf[i] = byte(rand.Int())
	}
	f := newMemFiler()
	// Pre-fill the filer before the timer starts.
	var ofs int64
	for i := 0; i < b.N; i++ {
		_, err := f.WriteAt(buf, ofs)
		if err != nil {
			b.Fatal(err)
		}

		ofs = (ofs + filerTestChunkSize) % filerTotalSize
	}
	runtime.GC()
	b.StartTimer()
	ofs = 0
	for i := 0; i < b.N; i++ {
		n, err := f.ReadAt(buf, ofs)
		// A short/EOF read returning data is tolerated, matching the tests.
		if err != nil && n == 0 {
			b.Fatal(err)
		}

		ofs = (ofs + filerTestChunkSize) % filerTotalSize
	}
}
|
||||
|
||||
// BenchmarkMemFilerWrRand measures random-offset WriteAt throughput of a
// MemFiler with run lengths up to two pages.
func BenchmarkMemFilerWrRand(b *testing.B) {
	b.StopTimer()
	rng := rand.New(rand.NewSource(42)) // fixed seed: reproducible offsets
	f := newMemFiler()
	// NOTE(review): this local shadows the "bytes" package; the total is
	// accumulated but never reported (e.g. via b.SetBytes) — confirm intent.
	var bytes int64

	// Pre-compute all offsets and run lengths outside the timed region.
	var ofs, runs []int
	for i := 0; i < b.N; i++ {
		ofs = append(ofs, rng.Intn(1<<31-1))
		runs = append(runs, rng.Intn(1<<31-1)%(2*pgSize))
	}
	data := make([]byte, 2*pgSize)
	for i := range data {
		data[i] = byte(rng.Int())
	}

	runtime.GC()
	b.StartTimer()
	for i, v := range ofs {
		n := runs[i]
		bytes += int64(n)
		f.WriteAt(data[:n], int64(v))
	}
	b.StopTimer()
}
|
||||
|
||||
// BenchmarkMemFilerRdRand measures random-offset ReadAt throughput of a
// MemFiler. The filer is pre-filled (untimed) at the same random offsets
// that are then read back.
func BenchmarkMemFilerRdRand(b *testing.B) {
	b.StopTimer()
	rng := rand.New(rand.NewSource(42)) // fixed seed: reproducible offsets
	f := newMemFiler()
	// NOTE(review): this local shadows the "bytes" package; the total is
	// accumulated but never reported (e.g. via b.SetBytes) — confirm intent.
	var bytes int64

	var ofs, runs []int
	for i := 0; i < b.N; i++ {
		ofs = append(ofs, rng.Intn(1<<31-1))
		runs = append(runs, rng.Intn(1<<31-1)%(2*pgSize))
	}
	data := make([]byte, 2*pgSize)
	for i := range data {
		data[i] = byte(rng.Int())
	}

	// Untimed pre-fill with the same offset/length pattern.
	for i, v := range ofs {
		n := runs[i]
		bytes += int64(n)
		f.WriteAt(data[:n], int64(v))
	}

	runtime.GC()
	b.StartTimer()
	for _, v := range ofs {
		f.ReadAt(data, int64(v))
	}
	b.StopTimer()
}
|
812
Godeps/_workspace/src/github.com/cznic/exp/lldb/gb.go
generated
vendored
Normal file
812
Godeps/_workspace/src/github.com/cznic/exp/lldb/gb.go
generated
vendored
Normal file
@ -0,0 +1,812 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Utilities to encode/decode and collate Go predeclared scalar types (and the
|
||||
// typeless nil and []byte). The encoding format is a variation of the one
|
||||
// used by the "encoding/gob" package.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
// Tag bytes of the gb encoding. Scalars whose value fits the tag itself
// (small lengths, small non-negative ints, zero floats) need no payload;
// the remaining tags are followed by a big-endian payload whose byte count
// is implied by the tag.
const (
	gbNull     = iota // 0x00
	gbFalse           // 0x01
	gbTrue            // 0x02
	gbFloat0          // 0x03
	gbFloat1          // 0x04
	gbFloat2          // 0x05
	gbFloat3          // 0x06
	gbFloat4          // 0x07
	gbFloat5          // 0x08
	gbFloat6          // 0x09
	gbFloat7          // 0x0a
	gbFloat8          // 0x0b
	gbComplex0        // 0x0c
	gbComplex1        // 0x0d
	gbComplex2        // 0x0e
	gbComplex3        // 0x0f
	gbComplex4        // 0x10
	gbComplex5        // 0x11
	gbComplex6        // 0x12
	gbComplex7        // 0x13
	gbComplex8        // 0x14
	gbBytes00         // 0x15
	gbBytes01         // 0x16
	gbBytes02         // 0x17
	gbBytes03         // 0x18
	gbBytes04         // 0x19
	gbBytes05         // 0x1a
	gbBytes06         // 0x1b
	gbBytes07         // 0x1c
	gbBytes08         // 0x1d
	gbBytes09         // 0x1e
	gbBytes10         // 0x1f
	gbBytes11         // 0x20
	gbBytes12         // 0x21
	gbBytes13         // 0x22
	gbBytes14         // 0x23
	gbBytes15         // 0x24
	gbBytes16         // 0x25
	gbBytes17         // 0x26 (comment typo fixed: was "Ox26")
	gbBytes1          // 0x27
	gbBytes2          // 0x28: Offset by one to allow 64kB sized []byte.
	gbString00        // 0x29
	gbString01        // 0x2a
	gbString02        // 0x2b
	gbString03        // 0x2c
	gbString04        // 0x2d
	gbString05        // 0x2e
	gbString06        // 0x2f
	gbString07        // 0x30
	gbString08        // 0x31
	gbString09        // 0x32
	gbString10        // 0x33
	gbString11        // 0x34
	gbString12        // 0x35
	gbString13        // 0x36
	gbString14        // 0x37
	gbString15        // 0x38
	gbString16        // 0x39
	gbString17        // 0x3a
	gbString1         // 0x3b
	gbString2         // 0x3c
	gbUintP1          // 0x3d
	gbUintP2          // 0x3e
	gbUintP3          // 0x3f
	gbUintP4          // 0x40
	gbUintP5          // 0x41
	gbUintP6          // 0x42
	gbUintP7          // 0x43
	gbUintP8          // 0x44
	gbIntM8           // 0x45
	gbIntM7           // 0x46
	gbIntM6           // 0x47
	gbIntM5           // 0x48
	gbIntM4           // 0x49
	gbIntM3           // 0x4a
	gbIntM2           // 0x4b
	gbIntM1           // 0x4c
	gbIntP1           // 0x4d
	gbIntP2           // 0x4e
	gbIntP3           // 0x4f
	gbIntP4           // 0x50
	gbIntP5           // 0x51
	gbIntP6           // 0x52
	gbIntP7           // 0x53
	gbIntP8           // 0x54
	gbInt0            // 0x55

	gbIntMax = 255 - gbInt0 // 0xff == 170
)
|
||||
|
||||
// EncodeScalars encodes a vector of predeclared scalar type values to a
|
||||
// []byte, making it suitable to store it as a "record" in a DB or to use it as
|
||||
// a key of a BTree.
|
||||
func EncodeScalars(scalars ...interface{}) (b []byte, err error) {
|
||||
for _, scalar := range scalars {
|
||||
switch x := scalar.(type) {
|
||||
default:
|
||||
return nil, &ErrINVAL{"EncodeScalars: unsupported type", fmt.Sprintf("%T in `%#v`", x, scalars)}
|
||||
|
||||
case nil:
|
||||
b = append(b, gbNull)
|
||||
|
||||
case bool:
|
||||
switch x {
|
||||
case false:
|
||||
b = append(b, gbFalse)
|
||||
case true:
|
||||
b = append(b, gbTrue)
|
||||
}
|
||||
|
||||
case float32:
|
||||
encFloat(float64(x), &b)
|
||||
case float64:
|
||||
encFloat(x, &b)
|
||||
|
||||
case complex64:
|
||||
encComplex(complex128(x), &b)
|
||||
case complex128:
|
||||
encComplex(x, &b)
|
||||
|
||||
case string:
|
||||
n := len(x)
|
||||
if n <= 17 {
|
||||
b = append(b, byte(gbString00+n))
|
||||
b = append(b, []byte(x)...)
|
||||
break
|
||||
}
|
||||
|
||||
if n > 65535 {
|
||||
return nil, fmt.Errorf("EncodeScalars: cannot encode string of length %d (limit 65536)", n)
|
||||
}
|
||||
|
||||
pref := byte(gbString1)
|
||||
if n > 255 {
|
||||
pref++
|
||||
}
|
||||
b = append(b, pref)
|
||||
encUint0(uint64(n), &b)
|
||||
b = append(b, []byte(x)...)
|
||||
|
||||
case int8:
|
||||
encInt(int64(x), &b)
|
||||
case int16:
|
||||
encInt(int64(x), &b)
|
||||
case int32:
|
||||
encInt(int64(x), &b)
|
||||
case int64:
|
||||
encInt(x, &b)
|
||||
case int:
|
||||
encInt(int64(x), &b)
|
||||
|
||||
case uint8:
|
||||
encUint(uint64(x), &b)
|
||||
case uint16:
|
||||
encUint(uint64(x), &b)
|
||||
case uint32:
|
||||
encUint(uint64(x), &b)
|
||||
case uint64:
|
||||
encUint(x, &b)
|
||||
case uint:
|
||||
encUint(uint64(x), &b)
|
||||
case []byte:
|
||||
n := len(x)
|
||||
if n <= 17 {
|
||||
b = append(b, byte(gbBytes00+n))
|
||||
b = append(b, []byte(x)...)
|
||||
break
|
||||
}
|
||||
|
||||
if n > 655356 {
|
||||
return nil, fmt.Errorf("EncodeScalars: cannot encode []byte of length %d (limit 65536)", n)
|
||||
}
|
||||
|
||||
pref := byte(gbBytes1)
|
||||
if n > 255 {
|
||||
pref++
|
||||
}
|
||||
b = append(b, pref)
|
||||
if n <= 255 {
|
||||
b = append(b, byte(n))
|
||||
} else {
|
||||
n--
|
||||
b = append(b, byte(n>>8), byte(n))
|
||||
}
|
||||
b = append(b, x...)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func encComplex(f complex128, b *[]byte) {
|
||||
encFloatPrefix(gbComplex0, real(f), b)
|
||||
encFloatPrefix(gbComplex0, imag(f), b)
|
||||
}
|
||||
|
||||
// encFloatPrefix appends the encoding of f to *b using the tag family that
// starts at prefix (gbFloat0 or gbComplex0). The IEEE-754 bits of f are
// byte-reversed first so the trailing zero bytes of typical float values
// become leading zeros that can be dropped; the count of significant bytes
// is folded into the tag.
func encFloatPrefix(prefix byte, f float64, b *[]byte) {
	u := math.Float64bits(f)
	// Reverse the byte order of u into n.
	var n uint64
	for i := 0; i < 8; i++ {
		n <<= 8
		n |= u & 0xFF
		u >>= 8
	}
	bits := mathutil.BitLenUint64(n)
	if bits == 0 {
		// f == +0.0: the bare prefix tag is the whole encoding.
		*b = append(*b, prefix)
		return
	}

	// Map bit length to payload byte count (1..8):
	// 0 1 2 3 4 5 6 7 8 9
	// . 1 1 1 1 1 1 1 1 2
	encUintPrefix(prefix+1+byte((bits-1)>>3), n, b)
}
|
||||
|
||||
// encFloat appends the gb encoding of f to *b using the gbFloat0 tag family.
func encFloat(f float64, b *[]byte) {
	encFloatPrefix(gbFloat0, f, b)
}
|
||||
|
||||
// encUint0 appends n to *b in big-endian byte order using the minimum
// number of bytes; even n == 0 emits a single zero byte.
func encUint0(n uint64, b *[]byte) {
	if n <= 0xff {
		*b = append(*b, byte(n))
	} else if n <= 0xffff {
		*b = append(*b, byte(n>>8), byte(n))
	} else if n <= 0xffffff {
		*b = append(*b, byte(n>>16), byte(n>>8), byte(n))
	} else if n <= 0xffffffff {
		*b = append(*b, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	} else if n <= 0xffffffffff {
		*b = append(*b, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	} else if n <= 0xffffffffffff {
		*b = append(*b, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	} else if n <= 0xffffffffffffff {
		*b = append(*b, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	} else if n <= math.MaxUint64 { // always true: the 8-byte case
		*b = append(*b, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	}
}
|
||||
|
||||
// encUintPrefix appends the tag byte prefix followed by the minimal
// big-endian encoding of n to *b.
func encUintPrefix(prefix byte, n uint64, b *[]byte) {
	*b = append(*b, prefix)
	encUint0(n, b)
}
|
||||
|
||||
// encUint appends the gb encoding of n to *b: a gbUintP1..gbUintP8 tag
// selected by n's bit length, followed by that many payload bytes.
func encUint(n uint64, b *[]byte) {
	// At least one payload byte even for n == 0.
	bits := mathutil.Max(1, mathutil.BitLenUint64(n))
	encUintPrefix(gbUintP1+byte((bits-1)>>3), n, b)
}
|
||||
|
||||
// encInt appends the gb encoding of n to *b. Negative values use the
// gbIntM1..gbIntM8 tags with the minimal number of low-order bytes (sign
// extension is implied on decode); values in [0, gbIntMax] are folded into
// the tag byte itself (gbInt0+n); larger positive values use gbIntP1..gbIntP8
// with a big-endian payload. The cases are ordered from most negative to
// most positive, so exactly one matches.
func encInt(n int64, b *[]byte) {
	switch {
	case n < -0x100000000000000:
		*b = append(*b, byte(gbIntM8), byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x1000000000000:
		*b = append(*b, byte(gbIntM7), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x10000000000:
		*b = append(*b, byte(gbIntM6), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x100000000:
		*b = append(*b, byte(gbIntM5), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x1000000:
		*b = append(*b, byte(gbIntM4), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x10000:
		*b = append(*b, byte(gbIntM3), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x100:
		*b = append(*b, byte(gbIntM2), byte(n>>8), byte(n))
	case n < 0:
		*b = append(*b, byte(gbIntM1), byte(n))
	case n <= gbIntMax:
		// Small non-negative values are encoded in the tag byte itself.
		*b = append(*b, byte(gbInt0+n))
	case n <= 0xff:
		*b = append(*b, gbIntP1, byte(n))
	case n <= 0xffff:
		*b = append(*b, gbIntP2, byte(n>>8), byte(n))
	case n <= 0xffffff:
		*b = append(*b, gbIntP3, byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffff:
		*b = append(*b, gbIntP4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffff:
		*b = append(*b, gbIntP5, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffff:
		*b = append(*b, gbIntP6, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffffff:
		*b = append(*b, gbIntP7, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0x7fffffffffffffff:
		*b = append(*b, gbIntP8, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	}
}
|
||||
|
||||
// decodeFloat reconstructs a float64 from the byte-reversed, leading-zero-
// trimmed payload produced by encFloatPrefix. b holds 0..8 bytes; the bytes
// fill the high end of the IEEE-754 bit pattern, b[0] highest.
func decodeFloat(b []byte) float64 {
	var bits uint64
	for i := len(b) - 1; i >= 0; i-- {
		bits = bits<<8 | uint64(b[i])
	}
	// Shorter payloads occupy the high-order bytes of the bit pattern.
	bits <<= uint(8-len(b)) * 8
	return math.Float64frombits(bits)
}
|
||||
|
||||
// DecodeScalars decodes a []byte produced by EncodeScalars.
|
||||
func DecodeScalars(b []byte) (scalars []interface{}, err error) {
|
||||
b0 := b
|
||||
for len(b) != 0 {
|
||||
switch tag := b[0]; tag {
|
||||
//default:
|
||||
//return nil, fmt.Errorf("tag %d(%#x) not supported", b[0], b[0])
|
||||
case gbNull:
|
||||
scalars = append(scalars, nil)
|
||||
b = b[1:]
|
||||
case gbFalse:
|
||||
scalars = append(scalars, false)
|
||||
b = b[1:]
|
||||
case gbTrue:
|
||||
scalars = append(scalars, true)
|
||||
b = b[1:]
|
||||
case gbFloat0:
|
||||
scalars = append(scalars, 0.0)
|
||||
b = b[1:]
|
||||
case gbFloat1, gbFloat2, gbFloat3, gbFloat4, gbFloat5, gbFloat6, gbFloat7, gbFloat8:
|
||||
n := 1 + int(tag) - gbFloat0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, decodeFloat(b[1:n]))
|
||||
b = b[n:]
|
||||
case gbComplex0, gbComplex1, gbComplex2, gbComplex3, gbComplex4, gbComplex5, gbComplex6, gbComplex7, gbComplex8:
|
||||
n := 1 + int(tag) - gbComplex0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
re := decodeFloat(b[1:n])
|
||||
b = b[n:]
|
||||
|
||||
if len(b) == 0 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
tag = b[0]
|
||||
if tag < gbComplex0 || tag > gbComplex8 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n = 1 + int(tag) - gbComplex0
|
||||
if len(b) < n-1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, complex(re, decodeFloat(b[1:n])))
|
||||
b = b[n:]
|
||||
case gbBytes00, gbBytes01, gbBytes02, gbBytes03, gbBytes04,
|
||||
gbBytes05, gbBytes06, gbBytes07, gbBytes08, gbBytes09,
|
||||
gbBytes10, gbBytes11, gbBytes12, gbBytes13, gbBytes14,
|
||||
gbBytes15, gbBytes16, gbBytes17:
|
||||
n := int(tag - gbBytes00)
|
||||
if len(b) < n+1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[1:n+1]...))
|
||||
b = b[n+1:]
|
||||
case gbBytes1:
|
||||
if len(b) < 2 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])
|
||||
b = b[2:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[:n]...))
|
||||
b = b[n:]
|
||||
case gbBytes2:
|
||||
if len(b) < 3 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])<<8 | int(b[2]) + 1
|
||||
b = b[3:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, append([]byte(nil), b[:n]...))
|
||||
b = b[n:]
|
||||
case gbString00, gbString01, gbString02, gbString03, gbString04,
|
||||
gbString05, gbString06, gbString07, gbString08, gbString09,
|
||||
gbString10, gbString11, gbString12, gbString13, gbString14,
|
||||
gbString15, gbString16, gbString17:
|
||||
n := int(tag - gbString00)
|
||||
if len(b) < n+1 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[1:n+1]))
|
||||
b = b[n+1:]
|
||||
case gbString1:
|
||||
if len(b) < 2 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])
|
||||
b = b[2:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[:n]))
|
||||
b = b[n:]
|
||||
case gbString2:
|
||||
if len(b) < 3 {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
n := int(b[1])<<8 | int(b[2])
|
||||
b = b[3:]
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
scalars = append(scalars, string(b[:n]))
|
||||
b = b[n:]
|
||||
case gbUintP1, gbUintP2, gbUintP3, gbUintP4, gbUintP5, gbUintP6, gbUintP7, gbUintP8:
|
||||
b = b[1:]
|
||||
n := 1 + int(tag) - gbUintP1
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
var u uint64
|
||||
for _, v := range b[:n] {
|
||||
u = u<<8 | uint64(v)
|
||||
}
|
||||
scalars = append(scalars, u)
|
||||
b = b[n:]
|
||||
case gbIntM8, gbIntM7, gbIntM6, gbIntM5, gbIntM4, gbIntM3, gbIntM2, gbIntM1:
|
||||
b = b[1:]
|
||||
n := 8 - (int(tag) - gbIntM8)
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
u := uint64(math.MaxUint64)
|
||||
for _, v := range b[:n] {
|
||||
u = u<<8 | uint64(v)
|
||||
}
|
||||
scalars = append(scalars, int64(u))
|
||||
b = b[n:]
|
||||
case gbIntP1, gbIntP2, gbIntP3, gbIntP4, gbIntP5, gbIntP6, gbIntP7, gbIntP8:
|
||||
b = b[1:]
|
||||
n := 1 + int(tag) - gbIntP1
|
||||
if len(b) < n {
|
||||
goto corrupted
|
||||
}
|
||||
|
||||
i := int64(0)
|
||||
for _, v := range b[:n] {
|
||||
i = i<<8 | int64(v)
|
||||
}
|
||||
scalars = append(scalars, i)
|
||||
b = b[n:]
|
||||
default:
|
||||
scalars = append(scalars, int64(b[0])-gbInt0)
|
||||
b = b[1:]
|
||||
}
|
||||
}
|
||||
return append([]interface{}(nil), scalars...), nil
|
||||
|
||||
corrupted:
|
||||
return nil, &ErrDecodeScalars{append([]byte(nil), b0...), len(b0) - len(b)}
|
||||
}
|
||||
|
||||
// collateComplex returns -1, 0 or +1 as x collates before, equal to or
// after y. Complex numbers are ordered by real part first; only on equal
// real parts is the imaginary part compared.
func collateComplex(x, y complex128) int {
	rx, ry := real(x), real(y)
	if rx < ry {
		return -1
	}
	if rx == ry {
		ix, iy := imag(x), imag(y)
		if ix < iy {
			return -1
		}
		if ix == iy {
			return 0
		}
		return 1
	}
	return 1
}
|
||||
|
||||
// collateFloat returns -1, 0 or +1 as x collates before, equal to or after
// y.
func collateFloat(x, y float64) int {
	if x < y {
		return -1
	}
	if x == y {
		return 0
	}
	return 1
}
|
||||
|
||||
// collateInt returns -1, 0 or +1 as x collates before, equal to or after y.
func collateInt(x, y int64) int {
	if x < y {
		return -1
	}
	if x == y {
		return 0
	}
	return 1
}
|
||||
|
||||
// collateUint returns -1, 0 or +1 as x collates before, equal to or after
// y.
func collateUint(x, y uint64) int {
	if x < y {
		return -1
	}
	if x == y {
		return 0
	}
	return 1
}
|
||||
|
||||
// collateIntUint collates a signed x against an unsigned y, returning -1, 0
// or +1.
func collateIntUint(x int64, y uint64) int {
	if y > math.MaxInt64 {
		// y cannot be represented as int64, so any int64 x is smaller.
		return -1
	}

	return collateInt(x, int64(y))
}
|
||||
|
||||
// collateUintInt collates an unsigned x against a signed y by negating the
// symmetric comparison.
func collateUintInt(x uint64, y int64) int {
	return -collateIntUint(y, x)
}
|
||||
|
||||
// collateType canonicalizes i for collation: all signed integer types widen
// to int64, unsigned ones to uint64, float32 to float64 and complex64 to
// complex128; nil, bool, []byte and string pass through unchanged. Any
// other type yields an error.
func collateType(i interface{}) (r interface{}, err error) {
	switch x := i.(type) {
	default:
		return nil, fmt.Errorf("invalid collate type %T", x)
	case nil:
		return i, nil
	case bool:
		return i, nil
	case int8:
		return int64(x), nil
	case int16:
		return int64(x), nil
	case int32:
		return int64(x), nil
	case int64:
		return i, nil
	case int:
		return int64(x), nil
	case uint8:
		return uint64(x), nil
	case uint16:
		return uint64(x), nil
	case uint32:
		return uint64(x), nil
	case uint64:
		return i, nil
	case uint:
		return uint64(x), nil
	case float32:
		return float64(x), nil
	case float64:
		return i, nil
	case complex64:
		return complex128(x), nil
	case complex128:
		return i, nil
	case []byte:
		return i, nil
	case string:
		return i, nil
	}
}
|
||||
|
||||
// Collate collates two arrays of Go predeclared scalar types (and the typeless
|
||||
// nil or []byte). If any other type appears in x or y, Collate will return a
|
||||
// non nil error. String items are collated using strCollate or lexically
|
||||
// byte-wise (as when using Go comparison operators) when strCollate is nil.
|
||||
// []byte items are collated using bytes.Compare.
|
||||
//
|
||||
// Collate returns:
|
||||
//
|
||||
// -1 if x < y
|
||||
// 0 if x == y
|
||||
// +1 if x > y
|
||||
//
|
||||
// The same value as defined above must be returned from strCollate.
|
||||
//
|
||||
// The "outer" ordering is: nil, bool, number, []byte, string. IOW, nil is
|
||||
// "smaller" than anything else except other nil, numbers collate before
|
||||
// []byte, []byte collate before strings, etc.
|
||||
//
|
||||
// Integers and real numbers collate as expected in math. However, complex
|
||||
// numbers are not ordered in Go. Here the ordering is defined: Complex numbers
|
||||
// are in comparison considered first only by their real part. Iff the result
|
||||
// is equality then the imaginary part is used to determine the ordering. In
|
||||
// this "second order" comparing, integers and real numbers are considered as
|
||||
// complex numbers with a zero imaginary part.
|
||||
func Collate(x, y []interface{}, strCollate func(string, string) int) (r int, err error) {
	nx, ny := len(x), len(y)

	// Fast paths for empty operands: an empty slice collates before any
	// non-empty one, and two empty slices are equal.
	switch {
	case nx == 0 && ny != 0:
		return -1, nil
	case nx == 0 && ny == 0:
		return 0, nil
	case nx != 0 && ny == 0:
		return 1, nil
	}

	// Normalize so that x is never the longer operand. r records whether
	// the operands were swapped (-1) or not (+1); every result determined
	// below is multiplied by r (or negated via -r) to undo the swap.
	r = 1
	if nx > ny {
		x, y, r = y, x, -r
	}

	var c int
	for i, xi0 := range x {
		yi0 := y[i]
		// collateType canonicalizes an item into one of the supported
		// kinds (nil, bool, int64, uint64, float64, complex128, []byte,
		// string) or reports an unsupported type.
		xi, err := collateType(xi0)
		if err != nil {
			return 0, err
		}

		yi, err := collateType(yi0)
		if err != nil {
			return 0, err
		}

		// Outer ordering: nil < bool < numbers < []byte < string.
		switch x := xi.(type) {
		default:
			// Unreachable: collateType returns only the kinds handled below.
			panic(fmt.Errorf("internal error: %T", x))

		case nil:
			switch yi.(type) {
			case nil:
				// nop: items equal so far, continue with the next pair
			default:
				return -r, nil
			}

		case bool:
			switch y := yi.(type) {
			case nil:
				return r, nil
			case bool:
				// false collates before true.
				switch {
				case !x && y:
					return -r, nil
				case x == y:
					// nop: equal, continue with the next pair
				case x && !y:
					return r, nil
				}
			default:
				return -r, nil
			}

		case int64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateInt(x, y)
			case uint64:
				c = collateIntUint(x, y)
			case float64:
				c = collateFloat(float64(x), y)
			case complex128:
				// Numbers compare against complex values as complex
				// numbers with a zero imaginary part.
				c = collateComplex(complex(float64(x), 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}

		case uint64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateUintInt(x, y)
			case uint64:
				c = collateUint(x, y)
			case float64:
				c = collateFloat(float64(x), y)
			case complex128:
				c = collateComplex(complex(float64(x), 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}

		case float64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateFloat(x, float64(y))
			case uint64:
				c = collateFloat(x, float64(y))
			case float64:
				c = collateFloat(x, y)
			case complex128:
				c = collateComplex(complex(x, 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}

		case complex128:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateComplex(x, complex(float64(y), 0))
			case uint64:
				c = collateComplex(x, complex(float64(y), 0))
			case float64:
				c = collateComplex(x, complex(y, 0))
			case complex128:
				// Ordered by real part first, imaginary part second (see
				// the function documentation).
				c = collateComplex(x, y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}

		case []byte:
			switch y := yi.(type) {
			case nil, bool, int64, uint64, float64, complex128:
				return r, nil
			case []byte:
				c = bytes.Compare(x, y)
			case string:
				// []byte collates before string.
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}

		case string:
			switch y := yi.(type) {
			case nil, bool, int64, uint64, float64, complex128:
				return r, nil
			case []byte:
				return r, nil
			case string:
				// Use the caller-supplied collator when present, else
				// plain byte-wise (lexical) comparison.
				switch {
				case strCollate != nil:
					c = strCollate(x, y)
				case x < y:
					return -r, nil
				case x == y:
					c = 0
				case x > y:
					return r, nil
				}
			}

			if c != 0 {
				return c * r, nil
			}
		}
	}

	// The shorter operand is a prefix of the longer one: equal lengths mean
	// equal operands, otherwise the shorter (x, after normalization)
	// collates first.
	if nx == ny {
		return 0, nil
	}

	return -r, nil
}
|
364
Godeps/_workspace/src/github.com/cznic/exp/lldb/gb_test.go
generated
vendored
Normal file
364
Godeps/_workspace/src/github.com/cznic/exp/lldb/gb_test.go
generated
vendored
Normal file
@ -0,0 +1,364 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Utilities to encode/decode and collate Go predeclared scalar types. The
|
||||
// encoding format reused the one used by the "encoding/gob" package.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const s256 = "" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef" +
|
||||
"0123456789abcdef"
|
||||
|
||||
func TestEncodeDecodeScalars(t *testing.T) {
|
||||
table := []struct{ v, exp interface{} }{
|
||||
{nil, "00"},
|
||||
{false, "01"},
|
||||
{true, "02"},
|
||||
{math.Float64frombits(0), []byte{gbFloat0}},
|
||||
{17., []byte{gbFloat2, 0x31, 0x40}},
|
||||
{math.Float64frombits(0x4031320000000000), []byte{gbFloat3, 0x32, 0x31, 0x40}},
|
||||
{math.Float64frombits(0x4031323300000000), []byte{gbFloat4, 0x33, 0x32, 0x31, 0x40}},
|
||||
{math.Float64frombits(0x4031323334000000), []byte{gbFloat5, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{math.Float64frombits(0x4031323334350000), []byte{gbFloat6, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{math.Float64frombits(0x4031323334353600), []byte{gbFloat7, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{math.Float64frombits(0x4031323334353637), []byte{gbFloat8, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{0 + 0i, []byte{gbComplex0, gbComplex0}},
|
||||
{17 + 17i, []byte{gbComplex2, 0x31, 0x40, gbComplex2, 0x31, 0x40}},
|
||||
{complex(math.Float64frombits(0x4041420000000000), math.Float64frombits(0x4031320000000000)), []byte{gbComplex3, 0x42, 0x41, 0x40, gbComplex3, 0x32, 0x31, 0x40}},
|
||||
{complex(math.Float64frombits(0x4041424300000000), math.Float64frombits(0x4031323300000000)), []byte{gbComplex4, 0x43, 0x42, 0x41, 0x40, gbComplex4, 0x33, 0x32, 0x31, 0x40}},
|
||||
{complex(math.Float64frombits(0x4041424344000000), math.Float64frombits(0x4031323334000000)), []byte{gbComplex5, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex5, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{complex(math.Float64frombits(0x4041424344450000), math.Float64frombits(0x4031323334350000)), []byte{gbComplex6, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex6, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{complex(math.Float64frombits(0x4041424344454600), math.Float64frombits(0x4031323334353600)), []byte{gbComplex7, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex7, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{complex(math.Float64frombits(0x4041424344454647), math.Float64frombits(0x4031323334353637)), []byte{gbComplex8, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex8, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
|
||||
{[]byte(""), []byte{gbBytes00}},
|
||||
{[]byte("f"), []byte{gbBytes01, 'f'}},
|
||||
{[]byte("fo"), []byte{gbBytes02, 'f', 'o'}},
|
||||
{[]byte("0123456789abcdefx"), []byte{gbBytes17, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x'}},
|
||||
{[]byte("0123456789abcdefxy"), []byte{gbBytes1, 18, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x', 'y'}},
|
||||
{[]byte(s256[:255]), append([]byte{gbBytes1, 0xff}, []byte(s256[:255])...)},
|
||||
{[]byte(s256), append([]byte{gbBytes2, 0x00, 0xff}, []byte(s256)...)},
|
||||
{"", []byte{gbString00}},
|
||||
{"f", []byte{gbString01, 'f'}},
|
||||
{"fo", []byte{gbString02, 'f', 'o'}},
|
||||
{"0123456789abcdefx", []byte{gbString17, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x'}},
|
||||
{"0123456789abcdefxy", []byte{gbString1, 18, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x', 'y'}},
|
||||
{s256[:255], append([]byte{gbString1, 0xff}, []byte(s256[:255])...)},
|
||||
{s256, append([]byte{gbString2, 0x01, 0x00}, []byte(s256)...)},
|
||||
{uint64(0xff), []byte{gbUintP1, 255}},
|
||||
{uint64(0xffff), []byte{gbUintP2, 255, 255}},
|
||||
{uint64(0xffffff), []byte{gbUintP3, 255, 255, 255}},
|
||||
{uint64(0xffffffff), []byte{gbUintP4, 255, 255, 255, 255}},
|
||||
{uint64(0xffffffffff), []byte{gbUintP5, 255, 255, 255, 255, 255}},
|
||||
{uint64(0xffffffffffff), []byte{gbUintP6, 255, 255, 255, 255, 255, 255}},
|
||||
{uint64(0xffffffffffffff), []byte{gbUintP7, 255, 255, 255, 255, 255, 255, 255}},
|
||||
{uint64(0xffffffffffffffff), []byte{gbUintP8, 255, 255, 255, 255, 255, 255, 255, 255}},
|
||||
{int64(math.MinInt64), []byte{gbIntM8, 128, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{-int64(0x100000000000000), []byte{gbIntM7, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{-int64(0x1000000000000), []byte{gbIntM6, 0, 0, 0, 0, 0, 0}},
|
||||
{-int64(0x10000000000), []byte{gbIntM5, 0, 0, 0, 0, 0}},
|
||||
{-int64(0x100000000), []byte{gbIntM4, 0, 0, 0, 0}},
|
||||
{-int64(0x1000000), []byte{gbIntM3, 0, 0, 0}},
|
||||
{-int64(0x10000), []byte{gbIntM2, 0, 0}},
|
||||
{-int64(0x100), []byte{gbIntM1, 0}},
|
||||
{-int64(0xff), []byte{gbIntM1, 1}},
|
||||
{-int64(1), []byte{gbIntM1, 255}},
|
||||
{int64(gbIntMax + 1), []byte{gbIntP1, gbIntMax + 1}},
|
||||
{int64(0xff), []byte{gbIntP1, 255}},
|
||||
{int64(0xffff), []byte{gbIntP2, 255, 255}},
|
||||
{int64(0xffffff), []byte{gbIntP3, 255, 255, 255}},
|
||||
{int64(0xffffffff), []byte{gbIntP4, 255, 255, 255, 255}},
|
||||
{int64(0xffffffffff), []byte{gbIntP5, 255, 255, 255, 255, 255}},
|
||||
{int64(0xffffffffffff), []byte{gbIntP6, 255, 255, 255, 255, 255, 255}},
|
||||
{int64(0xffffffffffffff), []byte{gbIntP7, 255, 255, 255, 255, 255, 255, 255}},
|
||||
{int64(0x7fffffffffffffff), []byte{gbIntP8, 127, 255, 255, 255, 255, 255, 255, 255}},
|
||||
{int64(0), []byte{0 + gbInt0}},
|
||||
{int64(1), []byte{1 + gbInt0}},
|
||||
{int64(2), []byte{2 + gbInt0}},
|
||||
{int64(gbIntMax - 2), "fd"},
|
||||
{int64(gbIntMax - 1), "fe"},
|
||||
{int64(gbIntMax), "ff"},
|
||||
}
|
||||
|
||||
for i, v := range table {
|
||||
g, err := EncodeScalars(v.v)
|
||||
if err != nil {
|
||||
t.Fatal(i, err)
|
||||
}
|
||||
|
||||
var e []byte
|
||||
switch x := v.exp.(type) {
|
||||
case string:
|
||||
e = s2b(x)
|
||||
case []byte:
|
||||
e = x
|
||||
}
|
||||
|
||||
if !bytes.Equal(g, e) {
|
||||
t.Fatalf("%d %v\n|% 02x|\n|% 02x|", i, v.v, g, e)
|
||||
}
|
||||
|
||||
t.Logf("%#v |% 02x|", v.v, g)
|
||||
|
||||
dec, err := DecodeScalars(g)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(dec), 1; g != e {
|
||||
t.Fatalf("%d %d %#v", g, e, dec)
|
||||
}
|
||||
|
||||
if g, ok := dec[0].([]byte); ok {
|
||||
if e := v.v.([]byte); !bytes.Equal(g, e) {
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if g, e := dec[0], v.v; g != e {
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// strcmp is a three-way lexical string comparison used by the tests as a
// custom string collator: it returns -1 if a < b, 0 if a == b and +1 if a > b.
func strcmp(a, b string) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}
|
||||
|
||||
func TestCollateScalars(t *testing.T) {
|
||||
// all cases must return -1
|
||||
table := []struct{ x, y []interface{} }{
|
||||
{[]interface{}{}, []interface{}{1}},
|
||||
{[]interface{}{1}, []interface{}{2}},
|
||||
{[]interface{}{1, 2}, []interface{}{2, 3}},
|
||||
|
||||
{[]interface{}{nil}, []interface{}{nil, true}},
|
||||
{[]interface{}{nil}, []interface{}{false}},
|
||||
{[]interface{}{nil}, []interface{}{nil, 1}},
|
||||
{[]interface{}{nil}, []interface{}{1}},
|
||||
{[]interface{}{nil}, []interface{}{nil, uint(1)}},
|
||||
{[]interface{}{nil}, []interface{}{uint(1)}},
|
||||
{[]interface{}{nil}, []interface{}{nil, 3.14}},
|
||||
{[]interface{}{nil}, []interface{}{3.14}},
|
||||
{[]interface{}{nil}, []interface{}{nil, 3.14 + 1i}},
|
||||
{[]interface{}{nil}, []interface{}{3.14 + 1i}},
|
||||
{[]interface{}{nil}, []interface{}{nil, []byte("foo")}},
|
||||
{[]interface{}{nil}, []interface{}{[]byte("foo")}},
|
||||
{[]interface{}{nil}, []interface{}{nil, "foo"}},
|
||||
{[]interface{}{nil}, []interface{}{"foo"}},
|
||||
|
||||
{[]interface{}{false}, []interface{}{false, false}},
|
||||
{[]interface{}{false}, []interface{}{false, true}},
|
||||
{[]interface{}{false}, []interface{}{true}},
|
||||
{[]interface{}{false}, []interface{}{false, 1}},
|
||||
{[]interface{}{false}, []interface{}{1}},
|
||||
{[]interface{}{false}, []interface{}{false, uint(1)}},
|
||||
{[]interface{}{false}, []interface{}{uint(1)}},
|
||||
{[]interface{}{false}, []interface{}{false, 1.5}},
|
||||
{[]interface{}{false}, []interface{}{1.5}},
|
||||
{[]interface{}{false}, []interface{}{false, 1.5 + 3i}},
|
||||
{[]interface{}{false}, []interface{}{1.5 + 3i}},
|
||||
{[]interface{}{false}, []interface{}{false, []byte("foo")}},
|
||||
{[]interface{}{false}, []interface{}{[]byte("foo")}},
|
||||
{[]interface{}{false}, []interface{}{false, "foo"}},
|
||||
{[]interface{}{false}, []interface{}{"foo"}},
|
||||
|
||||
{[]interface{}{1}, []interface{}{1, 2}},
|
||||
{[]interface{}{1}, []interface{}{1, 1}},
|
||||
{[]interface{}{1}, []interface{}{1, uint(2)}},
|
||||
{[]interface{}{1}, []interface{}{uint(2)}},
|
||||
{[]interface{}{1}, []interface{}{1, 1.1}},
|
||||
{[]interface{}{1}, []interface{}{1.1}},
|
||||
{[]interface{}{1}, []interface{}{1, 1.1 + 2i}},
|
||||
{[]interface{}{1}, []interface{}{1.1 + 2i}},
|
||||
{[]interface{}{1}, []interface{}{1, []byte("foo")}},
|
||||
{[]interface{}{1}, []interface{}{[]byte("foo")}},
|
||||
{[]interface{}{1}, []interface{}{1, "foo"}},
|
||||
{[]interface{}{1}, []interface{}{"foo"}},
|
||||
|
||||
{[]interface{}{uint(1)}, []interface{}{uint(1), uint(1)}},
|
||||
{[]interface{}{uint(1)}, []interface{}{uint(2)}},
|
||||
{[]interface{}{uint(1)}, []interface{}{uint(1), 2.}},
|
||||
{[]interface{}{uint(1)}, []interface{}{2.}},
|
||||
{[]interface{}{uint(1)}, []interface{}{uint(1), 2. + 0i}},
|
||||
{[]interface{}{uint(1)}, []interface{}{2. + 0i}},
|
||||
{[]interface{}{uint(1)}, []interface{}{uint(1), []byte("foo")}},
|
||||
{[]interface{}{uint(1)}, []interface{}{[]byte("foo")}},
|
||||
{[]interface{}{uint(1)}, []interface{}{uint(1), "foo"}},
|
||||
{[]interface{}{uint(1)}, []interface{}{"foo"}},
|
||||
|
||||
{[]interface{}{1.}, []interface{}{1., 1}},
|
||||
{[]interface{}{1.}, []interface{}{2}},
|
||||
{[]interface{}{1.}, []interface{}{1., uint(1)}},
|
||||
{[]interface{}{1.}, []interface{}{uint(2)}},
|
||||
{[]interface{}{1.}, []interface{}{1., 1.}},
|
||||
{[]interface{}{1.}, []interface{}{1.1}},
|
||||
{[]interface{}{1.}, []interface{}{1., []byte("foo")}},
|
||||
{[]interface{}{1.}, []interface{}{[]byte("foo")}},
|
||||
{[]interface{}{1.}, []interface{}{1., "foo"}},
|
||||
{[]interface{}{1.}, []interface{}{"foo"}},
|
||||
|
||||
{[]interface{}{1 + 2i}, []interface{}{1 + 2i, 1}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{2}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{1 + 2i, uint(1)}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{uint(2)}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{1 + 2i, 1.1}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{1.1}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{1 + 2i, []byte("foo")}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{[]byte("foo")}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{1 + 2i, "foo"}},
|
||||
{[]interface{}{1 + 2i}, []interface{}{"foo"}},
|
||||
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{[]byte("bar"), []byte("bar")}},
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{[]byte("foo")}},
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{[]byte("c")}},
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{[]byte("bas")}},
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{[]byte("bara")}},
|
||||
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{"bap"}},
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{"bar"}},
|
||||
{[]interface{}{[]byte("bar")}, []interface{}{"bas"}},
|
||||
|
||||
{[]interface{}{"bar"}, []interface{}{"bar", "bar"}},
|
||||
{[]interface{}{"bar"}, []interface{}{"foo"}},
|
||||
{[]interface{}{"bar"}, []interface{}{"c"}},
|
||||
{[]interface{}{"bar"}, []interface{}{"bas"}},
|
||||
{[]interface{}{"bar"}, []interface{}{"bara"}},
|
||||
|
||||
{[]interface{}{1 + 2i}, []interface{}{1 + 3i}},
|
||||
{[]interface{}{int64(math.MaxInt64)}, []interface{}{uint64(math.MaxInt64 + 1)}},
|
||||
{[]interface{}{int8(1)}, []interface{}{int16(2)}},
|
||||
{[]interface{}{int32(1)}, []interface{}{uint8(2)}},
|
||||
{[]interface{}{uint16(1)}, []interface{}{uint32(2)}},
|
||||
{[]interface{}{float32(1)}, []interface{}{complex(float32(2), 0)}},
|
||||
|
||||
// resolved bugs
|
||||
{[]interface{}{"Customer"}, []interface{}{"Date"}},
|
||||
{[]interface{}{"Customer"}, []interface{}{"Items", 1, "Quantity"}},
|
||||
}
|
||||
|
||||
more := []interface{}{42, nil, 1, uint(2), 3.0, 4 + 5i, "..."}
|
||||
|
||||
collate := func(x, y []interface{}, strCollate func(string, string) int) (r int) {
|
||||
var err error
|
||||
r, err = Collate(x, y, strCollate)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
for _, scf := range []func(string, string) int{nil, strcmp} {
|
||||
for _, prefix := range more {
|
||||
for i, test := range table {
|
||||
var x, y []interface{}
|
||||
if prefix != 42 {
|
||||
x = append(x, prefix)
|
||||
y = append(y, prefix)
|
||||
}
|
||||
x = append(x, test.x...)
|
||||
y = append(y, test.y...)
|
||||
|
||||
// cmp(x, y) == -1
|
||||
if g, e := collate(x, y, scf), -1; g != e {
|
||||
t.Fatal(i, g, e, x, y)
|
||||
}
|
||||
|
||||
// cmp(y, x) == 1
|
||||
if g, e := collate(y, x, scf), 1; g != e {
|
||||
t.Fatal(i, g, e, y, x)
|
||||
}
|
||||
|
||||
src := x
|
||||
for ix := len(src) - 1; ix > 0; ix-- {
|
||||
if g, e := collate(src[:ix], src[:ix], scf), 0; g != e {
|
||||
t.Fatal(ix, g, e)
|
||||
}
|
||||
|
||||
if g, e := collate(src[:ix], src, scf), -1; g != e {
|
||||
t.Fatal(ix, g, e)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
src = y
|
||||
for ix := len(src) - 1; ix > 0; ix-- {
|
||||
if g, e := collate(src[:ix], src[:ix], scf), 0; g != e {
|
||||
t.Fatal(ix, g, e)
|
||||
}
|
||||
|
||||
if g, e := collate(src[:ix], src, scf), -1; g != e {
|
||||
t.Fatal(ix, g, e)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodingBug(t *testing.T) {
|
||||
bits := uint64(0)
|
||||
for i := 0; i <= 64; i++ {
|
||||
encoded, err := EncodeScalars(math.Float64frombits(bits))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Logf("bits %016x, enc |% x|", bits, encoded)
|
||||
decoded, err := DecodeScalars(encoded)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(decoded), 1; g != e {
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
|
||||
f, ok := decoded[0].(float64)
|
||||
if !ok {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := math.Float64bits(f), bits; g != e {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log(f)
|
||||
|
||||
bits >>= 1
|
||||
bits |= 1 << 63
|
||||
}
|
||||
}
|
155
Godeps/_workspace/src/github.com/cznic/exp/lldb/lldb.go
generated
vendored
Normal file
155
Godeps/_workspace/src/github.com/cznic/exp/lldb/lldb.go
generated
vendored
Normal file
@ -0,0 +1,155 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package lldb (WIP) implements a low level database engine. The database
|
||||
// model used could be considered a specific implementation of some small(est)
|
||||
// intersection of models listed in [1]. As a settled term is lacking, it'll be
|
||||
// called here a 'Virtual memory model' (VMM).
|
||||
//
|
||||
// Experimental release notes
|
||||
//
|
||||
// This is an experimental release. Don't open a DB from two applications or
|
||||
// two instances of an application - it will get corrupted (no file locking is
|
||||
// implemented and this task is delegated to lldb's clients).
|
||||
//
|
||||
// WARNING: THE LLDB API IS SUBJECT TO CHANGE.
|
||||
//
|
||||
// Filers
|
||||
//
|
||||
// A Filer is an abstraction of storage. A Filer may be a part of some process'
|
||||
// virtual address space, an OS file, a networked, remote file etc. Persistence
|
||||
// of the storage is optional, opaque to VMM and it is specific to a concrete
|
||||
// Filer implementation.
|
||||
//
|
||||
// Space management
|
||||
//
|
||||
// Mechanism to allocate, reallocate (resize), deallocate (and later reclaim
|
||||
// the unused) contiguous parts of a Filer, called blocks. Blocks are
|
||||
// identified and referred to by a handle, an int64.
|
||||
//
|
||||
// BTrees
|
||||
//
|
||||
// In addition to the VMM like services, lldb provides volatile and
|
||||
// non-volatile BTrees. Keys and values of a BTree are limited in size to 64kB
|
||||
// each (a bit more actually). Support for larger keys/values, if desired, can
|
||||
// be built atop a BTree to certain limits.
|
||||
//
|
||||
// Handles vs pointers
|
||||
//
|
||||
// A handle is the abstracted storage counterpart of a memory address. There
|
||||
// is one fundamental difference, though. Resizing a block never results in a
|
||||
// change to the handle which refers to the resized block, so a handle is more
|
||||
// akin to an unique numeric id/key. Yet it shares one property of pointers -
|
||||
// handles can be associated again with blocks after the original handle block
|
||||
// was deallocated. In other words, a handle uniqueness domain is the state of
|
||||
// the database and is not something comparable to e.g. an ever growing
|
||||
// numbering sequence.
|
||||
//
|
||||
// Also, as with memory pointers, dangling handles can be created and blocks
|
||||
// overwritten when such handles are used. Using a zero handle to refer to a
|
||||
// block will not panic; however, the resulting error is effectively the same
|
||||
// exceptional situation as dereferencing a nil pointer.
|
||||
//
|
||||
// Blocks
|
||||
//
|
||||
// Allocated/used blocks, are limited in size to only a little bit more than
|
||||
// 64kB. Bigger semantic entities/structures must be built in lldb's client
|
||||
// code. The content of a block has no semantics attached, it's only a fully
|
||||
// opaque `[]byte`.
|
||||
//
|
||||
// Scalars
|
||||
//
|
||||
// Use of "scalars" applies to EncodeScalars, DecodeScalars and Collate. Those
|
||||
// first two "to bytes" and "from bytes" functions are suggested for handling
|
||||
// multi-valued Allocator content items and/or keys/values of BTrees (using
|
||||
// Collate for keys). Types called "scalar" are:
|
||||
//
|
||||
// nil (the typeless one)
|
||||
// bool
|
||||
// all integral types: [u]int8, [u]int16, [u]int32, [u]int, [u]int64
|
||||
// all floating point types: float32, float64
|
||||
// all complex types: complex64, complex128
|
||||
// []byte (64kB max)
|
||||
//	string (64kB max)
|
||||
//
|
||||
// Specific implementations
|
||||
//
|
||||
// Included are concrete implementations of some of the VMM interfaces included
|
||||
// to ease serving simple client code or for testing and possibly as an
|
||||
// example. More details in the documentation of such implementations.
|
||||
//
|
||||
// [1]: http://en.wikipedia.org/wiki/Database_model
|
||||
package lldb
|
||||
|
||||
// Constants of the on-disk block/file format. The values are part of the
// format; changing any of them breaks compatibility with existing files.
const (
	fltSz            = 0x70      // size of the FLT area at the start of the file
	maxShort         = 251       // largest content length encodable without the extra 2-byte length field (see n2atoms/n2padding)
	maxRq            = 65787     // maximum content length of a single block
	maxFLTRq         = 4112      // maximum block size in atoms, i.e. n2atoms(maxRq)
	maxHandle        = 1<<56 - 1 // handles are persisted in 7 bytes (see b2h/h2b)
	atomLen          = 16        // size of one atom, the basic allocation unit, in bytes
	tagUsedLong      = 0xfc      // block tag byte: used, long form
	tagUsedRelocated = 0xfd      // block tag byte: used, relocated
	tagFreeShort     = 0xfe      // block tag byte: free, short form
	tagFreeLong      = 0xff      // block tag byte: free, long form
	tagNotCompressed = 0         // content flag: stored verbatim
	tagCompressed    = 1         // content flag: stored compressed
)
|
||||
|
||||
// Content size n -> blocksize in atoms.
|
||||
func n2atoms(n int) int {
|
||||
if n > maxShort {
|
||||
n += 2
|
||||
}
|
||||
return (n+1)/16 + 1
|
||||
}
|
||||
|
||||
// Content size n -> number of padding zeros.
|
||||
func n2padding(n int) int {
|
||||
if n > maxShort {
|
||||
n += 2
|
||||
}
|
||||
return 15 - (n+1)&15
|
||||
}
|
||||
|
||||
// h2off converts a handle to the corresponding file offset.
func h2off(h int64) int64 {
	return (h + 6) * 16
}
|
||||
// off2h converts a file offset back to the corresponding handle; it is the
// inverse of h2off for atom-aligned offsets.
func off2h(off int64) int64 {
	return off/16 - 6
}
|
||||
|
||||
// b2h decodes the big-endian 7-byte handle stored at the start of b.
// Bytes beyond the first seven are ignored.
func b2h(b []byte) int64 {
	var h int64
	for i := 0; i < 7; i++ {
		h = h<<8 | int64(b[i])
	}
	return h
}
|
||||
|
||||
// h2b encodes the low 7 bytes of handle h big-endian into the start of b
// and returns b.
func h2b(b []byte, h int64) []byte {
	for i := 0; i < 7; i++ {
		b[i] = byte(h >> uint(48-8*i))
	}
	return b
}
|
||||
|
||||
// n2m maps a long used block's content length n (must be in [252, 65787])
// to the M field stored in the block, reducing it modulo 64Ki.
func n2m(n int) (m int) {
	const modulus = 0x10000
	m = n % modulus
	return
}
|
||||
|
||||
// Long used block M (must be in [0, 65535]) field to content length N.
|
||||
func m2n(m int) (n int) {
|
||||
if m <= maxShort {
|
||||
m += 0x10000
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// bpack returns a packed copy of a when its backing array has spare
// capacity; otherwise a itself is returned unchanged.
func bpack(a []byte) []byte {
	if cap(a) == len(a) {
		return a
	}
	return append([]byte(nil), a...)
}
|
217
Godeps/_workspace/src/github.com/cznic/exp/lldb/lldb_test.go
generated
vendored
Normal file
217
Godeps/_workspace/src/github.com/cznic/exp/lldb/lldb_test.go
generated
vendored
Normal file
@ -0,0 +1,217 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// dbg prints a formatted debug message to stdout, prefixed with the
// caller's file base name and line number.
var dbg = func(format string, args ...interface{}) {
	_, file, line, _ := runtime.Caller(1)
	fmt.Printf("%s:%d: ", path.Base(file), line)
	fmt.Printf(format+"\n", args...)
}
|
||||
|
||||
// use is a no-op sink; passing values through it marks them as used.
func use(...interface{}) {}
|
||||
|
||||
func TestN2Atoms(t *testing.T) {
|
||||
tab := []struct{ n, a int }{
|
||||
{0, 1},
|
||||
{1, 1},
|
||||
{2, 1},
|
||||
{3, 1},
|
||||
{4, 1},
|
||||
{5, 1},
|
||||
{6, 1},
|
||||
{7, 1},
|
||||
{8, 1},
|
||||
{9, 1},
|
||||
{10, 1},
|
||||
{11, 1},
|
||||
{12, 1},
|
||||
{13, 1},
|
||||
{14, 1},
|
||||
|
||||
{15, 2},
|
||||
{16, 2},
|
||||
{17, 2},
|
||||
{18, 2},
|
||||
{19, 2},
|
||||
{20, 2},
|
||||
{21, 2},
|
||||
{22, 2},
|
||||
{23, 2},
|
||||
{24, 2},
|
||||
{25, 2},
|
||||
{26, 2},
|
||||
{27, 2},
|
||||
{28, 2},
|
||||
{29, 2},
|
||||
{30, 2},
|
||||
|
||||
{31, 3},
|
||||
|
||||
{252, 16},
|
||||
{253, 17},
|
||||
{254, 17},
|
||||
{255, 17},
|
||||
{256, 17},
|
||||
{257, 17},
|
||||
{258, 17},
|
||||
{259, 17},
|
||||
{260, 17},
|
||||
{261, 17},
|
||||
{262, 17},
|
||||
{263, 17},
|
||||
{264, 17},
|
||||
{265, 17},
|
||||
{266, 17},
|
||||
{267, 17},
|
||||
{268, 17},
|
||||
{269, 18},
|
||||
{65532, 4096},
|
||||
{65533, 4097},
|
||||
{65787, 4112},
|
||||
}
|
||||
|
||||
for i, test := range tab {
|
||||
if g, e := n2atoms(test.n), test.a; g != e {
|
||||
t.Errorf("(%d) %d %d %d", i, test.n, g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestN2Padding(t *testing.T) {
|
||||
tab := []struct{ n, p int }{
|
||||
{0, 14},
|
||||
{1, 13},
|
||||
{2, 12},
|
||||
{3, 11},
|
||||
{4, 10},
|
||||
{5, 9},
|
||||
{6, 8},
|
||||
{7, 7},
|
||||
{8, 6},
|
||||
{9, 5},
|
||||
{10, 4},
|
||||
{11, 3},
|
||||
{12, 2},
|
||||
{13, 1},
|
||||
{14, 0},
|
||||
|
||||
{15, 15},
|
||||
{16, 14},
|
||||
{17, 13},
|
||||
{18, 12},
|
||||
{19, 11},
|
||||
{20, 10},
|
||||
{21, 9},
|
||||
{22, 8},
|
||||
{23, 7},
|
||||
{24, 6},
|
||||
{25, 5},
|
||||
{26, 4},
|
||||
{27, 3},
|
||||
{28, 2},
|
||||
{29, 1},
|
||||
{30, 0},
|
||||
|
||||
{31, 15},
|
||||
|
||||
{252, 0},
|
||||
{253, 15},
|
||||
{254, 14},
|
||||
{255, 13},
|
||||
{256, 12},
|
||||
{257, 11},
|
||||
{258, 10},
|
||||
{259, 9},
|
||||
{260, 8},
|
||||
{261, 7},
|
||||
{262, 6},
|
||||
{263, 5},
|
||||
{264, 4},
|
||||
{265, 3},
|
||||
{266, 2},
|
||||
{267, 1},
|
||||
{268, 0},
|
||||
{269, 15},
|
||||
}
|
||||
|
||||
for i, test := range tab {
|
||||
if g, e := n2padding(test.n), test.p; g != e {
|
||||
t.Errorf("(%d) %d %d %d", i, test.n, g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestH2Off checks the handle <-> file offset mapping in both directions
// around the start of the data area.
func TestH2Off(t *testing.T) {
	// Handle 1 maps to the first offset after the FLT; each further handle
	// advances by one atom (16 bytes).
	tab := []struct{ h, off int64 }{
		{-1, fltSz - 32},
		{0, fltSz - 16},
		{1, fltSz + 0},
		{2, fltSz + 16},
		{3, fltSz + 32},
	}

	for i, test := range tab {
		if g, e := h2off(test.h), test.off; g != e {
			t.Error("h2off", i, g, e)
		}
		// off2h must be the exact inverse.
		if g, e := off2h(test.off), test.h; g != e {
			t.Error("off2h", i, g, e)
		}
	}
}
|
||||
|
||||
func TestB2H(t *testing.T) {
|
||||
tab := []struct {
|
||||
b []byte
|
||||
h int64
|
||||
}{
|
||||
{[]byte{0, 0, 0, 0, 0, 0, 0}, 0},
|
||||
{[]byte{0, 0, 0, 0, 0, 0, 1}, 1},
|
||||
{[]byte{0, 0, 0, 0, 0, 0, 1, 2}, 1},
|
||||
{[]byte{0, 0, 0, 0, 0, 0x32, 0x10}, 0x3210},
|
||||
{[]byte{0, 0, 0, 0, 0x54, 0x32, 0x10}, 0x543210},
|
||||
{[]byte{0, 0, 0, 0x76, 0x54, 0x32, 0x10}, 0x76543210},
|
||||
{[]byte{0, 0, 0x98, 0x76, 0x54, 0x32, 0x10}, 0x9876543210},
|
||||
{[]byte{0, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}, 0xba9876543210},
|
||||
{[]byte{0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}, 0xdcba9876543210},
|
||||
}
|
||||
|
||||
for i, test := range tab {
|
||||
if g, e := b2h(test.b), test.h; g != e {
|
||||
t.Errorf("b2h: %d %#8x %#8x", i, g, e)
|
||||
}
|
||||
var g [7]byte
|
||||
h2b(g[:], test.h)
|
||||
if e := test.b; !bytes.Equal(g[:], e[:7]) {
|
||||
t.Errorf("b2h: %d g: % 0x e: % 0x", i, g, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func s2b(s string) []byte {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
s = strings.Replace(s, " ", "", -1)
|
||||
if n := len(s) & 1; n != 0 {
|
||||
panic(n)
|
||||
}
|
||||
b, err := hex.DecodeString(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
344
Godeps/_workspace/src/github.com/cznic/exp/lldb/memfiler.go
generated
vendored
Normal file
344
Godeps/_workspace/src/github.com/cznic/exp/lldb/memfiler.go
generated
vendored
Normal file
@ -0,0 +1,344 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// A memory-only implementation of Filer.
|
||||
|
||||
/*
|
||||
|
||||
pgBits: 8
|
||||
BenchmarkMemFilerWrSeq 100000 19430 ns/op 1646.93 MB/s
|
||||
BenchmarkMemFilerRdSeq 100000 17390 ns/op 1840.13 MB/s
|
||||
BenchmarkMemFilerWrRand 1000000 1903 ns/op 133.94 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 1153 ns/op 221.16 MB/s
|
||||
|
||||
pgBits: 9
|
||||
BenchmarkMemFilerWrSeq 100000 16195 ns/op 1975.80 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 13011 ns/op 2459.39 MB/s
|
||||
BenchmarkMemFilerWrRand 1000000 2248 ns/op 227.28 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 1177 ns/op 433.94 MB/s
|
||||
|
||||
pgBits: 10
|
||||
BenchmarkMemFilerWrSeq 100000 16169 ns/op 1979.04 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 12673 ns/op 2524.91 MB/s
|
||||
BenchmarkMemFilerWrRand 1000000 5550 ns/op 184.30 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 1699 ns/op 601.79 MB/s
|
||||
|
||||
pgBits: 11
|
||||
BenchmarkMemFilerWrSeq 100000 13449 ns/op 2379.31 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 12058 ns/op 2653.80 MB/s
|
||||
BenchmarkMemFilerWrRand 500000 4335 ns/op 471.47 MB/s
|
||||
BenchmarkMemFilerRdRand 1000000 2843 ns/op 719.47 MB/s
|
||||
|
||||
pgBits: 12
|
||||
BenchmarkMemFilerWrSeq 200000 11976 ns/op 2672.00 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 12255 ns/op 2611.06 MB/s
|
||||
BenchmarkMemFilerWrRand 200000 8058 ns/op 507.14 MB/s
|
||||
BenchmarkMemFilerRdRand 500000 4365 ns/op 936.15 MB/s
|
||||
|
||||
pgBits: 13
|
||||
BenchmarkMemFilerWrSeq 200000 10852 ns/op 2948.69 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11561 ns/op 2767.77 MB/s
|
||||
BenchmarkMemFilerWrRand 200000 9748 ns/op 840.15 MB/s
|
||||
BenchmarkMemFilerRdRand 500000 7236 ns/op 1131.59 MB/s
|
||||
|
||||
pgBits: 14
|
||||
BenchmarkMemFilerWrSeq 200000 10328 ns/op 3098.12 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11292 ns/op 2833.66 MB/s
|
||||
BenchmarkMemFilerWrRand 100000 16768 ns/op 978.75 MB/s
|
||||
BenchmarkMemFilerRdRand 200000 13033 ns/op 1258.43 MB/s
|
||||
|
||||
pgBits: 15
|
||||
BenchmarkMemFilerWrSeq 200000 10309 ns/op 3103.93 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11126 ns/op 2876.12 MB/s
|
||||
BenchmarkMemFilerWrRand 50000 31985 ns/op 1021.74 MB/s
|
||||
BenchmarkMemFilerRdRand 100000 25217 ns/op 1297.65 MB/s
|
||||
|
||||
pgBits: 16
|
||||
BenchmarkMemFilerWrSeq 200000 10324 ns/op 3099.45 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11201 ns/op 2856.80 MB/s
|
||||
BenchmarkMemFilerWrRand 20000 55226 ns/op 1184.76 MB/s
|
||||
BenchmarkMemFilerRdRand 50000 48316 ns/op 1355.16 MB/s
|
||||
|
||||
pgBits: 17
|
||||
BenchmarkMemFilerWrSeq 200000 10377 ns/op 3083.53 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11018 ns/op 2904.18 MB/s
|
||||
BenchmarkMemFilerWrRand 10000 143425 ns/op 913.12 MB/s
|
||||
BenchmarkMemFilerRdRand 20000 95267 ns/op 1376.99 MB/s
|
||||
|
||||
pgBits: 18
|
||||
BenchmarkMemFilerWrSeq 200000 10312 ns/op 3102.96 MB/s
|
||||
BenchmarkMemFilerRdSeq 200000 11069 ns/op 2890.84 MB/s
|
||||
BenchmarkMemFilerWrRand 5000 280910 ns/op 934.14 MB/s
|
||||
BenchmarkMemFilerRdRand 10000 188500 ns/op 1388.17 MB/s
|
||||
|
||||
*/
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
const (
|
||||
pgBits = 16
|
||||
pgSize = 1 << pgBits
|
||||
pgMask = pgSize - 1
|
||||
)
|
||||
|
||||
var _ Filer = &MemFiler{} // Ensure MemFiler is a Filer.
|
||||
|
||||
type memFilerMap map[int64]*[pgSize]byte
|
||||
|
||||
// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and
|
||||
// Rollback as no-ops. MemFiler is not automatically persistent, but it has
|
||||
// ReadFrom and WriteTo methods.
|
||||
type MemFiler struct {
|
||||
m memFilerMap
|
||||
nest int
|
||||
size int64
|
||||
}
|
||||
|
||||
// NewMemFiler returns a new MemFiler.
|
||||
func NewMemFiler() *MemFiler {
|
||||
return &MemFiler{m: memFilerMap{}}
|
||||
}
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *MemFiler) BeginUpdate() error {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *MemFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *MemFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ": EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *MemFiler) Name() string {
|
||||
return fmt.Sprintf("%p.memfiler", f)
|
||||
}
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *MemFiler) PunchHole(off, size int64) (err error) {
|
||||
if off < 0 {
|
||||
return &ErrINVAL{f.Name() + ": PunchHole off", off}
|
||||
}
|
||||
|
||||
if size < 0 || off+size > f.size {
|
||||
return &ErrINVAL{f.Name() + ": PunchHole size", size}
|
||||
}
|
||||
|
||||
first := off >> pgBits
|
||||
if off&pgMask != 0 {
|
||||
first++
|
||||
}
|
||||
off += size - 1
|
||||
last := off >> pgBits
|
||||
if off&pgMask != 0 {
|
||||
last--
|
||||
}
|
||||
if limit := f.size >> pgBits; last > limit {
|
||||
last = limit
|
||||
}
|
||||
for pg := first; pg <= last; pg++ {
|
||||
delete(f.m, pg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var zeroPage [pgSize]byte
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
avail := f.size - off
|
||||
pgI := off >> pgBits
|
||||
pgO := int(off & pgMask)
|
||||
rem := len(b)
|
||||
if int64(rem) >= avail {
|
||||
rem = int(avail)
|
||||
err = io.EOF
|
||||
}
|
||||
for rem != 0 && avail > 0 {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &zeroPage
|
||||
}
|
||||
nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:])
|
||||
pgI++
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
n += nc
|
||||
b = b[nc:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the
|
||||
// number of bytes read from 'r'.
|
||||
func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
if err = f.Truncate(0); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
b [pgSize]byte
|
||||
rn int
|
||||
off int64
|
||||
)
|
||||
|
||||
var rerr error
|
||||
for rerr == nil {
|
||||
if rn, rerr = r.Read(b[:]); rn != 0 {
|
||||
f.WriteAt(b[:rn], off)
|
||||
off += int64(rn)
|
||||
n += int64(rn)
|
||||
}
|
||||
}
|
||||
if !fileutil.IsEOF(rerr) {
|
||||
err = rerr
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *MemFiler) Rollback() (err error) { return }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *MemFiler) Size() (int64, error) {
|
||||
return f.size, nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *MemFiler) Sync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *MemFiler) Truncate(size int64) (err error) {
|
||||
switch {
|
||||
case size < 0:
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
case size == 0:
|
||||
f.m = memFilerMap{}
|
||||
f.size = 0
|
||||
return
|
||||
}
|
||||
|
||||
first := size >> pgBits
|
||||
if size&pgMask != 0 {
|
||||
first++
|
||||
}
|
||||
last := f.size >> pgBits
|
||||
if f.size&pgMask != 0 {
|
||||
last++
|
||||
}
|
||||
for ; first < last; first++ {
|
||||
delete(f.m, first)
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return
|
||||
}
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
pgI := off >> pgBits
|
||||
pgO := int(off & pgMask)
|
||||
n = len(b)
|
||||
rem := n
|
||||
var nc int
|
||||
for rem != 0 {
|
||||
if pgO == 0 && rem >= pgSize && bytes.Equal(b[:pgSize], zeroPage[:]) {
|
||||
delete(f.m, pgI)
|
||||
nc = pgSize
|
||||
} else {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = new([pgSize]byte)
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
nc = copy((*pg)[pgO:], b)
|
||||
}
|
||||
pgI++
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
b = b[nc:]
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, off+int64(n))
|
||||
return
|
||||
}
|
||||
|
||||
// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also
|
||||
// an io.WriterAt then WriteTo may attempt to _not_ write any big, for some
|
||||
// value of big, runs of zeros, i.e. it will attempt to punch holes, where
|
||||
// possible, in `w` if that happens to be a freshly created or to zero length
|
||||
// truncated OS file. 'n' reports the number of bytes written to 'w'.
|
||||
func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) {
|
||||
var (
|
||||
b [pgSize]byte
|
||||
wn, rn int
|
||||
off int64
|
||||
rerr error
|
||||
)
|
||||
|
||||
if wa, ok := w.(io.WriterAt); ok {
|
||||
lastPgI := f.size >> pgBits
|
||||
for pgI := int64(0); pgI <= lastPgI; pgI++ {
|
||||
sz := pgSize
|
||||
if pgI == lastPgI {
|
||||
sz = int(f.size & pgMask)
|
||||
}
|
||||
pg := f.m[pgI]
|
||||
if pg != nil {
|
||||
wn, err = wa.WriteAt(pg[:sz], off)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
n += int64(wn)
|
||||
off += int64(sz)
|
||||
if wn != sz {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var werr error
|
||||
for rerr == nil {
|
||||
if rn, rerr = f.ReadAt(b[:], off); rn != 0 {
|
||||
off += int64(rn)
|
||||
if wn, werr = w.Write(b[:rn]); werr != nil {
|
||||
return n, werr
|
||||
}
|
||||
|
||||
n += int64(wn)
|
||||
}
|
||||
}
|
||||
if !fileutil.IsEOF(rerr) {
|
||||
err = rerr
|
||||
}
|
||||
return
|
||||
}
|
132
Godeps/_workspace/src/github.com/cznic/exp/lldb/memfiler_test.go
generated
vendored
Normal file
132
Godeps/_workspace/src/github.com/cznic/exp/lldb/memfiler_test.go
generated
vendored
Normal file
@ -0,0 +1,132 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test automatic page releasing (hole punching) of zero pages
|
||||
func TestMemFilerWriteAt(t *testing.T) {
|
||||
f := NewMemFiler()
|
||||
|
||||
// Add page index 0
|
||||
if _, err := f.WriteAt([]byte{1}, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(f.m), 1; g != e {
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
|
||||
// Add page index 1
|
||||
if _, err := f.WriteAt([]byte{2}, pgSize); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(f.m), 2; g != e {
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
|
||||
// Add page index 2
|
||||
if _, err := f.WriteAt([]byte{3}, 2*pgSize); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(f.m), 3; g != e {
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
|
||||
// Remove page index 1
|
||||
if _, err := f.WriteAt(make([]byte, 2*pgSize), pgSize/2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(f.m), 2; g != e {
|
||||
t.Logf("%#v", f.m)
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
|
||||
if err := f.Truncate(1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(f.m), 1; g != e {
|
||||
t.Logf("%#v", f.m)
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
|
||||
if err := f.Truncate(0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if g, e := len(f.m), 0; g != e {
|
||||
t.Logf("%#v", f.m)
|
||||
t.Fatal(g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemFilerWriteTo(t *testing.T) {
|
||||
const max = 1e5
|
||||
var b [max]byte
|
||||
rng := rand.New(rand.NewSource(42))
|
||||
for sz := 0; sz < 1e5; sz += 2053 {
|
||||
for i := range b[:sz] {
|
||||
b[i] = byte(rng.Int())
|
||||
}
|
||||
f := NewMemFiler()
|
||||
if n, err := f.WriteAt(b[:sz], 0); n != sz || err != nil {
|
||||
t.Fatal(n, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if n, err := f.WriteTo(&buf); n != int64(sz) || err != nil {
|
||||
t.Fatal(n, err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(b[:sz], buf.Bytes()) {
|
||||
t.Fatal("content differs")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemFilerReadFromWriteTo(t *testing.T) {
|
||||
const (
|
||||
sz = 1e2 * pgSize
|
||||
hole = 1e1 * pgSize
|
||||
)
|
||||
rng := rand.New(rand.NewSource(42))
|
||||
data := make([]byte, sz)
|
||||
for i := range data {
|
||||
data[i] = byte(rng.Int())
|
||||
}
|
||||
f := NewMemFiler()
|
||||
buf := bytes.NewBuffer(data)
|
||||
if n, err := f.ReadFrom(buf); n != int64(len(data)) || err != nil {
|
||||
t.Fatal(n, err)
|
||||
}
|
||||
|
||||
buf = bytes.NewBuffer(nil)
|
||||
if n, err := f.WriteTo(buf); n != int64(len(data)) || err != nil {
|
||||
t.Fatal(n, err)
|
||||
}
|
||||
|
||||
rd := buf.Bytes()
|
||||
if !bytes.Equal(data, rd) {
|
||||
t.Fatal("corrupted data")
|
||||
}
|
||||
|
||||
n0 := len(f.m)
|
||||
data = make([]byte, hole)
|
||||
f.WriteAt(data, sz/2)
|
||||
n := len(f.m)
|
||||
t.Log(n0, n)
|
||||
d := n0 - n
|
||||
if d*pgSize < hole-2 || d*pgSize > hole {
|
||||
t.Fatal(n0, n, d)
|
||||
}
|
||||
}
|
130
Godeps/_workspace/src/github.com/cznic/exp/lldb/osfiler.go
generated
vendored
Normal file
130
Godeps/_workspace/src/github.com/cznic/exp/lldb/osfiler.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = (*OSFiler)(nil)
|
||||
|
||||
// OSFile is an os.File like minimal set of methods allowing to construct a
|
||||
// Filer.
|
||||
type OSFile interface {
|
||||
Name() string
|
||||
Stat() (fi os.FileInfo, err error)
|
||||
Sync() (err error)
|
||||
Truncate(size int64) (err error)
|
||||
io.Closer
|
||||
io.Reader
|
||||
io.ReaderAt
|
||||
io.Seeker
|
||||
io.Writer
|
||||
io.WriterAt
|
||||
}
|
||||
|
||||
// OSFiler is like a SimpleFileFiler but based on an OSFile.
|
||||
type OSFiler struct {
|
||||
f OSFile
|
||||
nest int
|
||||
size int64 // not set if < 0
|
||||
}
|
||||
|
||||
// NewOSFiler returns a Filer from an OSFile. This Filer is like the
|
||||
// SimpleFileFiler, it does not implement the transaction related methods.
|
||||
func NewOSFiler(f OSFile) (r *OSFiler) {
|
||||
return &OSFiler{
|
||||
f: f,
|
||||
size: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *OSFiler) BeginUpdate() (err error) {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *OSFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return f.f.Close()
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *OSFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ":EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *OSFiler) Name() string {
|
||||
return f.f.Name()
|
||||
}
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *OSFiler) PunchHole(off, size int64) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *OSFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
return f.f.ReadAt(b, off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *OSFiler) Rollback() (err error) { return }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *OSFiler) Size() (n int64, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := f.f.Stat()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
return f.size, nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *OSFiler) Sync() (err error) {
|
||||
return f.f.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *OSFiler) Truncate(size int64) (err error) {
|
||||
if size < 0 {
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return f.f.Truncate(size)
|
||||
}
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *OSFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := os.Stat(f.f.Name())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
|
||||
return f.f.WriteAt(b, off)
|
||||
}
|
123
Godeps/_workspace/src/github.com/cznic/exp/lldb/simplefilefiler.go
generated
vendored
Normal file
123
Godeps/_workspace/src/github.com/cznic/exp/lldb/simplefilefiler.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// A basic os.File backed Filer.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var _ Filer = &SimpleFileFiler{} // Ensure SimpleFileFiler is a Filer.
|
||||
|
||||
// SimpleFileFiler is an os.File backed Filer intended for use where structural
|
||||
// consistency can be reached by other means (SimpleFileFiler is for example
|
||||
// wrapped in eg. an RollbackFiler or ACIDFiler0) or where persistence is not
|
||||
// required (temporary/working data sets).
|
||||
//
|
||||
// SimpleFileFiler is the most simple os.File backed Filer implementation as it
|
||||
// does not really implement BeginUpdate and EndUpdate/Rollback in any way
|
||||
// which would protect the structural integrity of data. If misused e.g. as a
|
||||
// real database storage w/o other measures, it can easily cause data loss
|
||||
// when, for example, a power outage occurs or the updating process terminates
|
||||
// abruptly.
|
||||
type SimpleFileFiler struct {
|
||||
file *os.File
|
||||
nest int
|
||||
size int64 // not set if < 0
|
||||
}
|
||||
|
||||
// NewSimpleFileFiler returns a new SimpleFileFiler.
|
||||
func NewSimpleFileFiler(f *os.File) *SimpleFileFiler {
|
||||
return &SimpleFileFiler{file: f, size: -1}
|
||||
}
|
||||
|
||||
// BeginUpdate implements Filer.
|
||||
func (f *SimpleFileFiler) BeginUpdate() error {
|
||||
f.nest++
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements Filer.
|
||||
func (f *SimpleFileFiler) Close() (err error) {
|
||||
if f.nest != 0 {
|
||||
return &ErrPERM{(f.Name() + ":Close")}
|
||||
}
|
||||
|
||||
return f.file.Close()
|
||||
}
|
||||
|
||||
// EndUpdate implements Filer.
|
||||
func (f *SimpleFileFiler) EndUpdate() (err error) {
|
||||
if f.nest == 0 {
|
||||
return &ErrPERM{(f.Name() + ":EndUpdate")}
|
||||
}
|
||||
|
||||
f.nest--
|
||||
return
|
||||
}
|
||||
|
||||
// Name implements Filer.
|
||||
func (f *SimpleFileFiler) Name() string {
|
||||
return f.file.Name()
|
||||
}
|
||||
|
||||
// PunchHole implements Filer.
|
||||
func (f *SimpleFileFiler) PunchHole(off, size int64) (err error) {
|
||||
return fileutil.PunchHole(f.file, off, size)
|
||||
}
|
||||
|
||||
// ReadAt implements Filer.
|
||||
func (f *SimpleFileFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
return f.file.ReadAt(b, off)
|
||||
}
|
||||
|
||||
// Rollback implements Filer.
|
||||
func (f *SimpleFileFiler) Rollback() (err error) { return }
|
||||
|
||||
// Size implements Filer.
|
||||
func (f *SimpleFileFiler) Size() (int64, error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := os.Stat(f.file.Name())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
return f.size, nil
|
||||
}
|
||||
|
||||
// Sync implements Filer.
|
||||
func (f *SimpleFileFiler) Sync() error {
|
||||
return f.file.Sync()
|
||||
}
|
||||
|
||||
// Truncate implements Filer.
|
||||
func (f *SimpleFileFiler) Truncate(size int64) (err error) {
|
||||
if size < 0 {
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return f.file.Truncate(size)
|
||||
}
|
||||
|
||||
// WriteAt implements Filer.
|
||||
func (f *SimpleFileFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
if f.size < 0 { // boot
|
||||
fi, err := os.Stat(f.file.Name())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
f.size = fi.Size()
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, int64(len(b))+off)
|
||||
return f.file.WriteAt(b, off)
|
||||
}
|
629
Godeps/_workspace/src/github.com/cznic/exp/lldb/xact.go
generated
vendored
Normal file
629
Godeps/_workspace/src/github.com/cznic/exp/lldb/xact.go
generated
vendored
Normal file
@ -0,0 +1,629 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Structural transactions.
|
||||
|
||||
package lldb
|
||||
|
||||
//DONE+ TransactionalMemoryFiler
|
||||
// ----
|
||||
// Use NewRollbackFiler(myMemFiler, ...)
|
||||
|
||||
/*
|
||||
|
||||
bfBits: 3
|
||||
BenchmarkRollbackFiler 20000000 102 ns/op 9.73 MB/s
|
||||
|
||||
bfBits: 4
|
||||
BenchmarkRollbackFiler 50000000 55.7 ns/op 17.95 MB/s
|
||||
|
||||
bfBits: 5
|
||||
BenchmarkRollbackFiler 100000000 32.2 ns/op 31.06 MB/s
|
||||
|
||||
bfBits: 6
|
||||
BenchmarkRollbackFiler 100000000 20.6 ns/op 48.46 MB/s
|
||||
|
||||
bfBits: 7
|
||||
BenchmarkRollbackFiler 100000000 15.1 ns/op 66.12 MB/s
|
||||
|
||||
bfBits: 8
|
||||
BenchmarkRollbackFiler 100000000 10.5 ns/op 95.66 MB/s
|
||||
|
||||
bfBits: 9
|
||||
BenchmarkRollbackFiler 200000000 8.02 ns/op 124.74 MB/s
|
||||
|
||||
bfBits: 10
|
||||
BenchmarkRollbackFiler 200000000 9.25 ns/op 108.09 MB/s
|
||||
|
||||
bfBits: 11
|
||||
BenchmarkRollbackFiler 100000000 11.7 ns/op 85.47 MB/s
|
||||
|
||||
bfBits: 12
|
||||
BenchmarkRollbackFiler 100000000 17.2 ns/op 57.99 MB/s
|
||||
|
||||
bfBits: 13
|
||||
BenchmarkRollbackFiler 100000000 32.7 ns/op 30.58 MB/s
|
||||
|
||||
bfBits: 14
|
||||
BenchmarkRollbackFiler 50000000 39.6 ns/op 25.27 MB/s
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
var (
|
||||
_ Filer = &bitFiler{} // Ensure bitFiler is a Filer.
|
||||
_ Filer = &RollbackFiler{} // ditto
|
||||
)
|
||||
|
||||
const (
|
||||
bfBits = 9
|
||||
bfSize = 1 << bfBits
|
||||
bfMask = bfSize - 1
|
||||
)
|
||||
|
||||
var (
|
||||
bitmask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
|
||||
bitZeroPage bitPage
|
||||
allDirtyFlags [bfSize >> 3]byte
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := range allDirtyFlags {
|
||||
allDirtyFlags[i] = 0xff
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
bitPage struct {
|
||||
prev, next *bitPage
|
||||
data [bfSize]byte
|
||||
flags [bfSize >> 3]byte
|
||||
dirty bool
|
||||
}
|
||||
|
||||
bitFilerMap map[int64]*bitPage
|
||||
|
||||
bitFiler struct {
|
||||
parent Filer
|
||||
m bitFilerMap
|
||||
size int64
|
||||
}
|
||||
)
|
||||
|
||||
func newBitFiler(parent Filer) (f *bitFiler, err error) {
|
||||
sz, err := parent.Size()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return &bitFiler{parent: parent, m: bitFilerMap{}, size: sz}, nil
|
||||
}
|
||||
|
||||
func (f *bitFiler) BeginUpdate() error { panic("internal error") }
|
||||
func (f *bitFiler) EndUpdate() error { panic("internal error") }
|
||||
func (f *bitFiler) Rollback() error { panic("internal error") }
|
||||
func (f *bitFiler) Sync() error { panic("internal error") }
|
||||
|
||||
func (f *bitFiler) Close() (err error) { return }
|
||||
func (f *bitFiler) Name() string { return fmt.Sprintf("%p.bitfiler", f) }
|
||||
func (f *bitFiler) Size() (int64, error) { return f.size, nil }
|
||||
|
||||
func (f *bitFiler) PunchHole(off, size int64) (err error) {
|
||||
first := off >> bfBits
|
||||
if off&bfMask != 0 {
|
||||
first++
|
||||
}
|
||||
off += size - 1
|
||||
last := off >> bfBits
|
||||
if off&bfMask != 0 {
|
||||
last--
|
||||
}
|
||||
if limit := f.size >> bfBits; last > limit {
|
||||
last = limit
|
||||
}
|
||||
for pgI := first; pgI <= last; pgI++ {
|
||||
pg := &bitPage{}
|
||||
pg.flags = allDirtyFlags
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) ReadAt(b []byte, off int64) (n int, err error) {
|
||||
avail := f.size - off
|
||||
pgI := off >> bfBits
|
||||
pgO := int(off & bfMask)
|
||||
rem := len(b)
|
||||
if int64(rem) >= avail {
|
||||
rem = int(avail)
|
||||
err = io.EOF
|
||||
}
|
||||
for rem != 0 && avail > 0 {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &bitPage{}
|
||||
if f.parent != nil {
|
||||
_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
|
||||
if err != nil && !fileutil.IsEOF(err) {
|
||||
return
|
||||
}
|
||||
|
||||
err = nil
|
||||
}
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
nc := copy(b[:mathutil.Min(rem, bfSize)], pg.data[pgO:])
|
||||
pgI++
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
n += nc
|
||||
b = b[nc:]
|
||||
off += int64(nc)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) Truncate(size int64) (err error) {
|
||||
switch {
|
||||
case size < 0:
|
||||
return &ErrINVAL{"Truncate size", size}
|
||||
case size == 0:
|
||||
f.m = bitFilerMap{}
|
||||
f.size = 0
|
||||
return
|
||||
}
|
||||
|
||||
first := size >> bfBits
|
||||
if size&bfMask != 0 {
|
||||
first++
|
||||
}
|
||||
last := f.size >> bfBits
|
||||
if f.size&bfMask != 0 {
|
||||
last++
|
||||
}
|
||||
for ; first < last; first++ {
|
||||
delete(f.m, first)
|
||||
}
|
||||
|
||||
f.size = size
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
off0 := off
|
||||
pgI := off >> bfBits
|
||||
pgO := int(off & bfMask)
|
||||
n = len(b)
|
||||
rem := n
|
||||
var nc int
|
||||
for rem != 0 {
|
||||
pg := f.m[pgI]
|
||||
if pg == nil {
|
||||
pg = &bitPage{}
|
||||
if f.parent != nil {
|
||||
_, err = f.parent.ReadAt(pg.data[:], off&^bfMask)
|
||||
if err != nil && !fileutil.IsEOF(err) {
|
||||
return
|
||||
}
|
||||
|
||||
err = nil
|
||||
}
|
||||
f.m[pgI] = pg
|
||||
}
|
||||
nc = copy(pg.data[pgO:], b)
|
||||
pgI++
|
||||
pg.dirty = true
|
||||
for i := pgO; i < pgO+nc; i++ {
|
||||
pg.flags[i>>3] |= bitmask[i&7]
|
||||
}
|
||||
pgO = 0
|
||||
rem -= nc
|
||||
b = b[nc:]
|
||||
off += int64(nc)
|
||||
}
|
||||
f.size = mathutil.MaxInt64(f.size, off0+int64(n))
|
||||
return
|
||||
}
|
||||
|
||||
func (f *bitFiler) link() {
|
||||
for pgI, pg := range f.m {
|
||||
nx, ok := f.m[pgI+1]
|
||||
if !ok || !nx.dirty {
|
||||
continue
|
||||
}
|
||||
|
||||
nx.prev, pg.next = pg, nx
|
||||
}
|
||||
}
|
||||
|
||||
func (f *bitFiler) dumpDirty(w io.WriterAt) (nwr int, err error) {
|
||||
f.link()
|
||||
for pgI, pg := range f.m {
|
||||
if !pg.dirty {
|
||||
continue
|
||||
}
|
||||
|
||||
for pg.prev != nil && pg.prev.dirty {
|
||||
pg = pg.prev
|
||||
pgI--
|
||||
}
|
||||
|
||||
for pg != nil && pg.dirty {
|
||||
last := false
|
||||
var off int64
|
||||
first := -1
|
||||
for i := 0; i < bfSize; i++ {
|
||||
flag := pg.flags[i>>3]&bitmask[i&7] != 0
|
||||
switch {
|
||||
case flag && !last: // Leading edge detected
|
||||
off = pgI<<bfBits + int64(i)
|
||||
first = i
|
||||
case !flag && last: // Trailing edge detected
|
||||
n, err := w.WriteAt(pg.data[first:i], off)
|
||||
if n != i-first {
|
||||
return 0, err
|
||||
}
|
||||
first = -1
|
||||
nwr++
|
||||
}
|
||||
|
||||
last = flag
|
||||
}
|
||||
if first >= 0 {
|
||||
i := bfSize
|
||||
n, err := w.WriteAt(pg.data[first:i], off)
|
||||
if n != i-first {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
nwr++
|
||||
}
|
||||
|
||||
pg.dirty = false
|
||||
pg = pg.next
|
||||
pgI++
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RollbackFiler is a Filer implementing structural transaction handling.
|
||||
// Structural transactions should be small and short lived because all non
|
||||
// committed data are held in memory until committed or discarded by a
|
||||
// Rollback.
|
||||
//
|
||||
// While using RollbackFiler, every intended update of the wrapped Filler, by
|
||||
// WriteAt, Truncate or PunchHole, _must_ be made within a transaction.
|
||||
// Attempts to do it outside of a transaction will return ErrPERM. OTOH,
|
||||
// invoking ReadAt outside of a transaction is not a problem.
|
||||
//
|
||||
// No nested transactions: All updates within a transaction are held in memory.
|
||||
// On a matching EndUpdate the updates held in memory are actually written to
|
||||
// the wrapped Filer.
|
||||
//
|
||||
// Nested transactions: Correct data will be seen from RollbackFiler when any
|
||||
// level of a nested transaction is rollbacked. The actual writing to the
|
||||
// wrapped Filer happens only when the outer most transaction nesting level is
|
||||
// closed.
|
||||
//
|
||||
// Invoking Rollback is an alternative to EndUpdate. It discards all changes
|
||||
// made at the current transaction level and returns the "state" (possibly not
|
||||
// yet persisted) of the Filer to what it was before the corresponding
|
||||
// BeginUpdate.
|
||||
//
|
||||
// During an open transaction, all reads (using ReadAt) are "dirty" reads,
|
||||
// seeing the uncommitted changes made to the Filer's data.
|
||||
//
|
||||
// Lldb databases should be based upon a RollbackFiler.
|
||||
//
|
||||
// With a wrapped MemFiler one gets transactional memory. With, for example a
|
||||
// wrapped disk based SimpleFileFiler it protects against at least some HW
|
||||
// errors - if Rollback is properly invoked on such failures and/or if there's
|
||||
// some WAL or 2PC or whatever other safe mechanism based recovery procedure
|
||||
// used by the client.
|
||||
//
|
||||
// The "real" writes to the wrapped Filer (or WAL instead) go through the
|
||||
// writerAt supplied to NewRollbackFiler.
|
||||
//
|
||||
// List of functions/methods which are recommended to be wrapped in a
|
||||
// BeginUpdate/EndUpdate structural transaction:
|
||||
//
|
||||
// Allocator.Alloc
|
||||
// Allocator.Free
|
||||
// Allocator.Realloc
|
||||
//
|
||||
// CreateBTree
|
||||
// RemoveBTree
|
||||
// BTree.Clear
|
||||
// BTree.Delete
|
||||
// BTree.DeleteAny
|
||||
// BTree.Clear
|
||||
// BTree.Extract
|
||||
// BTree.Get (it can mutate the DB)
|
||||
// BTree.Put
|
||||
// BTree.Set
|
||||
//
|
||||
// NOTE: RollbackFiler is a generic solution intended to wrap Filers provided
|
||||
// by this package which do not implement any of the transactional methods.
|
||||
// RollbackFiler thus _does not_ invoke any of the transactional methods of its
|
||||
// wrapped Filer.
|
||||
//
|
||||
// RollbackFiler is safe for concurrent use by multiple goroutines.
|
||||
type RollbackFiler struct {
|
||||
mu sync.RWMutex
|
||||
inCallback bool
|
||||
inCallbackMu sync.RWMutex
|
||||
bitFiler *bitFiler
|
||||
checkpoint func(int64) error
|
||||
closed bool
|
||||
f Filer
|
||||
parent Filer
|
||||
tlevel int // transaction nesting level, 0 == not in transaction
|
||||
writerAt io.WriterAt
|
||||
|
||||
// afterRollback, if not nil, is called after performing Rollback
|
||||
// without errros.
|
||||
afterRollback func() error
|
||||
}
|
||||
|
||||
// NewRollbackFiler returns a RollbackFiler wrapping f.
|
||||
//
|
||||
// The checkpoint parameter
|
||||
//
|
||||
// The checkpoint function is called after closing (by EndUpdate) the upper
|
||||
// most level open transaction if all calls of writerAt were successful and the
|
||||
// DB (or eg. a WAL) is thus now in a consistent state (virtually, in the ideal
|
||||
// world with no write caches, no HW failures, no process crashes, ...).
|
||||
//
|
||||
// NOTE: In, for example, a 2PC it is necessary to reflect also the sz
|
||||
// parameter as the new file size (as in the parameter to Truncate). All
|
||||
// changes were successfully written already by writerAt before invoking
|
||||
// checkpoint.
|
||||
//
|
||||
// The writerAt parameter
|
||||
//
|
||||
// The writerAt interface is used to commit the updates of the wrapped Filer.
|
||||
// If any invocation of writerAt fails then a non nil error will be returned
|
||||
// from EndUpdate and checkpoint will _not_ ne called. Neither is necessary to
|
||||
// call Rollback. The rule of thumb: The [structural] transaction [level] is
|
||||
// closed by invoking exactly once one of EndUpdate _or_ Rollback.
|
||||
//
|
||||
// It is presumed that writerAt uses WAL or 2PC or whatever other safe
|
||||
// mechanism to physically commit the updates.
|
||||
//
|
||||
// Updates performed by invocations of writerAt are byte-precise, but not
|
||||
// necessarily maximum possible length precise. IOW, for example an update
|
||||
// crossing page boundaries may be performed by more than one writerAt
|
||||
// invocation. No offset sorting is performed. This may change if it proves
|
||||
// to be a problem. Such change would be considered backward compatible.
|
||||
//
|
||||
// NOTE: Using RollbackFiler, but failing to ever invoke a matching "closing"
|
||||
// EndUpdate after an "opening" BeginUpdate means neither writerAt or
|
||||
// checkpoint will ever get called - with all the possible data loss
|
||||
// consequences.
|
||||
func NewRollbackFiler(f Filer, checkpoint func(sz int64) error, writerAt io.WriterAt) (r *RollbackFiler, err error) {
|
||||
if f == nil || checkpoint == nil || writerAt == nil {
|
||||
return nil, &ErrINVAL{Src: "lldb.NewRollbackFiler, nil argument"}
|
||||
}
|
||||
|
||||
return &RollbackFiler{
|
||||
checkpoint: checkpoint,
|
||||
f: f,
|
||||
writerAt: writerAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) BeginUpdate() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
parent := r.f
|
||||
if r.tlevel != 0 {
|
||||
parent = r.bitFiler
|
||||
}
|
||||
r.bitFiler, err = newBitFiler(parent)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.tlevel++
|
||||
return
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
//
|
||||
// Close will return an error if not invoked at nesting level 0. However, to
|
||||
// allow emergency closing from eg. a signal handler; if Close is invoked
|
||||
// within an open transaction(s), it rollbacks any non committed open
|
||||
// transactions and performs the Close operation.
|
||||
//
|
||||
// IOW: Regardless of the transaction nesting level the Close is always
|
||||
// performed but any uncommitted transaction data are lost.
|
||||
func (r *RollbackFiler) Close() (err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.closed {
|
||||
return &ErrPERM{r.f.Name() + ": Already closed"}
|
||||
}
|
||||
|
||||
r.closed = true
|
||||
if err = r.f.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if r.tlevel != 0 {
|
||||
err = &ErrPERM{r.f.Name() + ": Close inside an open transaction"}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Implements Filer.
func (r *RollbackFiler) EndUpdate() (err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.tlevel == 0 {
		return &ErrPERM{r.f.Name() + " : EndUpdate outside of a transaction"}
	}

	// Capture the logical size before popping the layer.
	sz, err := r.size() // Cannot call .Size() -> deadlock
	if err != nil {
		return
	}

	r.tlevel--
	bf := r.bitFiler
	parent := bf.parent
	w := r.writerAt
	if r.tlevel != 0 {
		// Still nested: commit this layer into its parent in-memory layer,
		// not through the real writerAt.
		w = parent
	}
	nwr, err := bf.dumpDirty(w)
	if err != nil {
		return
	}

	switch {
	case r.tlevel == 0:
		// Outermost commit: dirty pages went to r.writerAt above.
		r.bitFiler = nil
		if nwr == 0 {
			// Nothing was written, so no checkpoint is needed.
			return
		}

		return r.checkpoint(sz)
	default:
		// Inner commit: make the parent layer current again and propagate
		// this layer's size to it.
		r.bitFiler = parent.(*bitFiler)
		sz, _ := bf.Size() // bitFiler.Size() never returns err != nil
		return parent.Truncate(sz)
	}
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Name() string {
|
||||
r.mu.RLock()
|
||||
defer r.mu.RUnlock()
|
||||
|
||||
return r.f.Name()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) PunchHole(off, size int64) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": PunchHole outside of a transaction"}
|
||||
}
|
||||
|
||||
if off < 0 {
|
||||
return &ErrINVAL{r.f.Name() + ": PunchHole off", off}
|
||||
}
|
||||
|
||||
if size < 0 || off+size > r.bitFiler.size {
|
||||
return &ErrINVAL{r.f.Name() + ": PunchHole size", size}
|
||||
}
|
||||
|
||||
return r.bitFiler.PunchHole(off, size)
|
||||
}
|
||||
|
||||
// Implements Filer.
func (r *RollbackFiler) ReadAt(b []byte, off int64) (n int, err error) {
	// When invoked from within an afterRollback callback, r.mu is already
	// held by Rollback; re-acquiring it here would deadlock. The inCallback
	// flag (guarded by its own mutex) tells us whether to lock r.mu.
	r.inCallbackMu.RLock()
	defer r.inCallbackMu.RUnlock()
	if !r.inCallback {
		r.mu.RLock()
		defer r.mu.RUnlock()
	}
	// Outside a transaction read the real Filer, otherwise the in-memory
	// transaction layer.
	if r.tlevel == 0 {
		return r.f.ReadAt(b, off)
	}

	return r.bitFiler.ReadAt(b, off)
}
|
||||
|
||||
// Implements Filer.
func (r *RollbackFiler) Rollback() (err error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.tlevel == 0 {
		return &ErrPERM{r.f.Name() + ": Rollback outside of a transaction"}
	}

	// Pop the current in-memory layer. At nesting level 1 the stale bitFiler
	// is left in place but becomes unreachable once tlevel drops to 0 (reads
	// then go directly to r.f).
	if r.tlevel > 1 {
		r.bitFiler = r.bitFiler.parent.(*bitFiler)
	}
	r.tlevel--
	if f := r.afterRollback; f != nil {
		// Run the rollback hook with inCallback set so that ReadAt, when
		// invoked from inside the callback, does not try to re-acquire r.mu
		// (held here) and deadlock.
		r.inCallbackMu.Lock()
		r.inCallback = true
		r.inCallbackMu.Unlock()
		defer func() {
			r.inCallbackMu.Lock()
			r.inCallback = false
			r.inCallbackMu.Unlock()
		}()
		return f()
	}
	return
}
|
||||
|
||||
func (r *RollbackFiler) size() (sz int64, err error) {
|
||||
if r.tlevel == 0 {
|
||||
return r.f.Size()
|
||||
}
|
||||
|
||||
return r.bitFiler.Size()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Size() (sz int64, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.size()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Sync() error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
return r.f.Sync()
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) Truncate(size int64) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return &ErrPERM{r.f.Name() + ": Truncate outside of a transaction"}
|
||||
}
|
||||
|
||||
return r.bitFiler.Truncate(size)
|
||||
}
|
||||
|
||||
// Implements Filer.
|
||||
func (r *RollbackFiler) WriteAt(b []byte, off int64) (n int, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.tlevel == 0 {
|
||||
return 0, &ErrPERM{r.f.Name() + ": WriteAt outside of a transaction"}
|
||||
}
|
||||
|
||||
return r.bitFiler.WriteAt(b, off)
|
||||
}
|
400
Godeps/_workspace/src/github.com/cznic/exp/lldb/xact_test.go
generated
vendored
Normal file
400
Godeps/_workspace/src/github.com/cznic/exp/lldb/xact_test.go
generated
vendored
Normal file
@ -0,0 +1,400 @@
|
||||
// Copyright 2014 The lldb Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lldb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/cznic/fileutil"
|
||||
"github.com/cznic/mathutil"
|
||||
)
|
||||
|
||||
func (f *bitFiler) dump(w io.Writer) {
|
||||
fmt.Fprintf(w, "bitFiler @ %p, size: %d(%#x)\n", f, f.size, f.size)
|
||||
for k, v := range f.m {
|
||||
fmt.Fprintf(w, "bitPage @ %p: pgI %d(%#x): %#v\n", v, k, k, *v)
|
||||
}
|
||||
}
|
||||
|
||||
func filerBytes(f Filer) []byte {
|
||||
sz, err := f.Size()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b := make([]byte, int(sz))
|
||||
n, err := f.ReadAt(b, 0)
|
||||
if n != len(b) {
|
||||
panic(fmt.Errorf("sz %d n %d err %v", sz, n, err))
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func cmpFilerBytes(t *testing.T, fg, fe Filer) {
|
||||
g, e := filerBytes(fg), filerBytes(fe)
|
||||
if !bytes.Equal(g, e) {
|
||||
t.Fatalf("Filer content doesn't match: got\n%sexp:\n%s", hex.Dump(g), hex.Dump(e))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollbackFiler0(t *testing.T) {
|
||||
var r *RollbackFiler
|
||||
f, g := NewMemFiler(), NewMemFiler()
|
||||
|
||||
checkpoint := func(sz int64) (err error) {
|
||||
return f.Truncate(sz)
|
||||
}
|
||||
|
||||
r, err := NewRollbackFiler(f, checkpoint, f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = r.BeginUpdate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = r.EndUpdate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmpFilerBytes(t, f, g)
|
||||
}
|
||||
|
||||
// TestRollbackFiler1 verifies that a committed write reaches the underlying
// Filer: the same random block is written to a plain MemFiler (g) and,
// inside a transaction, to r; after EndUpdate both must match.
func TestRollbackFiler1(t *testing.T) {
	const (
		N = 1e6  // payload size
		O = 1234 // write offset
	)

	var r *RollbackFiler
	f, g := NewMemFiler(), NewMemFiler()

	checkpoint := func(sz int64) (err error) {
		return f.Truncate(sz)
	}

	r, err := NewRollbackFiler(f, checkpoint, f)
	if err != nil {
		t.Fatal(err)
	}

	if err = r.BeginUpdate(); err != nil {
		t.Fatal(err)
	}

	rng := rand.New(rand.NewSource(42))
	b := make([]byte, N)
	for i := range b {
		b[i] = byte(rng.Int())
	}

	if _, err = g.WriteAt(b, O); err != nil {
		t.Fatal(err)
	}

	if _, err = r.WriteAt(b, O); err != nil {
		t.Fatal(err)
	}

	// Before EndUpdate the write must be buffered in memory only: f empty.
	b = filerBytes(f)
	if n := len(b); n != 0 {
		t.Fatal(n)
	}

	if err = r.EndUpdate(); err != nil {
		t.Fatal(err)
	}

	cmpFilerBytes(t, f, g)
}
|
||||
|
||||
// TestRollbackFiler2 verifies that Rollback discards writes: after writing
// inside a transaction and rolling it back, the underlying Filer must still
// match an untouched MemFiler.
func TestRollbackFiler2(t *testing.T) {
	const (
		N = 1e6  // payload size
		O = 1234 // write offset
	)

	var r *RollbackFiler
	f, g := NewMemFiler(), NewMemFiler()

	checkpoint := func(sz int64) (err error) {
		return f.Truncate(sz)
	}

	r, err := NewRollbackFiler(f, checkpoint, f)
	if err != nil {
		t.Fatal(err)
	}

	if err = r.BeginUpdate(); err != nil {
		t.Fatal(err)
	}

	rng := rand.New(rand.NewSource(42))
	b := make([]byte, N)
	for i := range b {
		b[i] = byte(rng.Int())
	}

	if _, err = r.WriteAt(b, O); err != nil {
		t.Fatal(err)
	}

	// The write must be buffered in memory only: f is still empty.
	b = filerBytes(f)
	if n := len(b); n != 0 {
		t.Fatal(n)
	}

	if err = r.Rollback(); err != nil {
		t.Fatal(err)
	}

	cmpFilerBytes(t, f, g)
}
|
||||
|
||||
func rndBytes(rng *rand.Rand, n int) []byte {
|
||||
r := make([]byte, n)
|
||||
for i := range r {
|
||||
r[i] = byte(rng.Int())
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// TestRollbackFiler3 exercises ReadAt, WriteAt and Truncate across three
// nested transactions: EOF reads outside any transaction, zero-filled gaps
// below a sparse write, shrinking (1e5) making offset 1e6 unreadable, and
// growing (2e6) making it readable again as zeros.
func TestRollbackFiler3(t *testing.T) {
	var r *RollbackFiler
	f := NewMemFiler()

	checkpoint := func(sz int64) (err error) {
		return f.Truncate(sz)
	}

	r, err := NewRollbackFiler(f, checkpoint, f)
	if err != nil {
		t.Fatal(err)
	}

	// Outside any transaction reads hit the (empty) underlying Filer: EOF.
	n, err := r.ReadAt([]byte{0}, 0)
	if n != 0 || !fileutil.IsEOF(err) {
		t.Fatal(n, err)
	}

	n, err = r.ReadAt([]byte{0}, 1e6)
	if n != 0 || !fileutil.IsEOF(err) {
		t.Fatal(n, err)
	}

	if err = r.BeginUpdate(); err != nil { // BeginUpdate: 0 -> 1
		t.Fatal(err)
	}

	rng := rand.New(rand.NewSource(42))

	buf := rndBytes(rng, 100)
	if n, err := r.WriteAt(buf, 1e6); n != 100 || err != nil {
		t.Fatal(err)
	}

	buf = make([]byte, 100)
	// The hole below the sparse write at 1e6 must read back as zeros.
	if n, err := r.ReadAt(buf, 1e6-200); n != 100 || err != nil {
		t.Fatal(err)
	}

	for i, v := range buf {
		if v != 0 {
			t.Fatal(i, v)
		}
	}

	if err := r.Truncate(1e5); err != nil {
		t.Fatal(err)
	}

	if err = r.BeginUpdate(); err != nil { // BeginUpdate: 1 -> 2
		t.Fatal(err)
	}

	// After truncating to 1e5, reading at offset 1e6 must fail.
	if n, err := r.ReadAt(buf, 1e6); n != 0 || err == nil {
		t.Fatal(n, err)
	}

	if err := r.Truncate(2e6); err != nil {
		t.Fatal(err)
	}

	if err = r.BeginUpdate(); err != nil { // BeginUpdate: 2 -> 3
		t.Fatal(err)
	}

	// Growing to 2e6 makes offset 1e6 readable again - as zeros.
	if n, err := r.ReadAt(buf, 1e6); n == 0 || err != nil {
		t.Fatal(n, err)
	}

	for i, v := range buf {
		if v != 0 {
			t.Fatal(i, v)
		}
	}
}
|
||||
|
||||
func TestRollbackFiler4(t *testing.T) {
|
||||
const (
|
||||
maxSize = 1e6
|
||||
maxChange = maxSize/100 + 4
|
||||
maxChanges = 10
|
||||
maxNest = 3
|
||||
)
|
||||
|
||||
var r *RollbackFiler
|
||||
f := NewMemFiler()
|
||||
|
||||
checkpoint := func(sz int64) (err error) {
|
||||
return f.Truncate(sz)
|
||||
}
|
||||
|
||||
r, err := NewRollbackFiler(f, checkpoint, f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rng := rand.New(rand.NewSource(42))
|
||||
|
||||
ref := make([]byte, 2*maxSize)
|
||||
for i := range ref {
|
||||
ref[i] = byte(rng.Int())
|
||||
}
|
||||
|
||||
var finalSize int
|
||||
|
||||
var fn func(int, int, []byte) (int, []byte)
|
||||
fn = func(nest, inSize int, in []byte) (outSize int, out []byte) {
|
||||
defer func() {
|
||||
for i := outSize; i < len(out); i++ {
|
||||
out[i] = 0
|
||||
}
|
||||
finalSize = mathutil.Max(finalSize, outSize)
|
||||
}()
|
||||
|
||||
out = make([]byte, len(in), 2*maxSize)
|
||||
copy(out, in)
|
||||
if err := r.BeginUpdate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < maxChanges; i++ {
|
||||
changeLen := rng.Intn(maxChange) + 4
|
||||
changeOff := rng.Intn(maxSize * 3 / 2)
|
||||
b := make([]byte, changeLen)
|
||||
for i := range b {
|
||||
b[i] = byte(rng.Int())
|
||||
}
|
||||
if n, err := r.WriteAt(b, int64(changeOff)); n != len(b) || err != nil {
|
||||
t.Fatal(n, len(b), err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.Rollback(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := r.BeginUpdate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < maxChanges; i++ {
|
||||
changeLen := rng.Intn(maxChange) + 4
|
||||
changeOff := rng.Intn(maxSize * 3 / 2)
|
||||
b := make([]byte, changeLen)
|
||||
for i := range b {
|
||||
b[i] = byte(rng.Int())
|
||||
}
|
||||
if n, err := r.WriteAt(b, int64(changeOff)); n != len(b) || err != nil {
|
||||
t.Fatal(n, len(b), err)
|
||||
}
|
||||
copy(out[changeOff:], b)
|
||||
copy(ref[changeOff:], b)
|
||||
}
|
||||
|
||||
newSize := rng.Intn(maxSize*3/2) + 4
|
||||
if nest == maxNest {
|
||||
if err := r.EndUpdate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return newSize, out
|
||||
}
|
||||
|
||||
outSize, out = fn(nest+1, newSize, out)
|
||||
if err := r.EndUpdate(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
sz, result := fn(0, maxSize, ref)
|
||||
if g, e := sz, finalSize; g != e {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
g, e := result[:sz], ref[:sz]
|
||||
if !bytes.Equal(g, e) {
|
||||
if len(g) == len(e) {
|
||||
x := make([]byte, len(g))
|
||||
for i := range x {
|
||||
if g[i] != e[i] {
|
||||
x[i] = 'X'
|
||||
}
|
||||
}
|
||||
//t.Logf("Data diff\n%s", hex.Dump(x))
|
||||
}
|
||||
//t.Fatalf("Data don't match: got\n%sexp:\n%s", hex.Dump(g), hex.Dump(e))
|
||||
t.Fatalf("Data don't match")
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkRollbackFiler measures WriteAt throughput into an open
// transaction. A total of b.N bytes, split into chunks of at most 1000
// bytes at random offsets, is prepared before the timer is reset.
func BenchmarkRollbackFiler(b *testing.B) {
	rng := rand.New(rand.NewSource(42))
	type t struct {
		off int64
		b   []byte
	}
	a := []t{}
	for rem := b.N; rem > 0; {
		off := rng.Int63()
		n := mathutil.Min(rng.Intn(1e3)+1, rem)
		a = append(a, t{off, rndBytes(rng, n)})
		rem -= n
	}

	var r *RollbackFiler
	f := NewMemFiler()

	checkpoint := func(sz int64) (err error) {
		return f.Truncate(sz)
	}

	r, err := NewRollbackFiler(f, checkpoint, f)
	if err != nil {
		b.Fatal(err)
	}

	if err := r.BeginUpdate(); err != nil {
		b.Fatal(err)
	}

	// Only the writes themselves are timed.
	b.ResetTimer()
	for _, v := range a {
		if _, err := r.WriteAt(v.b, v.off); err != nil {
			b.Fatal(err)
		}
	}
}
|
14
Godeps/_workspace/src/github.com/cznic/fileutil/AUTHORS
generated
vendored
Normal file
14
Godeps/_workspace/src/github.com/cznic/fileutil/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
# This file lists authors for copyright purposes. This file is distinct from
|
||||
# the CONTRIBUTORS files. See the latter for an explanation.
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron Bieber <deftly@gmail.com>
CZ.NIC z.s.p.o. <kontakt@nic.cz>
Jan Mercl <0xjnml@gmail.com>
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user