Mirror of https://github.com/octoleo/syncthing.git (synced 2024-11-09 23:00:58 +00:00)

commit 2b9fc0fd43 (parent d6c058c407)

    Update all deps
Godeps/Godeps.json (generated, 26 lines changed)

@@ -9,28 +9,28 @@
 	"Deps": [
 		{
 			"ImportPath": "bitbucket.org/kardianos/osext",
-			"Comment": "null-9",
-			"Rev": "364fb577de68fb646c4cb39cc0e09c887ee16376"
+			"Comment": "null-13",
+			"Rev": "5d3ddcf53a508cc2f7404eaebf546ef2cb5cdb6e"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.crypto/bcrypt",
-			"Comment": "null-185",
-			"Rev": "6478cc9340cbbe6c04511280c5007722269108e9"
+			"Comment": "null-212",
+			"Rev": "1064b89a6fb591df0dd65422295b8498916b092f"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.crypto/blowfish",
-			"Comment": "null-185",
-			"Rev": "6478cc9340cbbe6c04511280c5007722269108e9"
+			"Comment": "null-212",
+			"Rev": "1064b89a6fb591df0dd65422295b8498916b092f"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.text/transform",
-			"Comment": "null-81",
-			"Rev": "9cbe983aed9b0dfc73954433fead5e00866342ac"
+			"Comment": "null-87",
+			"Rev": "c59e4f2f93824f81213799e64c3eead7be24660a"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.text/unicode/norm",
-			"Comment": "null-81",
-			"Rev": "9cbe983aed9b0dfc73954433fead5e00866342ac"
+			"Comment": "null-87",
+			"Rev": "c59e4f2f93824f81213799e64c3eead7be24660a"
 		},
 		{
 			"ImportPath": "code.google.com/p/snappy-go/snappy",
@@ -39,15 +39,15 @@
 		},
 		{
 			"ImportPath": "github.com/golang/groupcache/lru",
-			"Rev": "d781998583680cda80cf61e0b37dd0cd8da2eb52"
+			"Rev": "a531d51b7f9f3dd13c1c2b50d42d739b70442dbb"
 		},
 		{
 			"ImportPath": "github.com/juju/ratelimit",
-			"Rev": "cbaa435c80a9716e086f25d409344b26c4039358"
+			"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "ca1565e5fb6658691d7074d270602c9185a55c79"
+			"Rev": "e1f2d2bdccd7c62f4d4a29aaf081bf1fc4404f91"
 		},
 		{
 			"ImportPath": "github.com/vitrun/qart/coding",
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_plan9.go (generated, vendored, 18 lines changed)

@@ -4,13 +4,17 @@

 package osext

-import "syscall"
+import (
+	"syscall"
+	"os"
+	"strconv"
+)

 func executable() (string, error) {
-	f, err := Open("/proc/" + itoa(Getpid()) + "/text")
-	if err != nil {
-		return "", err
-	}
-	defer f.Close()
-	return syscall.Fd2path(int(f.Fd()))
+	f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	return syscall.Fd2path(int(f.Fd()))
 }
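For reference, the function patched above backs the package's exported entry point, `osext.Executable()`. A minimal, hedged usage sketch (the import path below is the vendored one this repo uses; a real caller might import the upstream path instead):

```go
package main

import (
	"fmt"
	"log"

	"bitbucket.org/kardianos/osext" // vendored under Godeps/_workspace in this repo
)

func main() {
	// Executable reports the path of the running binary; on Plan 9 it now
	// resolves /proc/<pid>/text via os.Open and strconv.Itoa, per the diff above.
	path, err := osext.Executable()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("running from:", path)
}
```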
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_sysctl.go (generated, vendored, 63 lines changed)

@@ -14,7 +14,7 @@ import (
 	"unsafe"
 )

-var startUpcwd, getwdError = os.Getwd()
+var initCwd, initCwdErr = os.Getwd()

 func executable() (string, error) {
 	var mib [4]int32
@@ -26,20 +26,20 @@ func executable() (string, error) {
 	}

 	n := uintptr(0)
-	// get length
-	_, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
-	if err != 0 {
-		return "", err
+	// Get length.
+	_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
+	if errNum != 0 {
+		return "", errNum
 	}
-	if n == 0 { // shouldn't happen
+	if n == 0 { // This shouldn't happen.
 		return "", nil
 	}
 	buf := make([]byte, n)
-	_, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
-	if err != 0 {
-		return "", err
+	_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
+	if errNum != 0 {
+		return "", errNum
 	}
-	if n == 0 { // shouldn't happen
+	if n == 0 { // This shouldn't happen.
 		return "", nil
 	}
 	for i, v := range buf {
@@ -48,35 +48,32 @@ func executable() (string, error) {
 			break
 		}
 	}
-	var strpath string
-	if buf[0] != '/' {
-		var e error
-		if strpath, e = getAbs(buf); e != nil {
-			return strpath, e
+	var err error
+	execPath := string(buf)
+	// execPath will not be empty due to above checks.
+	// Try to get the absolute path if the execPath is not rooted.
+	if execPath[0] != '/' {
+		execPath, err = getAbs(execPath)
+		if err != nil {
+			return execPath, err
 		}
-	} else {
-		strpath = string(buf)
 	}
-	// darwin KERN_PROCARGS may return the path to a symlink rather than the
-	// actual executable
+	// For darwin KERN_PROCARGS may return the path to a symlink rather than the
+	// actual executable.
 	if runtime.GOOS == "darwin" {
-		if strpath, err := filepath.EvalSymlinks(strpath); err != nil {
-			return strpath, err
+		if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
+			return execPath, err
 		}
 	}
-	return strpath, nil
+	return execPath, nil
 }

-func getAbs(buf []byte) (string, error) {
-	if getwdError != nil {
-		return string(buf), getwdError
-	} else {
-		if buf[0] == '.' {
-			buf = buf[1:]
-		}
-		if startUpcwd[len(startUpcwd)-1] != '/' && buf[0] != '/' {
-			return startUpcwd + "/" + string(buf), nil
-		}
-		return startUpcwd + string(buf), nil
+func getAbs(execPath string) (string, error) {
+	if initCwdErr != nil {
+		return execPath, initCwdErr
 	}
+	// The execPath may begin with a "../" or a "./" so clean it first.
+	// Join the two paths, trailing and starting slashes undetermined, so use
+	// the generic Join function.
+	return filepath.Join(initCwd, filepath.Clean(execPath)), nil
 }
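The rewritten `getAbs` drops the hand-rolled string concatenation in favour of `filepath.Clean` plus `filepath.Join`. A small illustration of the same idea, with made-up names that are not the package's own API:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// absFrom mirrors the approach of the new getAbs: clean a possibly relative
// "./..." or "../..." executable path, then join it onto a saved working
// directory, letting path/filepath deal with separators.
func absFrom(cwd, execPath string) string {
	return filepath.Join(cwd, filepath.Clean(execPath))
}

func main() {
	fmt.Println(absFrom("/home/user", "./bin/syncthing")) // /home/user/bin/syncthing
	fmt.Println(absFrom("/home/user", "../other/tool"))   // /home/other/tool
}
```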
Godeps/_workspace/src/code.google.com/p/go.crypto/bcrypt/bcrypt_test.go (generated, vendored, 9 lines changed)

@@ -53,6 +53,15 @@ func TestBcryptingIsCorrect(t *testing.T) {
 	}
 }

+func TestVeryShortPasswords(t *testing.T) {
+	key := []byte("k")
+	salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+	_, err := bcrypt(key, 10, salt)
+	if err != nil {
+		t.Errorf("One byte key resulted in error: %s", err)
+	}
+}
+
 func TestTooLongPasswordsWork(t *testing.T) {
 	salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
 	// One byte over the usual 56 byte limit that blowfish has
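The new test drives the unexported `bcrypt()` helper directly; through the package's public API the same one-byte-password case would look roughly like this (import path as vendored here, `golang.org/x/crypto/bcrypt` in later trees):

```go
package main

import (
	"fmt"
	"log"

	"code.google.com/p/go.crypto/bcrypt"
)

func main() {
	// A single-byte password, which the updated dependency now handles.
	hash, err := bcrypt.GenerateFromPassword([]byte("k"), bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}
	if err := bcrypt.CompareHashAndPassword(hash, []byte("k")); err != nil {
		log.Fatal(err)
	}
	fmt.Println("one-byte password hashed and verified")
}
```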
Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/blowfish_test.go (generated, vendored, 14 lines changed)

@@ -192,19 +192,13 @@ func TestCipherDecrypt(t *testing.T) {
 }

 func TestSaltedCipherKeyLength(t *testing.T) {
-	var key []byte
-	for i := 0; i < 4; i++ {
-		_, err := NewSaltedCipher(key, []byte{'a'})
-		if err != KeySizeError(i) {
-			t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(i))
-		}
-		key = append(key, 'a')
+	if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) {
+		t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0))
 	}

 	// A 57-byte key. One over the typical blowfish restriction.
-	key = []byte("012345678901234567890123456789012345678901234567890123456")
-	_, err := NewSaltedCipher(key, []byte{'a'})
-	if err != nil {
+	key := []byte("012345678901234567890123456789012345678901234567890123456")
+	if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil {
 		t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
 	}
 }
Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish/cipher.go (generated, vendored, 14 lines changed)

@@ -26,14 +26,13 @@ func (k KeySizeError) Error() string {
 }

 // NewCipher creates and returns a Cipher.
-// The key argument should be the Blowfish key, 4 to 56 bytes.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
 func NewCipher(key []byte) (*Cipher, error) {
 	var result Cipher
-	k := len(key)
-	if k < 4 || k > 56 {
+	if k := len(key); k < 1 || k > 56 {
 		return nil, KeySizeError(k)
 	}
-	initCipher(key, &result)
+	initCipher(&result)
 	ExpandKey(key, &result)
 	return &result, nil
 }
@@ -44,11 +43,10 @@ func NewCipher(key []byte) (*Cipher, error) {
 // bytes. Only the first 16 bytes of salt are used.
 func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
 	var result Cipher
-	k := len(key)
-	if k < 4 {
+	if k := len(key); k < 1 {
 		return nil, KeySizeError(k)
 	}
-	initCipher(key, &result)
+	initCipher(&result)
 	expandKeyWithSalt(key, salt, &result)
 	return &result, nil
 }
@@ -81,7 +79,7 @@ func (c *Cipher) Decrypt(dst, src []byte) {
 	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
 }

-func initCipher(key []byte, c *Cipher) {
+func initCipher(c *Cipher) {
 	copy(c.p[0:], p[0:])
 	copy(c.s0[0:], s0[0:])
 	copy(c.s1[0:], s1[0:])
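The visible effect of the cipher.go change is that the minimum key length drops from 4 bytes to 1 byte. A hedged sketch of what that permits (import path as vendored here):

```go
package main

import (
	"fmt"
	"log"

	"code.google.com/p/go.crypto/blowfish"
)

func main() {
	// A 3-byte key: rejected with KeySizeError before this update,
	// accepted now that the lower bound is 1 byte.
	c, err := blowfish.NewCipher([]byte("abc"))
	if err != nil {
		log.Fatal(err)
	}
	src := []byte("8bytes!!") // Blowfish encrypts 8-byte blocks.
	dst := make([]byte, len(src))
	c.Encrypt(dst, src)
	fmt.Printf("ciphertext: %x\n", dst)
}
```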
Godeps/_workspace/src/github.com/juju/ratelimit/README.md (generated, vendored, 8 lines changed)

@@ -42,6 +42,14 @@ NewBucket returns a new token bucket that fills at the rate of one token every
 fillInterval, up to the given maximum capacity. Both arguments must be positive.
 The bucket is initially full.

+#### func NewBucketWithQuantum
+
+```go
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
+```
+
+NewBucketWithQuantum is similar to NewBucket, but allows the specification of
+the quantum size - quantum tokens are added every fillInterval.
+
 #### func NewBucketWithRate

 ```go
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit.go (generated, vendored, 11 lines changed)

@@ -36,7 +36,7 @@ type Bucket struct {
 // maximum capacity. Both arguments must be
 // positive. The bucket is initially full.
 func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
-	return newBucketWithQuantum(fillInterval, capacity, 1)
+	return NewBucketWithQuantum(fillInterval, capacity, 1)
 }

 // rateMargin specifes the allowed variance of actual
@@ -54,7 +54,7 @@ func NewBucketWithRate(rate float64, capacity int64) *Bucket {
 		if fillInterval <= 0 {
 			continue
 		}
-		tb := newBucketWithQuantum(fillInterval, capacity, quantum)
+		tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
 		if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
 			return tb
 		}
@@ -73,11 +73,10 @@ func nextQuantum(q int64) int64 {
 	return q1
 }

-// newBucketWithQuantum is similar to NewBucket, but allows
+// NewBucketWithQuantum is similar to NewBucket, but allows
 // the specification of the quantum size - quantum tokens
-// are added every fillInterval. This is so that we can get accurate
-// rates even when we want to add more than one token per ns.
-func newBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
+// are added every fillInterval.
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
 	if fillInterval <= 0 {
 		panic("token bucket fill interval is not > 0")
 	}
Godeps/_workspace/src/github.com/juju/ratelimit/ratelimit_test.go (generated, vendored, 2 lines changed)

@@ -273,7 +273,7 @@ func (rateLimitSuite) TestRate(c *gc.C) {
 	if !isCloseTo(tb.Rate(), 0.5, 0.00001) {
 		c.Fatalf("got %v want 0.5", tb.Rate())
 	}
-	tb = newBucketWithQuantum(100*time.Millisecond, 1, 5)
+	tb = NewBucketWithQuantum(100*time.Millisecond, 1, 5)
 	if !isCloseTo(tb.Rate(), 50, 0.00001) {
 		c.Fatalf("got %v want 50", tb.Rate())
 	}
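With `newBucketWithQuantum` exported as `NewBucketWithQuantum`, callers outside the package can build buckets that add several tokens per fill interval. A minimal usage sketch mirroring the test above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// 5 tokens every 100ms: an effective rate of 50 tokens per second,
	// the same figure the updated test checks via Rate().
	tb := ratelimit.NewBucketWithQuantum(100*time.Millisecond, 100, 5)
	fmt.Printf("rate: %v tokens/s\n", tb.Rate())
}
```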
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/comparer.go (generated, vendored, 52 lines changed)

@@ -9,34 +9,48 @@ package leveldb
 import "github.com/syndtr/goleveldb/leveldb/comparer"

 type iComparer struct {
-	cmp comparer.Comparer
+	ucmp comparer.Comparer
 }

-func (p *iComparer) Name() string {
-	return p.cmp.Name()
+func (icmp *iComparer) uName() string {
+	return icmp.ucmp.Name()
 }

-func (p *iComparer) Compare(a, b []byte) int {
-	ia, ib := iKey(a), iKey(b)
-	r := p.cmp.Compare(ia.ukey(), ib.ukey())
-	if r == 0 {
-		an, bn := ia.num(), ib.num()
-		if an > bn {
-			r = -1
-		} else if an < bn {
-			r = 1
+func (icmp *iComparer) uCompare(a, b []byte) int {
+	return icmp.ucmp.Compare(a, b)
+}
+
+func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
+	return icmp.ucmp.Separator(dst, a, b)
+}
+
+func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
+	return icmp.ucmp.Successor(dst, b)
+}
+
+func (icmp *iComparer) Name() string {
+	return icmp.uName()
+}
+
+func (icmp *iComparer) Compare(a, b []byte) int {
+	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+	if x == 0 {
+		if m, n := iKey(a).num(), iKey(b).num(); m > n {
+			x = -1
+		} else if m < n {
+			x = 1
 		}
 	}
-	return r
+	return x
 }

-func (p *iComparer) Separator(dst, a, b []byte) []byte {
+func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
 	ua, ub := iKey(a).ukey(), iKey(b).ukey()
-	dst = p.cmp.Separator(dst, ua, ub)
+	dst = icmp.ucmp.Separator(dst, ua, ub)
 	if dst == nil {
 		return nil
 	}
-	if len(dst) < len(ua) && p.cmp.Compare(ua, dst) < 0 {
+	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
 		dst = append(dst, kMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
@@ -45,13 +59,13 @@ func (p *iComparer) Separator(dst, a, b []byte) []byte {
 	return dst
 }

-func (p *iComparer) Successor(dst, b []byte) []byte {
+func (icmp *iComparer) Successor(dst, b []byte) []byte {
 	ub := iKey(b).ukey()
-	dst = p.cmp.Successor(dst, ub)
+	dst = icmp.ucmp.Successor(dst, ub)
 	if dst == nil {
 		return nil
 	}
-	if len(dst) < len(ub) && p.cmp.Compare(ub, dst) < 0 {
+	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
 		dst = append(dst, kMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
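The refactor renames the wrapped comparer to `ucmp` and adds `u*` helpers, but the ordering rule `Compare` implements is unchanged: user keys ascend, and for equal user keys the higher sequence number (the newer write) sorts first. A standalone sketch of that rule, with illustrative types that are not the package's own:

```go
package main

import (
	"bytes"
	"fmt"
)

// entry stands in for a decoded internal key: user key plus sequence number.
type entry struct {
	ukey []byte
	seq  uint64
}

// compare follows the same rule as iComparer.Compare above:
// user keys ascending; for equal user keys, larger sequence numbers first.
func compare(a, b entry) int {
	if x := bytes.Compare(a.ukey, b.ukey); x != 0 {
		return x
	}
	switch {
	case a.seq > b.seq:
		return -1
	case a.seq < b.seq:
		return 1
	}
	return 0
}

func main() {
	older := entry{[]byte("k1"), 7}
	newer := entry{[]byte("k1"), 9}
	fmt.Println(compare(newer, older)) // -1: the newer write sorts first
}
```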
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go (generated, vendored, 14 lines changed)

@@ -387,7 +387,6 @@ func recoverTable(s *session, o *opt.Options) error {

 func (d *DB) recoverJournal() error {
 	s := d.s
-	icmp := s.cmp

 	ff0, err := s.getFiles(storage.TypeJournal)
 	if err != nil {
@@ -477,7 +476,7 @@ func (d *DB) recoverJournal() error {
 	// Recover all journals.
 	if len(ff2) > 0 {
 		s.logf("journal@recovery F·%d", len(ff2))
-		mem = memdb.New(icmp, writeBuffer)
+		mem = memdb.New(s.icmp, writeBuffer)
 		for _, file := range ff2 {
 			if err := recoverJournal(file); err != nil {
 				return err
@@ -508,7 +507,6 @@ func (d *DB) recoverJournal() error {
 func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
 	s := d.s
-	ucmp := s.cmp.cmp

 	ikey := newIKey(key, seq, tSeek)

 	em, fm := d.getMems()
@@ -519,7 +517,7 @@ func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err
 		mk, mv, me := m.Find(ikey)
 		if me == nil {
 			ukey, _, t, ok := parseIkey(mk)
-			if ok && ucmp.Compare(ukey, key) == 0 {
+			if ok && s.icmp.uCompare(ukey, key) == 0 {
 				if t == tDel {
 					return nil, ErrNotFound
 				}
@@ -656,13 +654,13 @@ func (d *DB) GetProperty(name string) (value string, err error) {
 	return
 }

-// GetApproximateSizes calculates approximate sizes of the given key ranges.
+// SizeOf calculates approximate sizes of the given key ranges.
 // The length of the returned sizes are equal with the length of the given
 // ranges. The returned sizes measure storage space usage, so if the user
 // data compresses by a factor of ten, the returned sizes will be one-tenth
 // the size of the corresponding user data size.
 // The results may not include the sizes of recently written data.
-func (d *DB) GetApproximateSizes(ranges []util.Range) (Sizes, error) {
+func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
 	if err := d.ok(); err != nil {
 		return nil, err
 	}
@@ -674,11 +672,11 @@ func (d *DB) GetApproximateSizes(ranges []util.Range) (Sizes, error) {
 	for _, r := range ranges {
 		min := newIKey(r.Start, kMaxSeq, tSeek)
 		max := newIKey(r.Limit, kMaxSeq, tSeek)
-		start, err := v.getApproximateOffset(min)
+		start, err := v.offsetOf(min)
 		if err != nil {
 			return nil, err
 		}
-		limit, err := v.getApproximateOffset(max)
+		limit, err := v.offsetOf(max)
 		if err != nil {
 			return nil, err
 		}
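`GetApproximateSizes` is renamed to `SizeOf` on the exported DB type, so callers only swap the method name. A hedged usage sketch against the renamed API (the path argument is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/sizeof-example", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Formerly db.GetApproximateSizes; behaviour is unchanged.
	sizes, err := db.SizeOf([]util.Range{
		{Start: []byte("a"), Limit: []byte("z")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("approximate on-disk bytes:", sizes.Sum())
}
```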
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go (generated, vendored, 9 lines changed)

@@ -232,7 +232,7 @@ func (d *DB) memCompaction() {
 	// Pause table compaction.
 	ch := make(chan struct{})
 	select {
-	case d.tcompPauseC <- ch:
+	case d.tcompPauseC <- (chan<- struct{})(ch):
 	case _, _ = <-d.closeC:
 		return
 	}
@@ -268,7 +268,7 @@ func (d *DB) memCompaction() {
 	// Drop frozen mem.
 	d.dropFrozenMem()

-	// Unpause table compaction.
+	// Resume table compaction.
 	select {
 	case <-ch:
 	case _, _ = <-d.closeC:
@@ -281,7 +281,6 @@

 func (d *DB) tableCompaction(c *compaction, noTrivial bool) {
 	s := d.s
-	ucmp := s.cmp.cmp

 	rec := new(sessionRecord)
 	rec.addCompactionPointer(c.level, c.max)
@@ -382,7 +381,7 @@ func (d *DB) tableCompaction(c *compaction, noTrivial bool) {
 			hasUkey = false
 			lseq = kMaxSeq
 		} else {
-			if !hasUkey || ucmp.Compare(key.ukey(), ukey) != 0 {
+			if !hasUkey || s.icmp.uCompare(key.ukey(), ukey) != 0 {
 				// First occurrence of this user key
 				ukey = append(ukey[:0], key.ukey()...)
 				hasUkey = true
@@ -499,7 +498,7 @@ func (d *DB) tableRangeCompaction(level int, min, max []byte) {
 	v := s.version_NB()
 	m := 1
 	for i, t := range v.tables[1:] {
-		if t.isOverlaps(min, max, true, s.cmp) {
+		if t.isOverlaps(min, max, true, s.icmp) {
 			m = i + 1
 		}
 	}
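Besides the comparer plumbing, the first hunk makes the channel conversion explicit when handing `ch` to `tcompPauseC`, which apparently carries send-only channels. A tiny illustration of that conversion (channel names here are made up):

```go
package main

import "fmt"

func main() {
	// A queue of send-only channels, as tcompPauseC appears to be.
	pauseC := make(chan chan<- struct{}, 1)

	ch := make(chan struct{})
	// A bidirectional channel converts to a send-only one; the diff merely
	// spells out a conversion Go would also apply implicitly.
	pauseC <- (chan<- struct{})(ch)

	resume := <-pauseC
	go func() { resume <- struct{}{} }()
	<-ch // unblocks once the "resume" side sends
	fmt.Println("paused and resumed")
}
```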
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go (generated, vendored, 13 lines changed)

@@ -10,7 +10,6 @@ import (
 	"errors"
 	"runtime"

-	"github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/util"
@@ -35,7 +34,7 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
 	}
 	i = append(i, ti...)
 	strict := s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
-	mi := iterator.NewMergedIterator(i, s.cmp, strict)
+	mi := iterator.NewMergedIterator(i, s.icmp, strict)
 	mi.SetReleaser(&versionReleaser{v: v})
 	return mi
 }
@@ -53,7 +52,7 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
 	}
 	rawIter := db.newRawIterator(slice_, ro)
 	iter := &dbIter{
-		cmp:    db.s.cmp.cmp,
+		icmp:   db.s.icmp,
 		iter:   rawIter,
 		seq:    seq,
 		strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator),
@@ -76,7 +75,7 @@ const (

 // dbIter represent an interator states over a database session.
 type dbIter struct {
-	cmp    comparer.BasicComparer
+	icmp   *iComparer
 	iter   iterator.Iterator
 	seq    uint64
 	strict bool
@@ -166,7 +165,7 @@ func (i *dbIter) next() bool {
 			i.key = append(i.key[:0], ukey...)
 			i.dir = dirForward
 		case tVal:
-			if i.dir == dirSOI || i.cmp.Compare(ukey, i.key) > 0 {
+			if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
 				i.key = append(i.key[:0], ukey...)
 				i.value = append(i.value[:0], i.iter.Value()...)
 				i.dir = dirForward
@@ -211,7 +210,7 @@ func (i *dbIter) prev() bool {
 			ukey, seq, t, ok := parseIkey(i.iter.Key())
 			if ok {
 				if seq <= i.seq {
-					if !del && i.cmp.Compare(ukey, i.key) < 0 {
+					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
 						return true
 					}
 					del = (t == tDel)
@@ -252,7 +251,7 @@ func (i *dbIter) Prev() bool {
 	for i.iter.Prev() {
 		ukey, _, _, ok := parseIkey(i.iter.Key())
 		if ok {
			if i.cmp.Compare(ukey, i.key) < 0 {
-			if i.cmp.Compare(ukey, i.key) < 0 {
+			if i.icmp.uCompare(ukey, i.key) < 0 {
 				goto cont
 			}
 		} else if i.strict {
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go (generated, vendored, 2 lines changed)

@@ -46,7 +46,7 @@ func (d *DB) newMem(n int) (mem *memdb.DB, err error) {
 	d.journalWriter = w
 	d.journalFile = file
 	d.frozenMem = d.mem
-	d.mem = memdb.New(s.cmp, maxInt(d.s.o.GetWriteBuffer(), n))
+	d.mem = memdb.New(s.icmp, maxInt(d.s.o.GetWriteBuffer(), n))
 	mem = d.mem
 	// The seq only incremented by the writer.
 	d.frozenSeq = d.seq
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go (generated, vendored, 22 lines changed)

@@ -149,7 +149,6 @@ func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
 	db := h.db

 	var res uint64
-	ucmp := db.s.cmp.cmp
 	v := db.s.version()
 	for i, tt := range v.tables[1 : len(v.tables)-1] {
 		level := i + 1
@@ -157,7 +156,7 @@ func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
 		for _, t := range tt {
 			var r tFiles
 			min, max := t.min.ukey(), t.max.ukey()
-			next.getOverlaps(min, max, &r, true, ucmp)
+			next.getOverlaps(min, max, &r, true, db.s.icmp.ucmp)
 			sum := r.size()
 			if sum > res {
 				res = sum
@@ -238,7 +237,7 @@ func (h *dbHarness) getVal(key, value string) {
 func (h *dbHarness) allEntriesFor(key, want string) {
 	t := h.t
 	db := h.db
-	ucmp := db.s.cmp.cmp
+	s := db.s

 	ikey := newIKey([]byte(key), kMaxSeq, tVal)
 	iter := db.newRawIterator(nil, nil)
@@ -251,7 +250,7 @@ func (h *dbHarness) allEntriesFor(key, want string) {
 	for iter.Valid() {
 		rkey := iKey(iter.Key())
 		if _, t, ok := rkey.parseNum(); ok {
-			if ucmp.Compare(ikey.ukey(), rkey.ukey()) != 0 {
+			if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 {
 				break
 			}
 			if !first {
@@ -390,11 +389,11 @@ func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
 	t := h.t
 	db := h.db

-	s, err := db.GetApproximateSizes([]util.Range{
+	s, err := db.SizeOf([]util.Range{
 		{[]byte(start), []byte(limit)},
 	})
 	if err != nil {
-		t.Error("GetApproximateSizes: got error: ", err)
+		t.Error("SizeOf: got error: ", err)
 	}
 	if s.Sum() < low || s.Sum() > hi {
 		t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
@@ -994,6 +993,7 @@ func TestDb_SparseMerge(t *testing.T) {
 	h.put("C", "vc")
 	h.compactMem()
 	h.compactRangeAt(0, "", "")
+	h.waitCompaction()

 	// Make sparse update
 	h.put("A", "va2")
@@ -1003,12 +1003,14 @@ func TestDb_SparseMerge(t *testing.T) {

 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 	h.compactRangeAt(0, "", "")
+	h.waitCompaction()
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 	h.compactRangeAt(1, "", "")
+	h.waitCompaction()
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 }

-func TestDb_ApproximateSizes(t *testing.T) {
+func TestDb_SizeOf(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		Compression: opt.NoCompression,
 		WriteBuffer: 10000000,
@@ -1028,7 +1030,7 @@ func TestDb_ApproximateSizes(t *testing.T) {
 		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10))
 	}

-	// 0 because GetApproximateSizes() does not account for memtable space
+	// 0 because SizeOf() does not account for memtable space
 	h.sizeAssert("", numKey(50), 0, 0)

 	for r := 0; r < 3; r++ {
@@ -1058,7 +1060,7 @@ func TestDb_ApproximateSizes(t *testing.T) {
 	}
 }

-func TestDb_ApproximateSizes_MixOfSmallAndLarge(t *testing.T) {
+func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
 	defer h.close()

@@ -1472,7 +1474,7 @@ func TestDb_ClosedIsClosed(t *testing.T) {
 	_, err = db.GetProperty("leveldb.stats")
 	assertErr(t, err, true)

-	_, err = db.GetApproximateSizes([]util.Range{{[]byte("a"), []byte("z")}})
+	_, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}})
 	assertErr(t, err, true)

 	assertErr(t, db.CompactRange(util.Range{}), true)
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go (generated, vendored, 9 lines changed)

@@ -9,7 +9,6 @@ package leveldb
 import (
 	"time"

-	"github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/syndtr/goleveldb/leveldb/memdb"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/util"
@@ -232,11 +231,11 @@ func (d *DB) Delete(key []byte, wo *opt.WriteOptions) error {
 	return d.Write(b, wo)
 }

-func isMemOverlaps(ucmp comparer.BasicComparer, mem *memdb.DB, min, max []byte) bool {
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
 	iter := mem.NewIterator(nil)
 	defer iter.Release()
-	return (max == nil || (iter.First() && ucmp.Compare(max, iKey(iter.Key()).ukey()) >= 0)) &&
-		(min == nil || (iter.Last() && ucmp.Compare(min, iKey(iter.Key()).ukey()) <= 0))
+	return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
+		(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
 }

 // CompactRange compacts the underlying DB for the given key range.
@@ -261,7 +260,7 @@ func (d *DB) CompactRange(r util.Range) error {

 	// Check for overlaps in memdb.
 	mem := d.getEffectiveMem()
-	if isMemOverlaps(d.s.cmp.cmp, mem, r.Start, r.Limit) {
+	if isMemOverlaps(d.s.icmp, mem, r.Start, r.Limit) {
 		// Memdb compaction.
 		if _, err := d.rotateMem(0); err != nil {
 			<-d.writeLockC
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go (generated, vendored, 6 lines changed)

@@ -13,7 +13,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/comparer"
 )

-var icmp = &iComparer{comparer.DefaultComparer}
+var defaultIComparer = &iComparer{comparer.DefaultComparer}

 func ikey(key string, seq uint64, t vType) iKey {
 	return newIKey([]byte(key), uint64(seq), t)
@@ -21,7 +21,7 @@ func ikey(key string, seq uint64, t vType) iKey {

 func shortSep(a, b []byte) []byte {
 	dst := make([]byte, len(a))
-	dst = icmp.Separator(dst[:0], a, b)
+	dst = defaultIComparer.Separator(dst[:0], a, b)
 	if dst == nil {
 		return a
 	}
@@ -30,7 +30,7 @@ func shortSep(a, b []byte) []byte {

 func shortSuccessor(b []byte) []byte {
 	dst := make([]byte, len(b))
-	dst = icmp.Successor(dst[:0], b)
+	dst = defaultIComparer.Successor(dst[:0], b)
 	if dst == nil {
 		return b
 	}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go (generated, vendored, 4 lines changed)

@@ -32,8 +32,8 @@ func (s *session) setOptions(o *opt.Options) {
 		s.o.BlockCache = nil
 	}
 	// Comparer.
-	s.cmp = &iComparer{o.GetComparer()}
-	s.o.Comparer = s.cmp
+	s.icmp = &iComparer{o.GetComparer()}
+	s.o.Comparer = s.icmp
 	// Filter.
 	if filter := o.GetFilter(); filter != nil {
 		s.o.Filter = &iFilter{filter}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go (generated, vendored, 49 lines changed)

@@ -32,7 +32,7 @@ type session struct {
 	stor storage.Storage
 	storLock util.Releaser
 	o *opt.Options
-	cmp *iComparer
+	icmp *iComparer
 	tops *tOps

 	manifest *journal.Writer
@@ -150,8 +150,8 @@ func (s *session) recover() (err error) {
 	switch {
 	case !rec.has(recComparer):
 		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")}
-	case rec.comparer != s.cmp.cmp.Name():
-		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.cmp.cmp.Name() + "', " + "got '" + rec.comparer + "'")}
+	case rec.comparer != s.icmp.uName():
+		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")}
 	case !rec.has(recNextNum):
 		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")}
 	case !rec.has(recJournalNum):
@@ -189,9 +189,6 @@ func (s *session) commit(r *sessionRecord) (err error) {

 // Pick a compaction based on current state; need external synchronization.
 func (s *session) pickCompaction() *compaction {
-	icmp := s.cmp
-	ucmp := icmp.cmp
-
 	v := s.version_NB()

 	var level int
@@ -201,7 +198,7 @@ func (s *session) pickCompaction() *compaction {
 		cp := s.stCPtrs[level]
 		tt := v.tables[level]
 		for _, t := range tt {
-			if cp == nil || icmp.Compare(t.max, cp) > 0 {
+			if cp == nil || s.icmp.Compare(t.max, cp) > 0 {
 				t0 = append(t0, t)
 				break
 			}
@@ -221,9 +218,9 @@ func (s *session) pickCompaction() *compaction {

 	c := &compaction{s: s, version: v, level: level}
 	if level == 0 {
-		min, max := t0.getRange(icmp)
+		min, max := t0.getRange(s.icmp)
 		t0 = nil
-		v.tables[0].getOverlaps(min.ukey(), max.ukey(), &t0, false, ucmp)
+		v.tables[0].getOverlaps(min.ukey(), max.ukey(), &t0, false, s.icmp.ucmp)
 	}

 	c.tables[0] = t0
@@ -236,7 +233,7 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
 	v := s.version_NB()

 	var t0 tFiles
-	v.tables[level].getOverlaps(min, max, &t0, level != 0, s.cmp.cmp)
+	v.tables[level].getOverlaps(min, max, &t0, level != 0, s.icmp.ucmp)
 	if len(t0) == 0 {
 		return nil
 	}
@@ -285,35 +282,33 @@ type compaction struct {
 func (c *compaction) expand() {
 	s := c.s
 	v := c.version
-	icmp := s.cmp
-	ucmp := icmp.cmp

 	level := c.level
 	vt0, vt1 := v.tables[level], v.tables[level+1]

 	t0, t1 := c.tables[0], c.tables[1]
-	min, max := t0.getRange(icmp)
-	vt1.getOverlaps(min.ukey(), max.ukey(), &t1, true, ucmp)
+	min, max := t0.getRange(s.icmp)
+	vt1.getOverlaps(min.ukey(), max.ukey(), &t1, true, s.icmp.ucmp)

 	// Get entire range covered by compaction
-	amin, amax := append(t0, t1...).getRange(icmp)
+	amin, amax := append(t0, t1...).getRange(s.icmp)

 	// See if we can grow the number of inputs in "level" without
 	// changing the number of "level+1" files we pick up.
 	if len(t1) > 0 {
 		var exp0 tFiles
-		vt0.getOverlaps(amin.ukey(), amax.ukey(), &exp0, level != 0, ucmp)
+		vt0.getOverlaps(amin.ukey(), amax.ukey(), &exp0, level != 0, s.icmp.ucmp)
 		if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes {
 			var exp1 tFiles
-			xmin, xmax := exp0.getRange(icmp)
-			vt1.getOverlaps(xmin.ukey(), xmax.ukey(), &exp1, true, ucmp)
+			xmin, xmax := exp0.getRange(s.icmp)
+			vt1.getOverlaps(xmin.ukey(), xmax.ukey(), &exp1, true, s.icmp.ucmp)
 			if len(exp1) == len(t1) {
 				s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
 					level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
 					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
 				min, max = xmin, xmax
 				t0, t1 = exp0, exp1
-				amin, amax = append(t0, t1...).getRange(icmp)
+				amin, amax = append(t0, t1...).getRange(s.icmp)
 			}
 		}
 	}
@@ -321,7 +316,7 @@ func (c *compaction) expand() {
 	// Compute the set of grandparent files that overlap this compaction
 	// (parent == level+1; grandparent == level+2)
 	if level+2 < kNumLevels {
-		v.tables[level+2].getOverlaps(amin.ukey(), amax.ukey(), &c.gp, true, ucmp)
+		v.tables[level+2].getOverlaps(amin.ukey(), amax.ukey(), &c.gp, true, s.icmp.ucmp)
 	}

 	c.tables[0], c.tables[1] = t0, t1
@@ -336,13 +331,13 @@ func (c *compaction) trivial() bool {
 func (c *compaction) isBaseLevelForKey(key []byte) bool {
 	s := c.s
 	v := c.version
-	ucmp := s.cmp.cmp

 	for level, tt := range v.tables[c.level+2:] {
 		for c.tPtrs[level] < len(tt) {
 			t := tt[c.tPtrs[level]]
-			if ucmp.Compare(key, t.max.ukey()) <= 0 {
+			if s.icmp.uCompare(key, t.max.ukey()) <= 0 {
 				// We've advanced far enough
-				if ucmp.Compare(key, t.min.ukey()) >= 0 {
+				if s.icmp.uCompare(key, t.min.ukey()) >= 0 {
 					// Key falls in this file's range, so definitely not base level
 					return false
 				}
@@ -355,10 +350,9 @@ func (c *compaction) isBaseLevelForKey(key []byte) bool {
 }

 func (c *compaction) shouldStopBefore(key iKey) bool {
-	icmp := c.s.cmp
 	for ; c.gpidx < len(c.gp); c.gpidx++ {
 		gp := c.gp[c.gpidx]
-		if icmp.Compare(key, gp.max) <= 0 {
+		if c.s.icmp.Compare(key, gp.max) <= 0 {
 			break
 		}
 		if c.seenKey {
@@ -377,7 +371,6 @@ func (c *compaction) shouldStopBefore(key iKey) bool {

 func (c *compaction) newIterator() iterator.Iterator {
 	s := c.s
-	icmp := s.cmp

 	level := c.level
 	icap := 2
@@ -401,10 +394,10 @@ func (c *compaction) newIterator() iterator.Iterator {
 			its = append(its, s.tops.newIterator(t, nil, ro))
 		}
 	} else {
-		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, icmp, nil, ro), strict, true)
+		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, nil, ro), strict, true)
 		its = append(its, it)
 	}
 	}

-	return iterator.NewMergedIterator(its, icmp, true)
+	return iterator.NewMergedIterator(its, s.icmp, true)
 }
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go (generated, vendored, 2 lines changed)

@@ -148,7 +148,7 @@ func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
 		}
 	}

-	r.setComparer(s.cmp.cmp.Name())
+	r.setComparer(s.icmp.uName())
 	}
 }
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go (generated, vendored, 10 lines changed)

@@ -117,12 +117,10 @@ func (tf tFiles) searchMax(key iKey, icmp *iComparer) int {
 }

 func (tf tFiles) isOverlaps(min, max []byte, disjSorted bool, icmp *iComparer) bool {
-	ucmp := icmp.cmp
-
 	if !disjSorted {
 		// Need to check against all files
 		for _, t := range tf {
-			if !t.isAfter(min, ucmp) && !t.isBefore(max, ucmp) {
+			if !t.isAfter(min, icmp.ucmp) && !t.isBefore(max, icmp.ucmp) {
 				return true
 			}
 		}
@@ -139,7 +137,7 @@ func (tf tFiles) isOverlaps(min, max []byte, disjSorted bool, icmp *iComparer) b
 		// beginning of range is after all files, so no overlap
 		return false
 	}
-	return !tf[idx].isBefore(max, ucmp)
+	return !tf[idx].isBefore(max, icmp.ucmp)
 }

 func (tf tFiles) getOverlaps(min, max []byte, r *tFiles, disjSorted bool, ucmp comparer.BasicComparer) {
@@ -338,12 +336,12 @@ func (t *tOps) get(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []by
 	return c.Value().(*table.Reader).Find(key, ro)
 }

-func (t *tOps) getApproximateOffset(f *tFile, key []byte) (offset uint64, err error) {
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
 	c, err := t.lookup(f)
 	if err != nil {
 		return
 	}
-	_offset, err := c.Value().(*table.Reader).GetApproximateOffset(key)
+	_offset, err := c.Value().(*table.Reader).OffsetOf(key)
 	offset = uint64(_offset)
 	c.Release()
 	return
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go (generated, vendored, 4 lines changed)

@@ -732,10 +732,10 @@ func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
 	return
 }

-// GetApproximateOffset returns approximate offset for the given key.
+// OffsetOf returns approximate offset for the given key.
 //
 // It is safe to modify the contents of the argument after Get returns.
-func (r *Reader) GetApproximateOffset(key []byte) (offset int64, err error) {
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
 	if r.err != nil {
 		err = r.err
 		return
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go (generated, vendored, 2 lines changed)

@@ -61,7 +61,7 @@ var _ = testutil.Defer(func() {

 			tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
 			CheckOffset := func(key string, expect, threshold int) {
-				offset, err := tr.GetApproximateOffset([]byte(key))
+				offset, err := tr.OffsetOf([]byte(key))
 				Expect(err).ShouldNot(HaveOccurred())
 				Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key)
 			}
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go (generated, vendored, 37 lines changed)

@@ -91,8 +91,6 @@ func (v *version) release() {

 func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool, err error) {
 	s := v.s
-	icmp := s.cmp
-	ucmp := icmp.cmp

 	ukey := key.ukey()

@@ -112,8 +110,8 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,
 			// overlap user_key and process them in order from newest to
 			var tmp tFiles
 			for _, t := range ts {
-				if ucmp.Compare(ukey, t.min.ukey()) >= 0 &&
-					ucmp.Compare(ukey, t.max.ukey()) <= 0 {
+				if s.icmp.uCompare(ukey, t.min.ukey()) >= 0 &&
+					s.icmp.uCompare(ukey, t.max.ukey()) <= 0 {
 					tmp = append(tmp, t)
 				}
 			}
@@ -125,8 +123,8 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,
 			tmp.sortByNum()
 			ts = tmp
 		} else {
-			i := ts.searchMax(key, icmp)
-			if i >= len(ts) || ucmp.Compare(ukey, ts[i].min.ukey()) < 0 {
+			i := ts.searchMax(key, s.icmp)
+			if i >= len(ts) || s.icmp.uCompare(ukey, ts[i].min.ukey()) < 0 {
 				continue
 			}

@@ -157,7 +155,7 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,

 			rkey := iKey(_rkey)
 			if seq, t, ok := rkey.parseNum(); ok {
-				if ucmp.Compare(ukey, rkey.ukey()) == 0 {
+				if s.icmp.uCompare(ukey, rkey.ukey()) == 0 {
 					if level == 0 {
 						if seq >= l0seq {
 							l0found = true
@@ -201,7 +199,6 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,

 func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
 	s := v.s
-	icmp := s.cmp

 	// Merge all level zero files together since they may overlap
 	for _, t := range v.tables[0] {
@@ -215,7 +212,7 @@ func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []it
 			continue
 		}

-		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, icmp, slice, ro), strict, true)
+		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, slice, ro), strict, true)
 		its = append(its, it)
 	}

@@ -245,16 +242,13 @@ func (v *version) tLen(level int) int {
 	return len(v.tables[level])
 }

-func (v *version) getApproximateOffset(key iKey) (n uint64, err error) {
-	icmp := v.s.cmp
-	tops := v.s.tops
-
+func (v *version) offsetOf(key iKey) (n uint64, err error) {
 	for level, tt := range v.tables {
 		for _, t := range tt {
-			if icmp.Compare(t.max, key) <= 0 {
+			if v.s.icmp.Compare(t.max, key) <= 0 {
 				// Entire file is before "key", so just add the file size
 				n += t.size
-			} else if icmp.Compare(t.min, key) > 0 {
+			} else if v.s.icmp.Compare(t.min, key) > 0 {
 				// Entire file is after "key", so ignore
 				if level > 0 {
 					// Files other than level 0 are sorted by meta->min, so
@@ -266,7 +260,7 @@ func (v *version) getApproximateOffset(key iKey) (n uint64, err error) {
 				// "key" falls in the range for this table. Add the
 				// approximate offset of "key" within the table.
 				var nn uint64
-				nn, err = tops.getApproximateOffset(t, key)
+				nn, err = v.s.tops.offsetOf(t, key)
 				if err != nil {
 					return 0, err
 				}
@@ -279,16 +273,13 @@ func (v *version) getApproximateOffset(key iKey) (n uint64, err error) {
 }

 func (v *version) pickLevel(min, max []byte) (level int) {
-	icmp := v.s.cmp
-	ucmp := icmp.cmp
-
-	if !v.tables[0].isOverlaps(min, max, false, icmp) {
+	if !v.tables[0].isOverlaps(min, max, false, v.s.icmp) {
 		var r tFiles
 		for ; level < kMaxMemCompactLevel; level++ {
-			if v.tables[level+1].isOverlaps(min, max, true, icmp) {
+			if v.tables[level+1].isOverlaps(min, max, true, v.s.icmp) {
 				break
 			}
-			v.tables[level+2].getOverlaps(min, max, &r, true, ucmp)
+			v.tables[level+2].getOverlaps(min, max, &r, true, v.s.icmp.ucmp)
 			if r.size() > kMaxGrandParentOverlapBytes {
 				break
 			}
@@ -411,7 +402,7 @@ func (p *versionStaging) finish() *version {
 	}

 	// sort tables
-	nt.sortByKey(s.cmp)
+	nt.sortByKey(s.icmp)
 	nv.tables[level] = nt
 }