Update all deps

commit 2b9fc0fd43
parent d6c058c407
Author: Jakob Borg
Date:   2014-07-06 23:13:10 +02:00

24 changed files with 201 additions and 199 deletions

Godeps/Godeps.json (generated)

@@ -9,28 +9,28 @@
 	"Deps": [
 		{
 			"ImportPath": "bitbucket.org/kardianos/osext",
-			"Comment": "null-9",
-			"Rev": "364fb577de68fb646c4cb39cc0e09c887ee16376"
+			"Comment": "null-13",
+			"Rev": "5d3ddcf53a508cc2f7404eaebf546ef2cb5cdb6e"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.crypto/bcrypt",
-			"Comment": "null-185",
-			"Rev": "6478cc9340cbbe6c04511280c5007722269108e9"
+			"Comment": "null-212",
+			"Rev": "1064b89a6fb591df0dd65422295b8498916b092f"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.crypto/blowfish",
-			"Comment": "null-185",
-			"Rev": "6478cc9340cbbe6c04511280c5007722269108e9"
+			"Comment": "null-212",
+			"Rev": "1064b89a6fb591df0dd65422295b8498916b092f"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.text/transform",
-			"Comment": "null-81",
-			"Rev": "9cbe983aed9b0dfc73954433fead5e00866342ac"
+			"Comment": "null-87",
+			"Rev": "c59e4f2f93824f81213799e64c3eead7be24660a"
 		},
 		{
 			"ImportPath": "code.google.com/p/go.text/unicode/norm",
-			"Comment": "null-81",
-			"Rev": "9cbe983aed9b0dfc73954433fead5e00866342ac"
+			"Comment": "null-87",
+			"Rev": "c59e4f2f93824f81213799e64c3eead7be24660a"
 		},
 		{
 			"ImportPath": "code.google.com/p/snappy-go/snappy",
@@ -39,15 +39,15 @@
 		},
 		{
 			"ImportPath": "github.com/golang/groupcache/lru",
-			"Rev": "d781998583680cda80cf61e0b37dd0cd8da2eb52"
+			"Rev": "a531d51b7f9f3dd13c1c2b50d42d739b70442dbb"
 		},
 		{
 			"ImportPath": "github.com/juju/ratelimit",
-			"Rev": "cbaa435c80a9716e086f25d409344b26c4039358"
+			"Rev": "f9f36d11773655c0485207f0ad30dc2655f69d56"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "ca1565e5fb6658691d7074d270602c9185a55c79"
+			"Rev": "e1f2d2bdccd7c62f4d4a29aaf081bf1fc4404f91"
 		},
 		{
 			"ImportPath": "github.com/vitrun/qart/coding",


@@ -4,13 +4,17 @@
 package osext
 
-import "syscall"
+import (
+	"syscall"
+	"os"
+	"strconv"
+)
 
 func executable() (string, error) {
-	f, err := Open("/proc/" + itoa(Getpid()) + "/text")
+	f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
 	if err != nil {
 		return "", err
 	}
 	defer f.Close()
 	return syscall.Fd2path(int(f.Fd()))
 }


@@ -14,7 +14,7 @@ import (
 	"unsafe"
 )
 
-var startUpcwd, getwdError = os.Getwd()
+var initCwd, initCwdErr = os.Getwd()
 
 func executable() (string, error) {
 	var mib [4]int32
@@ -26,20 +26,20 @@ func executable() (string, error) {
 	}
 
 	n := uintptr(0)
-	// get length
-	_, _, err := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
-	if err != 0 {
-		return "", err
+	// Get length.
+	_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
+	if errNum != 0 {
+		return "", errNum
 	}
-	if n == 0 { // shouldn't happen
+	if n == 0 { // This shouldn't happen.
 		return "", nil
 	}
 	buf := make([]byte, n)
-	_, _, err = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
-	if err != 0 {
-		return "", err
+	_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
+	if errNum != 0 {
+		return "", errNum
 	}
-	if n == 0 { // shouldn't happen
+	if n == 0 { // This shouldn't happen.
 		return "", nil
 	}
 	for i, v := range buf {
@@ -48,35 +48,32 @@ func executable() (string, error) {
 			break
 		}
 	}
-	var strpath string
-	if buf[0] != '/' {
-		var e error
-		if strpath, e = getAbs(buf); e != nil {
-			return strpath, e
+	var err error
+	execPath := string(buf)
+	// execPath will not be empty due to above checks.
+	// Try to get the absolute path if the execPath is not rooted.
+	if execPath[0] != '/' {
+		execPath, err = getAbs(execPath)
+		if err != nil {
+			return execPath, err
 		}
-	} else {
-		strpath = string(buf)
 	}
-	// darwin KERN_PROCARGS may return the path to a symlink rather than the
-	// actual executable
+	// For darwin KERN_PROCARGS may return the path to a symlink rather than the
+	// actual executable.
 	if runtime.GOOS == "darwin" {
-		if strpath, err := filepath.EvalSymlinks(strpath); err != nil {
-			return strpath, err
+		if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
+			return execPath, err
 		}
 	}
-	return strpath, nil
+	return execPath, nil
 }
 
-func getAbs(buf []byte) (string, error) {
-	if getwdError != nil {
-		return string(buf), getwdError
-	} else {
-		if buf[0] == '.' {
-			buf = buf[1:]
-		}
-		if startUpcwd[len(startUpcwd)-1] != '/' && buf[0] != '/' {
-			return startUpcwd + "/" + string(buf), nil
-		}
-		return startUpcwd + string(buf), nil
+func getAbs(execPath string) (string, error) {
+	if initCwdErr != nil {
+		return execPath, initCwdErr
 	}
+	// The execPath may begin with a "../" or a "./" so clean it first.
+	// Join the two paths, trailing and starting slashes undetermined, so use
+	// the generic Join function.
+	return filepath.Join(initCwd, filepath.Clean(execPath)), nil
 }
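The rewritten getAbs leans on the standard library instead of hand-rolled slash handling. A minimal standalone sketch (not part of the diff) of why that works: filepath.Join cleans its result, so "./" and "../" prefixes and doubled slashes come out normalized.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Join cleans the joined path, handling relative prefixes and
	// slashes without manual string concatenation.
	fmt.Println(filepath.Join("/home/user", "./bin/app"))   // /home/user/bin/app
	fmt.Println(filepath.Join("/home/user/", "../etc/app")) // /home/etc/app
}
```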


@@ -53,6 +53,15 @@ func TestBcryptingIsCorrect(t *testing.T) {
 	}
 }
 
+func TestVeryShortPasswords(t *testing.T) {
+	key := []byte("k")
+	salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+	_, err := bcrypt(key, 10, salt)
+	if err != nil {
+		t.Errorf("One byte key resulted in error: %s", err)
+	}
+}
+
 func TestTooLongPasswordsWork(t *testing.T) {
 	salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
 	// One byte over the usual 56 byte limit that blowfish has
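The new test exercises the case that motivated the blowfish change further down: one-byte keys used to be rejected. Through the package's exported API the same behavior looks roughly like this (a minimal sketch; password and cost are arbitrary):

```go
package main

import (
	"fmt"

	"code.google.com/p/go.crypto/bcrypt"
)

func main() {
	// A one-byte password; with the older blowfish (minimum 4-byte keys)
	// this returned an error instead of a hash.
	hash, err := bcrypt.GenerateFromPassword([]byte("k"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	// A nil error means the password matches the hash.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("k")))
}
```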


@@ -192,19 +192,13 @@ func TestCipherDecrypt(t *testing.T) {
 }
 
 func TestSaltedCipherKeyLength(t *testing.T) {
-	var key []byte
-	for i := 0; i < 4; i++ {
-		_, err := NewSaltedCipher(key, []byte{'a'})
-		if err != KeySizeError(i) {
-			t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(i))
-		}
-		key = append(key, 'a')
+	if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) {
+		t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0))
 	}
 
 	// A 57-byte key. One over the typical blowfish restriction.
-	key = []byte("012345678901234567890123456789012345678901234567890123456")
-	_, err := NewSaltedCipher(key, []byte{'a'})
-	if err != nil {
+	key := []byte("012345678901234567890123456789012345678901234567890123456")
+	if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil {
 		t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
 	}
 }


@@ -26,14 +26,13 @@ func (k KeySizeError) Error() string {
 }
 
 // NewCipher creates and returns a Cipher.
-// The key argument should be the Blowfish key, 4 to 56 bytes.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
 func NewCipher(key []byte) (*Cipher, error) {
 	var result Cipher
-	k := len(key)
-	if k < 4 || k > 56 {
+	if k := len(key); k < 1 || k > 56 {
 		return nil, KeySizeError(k)
 	}
-	initCipher(key, &result)
+	initCipher(&result)
 	ExpandKey(key, &result)
 	return &result, nil
 }
@@ -44,11 +43,10 @@ func NewCipher(key []byte) (*Cipher, error) {
 // bytes. Only the first 16 bytes of salt are used.
 func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
 	var result Cipher
-	k := len(key)
-	if k < 4 {
+	if k := len(key); k < 1 {
 		return nil, KeySizeError(k)
 	}
-	initCipher(key, &result)
+	initCipher(&result)
 	expandKeyWithSalt(key, salt, &result)
 	return &result, nil
 }
@@ -81,7 +79,7 @@ func (c *Cipher) Decrypt(dst, src []byte) {
 	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
 }
 
-func initCipher(key []byte, c *Cipher) {
+func initCipher(c *Cipher) {
 	copy(c.p[0:], p[0:])
 	copy(c.s0[0:], s0[0:])
 	copy(c.s1[0:], s1[0:])
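With the lower bound relaxed from 4 bytes to 1, very short keys are now accepted. A minimal sketch of the new contract (key and plaintext here are arbitrary):

```go
package main

import (
	"fmt"

	"code.google.com/p/go.crypto/blowfish"
)

func main() {
	// A 1-byte key is now accepted; anything from 1 to 56 bytes works,
	// while an empty key (or more than 56 bytes for NewCipher) still
	// returns KeySizeError.
	c, err := blowfish.NewCipher([]byte("k"))
	if err != nil {
		panic(err)
	}
	src := []byte("8bytes!!") // Blowfish encrypts 8-byte blocks.
	dst := make([]byte, len(src))
	c.Encrypt(dst, src)
	fmt.Printf("%x\n", dst)
}
```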


@@ -42,6 +42,14 @@ NewBucket returns a new token bucket that fills at the rate of one token every
 fillInterval, up to the given maximum capacity. Both arguments must be positive.
 The bucket is initially full.
 
+#### func NewBucketWithQuantum
+
+```go
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
+```
+NewBucketWithQuantum is similar to NewBucket, but allows the specification of
+the quantum size - quantum tokens are added every fillInterval.
+
 #### func NewBucketWithRate
 
 ```go


@@ -36,7 +36,7 @@ type Bucket struct {
 // maximum capacity. Both arguments must be
 // positive. The bucket is initially full.
 func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
-	return newBucketWithQuantum(fillInterval, capacity, 1)
+	return NewBucketWithQuantum(fillInterval, capacity, 1)
 }
 
 // rateMargin specifes the allowed variance of actual
@@ -54,7 +54,7 @@ func NewBucketWithRate(rate float64, capacity int64) *Bucket {
 		if fillInterval <= 0 {
 			continue
 		}
-		tb := newBucketWithQuantum(fillInterval, capacity, quantum)
+		tb := NewBucketWithQuantum(fillInterval, capacity, quantum)
 		if diff := abs(tb.Rate() - rate); diff/rate <= rateMargin {
 			return tb
 		}
@@ -73,11 +73,10 @@ func nextQuantum(q int64) int64 {
 	return q1
 }
 
-// newBucketWithQuantum is similar to NewBucket, but allows
-// the specification of the quantum size - quantum tokens
-// are added every fillInterval. This is so that we can get accurate
-// rates even when we want to add more than one token per ns.
-func newBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
+// NewBucketWithQuantum is similar to NewBucket, but allows
+// the specification of the quantum size - quantum tokens
+// are added every fillInterval.
+func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
 	if fillInterval <= 0 {
 		panic("token bucket fill interval is not > 0")
 	}


@@ -273,7 +273,7 @@ func (rateLimitSuite) TestRate(c *gc.C) {
 	if !isCloseTo(tb.Rate(), 0.5, 0.00001) {
 		c.Fatalf("got %v want 0.5", tb.Rate())
 	}
-	tb = newBucketWithQuantum(100*time.Millisecond, 1, 5)
+	tb = NewBucketWithQuantum(100*time.Millisecond, 1, 5)
 	if !isCloseTo(tb.Rate(), 50, 0.00001) {
 		c.Fatalf("got %v want 50", tb.Rate())
 	}
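Now that NewBucketWithQuantum is exported, callers can express rates that a one-token quantum cannot reach at a given interval, as the test above checks. A small usage sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// Add 5 tokens every 100ms with capacity 1: an effective rate of
	// 50 tokens/s, matching the expectation in TestRate above.
	tb := ratelimit.NewBucketWithQuantum(100*time.Millisecond, 1, 5)
	fmt.Println(tb.Rate()) // 50
}
```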


@@ -9,34 +9,48 @@ package leveldb
 
 import "github.com/syndtr/goleveldb/leveldb/comparer"
 
 type iComparer struct {
-	cmp comparer.Comparer
+	ucmp comparer.Comparer
 }
 
-func (p *iComparer) Name() string {
-	return p.cmp.Name()
+func (icmp *iComparer) uName() string {
+	return icmp.ucmp.Name()
 }
 
-func (p *iComparer) Compare(a, b []byte) int {
-	ia, ib := iKey(a), iKey(b)
-	r := p.cmp.Compare(ia.ukey(), ib.ukey())
-	if r == 0 {
-		an, bn := ia.num(), ib.num()
-		if an > bn {
-			r = -1
-		} else if an < bn {
-			r = 1
+func (icmp *iComparer) uCompare(a, b []byte) int {
+	return icmp.ucmp.Compare(a, b)
+}
+
+func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
+	return icmp.ucmp.Separator(dst, a, b)
+}
+
+func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
+	return icmp.ucmp.Successor(dst, b)
+}
+
+func (icmp *iComparer) Name() string {
+	return icmp.uName()
+}
+
+func (icmp *iComparer) Compare(a, b []byte) int {
+	x := icmp.ucmp.Compare(iKey(a).ukey(), iKey(b).ukey())
+	if x == 0 {
+		if m, n := iKey(a).num(), iKey(b).num(); m > n {
+			x = -1
+		} else if m < n {
+			x = 1
 		}
 	}
-	return r
+	return x
 }
 
-func (p *iComparer) Separator(dst, a, b []byte) []byte {
+func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
 	ua, ub := iKey(a).ukey(), iKey(b).ukey()
-	dst = p.cmp.Separator(dst, ua, ub)
+	dst = icmp.ucmp.Separator(dst, ua, ub)
 	if dst == nil {
 		return nil
 	}
-	if len(dst) < len(ua) && p.cmp.Compare(ua, dst) < 0 {
+	if len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
 		dst = append(dst, kMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
@@ -45,13 +59,13 @@ func (p *iComparer) Separator(dst, a, b []byte) []byte {
 	return dst
 }
 
-func (p *iComparer) Successor(dst, b []byte) []byte {
+func (icmp *iComparer) Successor(dst, b []byte) []byte {
 	ub := iKey(b).ukey()
-	dst = p.cmp.Successor(dst, ub)
+	dst = icmp.ucmp.Successor(dst, ub)
 	if dst == nil {
 		return nil
 	}
-	if len(dst) < len(ub) && p.cmp.Compare(ub, dst) < 0 {
+	if len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
 		dst = append(dst, kMaxNumBytes...)
 	} else {
 		// Did not close possibilities that n maybe longer than len(ub).
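The renames (ucmp, uName, uCompare) separate operations on user keys from those on full internal keys. The ordering Compare implements is: user key ascending, then sequence number descending, so the newest version of a key sorts first. A self-contained sketch of that rule (fakeIKey and compare are illustrative stand-ins, not the package's types):

```go
package main

import (
	"bytes"
	"fmt"
)

type fakeIKey struct {
	ukey []byte
	seq  uint64
}

// compare orders by user key ascending, then by sequence number
// descending, so the newest entry for a given key sorts first.
func compare(a, b fakeIKey) int {
	if x := bytes.Compare(a.ukey, b.ukey); x != 0 {
		return x
	}
	switch {
	case a.seq > b.seq:
		return -1
	case a.seq < b.seq:
		return 1
	}
	return 0
}

func main() {
	newer := fakeIKey{[]byte("k"), 9}
	older := fakeIKey{[]byte("k"), 3}
	fmt.Println(compare(newer, older)) // -1: the newer version sorts first
}
```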


@@ -387,7 +387,6 @@ func recoverTable(s *session, o *opt.Options) error {
 
 func (d *DB) recoverJournal() error {
 	s := d.s
-	icmp := s.cmp
 
 	ff0, err := s.getFiles(storage.TypeJournal)
 	if err != nil {
@@ -477,7 +476,7 @@ func (d *DB) recoverJournal() error {
 	// Recover all journals.
 	if len(ff2) > 0 {
 		s.logf("journal@recovery F·%d", len(ff2))
-		mem = memdb.New(icmp, writeBuffer)
+		mem = memdb.New(s.icmp, writeBuffer)
 		for _, file := range ff2 {
 			if err := recoverJournal(file); err != nil {
 				return err
@@ -508,7 +507,6 @@ func (d *DB) recoverJournal() error {
 
 func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
 	s := d.s
-	ucmp := s.cmp.cmp
 
 	ikey := newIKey(key, seq, tSeek)
 
 	em, fm := d.getMems()
@@ -519,7 +517,7 @@ func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err
 		mk, mv, me := m.Find(ikey)
 		if me == nil {
 			ukey, _, t, ok := parseIkey(mk)
-			if ok && ucmp.Compare(ukey, key) == 0 {
+			if ok && s.icmp.uCompare(ukey, key) == 0 {
 				if t == tDel {
 					return nil, ErrNotFound
 				}
@@ -656,13 +654,13 @@ func (d *DB) GetProperty(name string) (value string, err error) {
 	return
 }
 
-// GetApproximateSizes calculates approximate sizes of the given key ranges.
+// SizeOf calculates approximate sizes of the given key ranges.
 // The length of the returned sizes are equal with the length of the given
 // ranges. The returned sizes measure storage space usage, so if the user
 // data compresses by a factor of ten, the returned sizes will be one-tenth
 // the size of the corresponding user data size.
 // The results may not include the sizes of recently written data.
-func (d *DB) GetApproximateSizes(ranges []util.Range) (Sizes, error) {
+func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
 	if err := d.ok(); err != nil {
 		return nil, err
 	}
@@ -674,11 +672,11 @@ func (d *DB) GetApproximateSizes(ranges []util.Range) (Sizes, error) {
 	for _, r := range ranges {
 		min := newIKey(r.Start, kMaxSeq, tSeek)
 		max := newIKey(r.Limit, kMaxSeq, tSeek)
-		start, err := v.getApproximateOffset(min)
+		start, err := v.offsetOf(min)
 		if err != nil {
 			return nil, err
 		}
-		limit, err := v.getApproximateOffset(max)
+		limit, err := v.offsetOf(max)
 		if err != nil {
 			return nil, err
 		}
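The GetApproximateSizes → SizeOf rename changes goleveldb's public API. A hedged usage sketch of the renamed method (the database path and key range are placeholders):

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/exampledb", nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Formerly db.GetApproximateSizes; same semantics, shorter name.
	sizes, err := db.SizeOf([]util.Range{
		{Start: []byte("a"), Limit: []byte("z")},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(sizes.Sum()) // approximate on-disk bytes for the range
}
```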


@@ -232,7 +232,7 @@ func (d *DB) memCompaction() {
 	// Pause table compaction.
 	ch := make(chan struct{})
 	select {
-	case d.tcompPauseC <- ch:
+	case d.tcompPauseC <- (chan<- struct{})(ch):
 	case _, _ = <-d.closeC:
 		return
 	}
@@ -268,7 +268,7 @@ func (d *DB) memCompaction() {
 	// Drop frozen mem.
 	d.dropFrozenMem()
 
-	// Unpause table compaction.
+	// Resume table compaction.
 	select {
 	case <-ch:
 	case _, _ = <-d.closeC:
@@ -281,7 +281,6 @@ func (d *DB) memCompaction() {
 
 func (d *DB) tableCompaction(c *compaction, noTrivial bool) {
 	s := d.s
-	ucmp := s.cmp.cmp
 
 	rec := new(sessionRecord)
 	rec.addCompactionPointer(c.level, c.max)
@@ -382,7 +381,7 @@ func (d *DB) tableCompaction(c *compaction, noTrivial bool) {
 					hasUkey = false
 					lseq = kMaxSeq
 				} else {
-					if !hasUkey || ucmp.Compare(key.ukey(), ukey) != 0 {
+					if !hasUkey || s.icmp.uCompare(key.ukey(), ukey) != 0 {
 						// First occurrence of this user key
 						ukey = append(ukey[:0], key.ukey()...)
 						hasUkey = true
@@ -499,7 +498,7 @@ func (d *DB) tableRangeCompaction(level int, min, max []byte) {
 	v := s.version_NB()
 
 	m := 1
 	for i, t := range v.tables[1:] {
-		if t.isOverlaps(min, max, true, s.cmp) {
+		if t.isOverlaps(min, max, true, s.icmp) {
 			m = i + 1
 		}
 	}
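The first hunk makes the direction of the pause channel explicit at the send site. In Go, a bidirectional channel value is assignable (and convertible) to a directional channel type, so the explicit conversion documents that the table-compaction side only ever gets the send-only view while memCompaction keeps the receive side. A standalone sketch:

```go
package main

import "fmt"

func main() {
	ch := make(chan struct{})
	// Hand out only the send side; the creator keeps the receive side.
	// The explicit form mirrors (chan<- struct{})(ch) in the diff above.
	var ack chan<- struct{} = ch
	go func() { ack <- struct{}{} }()
	<-ch
	fmt.Println("acknowledged")
}
```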


@@ -10,7 +10,6 @@ import (
 	"errors"
 	"runtime"
 
-	"github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/util"
@@ -35,7 +34,7 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
 	}
 	i = append(i, ti...)
 	strict := s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
-	mi := iterator.NewMergedIterator(i, s.cmp, strict)
+	mi := iterator.NewMergedIterator(i, s.icmp, strict)
 	mi.SetReleaser(&versionReleaser{v: v})
 	return mi
 }
@@ -53,7 +52,7 @@ func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *d
 	}
 	rawIter := db.newRawIterator(slice_, ro)
 	iter := &dbIter{
-		cmp:    db.s.cmp.cmp,
+		icmp:   db.s.icmp,
 		iter:   rawIter,
 		seq:    seq,
 		strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator),
@@ -76,7 +75,7 @@ const (
 
 // dbIter represent an interator states over a database session.
 type dbIter struct {
-	cmp    comparer.BasicComparer
+	icmp   *iComparer
 	iter   iterator.Iterator
 	seq    uint64
 	strict bool
@@ -166,7 +165,7 @@ func (i *dbIter) next() bool {
 			i.key = append(i.key[:0], ukey...)
 			i.dir = dirForward
 		case tVal:
-			if i.dir == dirSOI || i.cmp.Compare(ukey, i.key) > 0 {
+			if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
 				i.key = append(i.key[:0], ukey...)
 				i.value = append(i.value[:0], i.iter.Value()...)
 				i.dir = dirForward
@@ -211,7 +210,7 @@ func (i *dbIter) prev() bool {
 			ukey, seq, t, ok := parseIkey(i.iter.Key())
 			if ok {
 				if seq <= i.seq {
-					if !del && i.cmp.Compare(ukey, i.key) < 0 {
+					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
 						return true
 					}
 					del = (t == tDel)
@@ -252,7 +251,7 @@ func (i *dbIter) Prev() bool {
 	for i.iter.Prev() {
 		ukey, _, _, ok := parseIkey(i.iter.Key())
 		if ok {
-			if i.cmp.Compare(ukey, i.key) < 0 {
+			if i.icmp.uCompare(ukey, i.key) < 0 {
 				goto cont
 			}
 		} else if i.strict {


@@ -46,7 +46,7 @@ func (d *DB) newMem(n int) (mem *memdb.DB, err error) {
 	d.journalWriter = w
 	d.journalFile = file
 	d.frozenMem = d.mem
-	d.mem = memdb.New(s.cmp, maxInt(d.s.o.GetWriteBuffer(), n))
+	d.mem = memdb.New(s.icmp, maxInt(d.s.o.GetWriteBuffer(), n))
 	mem = d.mem
 	// The seq only incremented by the writer.
 	d.frozenSeq = d.seq


@@ -149,7 +149,6 @@ func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
 	db := h.db
 
 	var res uint64
-	ucmp := db.s.cmp.cmp
 	v := db.s.version()
 	for i, tt := range v.tables[1 : len(v.tables)-1] {
 		level := i + 1
@@ -157,7 +156,7 @@ func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
 		for _, t := range tt {
 			var r tFiles
 			min, max := t.min.ukey(), t.max.ukey()
-			next.getOverlaps(min, max, &r, true, ucmp)
+			next.getOverlaps(min, max, &r, true, db.s.icmp.ucmp)
 			sum := r.size()
 			if sum > res {
 				res = sum
@@ -238,7 +237,7 @@ func (h *dbHarness) getVal(key, value string) {
 func (h *dbHarness) allEntriesFor(key, want string) {
 	t := h.t
 	db := h.db
-	ucmp := db.s.cmp.cmp
+	s := db.s
 
 	ikey := newIKey([]byte(key), kMaxSeq, tVal)
 	iter := db.newRawIterator(nil, nil)
@@ -251,7 +250,7 @@ func (h *dbHarness) allEntriesFor(key, want string) {
 		for iter.Valid() {
 			rkey := iKey(iter.Key())
 			if _, t, ok := rkey.parseNum(); ok {
-				if ucmp.Compare(ikey.ukey(), rkey.ukey()) != 0 {
+				if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 {
 					break
 				}
 				if !first {
@@ -390,11 +389,11 @@ func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
 	t := h.t
 	db := h.db
 
-	s, err := db.GetApproximateSizes([]util.Range{
+	s, err := db.SizeOf([]util.Range{
 		{[]byte(start), []byte(limit)},
 	})
 	if err != nil {
-		t.Error("GetApproximateSizes: got error: ", err)
+		t.Error("SizeOf: got error: ", err)
 	}
 	if s.Sum() < low || s.Sum() > hi {
 		t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
@@ -994,6 +993,7 @@ func TestDb_SparseMerge(t *testing.T) {
 	h.put("C", "vc")
 	h.compactMem()
 	h.compactRangeAt(0, "", "")
+	h.waitCompaction()
 
 	// Make sparse update
 	h.put("A", "va2")
@@ -1003,12 +1003,14 @@ func TestDb_SparseMerge(t *testing.T) {
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 	h.compactRangeAt(0, "", "")
+	h.waitCompaction()
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 	h.compactRangeAt(1, "", "")
+	h.waitCompaction()
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 }
 
-func TestDb_ApproximateSizes(t *testing.T) {
+func TestDb_SizeOf(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		Compression: opt.NoCompression,
 		WriteBuffer: 10000000,
@@ -1028,7 +1030,7 @@ func TestDb_ApproximateSizes(t *testing.T) {
 		h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10))
 	}
 
-	// 0 because GetApproximateSizes() does not account for memtable space
+	// 0 because SizeOf() does not account for memtable space
 	h.sizeAssert("", numKey(50), 0, 0)
 
 	for r := 0; r < 3; r++ {
@@ -1058,7 +1060,7 @@ func TestDb_ApproximateSizes(t *testing.T) {
 	}
 }
 
-func TestDb_ApproximateSizes_MixOfSmallAndLarge(t *testing.T) {
+func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
 	defer h.close()
 
@@ -1472,7 +1474,7 @@ func TestDb_ClosedIsClosed(t *testing.T) {
 	_, err = db.GetProperty("leveldb.stats")
 	assertErr(t, err, true)
 
-	_, err = db.GetApproximateSizes([]util.Range{{[]byte("a"), []byte("z")}})
+	_, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}})
 	assertErr(t, err, true)
 
 	assertErr(t, db.CompactRange(util.Range{}), true)


@@ -9,7 +9,6 @@ package leveldb
 import (
 	"time"
 
-	"github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/syndtr/goleveldb/leveldb/memdb"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/util"
@@ -232,11 +231,11 @@ func (d *DB) Delete(key []byte, wo *opt.WriteOptions) error {
 	return d.Write(b, wo)
 }
 
-func isMemOverlaps(ucmp comparer.BasicComparer, mem *memdb.DB, min, max []byte) bool {
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
 	iter := mem.NewIterator(nil)
 	defer iter.Release()
-	return (max == nil || (iter.First() && ucmp.Compare(max, iKey(iter.Key()).ukey()) >= 0)) &&
-		(min == nil || (iter.Last() && ucmp.Compare(min, iKey(iter.Key()).ukey()) <= 0))
+	return (max == nil || (iter.First() && icmp.uCompare(max, iKey(iter.Key()).ukey()) >= 0)) &&
+		(min == nil || (iter.Last() && icmp.uCompare(min, iKey(iter.Key()).ukey()) <= 0))
 }
 
 // CompactRange compacts the underlying DB for the given key range.
@@ -261,7 +260,7 @@ func (d *DB) CompactRange(r util.Range) error {
 	// Check for overlaps in memdb.
 	mem := d.getEffectiveMem()
-	if isMemOverlaps(d.s.cmp.cmp, mem, r.Start, r.Limit) {
+	if isMemOverlaps(d.s.icmp, mem, r.Start, r.Limit) {
 		// Memdb compaction.
 		if _, err := d.rotateMem(0); err != nil {
 			<-d.writeLockC


@@ -13,7 +13,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/comparer"
 )
 
-var icmp = &iComparer{comparer.DefaultComparer}
+var defaultIComparer = &iComparer{comparer.DefaultComparer}
 
 func ikey(key string, seq uint64, t vType) iKey {
 	return newIKey([]byte(key), uint64(seq), t)
@@ -21,7 +21,7 @@ func ikey(key string, seq uint64, t vType) iKey {
 
 func shortSep(a, b []byte) []byte {
 	dst := make([]byte, len(a))
-	dst = icmp.Separator(dst[:0], a, b)
+	dst = defaultIComparer.Separator(dst[:0], a, b)
 	if dst == nil {
 		return a
 	}
@@ -30,7 +30,7 @@ func shortSep(a, b []byte) []byte {
 
 func shortSuccessor(b []byte) []byte {
 	dst := make([]byte, len(b))
-	dst = icmp.Successor(dst[:0], b)
+	dst = defaultIComparer.Successor(dst[:0], b)
 	if dst == nil {
 		return b
 	}


@@ -32,8 +32,8 @@ func (s *session) setOptions(o *opt.Options) {
 		s.o.BlockCache = nil
 	}
 	// Comparer.
-	s.cmp = &iComparer{o.GetComparer()}
-	s.o.Comparer = s.cmp
+	s.icmp = &iComparer{o.GetComparer()}
+	s.o.Comparer = s.icmp
 	// Filter.
 	if filter := o.GetFilter(); filter != nil {
 		s.o.Filter = &iFilter{filter}


@@ -32,7 +32,7 @@ type session struct {
 	stor     storage.Storage
 	storLock util.Releaser
 	o        *opt.Options
-	cmp      *iComparer
+	icmp     *iComparer
 	tops     *tOps
 
 	manifest *journal.Writer
@@ -150,8 +150,8 @@ func (s *session) recover() (err error) {
 	switch {
 	case !rec.has(recComparer):
 		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")}
-	case rec.comparer != s.cmp.cmp.Name():
-		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.cmp.cmp.Name() + "', " + "got '" + rec.comparer + "'")}
+	case rec.comparer != s.icmp.uName():
+		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")}
 	case !rec.has(recNextNum):
 		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")}
 	case !rec.has(recJournalNum):
@@ -189,9 +189,6 @@ func (s *session) commit(r *sessionRecord) (err error) {
 
 // Pick a compaction based on current state; need external synchronization.
 func (s *session) pickCompaction() *compaction {
-	icmp := s.cmp
-	ucmp := icmp.cmp
-
 	v := s.version_NB()
 
 	var level int
@@ -201,7 +198,7 @@ func (s *session) pickCompaction() *compaction {
 		cp := s.stCPtrs[level]
 		tt := v.tables[level]
 		for _, t := range tt {
-			if cp == nil || icmp.Compare(t.max, cp) > 0 {
+			if cp == nil || s.icmp.Compare(t.max, cp) > 0 {
 				t0 = append(t0, t)
 				break
 			}
@@ -221,9 +218,9 @@ func (s *session) pickCompaction() *compaction {
 	c := &compaction{s: s, version: v, level: level}
 
 	if level == 0 {
-		min, max := t0.getRange(icmp)
+		min, max := t0.getRange(s.icmp)
 		t0 = nil
-		v.tables[0].getOverlaps(min.ukey(), max.ukey(), &t0, false, ucmp)
+		v.tables[0].getOverlaps(min.ukey(), max.ukey(), &t0, false, s.icmp.ucmp)
 	}
 
 	c.tables[0] = t0
@@ -236,7 +233,7 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
 	v := s.version_NB()
 
 	var t0 tFiles
-	v.tables[level].getOverlaps(min, max, &t0, level != 0, s.cmp.cmp)
+	v.tables[level].getOverlaps(min, max, &t0, level != 0, s.icmp.ucmp)
 	if len(t0) == 0 {
 		return nil
 	}
@@ -285,35 +282,33 @@ type compaction struct {
 
 func (c *compaction) expand() {
 	s := c.s
 	v := c.version
-	icmp := s.cmp
-	ucmp := icmp.cmp
 
 	level := c.level
 	vt0, vt1 := v.tables[level], v.tables[level+1]
 
 	t0, t1 := c.tables[0], c.tables[1]
-	min, max := t0.getRange(icmp)
-	vt1.getOverlaps(min.ukey(), max.ukey(), &t1, true, ucmp)
+	min, max := t0.getRange(s.icmp)
+	vt1.getOverlaps(min.ukey(), max.ukey(), &t1, true, s.icmp.ucmp)
 
 	// Get entire range covered by compaction
-	amin, amax := append(t0, t1...).getRange(icmp)
+	amin, amax := append(t0, t1...).getRange(s.icmp)
 
 	// See if we can grow the number of inputs in "level" without
 	// changing the number of "level+1" files we pick up.
 	if len(t1) > 0 {
 		var exp0 tFiles
-		vt0.getOverlaps(amin.ukey(), amax.ukey(), &exp0, level != 0, ucmp)
+		vt0.getOverlaps(amin.ukey(), amax.ukey(), &exp0, level != 0, s.icmp.ucmp)
 		if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes {
 			var exp1 tFiles
-			xmin, xmax := exp0.getRange(icmp)
-			vt1.getOverlaps(xmin.ukey(), xmax.ukey(), &exp1, true, ucmp)
+			xmin, xmax := exp0.getRange(s.icmp)
+			vt1.getOverlaps(xmin.ukey(), xmax.ukey(), &exp1, true, s.icmp.ucmp)
 			if len(exp1) == len(t1) {
 				s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
 					level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
 					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
 				min, max = xmin, xmax
 				t0, t1 = exp0, exp1
-				amin, amax = append(t0, t1...).getRange(icmp)
+				amin, amax = append(t0, t1...).getRange(s.icmp)
 			}
 		}
 	}
@@ -321,7 +316,7 @@ func (c *compaction) expand() {
 	// Compute the set of grandparent files that overlap this compaction
 	// (parent == level+1; grandparent == level+2)
 	if level+2 < kNumLevels {
-		v.tables[level+2].getOverlaps(amin.ukey(), amax.ukey(), &c.gp, true, ucmp)
+		v.tables[level+2].getOverlaps(amin.ukey(), amax.ukey(), &c.gp, true, s.icmp.ucmp)
 	}
 
 	c.tables[0], c.tables[1] = t0, t1
@@ -336,13 +331,13 @@ func (c *compaction) trivial() bool {
 func (c *compaction) isBaseLevelForKey(key []byte) bool {
 	s := c.s
 	v := c.version
-	ucmp := s.cmp.cmp
 
 	for level, tt := range v.tables[c.level+2:] {
 		for c.tPtrs[level] < len(tt) {
 			t := tt[c.tPtrs[level]]
-			if ucmp.Compare(key, t.max.ukey()) <= 0 {
+			if s.icmp.uCompare(key, t.max.ukey()) <= 0 {
 				// We've advanced far enough
-				if ucmp.Compare(key, t.min.ukey()) >= 0 {
+				if s.icmp.uCompare(key, t.min.ukey()) >= 0 {
 					// Key falls in this file's range, so definitely not base level
 					return false
 				}
@@ -355,10 +350,9 @@ func (c *compaction) isBaseLevelForKey(key []byte) bool {
 }
 
 func (c *compaction) shouldStopBefore(key iKey) bool {
-	icmp := c.s.cmp
 	for ; c.gpidx < len(c.gp); c.gpidx++ {
 		gp := c.gp[c.gpidx]
-		if icmp.Compare(key, gp.max) <= 0 {
+		if c.s.icmp.Compare(key, gp.max) <= 0 {
 			break
 		}
 		if c.seenKey {
@@ -377,7 +371,6 @@ func (c *compaction) shouldStopBefore(key iKey) bool {
 
 func (c *compaction) newIterator() iterator.Iterator {
 	s := c.s
-	icmp := s.cmp
 
 	level := c.level
 	icap := 2
@@ -401,10 +394,10 @@ func (c *compaction) newIterator() iterator.Iterator {
 			its = append(its, s.tops.newIterator(t, nil, ro))
 		}
 	} else {
-		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, icmp, nil, ro), strict, true)
+		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, nil, ro), strict, true)
 		its = append(its, it)
 	}
 
-	return iterator.NewMergedIterator(its, icmp, true)
+	return iterator.NewMergedIterator(its, s.icmp, true)
 }


@@ -148,7 +148,7 @@ func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
 			}
 		}
 
-		r.setComparer(s.cmp.cmp.Name())
+		r.setComparer(s.icmp.uName())
 	}
 }


@@ -117,12 +117,10 @@ func (tf tFiles) searchMax(key iKey, icmp *iComparer) int {
 }
 
 func (tf tFiles) isOverlaps(min, max []byte, disjSorted bool, icmp *iComparer) bool {
-	ucmp := icmp.cmp
-
 	if !disjSorted {
 		// Need to check against all files
 		for _, t := range tf {
-			if !t.isAfter(min, ucmp) && !t.isBefore(max, ucmp) {
+			if !t.isAfter(min, icmp.ucmp) && !t.isBefore(max, icmp.ucmp) {
 				return true
 			}
 		}
@@ -139,7 +137,7 @@ func (tf tFiles) isOverlaps(min, max []byte, disjSorted bool, icmp *iComparer) b
 		// beginning of range is after all files, so no overlap
 		return false
 	}
-	return !tf[idx].isBefore(max, ucmp)
+	return !tf[idx].isBefore(max, icmp.ucmp)
 }
 
 func (tf tFiles) getOverlaps(min, max []byte, r *tFiles, disjSorted bool, ucmp comparer.BasicComparer) {
@@ -338,12 +336,12 @@ func (t *tOps) get(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []by
 	return c.Value().(*table.Reader).Find(key, ro)
 }
 
-func (t *tOps) getApproximateOffset(f *tFile, key []byte) (offset uint64, err error) {
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
 	c, err := t.lookup(f)
 	if err != nil {
 		return
 	}
-	_offset, err := c.Value().(*table.Reader).GetApproximateOffset(key)
+	_offset, err := c.Value().(*table.Reader).OffsetOf(key)
 	offset = uint64(_offset)
 	c.Release()
 	return


@@ -732,10 +732,10 @@ func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
 	return
 }
 
-// GetApproximateOffset returns approximate offset for the given key.
+// OffsetOf returns approximate offset for the given key.
 //
 // It is safe to modify the contents of the argument after Get returns.
-func (r *Reader) GetApproximateOffset(key []byte) (offset int64, err error) {
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
 	if r.err != nil {
 		err = r.err
 		return


@@ -61,7 +61,7 @@ var _ = testutil.Defer(func() {
 	tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
 
 	CheckOffset := func(key string, expect, threshold int) {
-		offset, err := tr.GetApproximateOffset([]byte(key))
+		offset, err := tr.OffsetOf([]byte(key))
 		Expect(err).ShouldNot(HaveOccurred())
 		Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key)
 	}


@@ -91,8 +91,6 @@ func (v *version) release() {
 
 func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool, err error) {
 	s := v.s
-	icmp := s.cmp
-	ucmp := icmp.cmp
 
 	ukey := key.ukey()
 
@@ -112,8 +110,8 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,
 			// overlap user_key and process them in order from newest to
 			var tmp tFiles
 			for _, t := range ts {
-				if ucmp.Compare(ukey, t.min.ukey()) >= 0 &&
-					ucmp.Compare(ukey, t.max.ukey()) <= 0 {
+				if s.icmp.uCompare(ukey, t.min.ukey()) >= 0 &&
+					s.icmp.uCompare(ukey, t.max.ukey()) <= 0 {
 					tmp = append(tmp, t)
 				}
 			}
@@ -125,8 +123,8 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,
 			tmp.sortByNum()
 			ts = tmp
 		} else {
-			i := ts.searchMax(key, icmp)
-			if i >= len(ts) || ucmp.Compare(ukey, ts[i].min.ukey()) < 0 {
+			i := ts.searchMax(key, s.icmp)
+			if i >= len(ts) || s.icmp.uCompare(ukey, ts[i].min.ukey()) < 0 {
 				continue
 			}
 
@@ -157,7 +155,7 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,
 			rkey := iKey(_rkey)
 			if seq, t, ok := rkey.parseNum(); ok {
-				if ucmp.Compare(ukey, rkey.ukey()) == 0 {
+				if s.icmp.uCompare(ukey, rkey.ukey()) == 0 {
 					if level == 0 {
 						if seq >= l0seq {
 							l0found = true
@@ -201,7 +199,6 @@ func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool,
 
 func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
 	s := v.s
-	icmp := s.cmp
 
 	// Merge all level zero files together since they may overlap
 	for _, t := range v.tables[0] {
@@ -215,7 +212,7 @@ func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []it
 			continue
 		}
 
-		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, icmp, slice, ro), strict, true)
+		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, slice, ro), strict, true)
 		its = append(its, it)
 	}
 
@@ -245,16 +242,13 @@ func (v *version) tLen(level int) int {
 	return len(v.tables[level])
 }
 
-func (v *version) getApproximateOffset(key iKey) (n uint64, err error) {
-	icmp := v.s.cmp
-	tops := v.s.tops
-
+func (v *version) offsetOf(key iKey) (n uint64, err error) {
 	for level, tt := range v.tables {
 		for _, t := range tt {
-			if icmp.Compare(t.max, key) <= 0 {
+			if v.s.icmp.Compare(t.max, key) <= 0 {
 				// Entire file is before "key", so just add the file size
 				n += t.size
-			} else if icmp.Compare(t.min, key) > 0 {
+			} else if v.s.icmp.Compare(t.min, key) > 0 {
 				// Entire file is after "key", so ignore
 				if level > 0 {
 					// Files other than level 0 are sorted by meta->min, so
@@ -266,7 +260,7 @@ func (v *version) offsetOf(key iKey) (n uint64, err error) {
 				// "key" falls in the range for this table. Add the
 				// approximate offset of "key" within the table.
 				var nn uint64
-				nn, err = tops.getApproximateOffset(t, key)
+				nn, err = v.s.tops.offsetOf(t, key)
 				if err != nil {
 					return 0, err
 				}
@@ -279,16 +273,13 @@ func (v *version) offsetOf(key iKey) (n uint64, err error) {
 }
 
 func (v *version) pickLevel(min, max []byte) (level int) {
-	icmp := v.s.cmp
-	ucmp := icmp.cmp
-
-	if !v.tables[0].isOverlaps(min, max, false, icmp) {
+	if !v.tables[0].isOverlaps(min, max, false, v.s.icmp) {
 		var r tFiles
 		for ; level < kMaxMemCompactLevel; level++ {
-			if v.tables[level+1].isOverlaps(min, max, true, icmp) {
+			if v.tables[level+1].isOverlaps(min, max, true, v.s.icmp) {
 				break
 			}
-			v.tables[level+2].getOverlaps(min, max, &r, true, ucmp)
+			v.tables[level+2].getOverlaps(min, max, &r, true, v.s.icmp.ucmp)
 			if r.size() > kMaxGrandParentOverlapBytes {
 				break
 			}
@@ -411,7 +402,7 @@ func (p *versionStaging) finish() *version {
 		}
 
 		// sort tables
-		nt.sortByKey(s.cmp)
+		nt.sortByKey(s.icmp)
 		nv.tables[level] = nt
 	}