syncthing/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go

// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"sync"
	"time"

	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

var (
	errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)
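
// cStat accumulates compaction statistics (time spent, bytes read and bytes
// written) for one level. cStatStaging is the scratch form filled in while a
// compaction runs, and cStats keeps the per-level totals behind a mutex.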
type cStat struct {
	duration time.Duration
	read     int64
	write    int64
}

func (p *cStat) add(n *cStatStaging) {
	p.duration += n.duration
	p.read += n.read
	p.write += n.write
}

func (p *cStat) get() (duration time.Duration, read, write int64) {
	return p.duration, p.read, p.write
}

type cStatStaging struct {
	start    time.Time
	duration time.Duration
	on       bool
	read     int64
	write    int64
}

func (p *cStatStaging) startTimer() {
	if !p.on {
		p.start = time.Now()
		p.on = true
	}
}

func (p *cStatStaging) stopTimer() {
	if p.on {
		p.duration += time.Since(p.start)
		p.on = false
	}
}

type cStats struct {
	lk    sync.Mutex
	stats []cStat
}

func (p *cStats) addStat(level int, n *cStatStaging) {
	p.lk.Lock()
	if level >= len(p.stats) {
		newStats := make([]cStat, level+1)
		copy(newStats, p.stats)
		p.stats = newStats
	}
	p.stats[level].add(n)
	p.lk.Unlock()
}

func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
	p.lk.Lock()
	defer p.lk.Unlock()
	if level < len(p.stats) {
		return p.stats[level].get()
	}
	return
}
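
// compactionError serializes compaction error state. Errors reported on
// compErrSetC are republished on compErrC; read-only and corruption errors
// are treated as persistent, are also published on compPerErrC, and cause the
// write lock to be held so that writes no longer pass through.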
func (db *DB) compactionError() {
	var err error
noerr:
	// No error.
	for {
		select {
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
				goto haserr
			}
		case _, _ = <-db.closeC:
			return
		}
	}
haserr:
	// Transient error.
	for {
		select {
		case db.compErrC <- err:
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
				goto noerr
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
			}
		case _, _ = <-db.closeC:
			return
		}
	}
hasperr:
	// Persistent error.
	for {
		select {
		case db.compErrC <- err:
		case db.compPerErrC <- err:
		case db.writeLockC <- struct{}{}:
			// Hold write lock, so that write won't pass-through.
			db.compWriteLocking = true
		case _, _ = <-db.closeC:
			if db.compWriteLocking {
				// We should release the lock or Close will hang.
				<-db.writeLockC
			}
			return
		}
	}
}

type compactionTransactCounter int

func (cnt *compactionTransactCounter) incr() {
	*cnt++
}

type compactionTransactInterface interface {
	run(cnt *compactionTransactCounter) error
	revert() error
}
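
// compactionTransact runs t.run until it succeeds, logging failures and
// retrying with a bounded backoff (unless backoff is disabled). Corruption,
// a persistent compaction error, or DB closure aborts the transaction by
// panicking with errCompactionTransactExiting, in which case the deferred
// recover handler calls t.revert before re-panicking.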
func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
	defer func() {
		if x := recover(); x != nil {
			if x == errCompactionTransactExiting {
				if err := t.revert(); err != nil {
					db.logf("%s revert error %q", name, err)
				}
			}
			panic(x)
		}
	}()

	const (
		backoffMin = 1 * time.Second
		backoffMax = 8 * time.Second
		backoffMul = 2 * time.Second
	)
	var (
		backoff        = backoffMin
		backoffT       = time.NewTimer(backoff)
		lastCnt        = compactionTransactCounter(0)
		disableBackoff = db.s.o.GetDisableCompactionBackoff()
	)
	for n := 0; ; n++ {
		// Check whether the DB is closed.
		if db.isClosed() {
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		} else if n > 0 {
			db.logf("%s retrying N·%d", name, n)
		}

		// Execute.
		cnt := compactionTransactCounter(0)
		err := t.run(&cnt)
		if err != nil {
			db.logf("%s error I·%d %q", name, cnt, err)
		}

		// Set compaction error status.
		select {
		case db.compErrSetC <- err:
		case perr := <-db.compPerErrC:
			if err != nil {
				db.logf("%s exiting (persistent error %q)", name, perr)
				db.compactionExitTransact()
			}
		case _, _ = <-db.closeC:
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		}
		if err == nil {
			return
		}
		if errors.IsCorrupted(err) {
			db.logf("%s exiting (corruption detected)", name)
			db.compactionExitTransact()
		}

		if !disableBackoff {
			// Reset backoff duration if counter is advancing.
			if cnt > lastCnt {
				backoff = backoffMin
				lastCnt = cnt
			}

			// Backoff.
			backoffT.Reset(backoff)
			if backoff < backoffMax {
				backoff *= backoffMul
				if backoff > backoffMax {
					backoff = backoffMax
				}
			}
			select {
			case <-backoffT.C:
			case _, _ = <-db.closeC:
				db.logf("%s exiting", name)
				db.compactionExitTransact()
			}
		}
	}
}

type compactionTransactFunc struct {
	runFunc    func(cnt *compactionTransactCounter) error
	revertFunc func() error
}

func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
	return t.runFunc(cnt)
}

func (t *compactionTransactFunc) revert() error {
	if t.revertFunc != nil {
		return t.revertFunc()
	}
	return nil
}

func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
	db.compactionTransact(name, &compactionTransactFunc{run, revert})
}

func (db *DB) compactionExitTransact() {
	panic(errCompactionTransactExiting)
}
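
// compactionCommit commits rec to the session, serialized by compCommitLk
// and retried through the compaction transaction machinery until it
// succeeds.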
func (db *DB) compactionCommit(name string, rec *sessionRecord) {
	db.compCommitLk.Lock()
	defer db.compCommitLk.Unlock() // Defer is necessary.
	db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
		return db.s.commit(rec)
	}, nil)
}
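
// memCompaction flushes the frozen memdb into new tables: it pauses table
// compaction, writes the memdb out via flushMemdb, commits the resulting
// session record, records the stats, drops the frozen memdb, and finally
// resumes and re-triggers table compaction.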
func (db *DB) memCompaction() {
	mdb := db.getFrozenMem()
	if mdb == nil {
		return
	}
	defer mdb.decref()

	db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))

	// Don't compact empty memdb.
	if mdb.Len() == 0 {
		db.logf("memdb@flush skipping")
		// drop frozen memdb
		db.dropFrozenMem()
		return
	}

	// Pause table compaction.
	resumeC := make(chan struct{})
	select {
	case db.tcompPauseC <- (chan<- struct{})(resumeC):
	case <-db.compPerErrC:
		close(resumeC)
		resumeC = nil
	case _, _ = <-db.closeC:
		return
	}

	var (
		rec        = &sessionRecord{}
		stats      = &cStatStaging{}
		flushLevel int
	)

	// Generate tables.
	db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
		stats.startTimer()
		flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
		stats.stopTimer()
		return
	}, func() error {
		for _, r := range rec.addedTables {
			db.logf("memdb@flush revert @%d", r.num)
			if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
				return err
			}
		}
		return nil
	})

	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.frozenSeq)

	// Commit.
	stats.startTimer()
	db.compactionCommit("memdb", rec)
	stats.stopTimer()

	db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)

	for _, r := range rec.addedTables {
		stats.write += r.size
	}
	db.compStats.addStat(flushLevel, stats)

	// Drop frozen memdb.
	db.dropFrozenMem()

	// Resume table compaction.
	if resumeC != nil {
		select {
		case <-resumeC:
			close(resumeC)
		case _, _ = <-db.closeC:
			return
		}
	}

	// Trigger table compaction.
	db.compTrigger(db.tcompCmdC)
}
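
// tableCompactionBuilder implements compactionTransactInterface for table
// compactions. The snap* fields record the iteration state after each table
// is flushed, so that a retried run can resume after the last table it
// finished writing instead of starting over.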
type tableCompactionBuilder struct {
	db           *DB
	s            *session
	c            *compaction
	rec          *sessionRecord
	stat0, stat1 *cStatStaging

	snapHasLastUkey bool
	snapLastUkey    []byte
	snapLastSeq     uint64
	snapIter        int
	snapKerrCnt     int
	snapDropCnt     int

	kerrCnt int
	dropCnt int

	minSeq    uint64
	strict    bool
	tableSize int

	tw *tWriter
}

func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
	// Create new table if not already.
	if b.tw == nil {
		// Check for pause event.
		if b.db != nil {
			select {
			case ch := <-b.db.tcompPauseC:
				b.db.pauseCompaction(ch)
			case _, _ = <-b.db.closeC:
				b.db.compactionExitTransact()
			default:
			}
		}

		// Create new table.
		var err error
		b.tw, err = b.s.tops.create()
		if err != nil {
			return err
		}
	}

	// Write key/value into table.
	return b.tw.append(key, value)
}

func (b *tableCompactionBuilder) needFlush() bool {
	return b.tw.tw.BytesLen() >= b.tableSize
}

func (b *tableCompactionBuilder) flush() error {
	t, err := b.tw.finish()
	if err != nil {
		return err
	}
	b.rec.addTableFile(b.c.sourceLevel+1, t)
	b.stat1.write += t.size
	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
	b.tw = nil
	return nil
}

func (b *tableCompactionBuilder) cleanup() {
	if b.tw != nil {
		b.tw.drop()
		b.tw = nil
	}
}
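
// run drives the compaction iterator over the source tables, dropping
// entries that are shadowed by a newer entry for the same user key and
// deletion markers that are provably obsolete, and flushing an output table
// whenever it reaches tableSize or crosses a shouldStopBefore boundary.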
func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
	snapResumed := b.snapIter > 0
	hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
	lastUkey := append([]byte{}, b.snapLastUkey...)
	lastSeq := b.snapLastSeq
	b.kerrCnt = b.snapKerrCnt
	b.dropCnt = b.snapDropCnt
	// Restore compaction state.
	b.c.restore()

	defer b.cleanup()

	b.stat1.startTimer()
	defer b.stat1.stopTimer()

	iter := b.c.newIterator()
	defer iter.Release()
	for i := 0; iter.Next(); i++ {
		// Incr transact counter.
		cnt.incr()

		// Skip until last state.
		if i < b.snapIter {
			continue
		}

		resumed := false
		if snapResumed {
			resumed = true
			snapResumed = false
		}

		ikey := iter.Key()
		ukey, seq, kt, kerr := parseInternalKey(ikey)

		if kerr == nil {
			shouldStop := !resumed && b.c.shouldStopBefore(ikey)

			if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
				// First occurrence of this user key.

				// Only rotate tables if ukey doesn't hop across.
				if b.tw != nil && (shouldStop || b.needFlush()) {
					if err := b.flush(); err != nil {
						return err
					}

					// Creates snapshot of the state.
					b.c.save()
					b.snapHasLastUkey = hasLastUkey
					b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
					b.snapLastSeq = lastSeq
					b.snapIter = i
					b.snapKerrCnt = b.kerrCnt
					b.snapDropCnt = b.dropCnt
				}

				hasLastUkey = true
				lastUkey = append(lastUkey[:0], ukey...)
				lastSeq = keyMaxSeq
			}

			switch {
			case lastSeq <= b.minSeq:
				// Dropped because a newer entry for the same user key exists.
				fallthrough // (A)
			case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
				// For this user key:
				// (1) there is no data in higher levels
				// (2) data in lower levels will have larger seq numbers
				// (3) data in layers that are being compacted here and have
				//     smaller seq numbers will be dropped in the next
				//     few iterations of this loop (by rule (A) above).
				// Therefore this deletion marker is obsolete and can be dropped.
				lastSeq = seq
				b.dropCnt++
				continue
			default:
				lastSeq = seq
			}
		} else {
			if b.strict {
				return kerr
			}

			// Don't drop corrupted keys.
			hasLastUkey = false
			lastUkey = lastUkey[:0]
			lastSeq = keyMaxSeq
			b.kerrCnt++
		}

		if err := b.appendKV(ikey, iter.Value()); err != nil {
			return err
		}
	}

	if err := iter.Error(); err != nil {
		return err
	}

	// Finish last table.
	if b.tw != nil && !b.tw.empty() {
		return b.flush()
	}
	return nil
}

func (b *tableCompactionBuilder) revert() error {
	for _, at := range b.rec.addedTables {
		b.s.logf("table@build revert @%d", at.num)
		if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
			return err
		}
	}
	return nil
}
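
// tableCompaction executes the compaction c. If the compaction is trivial
// (and noTrivial is not set), the single source table is simply moved to the
// next level; otherwise the inputs are rewritten through
// tableCompactionBuilder and the resulting record is committed.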
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
	defer c.release()

	rec := &sessionRecord{}
	rec.addCompPtr(c.sourceLevel, c.imax)

	if !noTrivial && c.trivial() {
		t := c.levels[0][0]
		db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
		rec.delTable(c.sourceLevel, t.fd.Num)
		rec.addTableFile(c.sourceLevel+1, t)
		db.compactionCommit("table-move", rec)
		return
	}

	var stats [2]cStatStaging
	for i, tables := range c.levels {
		for _, t := range tables {
			stats[i].read += t.size
			// Insert deleted tables into record
			rec.delTable(c.sourceLevel+i, t.fd.Num)
		}
	}
	sourceSize := int(stats[0].read + stats[1].read)
	minSeq := db.minSeq()
	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)

	b := &tableCompactionBuilder{
		db:        db,
		s:         db.s,
		c:         c,
		rec:       rec,
		stat1:     &stats[1],
		minSeq:    minSeq,
		strict:    db.s.o.GetStrict(opt.StrictCompaction),
		tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
	}
	db.compactionTransact("table@build", b)

	// Commit.
	stats[1].startTimer()
	db.compactionCommit("table", rec)
	stats[1].stopTimer()

	resultSize := int(stats[1].write)
	db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)

	// Save compaction stats
	for i := range stats {
		db.compStats.addStat(c.sourceLevel+1, &stats[i])
	}
}
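
// tableRangeCompaction compacts tables overlapping the user-key range
// [umin, umax]. For a non-negative level only that level is compacted; for a
// negative level it repeatedly finds the deepest level with overlapping
// tables and compacts each level up to (but not including) it, until a pass
// performs no compaction.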
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
	db.logf("table@compaction range L%d %q:%q", level, umin, umax)
	if level >= 0 {
		if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
			db.tableCompaction(c, true)
		}
	} else {
		// Retry until nothing to compact.
		for {
			compacted := false

			// Scan for maximum level with overlapped tables.
			v := db.s.version()
			m := 1
			for i := m; i < len(v.levels); i++ {
				tables := v.levels[i]
				if tables.overlaps(db.s.icmp, umin, umax, false) {
					m = i
				}
			}
			v.release()

			for level := 0; level < m; level++ {
				if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
					db.tableCompaction(c, true)
					compacted = true
				}
			}

			if !compacted {
				break
			}
		}
	}

	return nil
}

func (db *DB) tableAutoCompaction() {
	if c := db.s.pickCompaction(); c != nil {
		db.tableCompaction(c, false)
	}
}

func (db *DB) tableNeedCompaction() bool {
	v := db.s.version()
	defer v.release()
	return v.needCompaction()
}

func (db *DB) pauseCompaction(ch chan<- struct{}) {
	select {
	case ch <- struct{}{}:
	case _, _ = <-db.closeC:
		db.compactionExitTransact()
	}
}
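
// cCmd is a command for the compaction loops: cAuto requests an automatic
// compaction, cRange a range compaction. ack reports completion to the
// waiting caller, if any, and tolerates the acknowledgement channel having
// been closed already.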
type cCmd interface {
	ack(err error)
}

type cAuto struct {
	ackC chan<- error
}

func (r cAuto) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}

type cRange struct {
	level    int
	min, max []byte
	ackC     chan<- error
}

func (r cRange) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}

// This will trigger auto compaction but will not wait for it.
func (db *DB) compTrigger(compC chan<- cCmd) {
	select {
	case compC <- cAuto{}:
	default:
	}
}

// This will trigger auto compaction and/or wait for all compaction to be done.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
	ch := make(chan error)
	defer close(ch)
	// Send cmd.
	select {
	case compC <- cAuto{ch}:
	case err = <-db.compErrC:
		return
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	return err
}

// Send range compaction request.
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
	ch := make(chan error)
	defer close(ch)
	// Send cmd.
	select {
	case compC <- cRange{level, min, max, ch}:
	case err := <-db.compErrC:
		return err
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case _, _ = <-db.closeC:
		return ErrClosed
	}
	return err
}
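
// mCompaction is the memdb compaction loop: it serves cAuto commands from
// mcompCmdC by running memCompaction and acknowledging them, and on shutdown
// acks any in-flight command with ErrClosed.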
func (db *DB) mCompaction() {
	var x cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		select {
		case x = <-db.mcompCmdC:
			switch x.(type) {
			case cAuto:
				db.memCompaction()
				x.ack(nil)
				x = nil
			default:
				panic("leveldb: unknown command")
			}
		case _, _ = <-db.closeC:
			return
		}
	}
}
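
// tCompaction is the table compaction loop. While tableNeedCompaction
// reports pending work it polls for commands without blocking and keeps
// running automatic compactions; otherwise it acknowledges queued cAuto
// commands and blocks for the next command. Pause requests from tcompPauseC
// are honored in both states, and cRange commands are executed and
// acknowledged immediately.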
func (db *DB) tCompaction() {
	var x cCmd
	var ackQ []cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		for i := range ackQ {
			ackQ[i].ack(ErrClosed)
			ackQ[i] = nil
		}
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		if db.tableNeedCompaction() {
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case _, _ = <-db.closeC:
				return
			default:
			}
		} else {
			for i := range ackQ {
				ackQ[i].ack(nil)
				ackQ[i] = nil
			}
			ackQ = ackQ[:0]
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case _, _ = <-db.closeC:
				return
			}
		}
		if x != nil {
			switch cmd := x.(type) {
			case cAuto:
				ackQ = append(ackQ, x)
			case cRange:
				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
			default:
				panic("leveldb: unknown command")
			}
			x = nil
		}
		db.tableAutoCompaction()
	}
}