// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"sync/atomic"
	"time"

	"github.com/syndtr/goleveldb/leveldb/journal"
	"github.com/syndtr/goleveldb/leveldb/memdb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)
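// memDB wraps a memdb.DB together with its owning DB and a reference
// count, so that readers can keep using a memtable while the writer
// rotates it out; the final decref returns the memtable to the pool.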
type memDB struct {
	db *DB
	*memdb.DB
	ref int32
}
func (m *memDB) getref() int32 {
	return atomic.LoadInt32(&m.ref)
}

func (m *memDB) incref() {
	atomic.AddInt32(&m.ref, 1)
}
func (m *memDB) decref() {
	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
		// Only put back a memdb with the standard capacity.
		if m.Capacity() == m.db.s.o.GetWriteBuffer() {
			m.Reset()
			m.db.mpoolPut(m.DB)
		}
		m.db = nil
		m.DB = nil
	} else if ref < 0 {
		panic("negative memdb ref")
	}
}
// Get latest sequence number.
func (db *DB) getSeq() uint64 {
	return atomic.LoadUint64(&db.seq)
}

// Atomically adds delta to seq.
func (db *DB) addSeq(delta uint64) {
	atomic.AddUint64(&db.seq, delta)
}
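// Atomically sets seq.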
func (db *DB) setSeq(seq uint64) {
	atomic.StoreUint64(&db.seq, seq)
}
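// sampleSeek records a seek against the current version and triggers a
// table compaction when the version reports that one is warranted.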
func (db *DB) sampleSeek(ikey internalKey) {
	v := db.s.version()
	if v.sampleSeek(ikey) {
		// Trigger table compaction.
		db.compTrigger(db.tcompCmdC)
	}
	v.release()
}
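// mpoolPut offers mem back to the memdb pool without blocking; the
// deferred recover guards against sending on a memPool that has already
// been closed during shutdown.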
func (db *DB) mpoolPut(mem *memdb.DB) {
	defer func() {
		recover()
	}()
	select {
	case db.memPool <- mem:
	default:
	}
}
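// mpoolGet takes a memdb from the pool if one is available; if there is
// none, or the pooled one has less than n bytes of capacity, a fresh
// memdb sized at least to the configured write buffer is allocated. The
// returned memDB starts with a zero reference count.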
func (db *DB) mpoolGet(n int) *memDB {
	var mdb *memdb.DB
	select {
	case mdb = <-db.memPool:
	default:
	}
	if mdb == nil || mdb.Capacity() < n {
		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
	}
	return &memDB{
		db: db,
		DB: mdb,
	}
}
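// mpoolDrain discards one pooled memdb every 30 seconds so that idle
// memory can be reclaimed, and closes the pool when the DB is closed.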
func (db *DB) mpoolDrain() {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop() // release the ticker when the drain loop exits
	for {
		select {
		case <-ticker.C:
			select {
			case <-db.memPool:
			default:
			}
		case <-db.closeC:
			close(db.memPool)
			return
		}
	}
}
// Create a new memdb and freeze the old one; needs external synchronization.
// newMem is only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memDB, err error) {
	fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
	w, err := db.s.stor.Create(fd)
	if err != nil {
		db.s.reuseFileNum(fd.Num)
		return
	}

	db.memMu.Lock()
	defer db.memMu.Unlock()

	if db.frozenMem != nil {
		panic("still has frozen mem")
	}

	if db.journal == nil {
		db.journal = journal.NewWriter(w)
	} else {
		db.journal.Reset(w)
		db.journalWriter.Close()
		db.frozenJournalFd = db.journalFd
	}
	db.journalWriter = w
	db.journalFd = fd
	db.frozenMem = db.mem
	mem = db.mpoolGet(n)
	mem.incref() // for self
	mem.incref() // for caller
	db.mem = mem
	// The seq is only incremented by the writer, and whoever called newMem
	// should hold the write lock, so no additional synchronization is
	// needed here.
	db.frozenSeq = db.seq
	return
}
// Get all memdbs.
func (db *DB) getMems() (e, f *memDB) {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	if db.mem == nil {
		panic("nil effective mem")
	}
	db.mem.incref()
	if db.frozenMem != nil {
		db.frozenMem.incref()
	}
	return db.mem, db.frozenMem
}
// Get effective memdb.
func (db *DB) getEffectiveMem() *memDB {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	if db.mem == nil {
		panic("nil effective mem")
	}
	db.mem.incref()
	return db.mem
}
// Check whether we have a frozen memdb.
func (db *DB) hasFrozenMem() bool {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	return db.frozenMem != nil
}
// Get frozen memdb.
func (db *DB) getFrozenMem() *memDB {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	if db.frozenMem != nil {
		db.frozenMem.incref()
	}
	return db.frozenMem
}
// Drop frozen memdb; assumes that the frozen memdb isn't nil.
func (db *DB) dropFrozenMem() {
	db.memMu.Lock()
	if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
		db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
	} else {
		db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
	}
	db.frozenJournalFd = storage.FileDesc{}
	db.frozenMem.decref()
	db.frozenMem = nil
	db.memMu.Unlock()
}
// Set closed flag; return true if not already closed.
func (db *DB) setClosed() bool {
	return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
}

// Check whether the DB was closed.
func (db *DB) isClosed() bool {
	return atomic.LoadUint32(&db.closed) != 0
}

// Check read ok status.
func (db *DB) ok() error {
	if db.isClosed() {
		return ErrClosed
	}
	return nil
}