diff --git a/cmd/stindex/dump.go b/cmd/stindex/dump.go index 34edbdee0..e997678f9 100644 --- a/cmd/stindex/dump.go +++ b/cmd/stindex/dump.go @@ -13,11 +13,15 @@ import ( "time" "github.com/syncthing/syncthing/lib/db" + "github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/protocol" ) -func dump(ldb *db.Lowlevel) { - it := ldb.NewIterator(nil, nil) +func dump(ldb backend.Backend) { + it, err := ldb.NewPrefixIterator(nil) + if err != nil { + log.Fatal(err) + } for it.Next() { key := it.Key() switch key[0] { diff --git a/cmd/stindex/dumpsize.go b/cmd/stindex/dumpsize.go index 71c351692..a00630b91 100644 --- a/cmd/stindex/dumpsize.go +++ b/cmd/stindex/dumpsize.go @@ -10,8 +10,10 @@ import ( "container/heap" "encoding/binary" "fmt" + "log" "github.com/syncthing/syncthing/lib/db" + "github.com/syncthing/syncthing/lib/db/backend" ) type SizedElement struct { @@ -37,11 +39,14 @@ func (h *ElementHeap) Pop() interface{} { return x } -func dumpsize(ldb *db.Lowlevel) { +func dumpsize(ldb backend.Backend) { h := &ElementHeap{} heap.Init(h) - it := ldb.NewIterator(nil, nil) + it, err := ldb.NewPrefixIterator(nil) + if err != nil { + log.Fatal(err) + } var ele SizedElement for it.Next() { key := it.Key() diff --git a/cmd/stindex/idxck.go b/cmd/stindex/idxck.go index 88d5b2eb5..04ece2caa 100644 --- a/cmd/stindex/idxck.go +++ b/cmd/stindex/idxck.go @@ -10,8 +10,10 @@ import ( "bytes" "encoding/binary" "fmt" + "log" "github.com/syncthing/syncthing/lib/db" + "github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/protocol" ) @@ -31,7 +33,7 @@ type sequenceKey struct { sequence uint64 } -func idxck(ldb *db.Lowlevel) (success bool) { +func idxck(ldb backend.Backend) (success bool) { folders := make(map[uint32]string) devices := make(map[uint32]string) deviceToIDs := make(map[string]uint32) @@ -42,7 +44,10 @@ func idxck(ldb *db.Lowlevel) (success bool) { var localDeviceKey uint32 success = true - it := ldb.NewIterator(nil, nil) + it, err := ldb.NewPrefixIterator(nil) + if err != nil { + log.Fatal(err) + } for it.Next() { key := it.Key() switch key[0] { diff --git a/cmd/stindex/main.go b/cmd/stindex/main.go index a88127317..ed48c35d0 100644 --- a/cmd/stindex/main.go +++ b/cmd/stindex/main.go @@ -13,7 +13,7 @@ import ( "os" "path/filepath" - "github.com/syncthing/syncthing/lib/db" + "github.com/syncthing/syncthing/lib/db/backend" ) func main() { @@ -30,7 +30,7 @@ func main() { path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db") } - ldb, err := db.OpenRO(path) + ldb, err := backend.OpenLevelDBRO(path) if err != nil { log.Fatal(err) } diff --git a/lib/config/tuning_test.go b/lib/config/tuning_test.go index 01ac04c14..d913492ff 100644 --- a/lib/config/tuning_test.go +++ b/lib/config/tuning_test.go @@ -10,17 +10,17 @@ import ( "testing" "github.com/syncthing/syncthing/lib/config" - "github.com/syncthing/syncthing/lib/db" + "github.com/syncthing/syncthing/lib/db/backend" ) func TestTuningMatches(t *testing.T) { - if int(config.TuningAuto) != int(db.TuningAuto) { + if int(config.TuningAuto) != int(backend.TuningAuto) { t.Error("mismatch for TuningAuto") } - if int(config.TuningSmall) != int(db.TuningSmall) { + if int(config.TuningSmall) != int(backend.TuningSmall) { t.Error("mismatch for TuningSmall") } - if int(config.TuningLarge) != int(db.TuningLarge) { + if int(config.TuningLarge) != int(backend.TuningLarge) { t.Error("mismatch for TuningLarge") } } diff --git a/lib/db/backend/backend.go b/lib/db/backend/backend.go new file mode 100644 index 
000000000..e5f6251b7 --- /dev/null +++ b/lib/db/backend/backend.go @@ -0,0 +1,170 @@ +// Copyright (C) 2019 The Syncthing Authors. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +package backend + +import ( + "sync" +) + +// The Reader interface specifies the read-only operations available on the +// main database and on read-only transactions (snapshots). Note that when +// called directly on the database handle these operations may take implicit +// transactions and performance may suffer. +type Reader interface { + Get(key []byte) ([]byte, error) + NewPrefixIterator(prefix []byte) (Iterator, error) + NewRangeIterator(first, last []byte) (Iterator, error) +} + +// The Writer interface specifies the mutating operations available on the +// main database and on writable transactions. Note that when called +// directly on the database handle these operations may take implicit +// transactions and performance may suffer. +type Writer interface { + Put(key, val []byte) error + Delete(key []byte) error +} + +// The ReadTransaction interface specifies the operations on read-only +// transactions. Every ReadTransaction must be released when no longer +// required. +type ReadTransaction interface { + Reader + Release() +} + +// The WriteTransaction interface specifies the operations on writable +// transactions. Every WriteTransaction must be either committed or released +// (i.e., discarded) when no longer required. No further operations must be +// performed after release or commit (regardless of whether commit succeeded), +// with one exception -- it's fine to release an already committed or released +// transaction. +// +// A Checkpoint is a potential partial commit of the transaction so far, for +// purposes of saving memory when transactions are in-RAM. Note that +// transactions may be checkpointed *anyway* even if this is not called, due to +// resource constraints, but this gives you a chance to decide when. +type WriteTransaction interface { + ReadTransaction + Writer + Checkpoint() error + Commit() error +} + +// The Iterator interface specifies the operations available on iterators +// returned by NewPrefixIterator and NewRangeIterator. The iterator pattern +// is to loop while Next returns true, then check Error after the loop. Next +// will return false when iteration is complete (Error() == nil) or when +// there is an error preventing iteration, which is then returned by +// Error(). For example: +// +// it, err := db.NewPrefixIterator(nil) +// if err != nil { +// // problem preventing iteration +// } +// defer it.Release() +// for it.Next() { +// // ... +// } +// if err := it.Error(); err != nil { +// // there was a database problem while iterating +// } +// +// An iterator must be Released when no longer required. The Error method +// can be called either before or after Release with the same results. If an +// iterator was created in a transaction (whether read-only or write) it +// must be released before the transaction is released (or committed). +type Iterator interface { + Next() bool + Key() []byte + Value() []byte + Error() error + Release() +} + +// The Backend interface represents the main database handle. It supports +// both read/write operations and opening read-only or writable +// transactions. 
Depending on the actual implementation, individual +// read/write operations may be implicitly wrapped in transactions, making +// them perform quite badly when used repeatedly. For bulk operations, +// consider always using a transaction of the appropriate type. The +// transaction isolation level is "read committed"; there are no dirty +// reads. +type Backend interface { + Reader + Writer + NewReadTransaction() (ReadTransaction, error) + NewWriteTransaction() (WriteTransaction, error) + Close() error +} + +type Tuning int + +const ( + // N.b. these constants must match those in lib/config.Tuning! + TuningAuto Tuning = iota + TuningSmall + TuningLarge +) + +func Open(path string, tuning Tuning) (Backend, error) { + return OpenLevelDB(path, tuning) +} + +func OpenMemory() Backend { + return OpenLevelDBMemory() +} + +type errClosed struct{} + +func (errClosed) Error() string { return "database is closed" } + +type errNotFound struct{} + +func (errNotFound) Error() string { return "key not found" } + +func IsClosed(err error) bool { + if _, ok := err.(errClosed); ok { + return true + } + if _, ok := err.(*errClosed); ok { + return true + } + return false +} + +func IsNotFound(err error) bool { + if _, ok := err.(errNotFound); ok { + return true + } + if _, ok := err.(*errNotFound); ok { + return true + } + return false +} + +// releaser manages counting on top of a waitgroup +type releaser struct { + wg *sync.WaitGroup + once *sync.Once +} + +func newReleaser(wg *sync.WaitGroup) *releaser { + wg.Add(1) + return &releaser{ + wg: wg, + once: new(sync.Once), + } +} + +func (r releaser) Release() { + // We use the Once because we may get called multiple times from + // Commit() and deferred Release(). + r.once.Do(func() { + r.wg.Done() + }) +} diff --git a/lib/db/backend/backend_test.go b/lib/db/backend/backend_test.go new file mode 100644 index 000000000..c77b5792d --- /dev/null +++ b/lib/db/backend/backend_test.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019 The Syncthing Authors. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +package backend + +import "testing" + +// testBackendBehavior is the generic test suite that must be fulfilled by +// every backend implementation. It should be called by each implementation +// as (part of) its test suite. +func testBackendBehavior(t *testing.T, open func() Backend) { + t.Run("WriteIsolation", func(t *testing.T) { testWriteIsolation(t, open) }) + t.Run("DeleteNonexistent", func(t *testing.T) { testDeleteNonexistent(t, open) }) +} + +func testWriteIsolation(t *testing.T, open func() Backend) { + // Values written during a transaction should not be read back; our + // updateGlobal depends on this.
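+	// (In the leveldb implementation below this holds because a
+	// transaction's reads go through the snapshot taken when the
+	// transaction was created, while its writes accumulate in a batch on
+	// the side and only reach the main database when the batch is
+	// flushed.)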
+ + db := open() + defer db.Close() + + // Sanity check + _ = db.Put([]byte("a"), []byte("a")) + v, _ := db.Get([]byte("a")) + if string(v) != "a" { + t.Fatal("read back should work") + } + + // Now in a transaction we should still see the old value + tx, _ := db.NewWriteTransaction() + defer tx.Release() + _ = tx.Put([]byte("a"), []byte("b")) + v, _ = tx.Get([]byte("a")) + if string(v) != "a" { + t.Fatal("read in transaction should read the old value") + } +} + +func testDeleteNonexistent(t *testing.T, open func() Backend) { + // Deleting a non-existent key is not an error + + db := open() + defer db.Close() + + err := db.Delete([]byte("a")) + if err != nil { + t.Error(err) + } +} diff --git a/lib/db/backend/debug.go b/lib/db/backend/debug.go new file mode 100644 index 000000000..bb6c365b0 --- /dev/null +++ b/lib/db/backend/debug.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019 The Syncthing Authors. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +package backend + +import ( + "github.com/syncthing/syncthing/lib/logger" +) + +var ( + l = logger.DefaultLogger.NewFacility("backend", "The database backend") +) diff --git a/lib/db/backend/leveldb_backend.go b/lib/db/backend/leveldb_backend.go new file mode 100644 index 000000000..e142ca0ba --- /dev/null +++ b/lib/db/backend/leveldb_backend.go @@ -0,0 +1,173 @@ +// Copyright (C) 2018 The Syncthing Authors. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +package backend + +import ( + "sync" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/util" +) + +const ( + // Never flush transactions smaller than this, even on Checkpoint() + dbFlushBatchMin = 1 << MiB + // Once a transaction reaches this size, flush it unconditionally. 
+ dbFlushBatchMax = 128 << MiB +) + +// leveldbBackend implements Backend on top of a leveldb +type leveldbBackend struct { + ldb *leveldb.DB + closeWG sync.WaitGroup +} + +func (b *leveldbBackend) NewReadTransaction() (ReadTransaction, error) { + return b.newSnapshot() +} + +func (b *leveldbBackend) newSnapshot() (leveldbSnapshot, error) { + snap, err := b.ldb.GetSnapshot() + if err != nil { + return leveldbSnapshot{}, wrapLeveldbErr(err) + } + return leveldbSnapshot{ + snap: snap, + rel: newReleaser(&b.closeWG), + }, nil +} + +func (b *leveldbBackend) NewWriteTransaction() (WriteTransaction, error) { + snap, err := b.newSnapshot() + if err != nil { + return nil, err // already wrapped + } + return &leveldbTransaction{ + leveldbSnapshot: snap, + ldb: b.ldb, + batch: new(leveldb.Batch), + rel: newReleaser(&b.closeWG), + }, nil +} + +func (b *leveldbBackend) Close() error { + b.closeWG.Wait() + return wrapLeveldbErr(b.ldb.Close()) +} + +func (b *leveldbBackend) Get(key []byte) ([]byte, error) { + val, err := b.ldb.Get(key, nil) + return val, wrapLeveldbErr(err) +} + +func (b *leveldbBackend) NewPrefixIterator(prefix []byte) (Iterator, error) { + return b.ldb.NewIterator(util.BytesPrefix(prefix), nil), nil +} + +func (b *leveldbBackend) NewRangeIterator(first, last []byte) (Iterator, error) { + return b.ldb.NewIterator(&util.Range{Start: first, Limit: last}, nil), nil +} + +func (b *leveldbBackend) Put(key, val []byte) error { + return wrapLeveldbErr(b.ldb.Put(key, val, nil)) +} + +func (b *leveldbBackend) Delete(key []byte) error { + return wrapLeveldbErr(b.ldb.Delete(key, nil)) +} + +// leveldbSnapshot implements backend.ReadTransaction +type leveldbSnapshot struct { + snap *leveldb.Snapshot + rel *releaser +} + +func (l leveldbSnapshot) Get(key []byte) ([]byte, error) { + val, err := l.snap.Get(key, nil) + return val, wrapLeveldbErr(err) +} + +func (l leveldbSnapshot) NewPrefixIterator(prefix []byte) (Iterator, error) { + return l.snap.NewIterator(util.BytesPrefix(prefix), nil), nil +} + +func (l leveldbSnapshot) NewRangeIterator(first, last []byte) (Iterator, error) { + return l.snap.NewIterator(&util.Range{Start: first, Limit: last}, nil), nil +} + +func (l leveldbSnapshot) Release() { + l.snap.Release() + l.rel.Release() +} + +// leveldbTransaction implements backend.WriteTransaction using a batch (not +// an actual leveldb transaction) +type leveldbTransaction struct { + leveldbSnapshot + ldb *leveldb.DB + batch *leveldb.Batch + rel *releaser +} + +func (t *leveldbTransaction) Delete(key []byte) error { + t.batch.Delete(key) + return t.checkFlush(dbFlushBatchMax) +} + +func (t *leveldbTransaction) Put(key, val []byte) error { + t.batch.Put(key, val) + return t.checkFlush(dbFlushBatchMax) +} + +func (t *leveldbTransaction) Checkpoint() error { + return t.checkFlush(dbFlushBatchMin) +} + +func (t *leveldbTransaction) Commit() error { + err := wrapLeveldbErr(t.flush()) + t.leveldbSnapshot.Release() + t.rel.Release() + return err +} + +func (t *leveldbTransaction) Release() { + t.leveldbSnapshot.Release() + t.rel.Release() +} + +// checkFlush flushes and resets the batch if its size exceeds the given size. 
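+// Put and Delete call checkFlush with dbFlushBatchMax, while Checkpoint
+// calls it with dbFlushBatchMin, so an explicit checkpoint flushes at a
+// lower threshold than organic batch growth does. A bulk writer can bound
+// its memory use by checkpointing as it goes; a minimal sketch, assuming an
+// opened Backend db and some slice kvs of key/value pairs to store (both
+// assumed for illustration):
+//
+//	tx, err := db.NewWriteTransaction()
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Release() // fine even after a successful Commit
+//	for _, kv := range kvs {
+//		if err := tx.Put(kv.key, kv.val); err != nil {
+//			return err
+//		}
+//		if err := tx.Checkpoint(); err != nil {
+//			return err
+//		}
+//	}
+//	return tx.Commit()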
+func (t *leveldbTransaction) checkFlush(size int) error { + if len(t.batch.Dump()) < size { + return nil + } + return t.flush() +} + +func (t *leveldbTransaction) flush() error { + if t.batch.Len() == 0 { + return nil + } + if err := t.ldb.Write(t.batch, nil); err != nil { + return wrapLeveldbErr(err) + } + t.batch.Reset() + return nil +} + +// wrapLeveldbErr wraps errors so that the backend package can recognize them +func wrapLeveldbErr(err error) error { + if err == nil { + return nil + } + if err == leveldb.ErrClosed { + return errClosed{} + } + if err == leveldb.ErrNotFound { + return errNotFound{} + } + return err +} diff --git a/lib/db/backend/leveldb_open.go b/lib/db/backend/leveldb_open.go new file mode 100644 index 000000000..781690851 --- /dev/null +++ b/lib/db/backend/leveldb_open.go @@ -0,0 +1,226 @@ +// Copyright (C) 2018 The Syncthing Authors. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +package backend + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/util" +) + +const ( + dbMaxOpenFiles = 100 + + // A large database is > 200 MiB. It's a mostly arbitrary value, but + // it's also the case that each file is 2 MiB by default and when we + // have dbMaxOpenFiles of them we will need to start thrashing fds. + // Switching to large database settings causes larger files to be used + // when compacting, reducing the number. + dbLargeThreshold = dbMaxOpenFiles * (2 << MiB) + + KiB = 10 + MiB = 20 +) + +// OpenLevelDB attempts to open the database at the given location, and runs +// recovery on it if opening fails. Worst case, if recovery is not possible, +// the database is erased and created from scratch. +func OpenLevelDB(location string, tuning Tuning) (Backend, error) { + opts := optsFor(location, tuning) + ldb, err := open(location, opts) + if err != nil { + return nil, err + } + return &leveldbBackend{ldb: ldb}, nil +} + +// OpenLevelDBRO attempts to open the database at the given location, read only. +func OpenLevelDBRO(location string) (Backend, error) { + opts := &opt.Options{ + OpenFilesCacheCapacity: dbMaxOpenFiles, + ReadOnly: true, + } + ldb, err := open(location, opts) + if err != nil { + return nil, err + } + return &leveldbBackend{ldb: ldb}, nil +} + +// OpenLevelDBMemory returns a Backend referencing an in-memory database. +func OpenLevelDBMemory() Backend { + ldb, _ := leveldb.Open(storage.NewMemStorage(), nil) + return &leveldbBackend{ldb: ldb} +} + +// optsFor returns the database options to use when opening a database with +// the given location and tuning. Settings can be overridden by debug +// environment variables. +func optsFor(location string, tuning Tuning) *opt.Options { + large := false + switch tuning { + case TuningLarge: + large = true + case TuningAuto: + large = dbIsLarge(location) + } + + var ( + // Set defaults used for small databases.
defaultBlockCacheCapacity = 0 // 0 means let leveldb use default + defaultBlockSize = 0 + defaultCompactionTableSize = 0 + defaultCompactionTableSizeMultiplier = 0 + defaultWriteBuffer = 16 << MiB // increased from leveldb default of 4 MiB + defaultCompactionL0Trigger = opt.DefaultCompactionL0Trigger // explicit because we use it as base for other stuff + ) + + if large { + // Change the parameters for better throughput at the price of some + // RAM and larger files. This results in larger batches of writes + // and compaction at a lower frequency. + l.Infoln("Using large-database tuning") + + defaultBlockCacheCapacity = 64 << MiB + defaultBlockSize = 64 << KiB + defaultCompactionTableSize = 16 << MiB + defaultCompactionTableSizeMultiplier = 20 // 2.0 after division by ten + defaultWriteBuffer = 64 << MiB + defaultCompactionL0Trigger = 8 // number of l0 files + } + + opts := &opt.Options{ + BlockCacheCapacity: debugEnvValue("BlockCacheCapacity", defaultBlockCacheCapacity), + BlockCacheEvictRemoved: debugEnvValue("BlockCacheEvictRemoved", 0) != 0, + BlockRestartInterval: debugEnvValue("BlockRestartInterval", 0), + BlockSize: debugEnvValue("BlockSize", defaultBlockSize), + CompactionExpandLimitFactor: debugEnvValue("CompactionExpandLimitFactor", 0), + CompactionGPOverlapsFactor: debugEnvValue("CompactionGPOverlapsFactor", 0), + CompactionL0Trigger: debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger), + CompactionSourceLimitFactor: debugEnvValue("CompactionSourceLimitFactor", 0), + CompactionTableSize: debugEnvValue("CompactionTableSize", defaultCompactionTableSize), + CompactionTableSizeMultiplier: float64(debugEnvValue("CompactionTableSizeMultiplier", defaultCompactionTableSizeMultiplier)) / 10.0, + CompactionTotalSize: debugEnvValue("CompactionTotalSize", 0), + CompactionTotalSizeMultiplier: float64(debugEnvValue("CompactionTotalSizeMultiplier", 0)) / 10.0, + DisableBufferPool: debugEnvValue("DisableBufferPool", 0) != 0, + DisableBlockCache: debugEnvValue("DisableBlockCache", 0) != 0, + DisableCompactionBackoff: debugEnvValue("DisableCompactionBackoff", 0) != 0, + DisableLargeBatchTransaction: debugEnvValue("DisableLargeBatchTransaction", 0) != 0, + NoSync: debugEnvValue("NoSync", 0) != 0, + NoWriteMerge: debugEnvValue("NoWriteMerge", 0) != 0, + OpenFilesCacheCapacity: debugEnvValue("OpenFilesCacheCapacity", dbMaxOpenFiles), + WriteBuffer: debugEnvValue("WriteBuffer", defaultWriteBuffer), + // The write slowdown and pause can be overridden, but even if they + // are not and the compaction trigger is overridden we need to + // adjust so that we don't pause writes for L0 compaction before we + // even *start* L0 compaction... + WriteL0SlowdownTrigger: debugEnvValue("WriteL0SlowdownTrigger", 2*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)), + WriteL0PauseTrigger: debugEnvValue("WriteL0PauseTrigger", 3*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)), + } + + return opts +} + +func open(location string, opts *opt.Options) (*leveldb.DB, error) { + db, err := leveldb.OpenFile(location, opts) + if leveldbIsCorrupted(err) { + db, err = leveldb.RecoverFile(location, opts) + } + if leveldbIsCorrupted(err) { + // The database is corrupted, and we've tried to recover it but it + // didn't work. At this point there isn't much to do beyond dropping + // the database and reindexing... + l.Infoln("Database corruption detected, unable to recover.
Reinitializing...") + if err := os.RemoveAll(location); err != nil { + return nil, errorSuggestion{err, "failed to delete corrupted database"} + } + db, err = leveldb.OpenFile(location, opts) + } + if err != nil { + return nil, errorSuggestion{err, "is another instance of Syncthing running?"} + } + + if debugEnvValue("CompactEverything", 0) != 0 { + if err := db.CompactRange(util.Range{}); err != nil { + l.Warnln("Compacting database:", err) + } + } + + return db, nil +} + +func debugEnvValue(key string, def int) int { + v, err := strconv.ParseInt(os.Getenv("STDEBUG_"+key), 10, 63) + if err != nil { + return def + } + return int(v) +} + +// A "better" version of leveldb's errors.IsCorrupted. +func leveldbIsCorrupted(err error) bool { + switch { + case err == nil: + return false + + case errors.IsCorrupted(err): + return true + + case strings.Contains(err.Error(), "corrupted"): + return true + } + + return false +} + +// dbIsLarge returns whether the estimated size of the database at location +// is large enough to warrant optimization for large databases. +func dbIsLarge(location string) bool { + if ^uint(0)>>63 == 0 { + // We're compiled for a 32 bit architecture. We've seen trouble with + // large settings there. + // (https://forum.syncthing.net/t/many-small-ldb-files-with-database-tuning/13842) + return false + } + + dir, err := os.Open(location) + if err != nil { + return false + } + + fis, err := dir.Readdir(-1) + if err != nil { + return false + } + + var size int64 + for _, fi := range fis { + if fi.Name() == "LOG" { + // don't count the size + continue + } + size += fi.Size() + } + + return size > dbLargeThreshold +} + +type errorSuggestion struct { + inner error + suggestion string +} + +func (e errorSuggestion) Error() string { + return fmt.Sprintf("%s (%s)", e.inner.Error(), e.suggestion) +} diff --git a/lib/db/backend/leveldb_test.go b/lib/db/backend/leveldb_test.go new file mode 100644 index 000000000..3de46ad90 --- /dev/null +++ b/lib/db/backend/leveldb_test.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019 The Syncthing Authors. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +package backend + +import "testing" + +func TestLevelDBBackendBehavior(t *testing.T) { + testBackendBehavior(t, OpenLevelDBMemory) +} diff --git a/lib/db/benchmark_test.go b/lib/db/benchmark_test.go index bddd39197..cb5ceb49b 100644 --- a/lib/db/benchmark_test.go +++ b/lib/db/benchmark_test.go @@ -11,6 +11,7 @@ import ( "testing" "github.com/syncthing/syncthing/lib/db" + "github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/protocol" ) @@ -40,7 +41,7 @@ func lazyInitBenchFiles() { func getBenchFileSet() (*db.Lowlevel, *db.FileSet) { lazyInitBenchFiles() - ldb := db.OpenMemory() + ldb := db.NewLowlevel(backend.OpenMemory()) benchS := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) replace(benchS, remoteDevice0, files) replace(benchS, protocol.LocalDeviceID, firstHalf) @@ -49,7 +50,7 @@ func getBenchFileSet() (*db.Lowlevel, *db.FileSet) { } func BenchmarkReplaceAll(b *testing.B) { - ldb := db.OpenMemory() + ldb := db.NewLowlevel(backend.OpenMemory()) defer ldb.Close() b.ResetTimer() @@ -157,7 +158,7 @@ func BenchmarkNeedHalf(b *testing.B) { } func BenchmarkNeedHalfRemote(b *testing.B) { - ldb := db.OpenMemory() + ldb := db.NewLowlevel(backend.OpenMemory()) defer ldb.Close() fset := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb) replace(fset, remoteDevice0, firstHalf) diff --git a/lib/db/blockmap.go b/lib/db/blockmap.go index 006eeb5a9..fafd0c7dc 100644 --- a/lib/db/blockmap.go +++ b/lib/db/blockmap.go @@ -11,8 +11,6 @@ import ( "fmt" "github.com/syncthing/syncthing/lib/osutil" - - "github.com/syndtr/goleveldb/leveldb/util" ) var blockFinder *BlockFinder @@ -41,13 +39,22 @@ func (f *BlockFinder) String() string { // reason. The iterator finally returns the result, whether or not a // satisfying block was eventually found. 
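// A sketch of typical use (db, folders and hash assumed); returning true
// from the callback accepts the block and ends the search:
//
//	f := NewBlockFinder(db)
//	found := f.Iterate(folders, hash, func(folder, file string, index int32) bool {
//		// hash matches block number index of file in folder
//		return true
//	})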
func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool { - t := f.db.newReadOnlyTransaction() + t, err := f.db.newReadOnlyTransaction() + if err != nil { + return false + } defer t.close() var key []byte for _, folder := range folders { - key = f.db.keyer.GenerateBlockMapKey(key, []byte(folder), hash, nil) - iter := t.NewIterator(util.BytesPrefix(key), nil) + key, err = f.db.keyer.GenerateBlockMapKey(key, []byte(folder), hash, nil) + if err != nil { + return false + } + iter, err := t.NewPrefixIterator(key) + if err != nil { + return false + } for iter.Next() && iter.Error() == nil { file := string(f.db.keyer.NameFromBlockMapKey(iter.Key())) diff --git a/lib/db/blockmap_test.go b/lib/db/blockmap_test.go index 98a289191..4517be8ee 100644 --- a/lib/db/blockmap_test.go +++ b/lib/db/blockmap_test.go @@ -10,23 +10,10 @@ import ( "encoding/binary" "testing" + "github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/protocol" - "github.com/syndtr/goleveldb/leveldb/util" ) -func genBlocks(n int) []protocol.BlockInfo { - b := make([]protocol.BlockInfo, n) - for i := range b { - h := make([]byte, 32) - for j := range h { - h[j] = byte(i + j) - } - b[i].Size = int32(i) - b[i].Hash = h - } - return b -} - var f1, f2, f3 protocol.FileInfo var folders = []string{"folder1", "folder2"} @@ -52,18 +39,24 @@ func init() { func setup() (*instance, *BlockFinder) { // Setup - db := OpenMemory() + db := NewLowlevel(backend.OpenMemory()) return newInstance(db), NewBlockFinder(db) } func dbEmpty(db *instance) bool { - iter := db.NewIterator(util.BytesPrefix([]byte{KeyTypeBlock}), nil) + iter, err := db.NewPrefixIterator([]byte{KeyTypeBlock}) + if err != nil { + panic(err) + } defer iter.Release() return !iter.Next() } -func addToBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) { - t := db.newReadWriteTransaction() +func addToBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var keyBuf []byte @@ -73,15 +66,24 @@ func addToBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) { name := []byte(f.Name) for i, block := range f.Blocks { binary.BigEndian.PutUint32(blockBuf, uint32(i)) - keyBuf = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) - t.Put(keyBuf, blockBuf) + keyBuf, err = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) + if err != nil { + return err + } + if err := t.Put(keyBuf, blockBuf); err != nil { + return err + } } } } + return t.commit() } -func discardFromBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) { - t := db.newReadWriteTransaction() +func discardFromBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var keyBuf []byte @@ -89,11 +91,17 @@ func discardFromBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) { if !ef.IsDirectory() && !ef.IsDeleted() && !ef.IsInvalid() { name := []byte(ef.Name) for _, block := range ef.Blocks { - keyBuf = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) - t.Delete(keyBuf) + keyBuf, err = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) + if err != nil { + return err + } + if err := t.Delete(keyBuf); err != nil { + return err + } } } } + return t.commit() } func TestBlockMapAddUpdateWipe(t *testing.T) { @@ -107,7 +115,9 @@ func TestBlockMapAddUpdateWipe(t *testing.T) { 
f3.Type = protocol.FileInfoTypeDirectory - addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}) + if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil { + t.Fatal(err) + } f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool { if folder != "folder1" || file != "f1" || index != 0 { @@ -128,12 +138,16 @@ func TestBlockMapAddUpdateWipe(t *testing.T) { return true }) - discardFromBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}) + if err := discardFromBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil { + t.Fatal(err) + } f1.Deleted = true f2.LocalFlags = protocol.FlagLocalMustRescan // one of the invalid markers - addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}) + if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil { + t.Fatal(err) + } f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool { t.Fatal("Unexpected block") @@ -152,14 +166,18 @@ func TestBlockMapAddUpdateWipe(t *testing.T) { return true }) - db.dropFolder(folder) + if err := db.dropFolder(folder); err != nil { + t.Fatal(err) + } if !dbEmpty(db) { t.Fatal("db not empty") } // Should not add - addToBlockMap(db, folder, []protocol.FileInfo{f1, f2}) + if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2}); err != nil { + t.Fatal(err) + } if !dbEmpty(db) { t.Fatal("db not empty") @@ -179,8 +197,12 @@ func TestBlockFinderLookup(t *testing.T) { folder1 := []byte("folder1") folder2 := []byte("folder2") - addToBlockMap(db, folder1, []protocol.FileInfo{f1}) - addToBlockMap(db, folder2, []protocol.FileInfo{f1}) + if err := addToBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil { + t.Fatal(err) + } + if err := addToBlockMap(db, folder2, []protocol.FileInfo{f1}); err != nil { + t.Fatal(err) + } counter := 0 f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool { @@ -204,11 +226,15 @@ func TestBlockFinderLookup(t *testing.T) { t.Fatal("Incorrect count", counter) } - discardFromBlockMap(db, folder1, []protocol.FileInfo{f1}) + if err := discardFromBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil { + t.Fatal(err) + } f1.Deleted = true - addToBlockMap(db, folder1, []protocol.FileInfo{f1}) + if err := addToBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil { + t.Fatal(err) + } counter = 0 f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool { diff --git a/lib/db/concurrency_test.go b/lib/db/concurrency_test.go deleted file mode 100644 index eb0d661d7..000000000 --- a/lib/db/concurrency_test.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (C) 2014 The Syncthing Authors. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at https://mozilla.org/MPL/2.0/. 
- -// this is a really tedious test for an old issue -// +build ignore - -package db_test - -import ( - "crypto/rand" - "log" - "os" - "testing" - "time" - - "github.com/syncthing/syncthing/lib/sync" - - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var keys [][]byte - -func init() { - for i := 0; i < nItems; i++ { - keys = append(keys, randomData(1)) - } -} - -const nItems = 10000 - -func randomData(prefix byte) []byte { - data := make([]byte, 1+32+64+32) - _, err := rand.Reader.Read(data) - if err != nil { - panic(err) - } - return append([]byte{prefix}, data...) -} - -func setItems(db *leveldb.DB) error { - batch := new(leveldb.Batch) - for _, k1 := range keys { - k2 := randomData(2) - // k2 -> data - batch.Put(k2, randomData(42)) - // k1 -> k2 - batch.Put(k1, k2) - } - if testing.Verbose() { - log.Printf("batch write (set) %p", batch) - } - return db.Write(batch, nil) -} - -func clearItems(db *leveldb.DB) error { - snap, err := db.GetSnapshot() - if err != nil { - return err - } - defer snap.Release() - - // Iterate over k2 - - it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) - defer it.Release() - - batch := new(leveldb.Batch) - for it.Next() { - k1 := it.Key() - k2 := it.Value() - - // k2 should exist - _, err := snap.Get(k2, nil) - if err != nil { - return err - } - - // Delete the k1 => k2 mapping first - batch.Delete(k1) - // Then the k2 => data mapping - batch.Delete(k2) - } - if testing.Verbose() { - log.Printf("batch write (clear) %p", batch) - } - return db.Write(batch, nil) -} - -func scanItems(db *leveldb.DB) error { - snap, err := db.GetSnapshot() - if testing.Verbose() { - log.Printf("snap create %p", snap) - } - if err != nil { - return err - } - defer func() { - if testing.Verbose() { - log.Printf("snap release %p", snap) - } - snap.Release() - }() - - // Iterate from the start of k2 space to the end - it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil) - defer it.Release() - - i := 0 - for it.Next() { - // k2 => k1 => data - k1 := it.Key() - k2 := it.Value() - _, err := snap.Get(k2, nil) - if err != nil { - log.Printf("k1: %x", k1) - log.Printf("k2: %x (missing)", k2) - return err - } - i++ - } - if testing.Verbose() { - log.Println("scanned", i) - } - return nil -} - -func TestConcurrentSetClear(t *testing.T) { - if testing.Short() { - return - } - - dur := 30 * time.Second - t0 := time.Now() - wg := sync.NewWaitGroup() - - os.RemoveAll("testdata/concurrent-set-clear.db") - db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10}) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll("testdata/concurrent-set-clear.db") - - errChan := make(chan error, 3) - - wg.Add(1) - go func() { - defer wg.Done() - for time.Since(t0) < dur { - if err := setItems(db); err != nil { - errChan <- err - return - } - if err := clearItems(db); err != nil { - errChan <- err - return - } - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for time.Since(t0) < dur { - if err := scanItems(db); err != nil { - errChan <- err - return - } - } - }() - - go func() { - wg.Wait() - errChan <- nil - }() - - err = <-errChan - if err != nil { - t.Error(err) - } - db.Close() -} - -func TestConcurrentSetOnly(t *testing.T) { - if testing.Short() { - return - } - - dur := 30 * time.Second - t0 := time.Now() - wg := sync.NewWaitGroup() - - os.RemoveAll("testdata/concurrent-set-only.db") - db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", 
&opt.Options{OpenFilesCacheCapacity: 10}) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll("testdata/concurrent-set-only.db") - - errChan := make(chan error, 3) - - wg.Add(1) - go func() { - defer wg.Done() - for time.Since(t0) < dur { - if err := setItems(db); err != nil { - errChan <- err - return - } - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for time.Since(t0) < dur { - if err := scanItems(db); err != nil { - errChan <- err - return - } - } - }() - - go func() { - wg.Wait() - errChan <- nil - }() - - err = <-errChan - if err != nil { - t.Error(err) - } -} diff --git a/lib/db/db_test.go b/lib/db/db_test.go index 076727a01..4db710f86 100644 --- a/lib/db/db_test.go +++ b/lib/db/db_test.go @@ -9,17 +9,33 @@ package db import ( "testing" + "github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/protocol" ) +func genBlocks(n int) []protocol.BlockInfo { + b := make([]protocol.BlockInfo, n) + for i := range b { + h := make([]byte, 32) + for j := range h { + h[j] = byte(i + j) + } + b[i].Size = int32(i) + b[i].Hash = h + } + return b +} + func TestIgnoredFiles(t *testing.T) { ldb, err := openJSONS("testdata/v0.14.48-ignoredfiles.db.jsons") if err != nil { t.Fatal(err) } - db := NewLowlevel(ldb, "") - UpdateSchema(db) + db := NewLowlevel(ldb) + if err := UpdateSchema(db); err != nil { + t.Fatal(err) + } fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db) @@ -142,25 +158,35 @@ func TestUpdate0to3(t *testing.T) { t.Fatal(err) } - db := newInstance(NewLowlevel(ldb, "")) + db := newInstance(NewLowlevel(ldb)) updater := schemaUpdater{db} folder := []byte(update0to3Folder) - updater.updateSchema0to1() + if err := updater.updateSchema0to1(); err != nil { + t.Fatal(err) + } - if _, ok := db.getFileDirty(folder, protocol.LocalDeviceID[:], []byte(slashPrefixed)); ok { + if _, ok, err := db.getFileDirty(folder, protocol.LocalDeviceID[:], []byte(slashPrefixed)); err != nil { + t.Fatal(err) + } else if ok { t.Error("File prefixed by '/' was not removed during transition to schema 1") } - if _, err := db.Get(db.keyer.GenerateGlobalVersionKey(nil, folder, []byte(invalid)), nil); err != nil { + key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, []byte(invalid)) + if err != nil { + t.Fatal(err) + } + if _, err := db.Get(key); err != nil { t.Error("Invalid file wasn't added to global list") } - updater.updateSchema1to2() + if err := updater.updateSchema1to2(); err != nil { + t.Fatal(err) + } found := false - db.withHaveSequence(folder, 0, func(fi FileIntf) bool { + _ = db.withHaveSequence(folder, 0, func(fi FileIntf) bool { f := fi.(protocol.FileInfo) l.Infoln(f) if found { @@ -178,14 +204,16 @@ func TestUpdate0to3(t *testing.T) { t.Error("Local file wasn't added to sequence bucket", err) } - updater.updateSchema2to3() + if err := updater.updateSchema2to3(); err != nil { + t.Fatal(err) + } need := map[string]protocol.FileInfo{ haveUpdate0to3[remoteDevice0][0].Name: haveUpdate0to3[remoteDevice0][0], haveUpdate0to3[remoteDevice1][0].Name: haveUpdate0to3[remoteDevice1][0], haveUpdate0to3[remoteDevice0][2].Name: haveUpdate0to3[remoteDevice0][2], } - db.withNeed(folder, protocol.LocalDeviceID[:], false, func(fi FileIntf) bool { + _ = db.withNeed(folder, protocol.LocalDeviceID[:], false, func(fi FileIntf) bool { e, ok := need[fi.FileName()] if !ok { t.Error("Got unexpected needed file:", fi.FileName()) @@ -203,12 +231,17 @@ func TestUpdate0to3(t *testing.T) { } func TestDowngrade(t *testing.T) { - 
db := OpenMemory() - UpdateSchema(db) // sets the min version etc + db := NewLowlevel(backend.OpenMemory()) + // sets the min version etc + if err := UpdateSchema(db); err != nil { + t.Fatal(err) + } // Bump the database version to something newer than we actually support miscDB := NewMiscDataNamespace(db) - miscDB.PutInt64("dbVersion", dbVersion+1) + if err := miscDB.PutInt64("dbVersion", dbVersion+1); err != nil { + t.Fatal(err) + } l.Infoln(dbVersion) // Pretend we just opened the DB and attempt to update it again diff --git a/lib/db/instance.go b/lib/db/instance.go index 9dc08db19..98edafa19 100644 --- a/lib/db/instance.go +++ b/lib/db/instance.go @@ -9,11 +9,9 @@ package db import ( "bytes" "encoding/binary" - "fmt" + "github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/protocol" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/util" ) type instance struct { @@ -30,17 +28,26 @@ func newInstance(ll *Lowlevel) *instance { // updateRemoteFiles adds a list of fileinfos to the database and updates the // global versionlist and metadata. -func (db *instance) updateRemoteFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) { - t := db.newReadWriteTransaction() +func (db *instance) updateRemoteFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var dk, gk, keyBuf []byte devID := protocol.DeviceIDFromBytes(device) for _, f := range fs { name := []byte(f.Name) - dk = db.keyer.GenerateDeviceFileKey(dk, folder, device, name) + dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, device, name) + if err != nil { + return err + } - ef, ok := t.getFileTrunc(dk, true) + ef, ok, err := t.getFileTrunc(dk, true) + if err != nil { + return err + } if ok && unchanged(f, ef) { continue } @@ -51,28 +58,49 @@ func (db *instance) updateRemoteFiles(folder, device []byte, fs []protocol.FileI meta.addFile(devID, f) l.Debugf("insert; folder=%q device=%v %v", folder, devID, f) - t.Put(dk, mustMarshal(&f)) + if err := t.Put(dk, mustMarshal(&f)); err != nil { + return err + } - gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name) - keyBuf, _ = t.updateGlobal(gk, keyBuf, folder, device, f, meta) + gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name) + if err != nil { + return err + } + keyBuf, _, err = t.updateGlobal(gk, keyBuf, folder, device, f, meta) + if err != nil { + return err + } - t.checkFlush() + if err := t.Checkpoint(); err != nil { + return err + } } + + return t.commit() } // updateLocalFiles adds fileinfos to the db, and updates the global versionlist, // metadata, sequence and blockmap buckets. 
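+// For a file that changed, the stale block map and sequence entries are
+// deleted before the new ones are inserted, and the whole update runs in
+// one read-write transaction with a checkpoint after each file.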
-func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta *metadataTracker) { - t := db.newReadWriteTransaction() +func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta *metadataTracker) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var dk, gk, keyBuf []byte blockBuf := make([]byte, 4) for _, f := range fs { name := []byte(f.Name) - dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name) + dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name) + if err != nil { + return err + } - ef, ok := t.getFileByKey(dk) + ef, ok, err := t.getFileByKey(dk) + if err != nil { + return err + } if ok && unchanged(f, ef) { continue } @@ -80,13 +108,23 @@ func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta if ok { if !ef.IsDirectory() && !ef.IsDeleted() && !ef.IsInvalid() { for _, block := range ef.Blocks { - keyBuf = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) - t.Delete(keyBuf) + keyBuf, err = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) + if err != nil { + return err + } + if err := t.Delete(keyBuf); err != nil { + return err + } } } - keyBuf = db.keyer.GenerateSequenceKey(keyBuf, folder, ef.SequenceNo()) - t.Delete(keyBuf) + keyBuf, err = db.keyer.GenerateSequenceKey(keyBuf, folder, ef.SequenceNo()) + if err != nil { + return err + } + if err := t.Delete(keyBuf); err != nil { + return err + } l.Debugf("removing sequence; folder=%q sequence=%v %v", folder, ef.SequenceNo(), ef.FileName()) } @@ -98,29 +136,54 @@ func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta meta.addFile(protocol.LocalDeviceID, f) l.Debugf("insert (local); folder=%q %v", folder, f) - t.Put(dk, mustMarshal(&f)) + if err := t.Put(dk, mustMarshal(&f)); err != nil { + return err + } - gk = db.keyer.GenerateGlobalVersionKey(gk, folder, []byte(f.Name)) - keyBuf, _ = t.updateGlobal(gk, keyBuf, folder, protocol.LocalDeviceID[:], f, meta) + gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, []byte(f.Name)) + if err != nil { + return err + } + keyBuf, _, err = t.updateGlobal(gk, keyBuf, folder, protocol.LocalDeviceID[:], f, meta) + if err != nil { + return err + } - keyBuf = db.keyer.GenerateSequenceKey(keyBuf, folder, f.Sequence) - t.Put(keyBuf, dk) + keyBuf, err = db.keyer.GenerateSequenceKey(keyBuf, folder, f.Sequence) + if err != nil { + return err + } + if err := t.Put(keyBuf, dk); err != nil { + return err + } l.Debugf("adding sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name) if !f.IsDirectory() && !f.IsDeleted() && !f.IsInvalid() { for i, block := range f.Blocks { binary.BigEndian.PutUint32(blockBuf, uint32(i)) - keyBuf = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) - t.Put(keyBuf, blockBuf) + keyBuf, err = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name) + if err != nil { + return err + } + if err := t.Put(keyBuf, blockBuf); err != nil { + return err + } } } - t.checkFlush() + if err := t.Checkpoint(); err != nil { + return err + } } + + return t.commit() } -func (db *instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) { - t := db.newReadOnlyTransaction() +func (db *instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) error { + t, err := db.newReadOnlyTransaction() + if err != nil { + return err + } defer t.close() if len(prefix) > 0 { @@ -131,18 +194,31 @@ func (db *instance) 
withHave(folder, device, prefix []byte, truncate bool, fn It prefix = append(prefix, '/') } - if f, ok := t.getFileTrunc(db.keyer.GenerateDeviceFileKey(nil, folder, device, unslashedPrefix), true); ok && !fn(f) { - return + key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, unslashedPrefix) + if err != nil { + return err + } + if f, ok, err := t.getFileTrunc(key, true); err != nil { + return err + } else if ok && !fn(f) { + return nil } } - dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateDeviceFileKey(nil, folder, device, prefix)), nil) + key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, prefix) + if err != nil { + return err + } + dbi, err := t.NewPrefixIterator(key) + if err != nil { + return err + } defer dbi.Release() for dbi.Next() { name := db.keyer.NameFromDeviceFileKey(dbi.Key()) if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) { - return + return nil } f, err := unmarshalTrunc(dbi.Value(), truncate) @@ -151,20 +227,38 @@ func (db *instance) withHave(folder, device, prefix []byte, truncate bool, fn It continue } if !fn(f) { - return + return nil } } + return dbi.Error() } -func (db *instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator) { - t := db.newReadOnlyTransaction() +func (db *instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator) error { + t, err := db.newReadOnlyTransaction() + if err != nil { + return err + } defer t.close() - dbi := t.NewIterator(&util.Range{Start: db.keyer.GenerateSequenceKey(nil, folder, startSeq), Limit: db.keyer.GenerateSequenceKey(nil, folder, maxInt64)}, nil) + first, err := db.keyer.GenerateSequenceKey(nil, folder, startSeq) + if err != nil { + return err + } + last, err := db.keyer.GenerateSequenceKey(nil, folder, maxInt64) + if err != nil { + return err + } + dbi, err := t.NewRangeIterator(first, last) + if err != nil { + return err + } defer dbi.Release() for dbi.Next() { - f, ok := t.getFileByKey(dbi.Value()) + f, ok, err := t.getFileByKey(dbi.Value()) + if err != nil { + return err + } if !ok { l.Debugln("missing file for sequence number", db.keyer.SequenceFromSequenceKey(dbi.Key())) continue @@ -177,16 +271,27 @@ func (db *instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator) } } if !fn(f) { - return + return nil } } + return dbi.Error() } -func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) { - t := db.newReadWriteTransaction() +func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() - dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil).WithoutNameAndDevice()), nil) + key, err := db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil) + if err != nil { + return err + } + dbi, err := t.NewPrefixIterator(key.WithoutNameAndDevice()) + if err != nil { + return err + } defer dbi.Release() var gk, keyBuf []byte @@ -194,8 +299,9 @@ func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte, device, ok := db.keyer.DeviceFromDeviceFileKey(dbi.Key()) if !ok { // Not having the device in the index is bad. Clear it. - t.Delete(dbi.Key()) - t.checkFlush() + if err := t.Delete(dbi.Key()); err != nil { + return err + } continue } var f FileInfoTruncated @@ -205,42 +311,61 @@ func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte, // we need to copy it. 
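// (dbi.Value() returns a slice into the iterator's internal buffer, which
// is reused on the next call to Next, so the append below detaches a
// private copy before unmarshalling.)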
err := f.Unmarshal(append([]byte{}, dbi.Value()...)) if err != nil { - l.Debugln("unmarshal error:", err) - continue + return err } switch f.Name { case "", ".", "..", "/": // A few obviously invalid filenames l.Infof("Dropping invalid filename %q from database", f.Name) name := []byte(f.Name) - gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name) - keyBuf = t.removeFromGlobal(gk, keyBuf, folder, device, name, nil) - t.Delete(dbi.Key()) - t.checkFlush() + gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name) + if err != nil { + return err + } + keyBuf, err = t.removeFromGlobal(gk, keyBuf, folder, device, name, nil) + if err != nil { + return err + } + if err := t.Delete(dbi.Key()); err != nil { + return err + } continue } if !fn(device, f) { - return + return nil } } + if err := dbi.Error(); err != nil { + return err + } + return t.commit() } -func (db *instance) getFileDirty(folder, device, file []byte) (protocol.FileInfo, bool) { - t := db.newReadOnlyTransaction() +func (db *instance) getFileDirty(folder, device, file []byte) (protocol.FileInfo, bool, error) { + t, err := db.newReadOnlyTransaction() + if err != nil { + return protocol.FileInfo{}, false, err + } defer t.close() return t.getFile(folder, device, file) } -func (db *instance) getGlobalDirty(folder, file []byte, truncate bool) (FileIntf, bool) { - t := db.newReadOnlyTransaction() +func (db *instance) getGlobalDirty(folder, file []byte, truncate bool) (FileIntf, bool, error) { + t, err := db.newReadOnlyTransaction() + if err != nil { + return nil, false, err + } defer t.close() - _, f, ok := t.getGlobal(nil, folder, file, truncate) - return f, ok + _, f, ok, err := t.getGlobal(nil, folder, file, truncate) + return f, ok, err } -func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) { - t := db.newReadOnlyTransaction() +func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) error { + t, err := db.newReadOnlyTransaction() + if err != nil { + return err + } defer t.close() if len(prefix) > 0 { @@ -251,19 +376,28 @@ func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator prefix = append(prefix, '/') } - if _, f, ok := t.getGlobal(nil, folder, unslashedPrefix, truncate); ok && !fn(f) { - return + if _, f, ok, err := t.getGlobal(nil, folder, unslashedPrefix, truncate); err != nil { + return err + } else if ok && !fn(f) { + return nil } } - dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateGlobalVersionKey(nil, folder, prefix)), nil) + key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, prefix) + if err != nil { + return err + } + dbi, err := t.NewPrefixIterator(key) + if err != nil { + return err + } defer dbi.Release() var dk []byte for dbi.Next() { name := db.keyer.NameFromGlobalVersionKey(dbi.Key()) if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) { - return + return nil } vl, ok := unmarshalVersionList(dbi.Value()) @@ -271,33 +405,45 @@ func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator continue } - dk = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name) + dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name) + if err != nil { + return err + } - f, ok := t.getFileTrunc(dk, truncate) + f, ok, err := t.getFileTrunc(dk, truncate) + if err != nil { + return err + } if !ok { continue } if !fn(f) { - return + return nil } } + if err != nil { + return err + } + return dbi.Error() } -func (db *instance) availability(folder, file []byte) 
[]protocol.DeviceID { - k := db.keyer.GenerateGlobalVersionKey(nil, folder, file) - bs, err := db.Get(k, nil) - if err == leveldb.ErrNotFound { - return nil +func (db *instance) availability(folder, file []byte) ([]protocol.DeviceID, error) { + k, err := db.keyer.GenerateGlobalVersionKey(nil, folder, file) + if err != nil { + return nil, err + } + bs, err := db.Get(k) + if backend.IsNotFound(err) { + return nil, nil } if err != nil { - l.Debugln("surprise error:", err) - return nil + return nil, err } vl, ok := unmarshalVersionList(bs) if !ok { - return nil + return nil, nil } var devices []protocol.DeviceID @@ -312,19 +458,28 @@ func (db *instance) availability(folder, file []byte) []protocol.DeviceID { devices = append(devices, n) } - return devices + return devices, nil } -func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator) { +func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator) error { if bytes.Equal(device, protocol.LocalDeviceID[:]) { - db.withNeedLocal(folder, truncate, fn) - return + return db.withNeedLocal(folder, truncate, fn) } - t := db.newReadOnlyTransaction() + t, err := db.newReadOnlyTransaction() + if err != nil { + return err + } defer t.close() - dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateGlobalVersionKey(nil, folder, nil).WithoutName()), nil) + key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, nil) + if err != nil { + return err + } + dbi, err := t.NewPrefixIterator(key.WithoutName()) + if err != nil { + return err + } defer dbi.Release() var dk []byte @@ -358,8 +513,14 @@ func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator) continue } - dk = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[i].Device, name) - gf, ok := t.getFileTrunc(dk, truncate) + dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[i].Device, name) + if err != nil { + return err + } + gf, ok, err := t.getFileTrunc(dk, truncate) + if err != nil { + return err + } if !ok { continue } @@ -372,81 +533,171 @@ func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator) l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, devID, name, have, haveFV.Invalid, haveFV.Version, needVersion, needDevice) if !fn(gf) { - return + return nil } // This file is handled, no need to look further in the version list break } } + return dbi.Error() } -func (db *instance) withNeedLocal(folder []byte, truncate bool, fn Iterator) { - t := db.newReadOnlyTransaction() +func (db *instance) withNeedLocal(folder []byte, truncate bool, fn Iterator) error { + t, err := db.newReadOnlyTransaction() + if err != nil { + return err + } defer t.close() - dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateNeedFileKey(nil, folder, nil).WithoutName()), nil) + key, err := db.keyer.GenerateNeedFileKey(nil, folder, nil) + if err != nil { + return err + } + dbi, err := t.NewPrefixIterator(key.WithoutName()) + if err != nil { + return err + } defer dbi.Release() var keyBuf []byte var f FileIntf var ok bool for dbi.Next() { - keyBuf, f, ok = t.getGlobal(keyBuf, folder, db.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate) + keyBuf, f, ok, err = t.getGlobal(keyBuf, folder, db.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate) + if err != nil { + return err + } if !ok { continue } if !fn(f) { - return + return nil } } + return dbi.Error() } -func (db *instance) dropFolder(folder []byte) { - t := db.newReadWriteTransaction() - defer t.close() - - for _, 
key := range [][]byte{ - // Remove all items related to the given folder from the device->file bucket - db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil).WithoutNameAndDevice(), - // Remove all sequences related to the folder - db.keyer.GenerateSequenceKey(nil, []byte(folder), 0).WithoutSequence(), - // Remove all items related to the given folder from the global bucket - db.keyer.GenerateGlobalVersionKey(nil, folder, nil).WithoutName(), - // Remove all needs related to the folder - db.keyer.GenerateNeedFileKey(nil, folder, nil).WithoutName(), - // Remove the blockmap of the folder - db.keyer.GenerateBlockMapKey(nil, folder, nil, nil).WithoutHashAndName(), - } { - t.deleteKeyPrefix(key) +func (db *instance) dropFolder(folder []byte) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err } -} - -func (db *instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) { - t := db.newReadWriteTransaction() defer t.close() - dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateDeviceFileKey(nil, folder, device, nil)), nil) - defer dbi.Release() + // Remove all items related to the given folder from the device->file bucket + k0, err := db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil) + if err != nil { + return err + } + if err := t.deleteKeyPrefix(k0.WithoutNameAndDevice()); err != nil { + return err + } + // Remove all sequences related to the folder + k1, err := db.keyer.GenerateSequenceKey(nil, folder, 0) + if err != nil { + return err + } + if err := t.deleteKeyPrefix(k1.WithoutSequence()); err != nil { + return err + } + + // Remove all items related to the given folder from the global bucket + k2, err := db.keyer.GenerateGlobalVersionKey(nil, folder, nil) + if err != nil { + return err + } + if err := t.deleteKeyPrefix(k2.WithoutName()); err != nil { + return err + } + + // Remove all needs related to the folder + k3, err := db.keyer.GenerateNeedFileKey(nil, folder, nil) + if err != nil { + return err + } + if err := t.deleteKeyPrefix(k3.WithoutName()); err != nil { + return err + } + + // Remove the blockmap of the folder + k4, err := db.keyer.GenerateBlockMapKey(nil, folder, nil, nil) + if err != nil { + return err + } + if err := t.deleteKeyPrefix(k4.WithoutHashAndName()); err != nil { + return err + } + + return t.commit() +} + +func (db *instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } + defer t.close() + + key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, nil) + if err != nil { + return err + } + dbi, err := t.NewPrefixIterator(key) + if err != nil { + return err + } var gk, keyBuf []byte for dbi.Next() { name := db.keyer.NameFromDeviceFileKey(dbi.Key()) - gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name) - keyBuf = t.removeFromGlobal(gk, keyBuf, folder, device, name, meta) - t.Delete(dbi.Key()) - t.checkFlush() + gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name) + if err != nil { + return err + } + keyBuf, err = t.removeFromGlobal(gk, keyBuf, folder, device, name, meta) + if err != nil { + return err + } + if err := t.Delete(dbi.Key()); err != nil { + return err + } + if err := t.Checkpoint(); err != nil { + return err + } } + if err := dbi.Error(); err != nil { + return err + } + dbi.Release() + if bytes.Equal(device, protocol.LocalDeviceID[:]) { - t.deleteKeyPrefix(db.keyer.GenerateBlockMapKey(nil, folder, nil, nil).WithoutHashAndName()) + key, err := db.keyer.GenerateBlockMapKey(nil, folder, 
nil, nil) + if err != nil { + return err + } + if err := t.deleteKeyPrefix(key.WithoutHashAndName()); err != nil { + return err + } } + return t.commit() } -func (db *instance) checkGlobals(folder []byte, meta *metadataTracker) { - t := db.newReadWriteTransaction() +func (db *instance) checkGlobals(folder []byte, meta *metadataTracker) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() - dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateGlobalVersionKey(nil, folder, nil).WithoutName()), nil) + key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, nil) + if err != nil { + return err + } + dbi, err := t.NewPrefixIterator(key.WithoutName()) + if err != nil { + return err + } defer dbi.Release() var dk []byte @@ -464,66 +715,98 @@ func (db *instance) checkGlobals(folder []byte, meta *metadataTracker) { name := db.keyer.NameFromGlobalVersionKey(dbi.Key()) var newVL VersionList for i, version := range vl.Versions { - dk = db.keyer.GenerateDeviceFileKey(dk, folder, version.Device, name) - _, err := t.Get(dk, nil) - if err == leveldb.ErrNotFound { + dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, version.Device, name) + if err != nil { + return err + } + _, err := t.Get(dk) + if backend.IsNotFound(err) { continue } if err != nil { - l.Debugln("surprise error:", err) - return + return err } newVL.Versions = append(newVL.Versions, version) if i == 0 { - if fi, ok := t.getFileByKey(dk); ok { + if fi, ok, err := t.getFileByKey(dk); err != nil { + return err + } else if ok { meta.addFile(protocol.GlobalDeviceID, fi) } } } if len(newVL.Versions) != len(vl.Versions) { - t.Put(dbi.Key(), mustMarshal(&newVL)) - t.checkFlush() + if err := t.Put(dbi.Key(), mustMarshal(&newVL)); err != nil { + return err + } } } + if err := dbi.Error(); err != nil { + return err + } + l.Debugf("db check completed for %q", folder) + return t.commit() } -func (db *instance) getIndexID(device, folder []byte) protocol.IndexID { - cur, err := db.Get(db.keyer.GenerateIndexIDKey(nil, device, folder), nil) +func (db *instance) getIndexID(device, folder []byte) (protocol.IndexID, error) { + key, err := db.keyer.GenerateIndexIDKey(nil, device, folder) if err != nil { - return 0 + return 0, err + } + cur, err := db.Get(key) + if backend.IsNotFound(err) { + return 0, nil + } else if err != nil { + return 0, err } var id protocol.IndexID if err := id.Unmarshal(cur); err != nil { - return 0 + return 0, nil } - return id + return id, nil } -func (db *instance) setIndexID(device, folder []byte, id protocol.IndexID) { +func (db *instance) setIndexID(device, folder []byte, id protocol.IndexID) error { bs, _ := id.Marshal() // marshalling can't fail - if err := db.Put(db.keyer.GenerateIndexIDKey(nil, device, folder), bs, nil); err != nil && err != leveldb.ErrClosed { - panic("storing index ID: " + err.Error()) + key, err := db.keyer.GenerateIndexIDKey(nil, device, folder) + if err != nil { + return err } + return db.Put(key, bs) } -func (db *instance) dropMtimes(folder []byte) { - db.dropPrefix(db.keyer.GenerateMtimesKey(nil, folder)) +func (db *instance) dropMtimes(folder []byte) error { + key, err := db.keyer.GenerateMtimesKey(nil, folder) + if err != nil { + return err + } + return db.dropPrefix(key) } -func (db *instance) dropFolderMeta(folder []byte) { - db.dropPrefix(db.keyer.GenerateFolderMetaKey(nil, folder)) +func (db *instance) dropFolderMeta(folder []byte) error { + key, err := db.keyer.GenerateFolderMetaKey(nil, folder) + if err != nil { + return err + } + return 
db.dropPrefix(key) } -func (db *instance) dropPrefix(prefix []byte) { - t := db.newReadWriteTransaction() +func (db *instance) dropPrefix(prefix []byte) error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() - t.deleteKeyPrefix(prefix) + if err := t.deleteKeyPrefix(prefix); err != nil { + return err + } + return t.commit() } func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) { @@ -551,15 +834,6 @@ func unmarshalVersionList(data []byte) (VersionList, bool) { return vl, true } -type errorSuggestion struct { - inner error - suggestion string -} - -func (e errorSuggestion) Error() string { - return fmt.Sprintf("%s (%s)", e.inner.Error(), e.suggestion) -} - // unchanged checks if two files are the same and thus don't need to be updated. // Local flags or the invalid bit might change without the version // being bumped. diff --git a/lib/db/keyer.go b/lib/db/keyer.go index b5822642e..1f88d5d65 100644 --- a/lib/db/keyer.go +++ b/lib/db/keyer.go @@ -63,36 +63,36 @@ const ( type keyer interface { // device file key stuff - GenerateDeviceFileKey(key, folder, device, name []byte) deviceFileKey + GenerateDeviceFileKey(key, folder, device, name []byte) (deviceFileKey, error) NameFromDeviceFileKey(key []byte) []byte DeviceFromDeviceFileKey(key []byte) ([]byte, bool) FolderFromDeviceFileKey(key []byte) ([]byte, bool) // global version key stuff - GenerateGlobalVersionKey(key, folder, name []byte) globalVersionKey + GenerateGlobalVersionKey(key, folder, name []byte) (globalVersionKey, error) NameFromGlobalVersionKey(key []byte) []byte FolderFromGlobalVersionKey(key []byte) ([]byte, bool) // block map key stuff (former BlockMap) - GenerateBlockMapKey(key, folder, hash, name []byte) blockMapKey + GenerateBlockMapKey(key, folder, hash, name []byte) (blockMapKey, error) NameFromBlockMapKey(key []byte) []byte // file need index - GenerateNeedFileKey(key, folder, name []byte) needFileKey + GenerateNeedFileKey(key, folder, name []byte) (needFileKey, error) // file sequence index - GenerateSequenceKey(key, folder []byte, seq int64) sequenceKey + GenerateSequenceKey(key, folder []byte, seq int64) (sequenceKey, error) SequenceFromSequenceKey(key []byte) int64 // index IDs - GenerateIndexIDKey(key, device, folder []byte) indexIDKey + GenerateIndexIDKey(key, device, folder []byte) (indexIDKey, error) DeviceFromIndexIDKey(key []byte) ([]byte, bool) // Mtimes - GenerateMtimesKey(key, folder []byte) mtimesKey + GenerateMtimesKey(key, folder []byte) (mtimesKey, error) // Folder metadata - GenerateFolderMetaKey(key, folder []byte) folderMetaKey + GenerateFolderMetaKey(key, folder []byte) (folderMetaKey, error) } // defaultKeyer implements our key scheme. 
It needs folder and device @@ -115,13 +115,21 @@ func (k deviceFileKey) WithoutNameAndDevice() []byte { return k[:keyPrefixLen+keyFolderLen] } -func (k defaultKeyer) GenerateDeviceFileKey(key, folder, device, name []byte) deviceFileKey { +func (k defaultKeyer) GenerateDeviceFileKey(key, folder, device, name []byte) (deviceFileKey, error) { + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } + deviceID, err := k.deviceIdx.ID(device) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyFolderLen+keyDeviceLen+len(name)) key[0] = KeyTypeDevice - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder)) - binary.BigEndian.PutUint32(key[keyPrefixLen+keyFolderLen:], k.deviceIdx.ID(device)) + binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID) + binary.BigEndian.PutUint32(key[keyPrefixLen+keyFolderLen:], deviceID) copy(key[keyPrefixLen+keyFolderLen+keyDeviceLen:], name) - return key + return key, nil } func (k defaultKeyer) NameFromDeviceFileKey(key []byte) []byte { @@ -142,12 +150,16 @@ func (k globalVersionKey) WithoutName() []byte { return k[:keyPrefixLen+keyFolderLen] } -func (k defaultKeyer) GenerateGlobalVersionKey(key, folder, name []byte) globalVersionKey { +func (k defaultKeyer) GenerateGlobalVersionKey(key, folder, name []byte) (globalVersionKey, error) { + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyFolderLen+len(name)) key[0] = KeyTypeGlobal - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder)) + binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID) copy(key[keyPrefixLen+keyFolderLen:], name) - return key + return key, nil } func (k defaultKeyer) NameFromGlobalVersionKey(key []byte) []byte { @@ -160,13 +172,17 @@ func (k defaultKeyer) FolderFromGlobalVersionKey(key []byte) ([]byte, bool) { type blockMapKey []byte -func (k defaultKeyer) GenerateBlockMapKey(key, folder, hash, name []byte) blockMapKey { +func (k defaultKeyer) GenerateBlockMapKey(key, folder, hash, name []byte) (blockMapKey, error) { + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyFolderLen+keyHashLen+len(name)) key[0] = KeyTypeBlock - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder)) + binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID) copy(key[keyPrefixLen+keyFolderLen:], hash) copy(key[keyPrefixLen+keyFolderLen+keyHashLen:], name) - return key + return key, nil } func (k defaultKeyer) NameFromBlockMapKey(key []byte) []byte { @@ -183,12 +199,16 @@ func (k needFileKey) WithoutName() []byte { return k[:keyPrefixLen+keyFolderLen] } -func (k defaultKeyer) GenerateNeedFileKey(key, folder, name []byte) needFileKey { +func (k defaultKeyer) GenerateNeedFileKey(key, folder, name []byte) (needFileKey, error) { + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyFolderLen+len(name)) key[0] = KeyTypeNeed - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder)) + binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID) copy(key[keyPrefixLen+keyFolderLen:], name) - return key + return key, nil } type sequenceKey []byte @@ -197,12 +217,16 @@ func (k sequenceKey) WithoutSequence() []byte { return k[:keyPrefixLen+keyFolderLen] } -func (k defaultKeyer) GenerateSequenceKey(key, folder []byte, seq int64) sequenceKey { +func (k defaultKeyer) GenerateSequenceKey(key, folder []byte, seq int64) (sequenceKey, error) 
{ + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyFolderLen+keySequenceLen) key[0] = KeyTypeSequence - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder)) + binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID) binary.BigEndian.PutUint64(key[keyPrefixLen+keyFolderLen:], uint64(seq)) - return key + return key, nil } func (k defaultKeyer) SequenceFromSequenceKey(key []byte) int64 { @@ -211,12 +235,20 @@ func (k defaultKeyer) SequenceFromSequenceKey(key []byte) int64 { type indexIDKey []byte -func (k defaultKeyer) GenerateIndexIDKey(key, device, folder []byte) indexIDKey { +func (k defaultKeyer) GenerateIndexIDKey(key, device, folder []byte) (indexIDKey, error) { + deviceID, err := k.deviceIdx.ID(device) + if err != nil { + return nil, err + } + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyDeviceLen+keyFolderLen) key[0] = KeyTypeIndexID - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.deviceIdx.ID(device)) - binary.BigEndian.PutUint32(key[keyPrefixLen+keyDeviceLen:], k.folderIdx.ID(folder)) - return key + binary.BigEndian.PutUint32(key[keyPrefixLen:], deviceID) + binary.BigEndian.PutUint32(key[keyPrefixLen+keyDeviceLen:], folderID) + return key, nil } func (k defaultKeyer) DeviceFromIndexIDKey(key []byte) ([]byte, bool) { @@ -225,20 +257,28 @@ func (k defaultKeyer) DeviceFromIndexIDKey(key []byte) ([]byte, bool) { type mtimesKey []byte -func (k defaultKeyer) GenerateMtimesKey(key, folder []byte) mtimesKey { +func (k defaultKeyer) GenerateMtimesKey(key, folder []byte) (mtimesKey, error) { + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyFolderLen) key[0] = KeyTypeVirtualMtime - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder)) - return key + binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID) + return key, nil } type folderMetaKey []byte -func (k defaultKeyer) GenerateFolderMetaKey(key, folder []byte) folderMetaKey { +func (k defaultKeyer) GenerateFolderMetaKey(key, folder []byte) (folderMetaKey, error) { + folderID, err := k.folderIdx.ID(folder) + if err != nil { + return nil, err + } key = resize(key, keyPrefixLen+keyFolderLen) key[0] = KeyTypeFolderMeta - binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder)) - return key + binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID) + return key, nil } // resize returns a byte slice of the specified size, reusing bs if possible diff --git a/lib/db/keyer_test.go b/lib/db/keyer_test.go index 6ce7c9838..e7e40a50f 100644 --- a/lib/db/keyer_test.go +++ b/lib/db/keyer_test.go @@ -9,6 +9,8 @@ package db import ( "bytes" "testing" + + "github.com/syncthing/syncthing/lib/db/backend" ) func TestDeviceKey(t *testing.T) { @@ -16,9 +18,12 @@ func TestDeviceKey(t *testing.T) { dev := []byte("device67890123456789012345678901") name := []byte("name") - db := newInstance(OpenMemory()) + db := newInstance(NewLowlevel(backend.OpenMemory())) - key := db.keyer.GenerateDeviceFileKey(nil, fld, dev, name) + key, err := db.keyer.GenerateDeviceFileKey(nil, fld, dev, name) + if err != nil { + t.Fatal(err) + } fld2, ok := db.keyer.FolderFromDeviceFileKey(key) if !ok { @@ -44,9 +49,12 @@ func TestGlobalKey(t *testing.T) { fld := []byte("folder6789012345678901234567890123456789012345678901234567890123") name := []byte("name") - db := newInstance(OpenMemory()) + db := 
newInstance(NewLowlevel(backend.OpenMemory())) - key := db.keyer.GenerateGlobalVersionKey(nil, fld, name) + key, err := db.keyer.GenerateGlobalVersionKey(nil, fld, name) + if err != nil { + t.Fatal(err) + } fld2, ok := db.keyer.FolderFromGlobalVersionKey(key) if !ok { @@ -69,10 +77,13 @@ func TestGlobalKey(t *testing.T) { func TestSequenceKey(t *testing.T) { fld := []byte("folder6789012345678901234567890123456789012345678901234567890123") - db := newInstance(OpenMemory()) + db := newInstance(NewLowlevel(backend.OpenMemory())) const seq = 1234567890 - key := db.keyer.GenerateSequenceKey(nil, fld, seq) + key, err := db.keyer.GenerateSequenceKey(nil, fld, seq) + if err != nil { + t.Fatal(err) + } outSeq := db.keyer.SequenceFromSequenceKey(key) if outSeq != seq { t.Errorf("sequence number mangled, %d != %d", outSeq, seq) diff --git a/lib/db/lowlevel.go b/lib/db/lowlevel.go index 4e3bc9ea6..788a77fc6 100644 --- a/lib/db/lowlevel.go +++ b/lib/db/lowlevel.go @@ -7,431 +7,30 @@ package db import ( - "os" - "strconv" - "strings" - "sync" - "sync/atomic" - - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -const ( - dbMaxOpenFiles = 100 - dbFlushBatch = 4 << MiB - - // A large database is > 200 MiB. It's a mostly arbitrary value, but - // it's also the case that each file is 2 MiB by default and when we - // have dbMaxOpenFiles of them we will need to start thrashing fd:s. - // Switching to large database settings causes larger files to be used - // when compacting, reducing the number. - dbLargeThreshold = dbMaxOpenFiles * (2 << MiB) - - KiB = 10 - MiB = 20 -) - -type Tuning int - -const ( - // N.b. these constants must match those in lib/config.Tuning! - TuningAuto Tuning = iota - TuningSmall - TuningLarge + "github.com/syncthing/syncthing/lib/db/backend" ) // Lowlevel is the lowest level database interface. It has a very simple -// purpose: hold the actual *leveldb.DB database, and the in-memory state +// purpose: hold the actual backend database, and the in-memory state // that belong to that database. In the same way that a single on disk // database can only be opened once, there should be only one Lowlevel for -// any given *leveldb.DB. +// any given backend. type Lowlevel struct { - committed int64 // atomic, must come first - *leveldb.DB - location string + backend.Backend folderIdx *smallIndex deviceIdx *smallIndex - closed bool - closeMut *sync.RWMutex - iterWG sync.WaitGroup } -// Open attempts to open the database at the given location, and runs -// recovery on it if opening fails. Worst case, if recovery is not possible, -// the database is erased and created from scratch. -func Open(location string, tuning Tuning) (*Lowlevel, error) { - opts := optsFor(location, tuning) - return open(location, opts) -} - -// optsFor returns the database options to use when opening a database with -// the given location and tuning. Settings can be overridden by debug -// environment variables. 
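Everything deleted from lowlevel.go through this region, the tuning constants, optsFor, and the open and recovery logic, moves out to the new lib/db/backend package, and Lowlevel shrinks to a thin wrapper owning only the folder and device indexes. A minimal sketch, not part of this diff, of what call sites look like afterwards, assuming only identifiers that appear in the test hunks later in this patch (backend.Open, backend.TuningAuto, backend.OpenMemory, db.NewLowlevel):

    // location is the database directory path (illustrative).
    be, err := backend.Open(location, backend.TuningAuto)
    if err != nil {
        return err
    }
    ldb := db.NewLowlevel(be)
    defer ldb.Close()

    // In tests, an in-memory backend takes the same position:
    memDB := db.NewLowlevel(backend.OpenMemory())
    defer memDB.Close()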
-func optsFor(location string, tuning Tuning) *opt.Options { - large := false - switch tuning { - case TuningLarge: - large = true - case TuningAuto: - large = dbIsLarge(location) +// NewLowlevel wraps the given backend.Backend into a *Lowlevel +func NewLowlevel(db backend.Backend) *Lowlevel { + return &Lowlevel{ + Backend: db, + folderIdx: newSmallIndex(db, []byte{KeyTypeFolderIdx}), + deviceIdx: newSmallIndex(db, []byte{KeyTypeDeviceIdx}), } - - var ( - // Set defaults used for small databases. - defaultBlockCacheCapacity = 0 // 0 means let leveldb use default - defaultBlockSize = 0 - defaultCompactionTableSize = 0 - defaultCompactionTableSizeMultiplier = 0 - defaultWriteBuffer = 16 << MiB // increased from leveldb default of 4 MiB - defaultCompactionL0Trigger = opt.DefaultCompactionL0Trigger // explicit because we use it as base for other stuff - ) - - if large { - // Change the parameters for better throughput at the price of some - // RAM and larger files. This results in larger batches of writes - // and compaction at a lower frequency. - l.Infoln("Using large-database tuning") - - defaultBlockCacheCapacity = 64 << MiB - defaultBlockSize = 64 << KiB - defaultCompactionTableSize = 16 << MiB - defaultCompactionTableSizeMultiplier = 20 // 2.0 after division by ten - defaultWriteBuffer = 64 << MiB - defaultCompactionL0Trigger = 8 // number of l0 files - } - - opts := &opt.Options{ - BlockCacheCapacity: debugEnvValue("BlockCacheCapacity", defaultBlockCacheCapacity), - BlockCacheEvictRemoved: debugEnvValue("BlockCacheEvictRemoved", 0) != 0, - BlockRestartInterval: debugEnvValue("BlockRestartInterval", 0), - BlockSize: debugEnvValue("BlockSize", defaultBlockSize), - CompactionExpandLimitFactor: debugEnvValue("CompactionExpandLimitFactor", 0), - CompactionGPOverlapsFactor: debugEnvValue("CompactionGPOverlapsFactor", 0), - CompactionL0Trigger: debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger), - CompactionSourceLimitFactor: debugEnvValue("CompactionSourceLimitFactor", 0), - CompactionTableSize: debugEnvValue("CompactionTableSize", defaultCompactionTableSize), - CompactionTableSizeMultiplier: float64(debugEnvValue("CompactionTableSizeMultiplier", defaultCompactionTableSizeMultiplier)) / 10.0, - CompactionTotalSize: debugEnvValue("CompactionTotalSize", 0), - CompactionTotalSizeMultiplier: float64(debugEnvValue("CompactionTotalSizeMultiplier", 0)) / 10.0, - DisableBufferPool: debugEnvValue("DisableBufferPool", 0) != 0, - DisableBlockCache: debugEnvValue("DisableBlockCache", 0) != 0, - DisableCompactionBackoff: debugEnvValue("DisableCompactionBackoff", 0) != 0, - DisableLargeBatchTransaction: debugEnvValue("DisableLargeBatchTransaction", 0) != 0, - NoSync: debugEnvValue("NoSync", 0) != 0, - NoWriteMerge: debugEnvValue("NoWriteMerge", 0) != 0, - OpenFilesCacheCapacity: debugEnvValue("OpenFilesCacheCapacity", dbMaxOpenFiles), - WriteBuffer: debugEnvValue("WriteBuffer", defaultWriteBuffer), - // The write slowdown and pause can be overridden, but even if they - // are not and the compaction trigger is overridden we need to - // adjust so that we don't pause writes for L0 compaction before we - // even *start* L0 compaction... - WriteL0SlowdownTrigger: debugEnvValue("WriteL0SlowdownTrigger", 2*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)), - WriteL0PauseTrigger: debugEnvValue("WriteL0SlowdownTrigger", 3*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)), - } - - return opts -} - -// OpenRO attempts to open the database at the given location, read only.
-func OpenRO(location string) (*Lowlevel, error) { - opts := &opt.Options{ - OpenFilesCacheCapacity: dbMaxOpenFiles, - ReadOnly: true, - } - return open(location, opts) -} - -func open(location string, opts *opt.Options) (*Lowlevel, error) { - db, err := leveldb.OpenFile(location, opts) - if leveldbIsCorrupted(err) { - db, err = leveldb.RecoverFile(location, opts) - } - if leveldbIsCorrupted(err) { - // The database is corrupted, and we've tried to recover it but it - // didn't work. At this point there isn't much to do beyond dropping - // the database and reindexing... - l.Infoln("Database corruption detected, unable to recover. Reinitializing...") - if err := os.RemoveAll(location); err != nil { - return nil, errorSuggestion{err, "failed to delete corrupted database"} - } - db, err = leveldb.OpenFile(location, opts) - } - if err != nil { - return nil, errorSuggestion{err, "is another instance of Syncthing running?"} - } - - if debugEnvValue("CompactEverything", 0) != 0 { - if err := db.CompactRange(util.Range{}); err != nil { - l.Warnln("Compacting database:", err) - } - } - - return NewLowlevel(db, location), nil -} - -// OpenMemory returns a new Lowlevel referencing an in-memory database. -func OpenMemory() *Lowlevel { - db, _ := leveldb.Open(storage.NewMemStorage(), nil) - return NewLowlevel(db, "") } // ListFolders returns the list of folders currently in the database func (db *Lowlevel) ListFolders() []string { return db.folderIdx.Values() } - -// Committed returns the number of items committed to the database since startup -func (db *Lowlevel) Committed() int64 { - return atomic.LoadInt64(&db.committed) -} - -func (db *Lowlevel) Put(key, val []byte, wo *opt.WriteOptions) error { - db.closeMut.RLock() - defer db.closeMut.RUnlock() - if db.closed { - return leveldb.ErrClosed - } - atomic.AddInt64(&db.committed, 1) - return db.DB.Put(key, val, wo) -} - -func (db *Lowlevel) Write(batch *leveldb.Batch, wo *opt.WriteOptions) error { - db.closeMut.RLock() - defer db.closeMut.RUnlock() - if db.closed { - return leveldb.ErrClosed - } - return db.DB.Write(batch, wo) -} - -func (db *Lowlevel) Delete(key []byte, wo *opt.WriteOptions) error { - db.closeMut.RLock() - defer db.closeMut.RUnlock() - if db.closed { - return leveldb.ErrClosed - } - atomic.AddInt64(&db.committed, 1) - return db.DB.Delete(key, wo) -} - -func (db *Lowlevel) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - return db.newIterator(func() iterator.Iterator { return db.DB.NewIterator(slice, ro) }) -} - -// newIterator returns an iterator created with the given constructor only if db -// is not yet closed. If it is closed, a closedIter is returned instead. -func (db *Lowlevel) newIterator(constr func() iterator.Iterator) iterator.Iterator { - db.closeMut.RLock() - defer db.closeMut.RUnlock() - if db.closed { - return &closedIter{} - } - db.iterWG.Add(1) - return &iter{ - Iterator: constr(), - db: db, - } -} - -func (db *Lowlevel) GetSnapshot() snapshot { - s, err := db.DB.GetSnapshot() - if err != nil { - if err == leveldb.ErrClosed { - return &closedSnap{} - } - panic(err) - } - return &snap{ - Snapshot: s, - db: db, - } -} - -func (db *Lowlevel) Close() { - db.closeMut.Lock() - if db.closed { - db.closeMut.Unlock() - return - } - db.closed = true - db.closeMut.Unlock() - db.iterWG.Wait() - db.DB.Close() -} - -// dbIsLarge returns whether the estimated size of the database at location -// is large enough to warrant optimization for large databases. 
-func dbIsLarge(location string) bool { - if ^uint(0)>>63 == 0 { - // We're compiled for a 32 bit architecture. We've seen trouble with - // large settings there. - // (https://forum.syncthing.net/t/many-small-ldb-files-with-database-tuning/13842) - return false - } - - dir, err := os.Open(location) - if err != nil { - return false - } - - fis, err := dir.Readdir(-1) - if err != nil { - return false - } - - var size int64 - for _, fi := range fis { - if fi.Name() == "LOG" { - // don't count the size - continue - } - size += fi.Size() - } - - return size > dbLargeThreshold -} - -// NewLowlevel wraps the given *leveldb.DB into a *lowlevel -func NewLowlevel(db *leveldb.DB, location string) *Lowlevel { - return &Lowlevel{ - DB: db, - location: location, - folderIdx: newSmallIndex(db, []byte{KeyTypeFolderIdx}), - deviceIdx: newSmallIndex(db, []byte{KeyTypeDeviceIdx}), - closeMut: &sync.RWMutex{}, - iterWG: sync.WaitGroup{}, - } -} - -// A "better" version of leveldb's errors.IsCorrupted. -func leveldbIsCorrupted(err error) bool { - switch { - case err == nil: - return false - - case errors.IsCorrupted(err): - return true - - case strings.Contains(err.Error(), "corrupted"): - return true - } - - return false -} - -type batch struct { - *leveldb.Batch - db *Lowlevel -} - -func (db *Lowlevel) newBatch() *batch { - return &batch{ - Batch: new(leveldb.Batch), - db: db, - } -} - -// checkFlush flushes and resets the batch if its size exceeds dbFlushBatch. -func (b *batch) checkFlush() { - if len(b.Dump()) > dbFlushBatch { - b.flush() - b.Reset() - } -} - -func (b *batch) flush() { - if err := b.db.Write(b.Batch, nil); err != nil && err != leveldb.ErrClosed { - panic(err) - } -} - -type closedIter struct{} - -func (it *closedIter) Release() {} -func (it *closedIter) Key() []byte { return nil } -func (it *closedIter) Value() []byte { return nil } -func (it *closedIter) Next() bool { return false } -func (it *closedIter) Prev() bool { return false } -func (it *closedIter) First() bool { return false } -func (it *closedIter) Last() bool { return false } -func (it *closedIter) Seek(key []byte) bool { return false } -func (it *closedIter) Valid() bool { return false } -func (it *closedIter) Error() error { return leveldb.ErrClosed } -func (it *closedIter) SetReleaser(releaser util.Releaser) {} - -type snapshot interface { - Get([]byte, *opt.ReadOptions) ([]byte, error) - Has([]byte, *opt.ReadOptions) (bool, error) - NewIterator(*util.Range, *opt.ReadOptions) iterator.Iterator - Release() -} - -type closedSnap struct{} - -func (s *closedSnap) Get([]byte, *opt.ReadOptions) ([]byte, error) { return nil, leveldb.ErrClosed } -func (s *closedSnap) Has([]byte, *opt.ReadOptions) (bool, error) { return false, leveldb.ErrClosed } -func (s *closedSnap) NewIterator(*util.Range, *opt.ReadOptions) iterator.Iterator { - return &closedIter{} -} -func (s *closedSnap) Release() {} - -type snap struct { - *leveldb.Snapshot - db *Lowlevel -} - -func (s *snap) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - return s.db.newIterator(func() iterator.Iterator { return s.Snapshot.NewIterator(slice, ro) }) -} - -// iter implements iterator.Iterator which allows tracking active iterators -// and aborts if the underlying database is being closed. 
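The close-tracking machinery deleted through here, closeMut, iterWG, closedIter, closedSnap, and the iter wrapper below, existed so that operations racing a Close would fail soft rather than crash. Its replacement is a contract rather than a wrapper: the backend reports closure through its errors, and callers test for it with backend.IsClosed. A sketch of the resulting call-site shape, modelled on the FileSet.Availability wrapper further down in this diff:

    av, err := s.db.availability(folder, file)
    if backend.IsClosed(err) {
        return nil // database closed under us; report nothing
    } else if err != nil {
        panic(err) // any other backend error is fatal at this layer
    }
    return av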
-type iter struct { - iterator.Iterator - db *Lowlevel -} - -func (it *iter) Release() { - it.db.iterWG.Done() - it.Iterator.Release() -} - -func (it *iter) Next() bool { - return it.execIfNotClosed(it.Iterator.Next) -} -func (it *iter) Prev() bool { - return it.execIfNotClosed(it.Iterator.Prev) -} -func (it *iter) First() bool { - return it.execIfNotClosed(it.Iterator.First) -} -func (it *iter) Last() bool { - return it.execIfNotClosed(it.Iterator.Last) -} -func (it *iter) Seek(key []byte) bool { - return it.execIfNotClosed(func() bool { - return it.Iterator.Seek(key) - }) -} - -func (it *iter) execIfNotClosed(fn func() bool) bool { - it.db.closeMut.RLock() - defer it.db.closeMut.RUnlock() - if it.db.closed { - return false - } - return fn() -} - -func debugEnvValue(key string, def int) int { - v, err := strconv.ParseInt(os.Getenv("STDEBUG_"+key), 10, 63) - if err != nil { - return def - } - return int(v) -} diff --git a/lib/db/meta.go b/lib/db/meta.go index 2db56960e..1d1252fb3 100644 --- a/lib/db/meta.go +++ b/lib/db/meta.go @@ -57,7 +57,10 @@ func (m *metadataTracker) Marshal() ([]byte, error) { // toDB saves the marshalled metadataTracker to the given db, under the key // corresponding to the given folder func (m *metadataTracker) toDB(db *instance, folder []byte) error { - key := db.keyer.GenerateFolderMetaKey(nil, folder) + key, err := db.keyer.GenerateFolderMetaKey(nil, folder) + if err != nil { + return err + } m.mut.RLock() defer m.mut.RUnlock() @@ -70,7 +73,7 @@ func (m *metadataTracker) toDB(db *instance, folder []byte) error { if err != nil { return err } - err = db.Put(key, bs, nil) + err = db.Put(key, bs) if err == nil { m.dirty = false } @@ -81,8 +84,11 @@ func (m *metadataTracker) toDB(db *instance, folder []byte) error { // fromDB initializes the metadataTracker from the marshalled data found in // the database under the key corresponding to the given folder func (m *metadataTracker) fromDB(db *instance, folder []byte) error { - key := db.keyer.GenerateFolderMetaKey(nil, folder) - bs, err := db.Get(key, nil) + key, err := db.keyer.GenerateFolderMetaKey(nil, folder) + if err != nil { + return err + } + bs, err := db.Get(key) if err != nil { return err } diff --git a/lib/db/namespaced.go b/lib/db/namespaced.go index 5cbe720bb..30a888fa0 100644 --- a/lib/db/namespaced.go +++ b/lib/db/namespaced.go @@ -9,8 +9,6 @@ package db import ( "encoding/binary" "time" - - "github.com/syndtr/goleveldb/leveldb/util" ) // NamespacedKV is a simple key-value store using a specific namespace within @@ -34,30 +32,18 @@ func NewNamespacedKV(db *Lowlevel, prefix string) *NamespacedKV { } } -// Reset removes all entries in this namespace. -func (n *NamespacedKV) Reset() { - it := n.db.NewIterator(util.BytesPrefix(n.prefix), nil) - defer it.Release() - batch := n.db.newBatch() - for it.Next() { - batch.Delete(it.Key()) - batch.checkFlush() - } - batch.flush() -} - // PutInt64 stores a new int64. Any existing value (even if of another type) // is overwritten. -func (n *NamespacedKV) PutInt64(key string, val int64) { +func (n *NamespacedKV) PutInt64(key string, val int64) error { var valBs [8]byte binary.BigEndian.PutUint64(valBs[:], uint64(val)) - n.db.Put(n.prefixedKey(key), valBs[:], nil) + return n.db.Put(n.prefixedKey(key), valBs[:]) } // Int64 returns the stored value interpreted as an int64 and a boolean that // is false if no value was stored at the key. 
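NamespacedKV settles into a consistent split: every mutator (PutInt64, PutTime, PutString, PutBytes, PutBool, Delete) now returns the backend error instead of discarding it, while the getters, like Int64 just below, keep their (value, ok) shape and fold any read failure, not-found included, into ok == false. Call sites change accordingly, as the namespaced_test.go hunks later in this diff show; in miniature, with use as a stand-in for whatever consumes the value:

    kv := NewNamespacedKV(ldb, "example")
    if err := kv.PutInt64("counter", 42); err != nil {
        return err // surfaced backend failure, e.g. database closed
    }
    if v, ok := kv.Int64("counter"); ok {
        use(v) // use is hypothetical, not part of this change
    }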
func (n *NamespacedKV) Int64(key string) (int64, bool) { - valBs, err := n.db.Get(n.prefixedKey(key), nil) + valBs, err := n.db.Get(n.prefixedKey(key)) if err != nil { return 0, false } @@ -67,16 +53,16 @@ func (n *NamespacedKV) Int64(key string) (int64, bool) { // PutTime stores a new time.Time. Any existing value (even if of another // type) is overwritten. -func (n *NamespacedKV) PutTime(key string, val time.Time) { +func (n *NamespacedKV) PutTime(key string, val time.Time) error { valBs, _ := val.MarshalBinary() // never returns an error - n.db.Put(n.prefixedKey(key), valBs, nil) + return n.db.Put(n.prefixedKey(key), valBs) } // Time returns the stored value interpreted as a time.Time and a boolean // that is false if no value was stored at the key. func (n NamespacedKV) Time(key string) (time.Time, bool) { var t time.Time - valBs, err := n.db.Get(n.prefixedKey(key), nil) + valBs, err := n.db.Get(n.prefixedKey(key)) if err != nil { return t, false } @@ -86,14 +72,14 @@ func (n NamespacedKV) Time(key string) (time.Time, bool) { // PutString stores a new string. Any existing value (even if of another type) // is overwritten. -func (n *NamespacedKV) PutString(key, val string) { - n.db.Put(n.prefixedKey(key), []byte(val), nil) +func (n *NamespacedKV) PutString(key, val string) error { + return n.db.Put(n.prefixedKey(key), []byte(val)) } // String returns the stored value interpreted as a string and a boolean that // is false if no value was stored at the key. func (n NamespacedKV) String(key string) (string, bool) { - valBs, err := n.db.Get(n.prefixedKey(key), nil) + valBs, err := n.db.Get(n.prefixedKey(key)) if err != nil { return "", false } @@ -102,14 +88,14 @@ func (n NamespacedKV) String(key string) (string, bool) { // PutBytes stores a new byte slice. Any existing value (even if of another type) // is overwritten. -func (n *NamespacedKV) PutBytes(key string, val []byte) { - n.db.Put(n.prefixedKey(key), val, nil) +func (n *NamespacedKV) PutBytes(key string, val []byte) error { + return n.db.Put(n.prefixedKey(key), val) } // Bytes returns the stored value as a raw byte slice and a boolean that // is false if no value was stored at the key. func (n NamespacedKV) Bytes(key string) ([]byte, bool) { - valBs, err := n.db.Get(n.prefixedKey(key), nil) + valBs, err := n.db.Get(n.prefixedKey(key)) if err != nil { return nil, false } @@ -118,18 +104,17 @@ func (n NamespacedKV) Bytes(key string) ([]byte, bool) { // PutBool stores a new boolean. Any existing value (even if of another type) // is overwritten. -func (n *NamespacedKV) PutBool(key string, val bool) { +func (n *NamespacedKV) PutBool(key string, val bool) error { if val { - n.db.Put(n.prefixedKey(key), []byte{0x0}, nil) - } else { - n.db.Put(n.prefixedKey(key), []byte{0x1}, nil) + return n.db.Put(n.prefixedKey(key), []byte{0x0}) } + return n.db.Put(n.prefixedKey(key), []byte{0x1}) } // Bool returns the stored value as a boolean and a boolean that // is false if no value was stored at the key. func (n NamespacedKV) Bool(key string) (bool, bool) { - valBs, err := n.db.Get(n.prefixedKey(key), nil) + valBs, err := n.db.Get(n.prefixedKey(key)) if err != nil { return false, false } @@ -138,8 +123,8 @@ func (n NamespacedKV) Bool(key string) (bool, bool) { // Delete deletes the specified key. It is allowed to delete a nonexistent // key. 
-func (n NamespacedKV) Delete(key string) { - n.db.Delete(n.prefixedKey(key), nil) +func (n NamespacedKV) Delete(key string) error { + return n.db.Delete(n.prefixedKey(key)) } func (n NamespacedKV) prefixedKey(key string) []byte { diff --git a/lib/db/namespaced_test.go b/lib/db/namespaced_test.go index 3b91b35ed..71cc58415 100644 --- a/lib/db/namespaced_test.go +++ b/lib/db/namespaced_test.go @@ -9,10 +9,12 @@ package db import ( "testing" "time" + + "github.com/syncthing/syncthing/lib/db/backend" ) func TestNamespacedInt(t *testing.T) { - ldb := OpenMemory() + ldb := NewLowlevel(backend.OpenMemory()) n1 := NewNamespacedKV(ldb, "foo") n2 := NewNamespacedKV(ldb, "bar") @@ -23,7 +25,9 @@ func TestNamespacedInt(t *testing.T) { t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok) } - n1.PutInt64("test", 42) + if err := n1.PutInt64("test", 42); err != nil { + t.Fatal(err) + } // It should now exist in n1 @@ -37,7 +41,9 @@ func TestNamespacedInt(t *testing.T) { t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok) } - n1.Delete("test") + if err := n1.Delete("test"); err != nil { + t.Fatal(err) + } // It should no longer exist @@ -47,7 +53,7 @@ func TestNamespacedInt(t *testing.T) { } func TestNamespacedTime(t *testing.T) { - ldb := OpenMemory() + ldb := NewLowlevel(backend.OpenMemory()) n1 := NewNamespacedKV(ldb, "foo") @@ -56,7 +62,9 @@ func TestNamespacedTime(t *testing.T) { } now := time.Now() - n1.PutTime("test", now) + if err := n1.PutTime("test", now); err != nil { + t.Fatal(err) + } if v, ok := n1.Time("test"); !v.Equal(now) || !ok { t.Errorf("Incorrect return v %v != %v || ok %v != true", v, now, ok) @@ -64,7 +72,7 @@ func TestNamespacedTime(t *testing.T) { } func TestNamespacedString(t *testing.T) { - ldb := OpenMemory() + ldb := NewLowlevel(backend.OpenMemory()) n1 := NewNamespacedKV(ldb, "foo") @@ -72,7 +80,9 @@ func TestNamespacedString(t *testing.T) { t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok) } - n1.PutString("test", "yo") + if err := n1.PutString("test", "yo"); err != nil { + t.Fatal(err) + } if v, ok := n1.String("test"); v != "yo" || !ok { t.Errorf("Incorrect return v %q != \"yo\" || ok %v != true", v, ok) @@ -80,13 +90,19 @@ func TestNamespacedString(t *testing.T) { } func TestNamespacedReset(t *testing.T) { - ldb := OpenMemory() + ldb := NewLowlevel(backend.OpenMemory()) n1 := NewNamespacedKV(ldb, "foo") - n1.PutString("test1", "yo1") - n1.PutString("test2", "yo2") - n1.PutString("test3", "yo3") + if err := n1.PutString("test1", "yo1"); err != nil { + t.Fatal(err) + } + if err := n1.PutString("test2", "yo2"); err != nil { + t.Fatal(err) + } + if err := n1.PutString("test3", "yo3"); err != nil { + t.Fatal(err) + } if v, ok := n1.String("test1"); v != "yo1" || !ok { t.Errorf("Incorrect return v %q != \"yo1\" || ok %v != true", v, ok) @@ -98,7 +114,7 @@ func TestNamespacedReset(t *testing.T) { t.Errorf("Incorrect return v %q != \"yo3\" || ok %v != true", v, ok) } - n1.Reset() + reset(n1) if v, ok := n1.String("test1"); v != "" || ok { t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok) @@ -110,3 +126,22 @@ func TestNamespacedReset(t *testing.T) { t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok) } } + +// reset removes all entries in this namespace. 
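+// It replaces the NamespacedKV.Reset method removed above, which had no
+// remaining non-test users: one write transaction, one prefix iterator,
+// delete everything the iterator yields, then commit. Errors are ignored,
+// which is tolerable only in a test helper; a failed sweep just leaves
+// keys behind for the assertions that follow to catch.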
+func reset(n *NamespacedKV) { + tr, err := n.db.NewWriteTransaction() + if err != nil { + return + } + defer tr.Release() + + it, err := tr.NewPrefixIterator(n.prefix) + if err != nil { + return + } + for it.Next() { + _ = tr.Delete(it.Key()) + } + it.Release() + _ = tr.Commit() +} diff --git a/lib/db/schemaupdater.go b/lib/db/schemaupdater.go index ccc65254d..6e1f6864f 100644 --- a/lib/db/schemaupdater.go +++ b/lib/db/schemaupdater.go @@ -11,7 +11,6 @@ import ( "strings" "github.com/syncthing/syncthing/lib/protocol" - "github.com/syndtr/goleveldb/leveldb/util" ) // List of all dbVersion to dbMinSyncthingVersion pairs for convenience @@ -65,36 +64,58 @@ func (db *schemaUpdater) updateSchema() error { } if prevVersion < 1 { - db.updateSchema0to1() + if err := db.updateSchema0to1(); err != nil { + return err + } } if prevVersion < 2 { - db.updateSchema1to2() + if err := db.updateSchema1to2(); err != nil { + return err + } } if prevVersion < 3 { - db.updateSchema2to3() + if err := db.updateSchema2to3(); err != nil { + return err + } } // This update fixes problems existing in versions 3 and 4 if prevVersion == 3 || prevVersion == 4 { - db.updateSchemaTo5() + if err := db.updateSchemaTo5(); err != nil { + return err + } } if prevVersion < 6 { - db.updateSchema5to6() + if err := db.updateSchema5to6(); err != nil { + return err + } } if prevVersion < 7 { - db.updateSchema6to7() + if err := db.updateSchema6to7(); err != nil { + return err + } } - miscDB.PutInt64("dbVersion", dbVersion) - miscDB.PutString("dbMinSyncthingVersion", dbMinSyncthingVersion) + if err := miscDB.PutInt64("dbVersion", dbVersion); err != nil { + return err + } + if err := miscDB.PutString("dbMinSyncthingVersion", dbMinSyncthingVersion); err != nil { + return err + } return nil } -func (db *schemaUpdater) updateSchema0to1() { - t := db.newReadWriteTransaction() +func (db *schemaUpdater) updateSchema0to1() error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() - dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeDevice}), nil) + dbi, err := t.NewPrefixIterator([]byte{KeyTypeDevice}) + if err != nil { + return err + } defer dbi.Release() symlinkConv := 0 @@ -104,18 +125,20 @@ func (db *schemaUpdater) updateSchema0to1() { var gk, buf []byte for dbi.Next() { - t.checkFlush() - folder, ok := db.keyer.FolderFromDeviceFileKey(dbi.Key()) if !ok { // not having the folder in the index is bad; delete and continue - t.Delete(dbi.Key()) + if err := t.Delete(dbi.Key()); err != nil { + return err + } continue } device, ok := db.keyer.DeviceFromDeviceFileKey(dbi.Key()) if !ok { // not having the device in the index is bad; delete and continue - t.Delete(dbi.Key()) + if err := t.Delete(dbi.Key()); err != nil { + return err + } continue } name := db.keyer.NameFromDeviceFileKey(dbi.Key()) @@ -125,9 +148,17 @@ func (db *schemaUpdater) updateSchema0to1() { if _, ok := changedFolders[string(folder)]; !ok { changedFolders[string(folder)] = struct{}{} } - gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name) - buf = t.removeFromGlobal(gk, buf, folder, device, nil, nil) - t.Delete(dbi.Key()) + gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name) + if err != nil { + return err + } + buf, err = t.removeFromGlobal(gk, buf, folder, device, nil, nil) + if err != nil { + return err + } + if err := t.Delete(dbi.Key()); err != nil { + return err + } continue } @@ -147,14 +178,21 @@ func (db *schemaUpdater) updateSchema0to1() { if err != nil { panic("can't happen: " + err.Error()) } - 
t.Put(dbi.Key(), bs) + if err := t.Put(dbi.Key(), bs); err != nil { + return err + } symlinkConv++ } // Add invalid files to global list if f.IsInvalid() { - gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name) - if buf, ok = t.updateGlobal(gk, buf, folder, device, f, meta); ok { + gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name) + if err != nil { + return err + } + if buf, ok, err = t.updateGlobal(gk, buf, folder, device, f, meta); err != nil { + return err + } else if ok { if _, ok = changedFolders[string(folder)]; !ok { changedFolders[string(folder)] = struct{}{} } @@ -164,86 +202,139 @@ func (db *schemaUpdater) updateSchema0to1() { } for folder := range changedFolders { - db.dropFolderMeta([]byte(folder)) + if err := db.dropFolderMeta([]byte(folder)); err != nil { + return err + } } + return t.commit() } // updateSchema1to2 introduces a sequenceKey->deviceKey bucket for local items // to allow iteration in sequence order (simplifies sending indexes). -func (db *schemaUpdater) updateSchema1to2() { - t := db.newReadWriteTransaction() +func (db *schemaUpdater) updateSchema1to2() error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var sk []byte var dk []byte for _, folderStr := range db.ListFolders() { folder := []byte(folderStr) - db.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool { - sk = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo()) - dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(f.FileName())) - t.Put(sk, dk) - t.checkFlush() - return true + var putErr error + err := db.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool { + sk, putErr = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo()) + if putErr != nil { + return false + } + dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(f.FileName())) + if putErr != nil { + return false + } + putErr = t.Put(sk, dk) + return putErr == nil }) + if putErr != nil { + return putErr + } + if err != nil { + return err + } } + return t.commit() } // updateSchema2to3 introduces a needKey->nil bucket for locally needed files. 
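updateSchema1to2 above introduces the error-plumbing idiom that the remaining migrations repeat: the Iterator callback only returns a bool, so an error raised inside it is captured in a closure variable that both ends the iteration early and gets checked afterwards, separately from the iteration's own error. The skeleton, with processFile standing in for the hypothetical per-file work:

    var cbErr error
    err := db.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool {
        cbErr = processFile(f) // processFile is a stand-in, not part of this change
        return cbErr == nil    // returning false stops the iteration early
    })
    if cbErr != nil {
        return cbErr // error raised inside the callback
    }
    if err != nil {
        return err // error from the iteration machinery itself
    }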
-func (db *schemaUpdater) updateSchema2to3() { - t := db.newReadWriteTransaction() +func (db *schemaUpdater) updateSchema2to3() error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var nk []byte var dk []byte for _, folderStr := range db.ListFolders() { folder := []byte(folderStr) - db.withGlobal(folder, nil, true, func(f FileIntf) bool { + var putErr error + err := db.withGlobal(folder, nil, true, func(f FileIntf) bool { name := []byte(f.FileName()) - dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name) + dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name) + if putErr != nil { + return false + } var v protocol.Vector - haveFile, ok := t.getFileTrunc(dk, true) + haveFile, ok, err := t.getFileTrunc(dk, true) + if err != nil { + putErr = err + return false + } if ok { v = haveFile.FileVersion() } if !need(f, ok, v) { return true } - nk = t.keyer.GenerateNeedFileKey(nk, folder, []byte(f.FileName())) - t.Put(nk, nil) - t.checkFlush() - return true + nk, putErr = t.keyer.GenerateNeedFileKey(nk, folder, []byte(f.FileName())) + if putErr != nil { + return false + } + putErr = t.Put(nk, nil) + return putErr == nil }) + if putErr != nil { + return putErr + } + if err != nil { + return err + } } + return t.commit() } // updateSchemaTo5 resets the need bucket due to bugs existing in the v0.14.49 // release candidates (dbVersion 3 and 4) // https://github.com/syncthing/syncthing/issues/5007 // https://github.com/syncthing/syncthing/issues/5053 -func (db *schemaUpdater) updateSchemaTo5() { - t := db.newReadWriteTransaction() +func (db *schemaUpdater) updateSchemaTo5() error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } var nk []byte for _, folderStr := range db.ListFolders() { - nk = db.keyer.GenerateNeedFileKey(nk, []byte(folderStr), nil) - t.deleteKeyPrefix(nk[:keyPrefixLen+keyFolderLen]) + nk, err = db.keyer.GenerateNeedFileKey(nk, []byte(folderStr), nil) + if err != nil { + return err + } + if err := t.deleteKeyPrefix(nk[:keyPrefixLen+keyFolderLen]); err != nil { + return err + } + } + if err := t.commit(); err != nil { + return err } - t.close() - db.updateSchema2to3() + return db.updateSchema2to3() } -func (db *schemaUpdater) updateSchema5to6() { +func (db *schemaUpdater) updateSchema5to6() error { // For every local file with the Invalid bit set, clear the Invalid bit and // set LocalFlags = FlagLocalIgnored. 
- t := db.newReadWriteTransaction() + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var dk []byte for _, folderStr := range db.ListFolders() { folder := []byte(folderStr) - db.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f FileIntf) bool { + var putErr error + err := db.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f FileIntf) bool { if !f.IsInvalid() { return true } @@ -253,19 +344,31 @@ func (db *schemaUpdater) updateSchema5to6() { fi.LocalFlags = protocol.FlagLocalIgnored bs, _ := fi.Marshal() - dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(fi.Name)) - t.Put(dk, bs) + dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(fi.Name)) + if putErr != nil { + return false + } + putErr = t.Put(dk, bs) - t.checkFlush() - return true + return putErr == nil }) + if putErr != nil { + return putErr + } + if err != nil { + return err + } } + return t.commit() } // updateSchema6to7 checks whether all currently locally needed files are really // needed and removes them if not. -func (db *schemaUpdater) updateSchema6to7() { - t := db.newReadWriteTransaction() +func (db *schemaUpdater) updateSchema6to7() error { + t, err := db.newReadWriteTransaction() + if err != nil { + return err + } defer t.close() var gk []byte @@ -273,15 +376,24 @@ func (db *schemaUpdater) updateSchema6to7() { for _, folderStr := range db.ListFolders() { folder := []byte(folderStr) - db.withNeedLocal(folder, false, func(f FileIntf) bool { + var delErr error + err := db.withNeedLocal(folder, false, func(f FileIntf) bool { name := []byte(f.FileName()) global := f.(protocol.FileInfo) - gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name) - svl, err := t.Get(gk, nil) + gk, delErr = db.keyer.GenerateGlobalVersionKey(gk, folder, name) + if delErr != nil { + return false + } + svl, err := t.Get(gk) if err != nil { // If there is no global list, we hardly need it. 
- t.Delete(t.keyer.GenerateNeedFileKey(nk, folder, name)) - return true + key, err := t.keyer.GenerateNeedFileKey(nk, folder, name) + if err != nil { + delErr = err + return false + } + delErr = t.Delete(key) + return delErr == nil } var fl VersionList err = fl.Unmarshal(svl) @@ -291,9 +403,18 @@ return true } if localFV, haveLocalFV := fl.Get(protocol.LocalDeviceID[:]); !need(global, haveLocalFV, localFV.Version) { - t.Delete(t.keyer.GenerateNeedFileKey(nk, folder, name)) + key, err := t.keyer.GenerateNeedFileKey(nk, folder, name) + if err != nil { + delErr = err + return false + } + delErr = t.Delete(key) } - return true + return delErr == nil }) + if delErr != nil { + return delErr + } + if err != nil { + return err + } } + return t.commit() } diff --git a/lib/db/set.go b/lib/db/set.go index 3b3a9dbdd..2f7b75fcf 100644 --- a/lib/db/set.go +++ b/lib/db/set.go @@ -16,11 +16,11 @@ import ( "os" "time" + "github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/osutil" "github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/sync" - "github.com/syndtr/goleveldb/leveldb/util" ) type FileSet struct { @@ -83,29 +83,42 @@ func NewFileSet(folder string, fs fs.Filesystem, ll *Lowlevel) *FileSet { if err := s.meta.fromDB(db, []byte(folder)); err != nil { l.Infof("No stored folder metadata for %q: recalculating", folder) - s.recalcCounts() + if err := s.recalcCounts(); backend.IsClosed(err) { + return nil + } else if err != nil { + panic(err) + } } else if age := time.Since(s.meta.Created()); age > databaseRecheckInterval { l.Infof("Stored folder metadata for %q is %v old; recalculating", folder, age) - s.recalcCounts() + if err := s.recalcCounts(); backend.IsClosed(err) { + return nil + } else if err != nil { + panic(err) + } } return &s } -func (s *FileSet) recalcCounts() { +func (s *FileSet) recalcCounts() error { s.meta = newMetadataTracker() - s.db.checkGlobals([]byte(s.folder), s.meta) + if err := s.db.checkGlobals([]byte(s.folder), s.meta); err != nil { + return err + } var deviceID protocol.DeviceID - s.db.withAllFolderTruncated([]byte(s.folder), func(device []byte, f FileInfoTruncated) bool { + err := s.db.withAllFolderTruncated([]byte(s.folder), func(device []byte, f FileInfoTruncated) bool { copy(deviceID[:], device) s.meta.addFile(deviceID, f) return true }) + if err != nil { + return err + } s.meta.SetCreated() - s.meta.toDB(s.db, []byte(s.folder)) + return s.meta.toDB(s.db, []byte(s.folder)) } func (s *FileSet) Drop(device protocol.DeviceID) { @@ -114,7 +127,11 @@ func (s *FileSet) Drop(device protocol.DeviceID) { s.updateMutex.Lock() defer s.updateMutex.Unlock() - s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta) + if err := s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta); backend.IsClosed(err) { + return + } else if err != nil { + panic(err) + } if device == protocol.LocalDeviceID { s.meta.resetCounts(device) @@ -131,7 +148,11 @@ func (s *FileSet) Drop(device protocol.DeviceID) { s.meta.resetAll(device) } - s.meta.toDB(s.db, []byte(s.folder)) + if err := s.meta.toDB(s.db, []byte(s.folder)); backend.IsClosed(err) { + return + } else if err != nil { + panic(err) + } } func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) { @@ -145,73 +166,110 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) { s.updateMutex.Lock() defer s.updateMutex.Unlock() - defer s.meta.toDB(s.db, []byte(s.folder)) + defer func() { + if err :=
s.meta.toDB(s.db, []byte(s.folder)); err != nil && !backend.IsClosed(err) { + panic(err) + } + }() if device == protocol.LocalDeviceID { // For the local device we have a bunch of metadata to track. - s.db.updateLocalFiles([]byte(s.folder), fs, s.meta) + if err := s.db.updateLocalFiles([]byte(s.folder), fs, s.meta); err != nil && !backend.IsClosed(err) { + panic(err) + } return } // Easy case, just update the files and we're done. - s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta) + if err := s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) WithNeed(device protocol.DeviceID, fn Iterator) { l.Debugf("%s WithNeed(%v)", s.folder, device) - s.db.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)) + if err := s.db.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) WithNeedTruncated(device protocol.DeviceID, fn Iterator) { l.Debugf("%s WithNeedTruncated(%v)", s.folder, device) - s.db.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)) + if err := s.db.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) WithHave(device protocol.DeviceID, fn Iterator) { l.Debugf("%s WithHave(%v)", s.folder, device) - s.db.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)) + if err := s.db.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) WithHaveTruncated(device protocol.DeviceID, fn Iterator) { l.Debugf("%s WithHaveTruncated(%v)", s.folder, device) - s.db.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)) + if err := s.db.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) WithHaveSequence(startSeq int64, fn Iterator) { l.Debugf("%s WithHaveSequence(%v)", s.folder, startSeq) - s.db.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)) + if err := s.db.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } // Except for an item with a path equal to prefix, only children of prefix are iterated. // E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not. 
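Every FileSet iteration wrapper in this hunk applies the same policy when the instance layer reports an error: a closed database turns the call into a no-op, and anything else panics, on the theory that a broken database at this level is unrecoverable. Named as a helper purely for illustration:

    // fatalUnlessClosed is hypothetical, not part of this change; it only
    // names the pattern inlined at each FileSet call site.
    func fatalUnlessClosed(err error) {
        if err != nil && !backend.IsClosed(err) {
            panic(err)
        }
    }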
func (s *FileSet) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix string, fn Iterator) { l.Debugf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix) - s.db.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)) + if err := s.db.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } + func (s *FileSet) WithGlobal(fn Iterator) { l.Debugf("%s WithGlobal()", s.folder) - s.db.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)) + if err := s.db.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) WithGlobalTruncated(fn Iterator) { l.Debugf("%s WithGlobalTruncated()", s.folder) - s.db.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)) + if err := s.db.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } // Except for an item with a path equal to prefix, only children of prefix are iterated. // E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not. func (s *FileSet) WithPrefixedGlobalTruncated(prefix string, fn Iterator) { l.Debugf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix) - s.db.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)) + if err := s.db.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) { - f, ok := s.db.getFileDirty([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file))) + f, ok, err := s.db.getFileDirty([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file))) + if backend.IsClosed(err) { + return protocol.FileInfo{}, false + } else if err != nil { + panic(err) + } f.Name = osutil.NativeFilename(f.Name) return f, ok } func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) { - fi, ok := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), false) + fi, ok, err := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), false) + if backend.IsClosed(err) { + return protocol.FileInfo{}, false + } else if err != nil { + panic(err) + } if !ok { return protocol.FileInfo{}, false } @@ -221,7 +279,12 @@ func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) { } func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) { - fi, ok := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), true) + fi, ok, err := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), true) + if backend.IsClosed(err) { + return FileInfoTruncated{}, false + } else if err != nil { + panic(err) + } if !ok { return FileInfoTruncated{}, false } @@ -231,7 +294,13 @@ func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) { } func (s *FileSet) Availability(file string) []protocol.DeviceID { - return s.db.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file))) + av, err := s.db.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file))) + if backend.IsClosed(err) { + return nil + } else if err != nil { + panic(err) + } + return av } func (s *FileSet) Sequence(device 
protocol.DeviceID) int64 { @@ -255,11 +324,21 @@ func (s *FileSet) GlobalSize() Counts { } func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID { - id := s.db.getIndexID(device[:], []byte(s.folder)) + id, err := s.db.getIndexID(device[:], []byte(s.folder)) + if backend.IsClosed(err) { + return 0 + } else if err != nil { + panic(err) + } if id == 0 && device == protocol.LocalDeviceID { // No index ID set yet. We create one now. id = protocol.NewIndexID() - s.db.setIndexID(device[:], []byte(s.folder), id) + err := s.db.setIndexID(device[:], []byte(s.folder), id) + if backend.IsClosed(err) { + return 0 + } else if err != nil { + panic(err) + } } return id } @@ -268,11 +347,18 @@ func (s *FileSet) SetIndexID(device protocol.DeviceID, id protocol.IndexID) { if device == protocol.LocalDeviceID { panic("do not explicitly set index ID for local device") } - s.db.setIndexID(device[:], []byte(s.folder), id) + if err := s.db.setIndexID(device[:], []byte(s.folder), id); err != nil && !backend.IsClosed(err) { + panic(err) + } } func (s *FileSet) MtimeFS() *fs.MtimeFS { - prefix := s.db.keyer.GenerateMtimesKey(nil, []byte(s.folder)) + prefix, err := s.db.keyer.GenerateMtimesKey(nil, []byte(s.folder)) + if backend.IsClosed(err) { + return nil + } else if err != nil { + panic(err) + } kv := NewNamespacedKV(s.db.Lowlevel, string(prefix)) return fs.NewMtimeFS(s.fs, kv) } @@ -285,21 +371,39 @@ func (s *FileSet) ListDevices() []protocol.DeviceID { // database. func DropFolder(ll *Lowlevel, folder string) { db := newInstance(ll) - db.dropFolder([]byte(folder)) - db.dropMtimes([]byte(folder)) - db.dropFolderMeta([]byte(folder)) - // Also clean out the folder ID mapping. - db.folderIdx.Delete([]byte(folder)) + droppers := []func([]byte) error{ + db.dropFolder, + db.dropMtimes, + db.dropFolderMeta, + db.folderIdx.Delete, + } + for _, drop := range droppers { + if err := drop([]byte(folder)); backend.IsClosed(err) { + return + } else if err != nil { + panic(err) + } + } } // DropDeltaIndexIDs removes all delta index IDs from the database. // This will cause a full index transmission on the next connection. 
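DropFolder above trades four sequential drop calls for a table of method values. A Go method value binds its receiver, so each entry is a self-contained func([]byte) error already carrying db, and the closed-database check applies uniformly in one loop; DropDeltaIndexIDs below gets the matching treatment for its iterator, with Delete results and the iterator's final Error now checked instead of dropped. The method-value idiom in isolation, with the error policy reduced to a plain return for brevity:

    // Each entry already carries its receiver; only the argument varies.
    droppers := []func([]byte) error{
        db.dropFolder,
        db.dropMtimes,
        db.dropFolderMeta,
        db.folderIdx.Delete,
    }
    for _, drop := range droppers {
        if err := drop([]byte(folder)); err != nil {
            return err
        }
    }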
 func DropDeltaIndexIDs(db *Lowlevel) {
-	dbi := db.NewIterator(util.BytesPrefix([]byte{KeyTypeIndexID}), nil)
+	dbi, err := db.NewPrefixIterator([]byte{KeyTypeIndexID})
+	if backend.IsClosed(err) {
+		return
+	} else if err != nil {
+		panic(err)
+	}
 	defer dbi.Release()
 	for dbi.Next() {
-		db.Delete(dbi.Key(), nil)
+		if err := db.Delete(dbi.Key()); err != nil && !backend.IsClosed(err) {
+			panic(err)
+		}
+	}
+	if err := dbi.Error(); err != nil && !backend.IsClosed(err) {
+		panic(err)
 	}
 }
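
Every FileSet accessor in the set.go changes above repeats the same error-handling convention inline. A minimal sketch of that convention, assuming only backend.IsClosed from the new package (the helper name is hypothetical, not part of this change):

    package db

    import "github.com/syncthing/syncthing/lib/db/backend"

    // mustSucceed escalates database errors to a panic, except the
    // "database is closed" error, which is treated as a benign race
    // during shutdown and ignored.
    func mustSucceed(err error) {
        if err != nil && !backend.IsClosed(err) {
            panic(err)
        }
    }

The getters instead return their zero value when the database is closed, since they must produce a result either way.
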
diff --git a/lib/db/set_test.go b/lib/db/set_test.go
index 9fc58cc47..256cf0333 100644
--- a/lib/db/set_test.go
+++ b/lib/db/set_test.go
@@ -17,6 +17,7 @@ import (
 	"github.com/d4l3k/messagediff"
 
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -117,7 +118,7 @@ func (l fileList) String() string {
 }
 
 func TestGlobalSet(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -332,7 +333,7 @@ func TestGlobalSet(t *testing.T) {
 }
 
 func TestNeedWithInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -369,7 +370,7 @@ func TestNeedWithInvalid(t *testing.T) {
 }
 
 func TestUpdateToInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -425,7 +426,7 @@ func TestUpdateToInvalid(t *testing.T) {
 }
 
 func TestInvalidAvailability(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -463,7 +464,7 @@ func TestInvalidAvailability(t *testing.T) {
 }
 
 func TestGlobalReset(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -501,7 +502,7 @@ func TestGlobalReset(t *testing.T) {
 }
 
 func TestNeed(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -539,7 +540,7 @@ func TestNeed(t *testing.T) {
 }
 
 func TestSequence(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -569,7 +570,7 @@ func TestSequence(t *testing.T) {
 }
 
 func TestListDropFolder(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s0 := db.NewFileSet("test0", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local1 := []protocol.FileInfo{
@@ -619,7 +620,7 @@ func TestListDropFolder(t *testing.T) {
 }
 
 func TestGlobalNeedWithInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s := db.NewFileSet("test1", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -660,7 +661,7 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 }
 
 func TestLongPath(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -671,7 +672,7 @@ func TestLongPath(t *testing.T) {
 	name := b.String() // 5000 characters
 
 	local := []protocol.FileInfo{
-		{Name: string(name), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
+		{Name: name, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
 	}
 
 	replace(s, protocol.LocalDeviceID, local)
@@ -686,39 +687,6 @@ func TestLongPath(t *testing.T) {
 	}
 }
 
-func TestCommitted(t *testing.T) {
-	// Verify that the Committed counter increases when we change things and
-	// doesn't increase when we don't.
-
-	ldb := db.OpenMemory()
-
-	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
-
-	local := []protocol.FileInfo{
-		{Name: string("file"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
-	}
-
-	// Adding a file should increase the counter
-
-	c0 := ldb.Committed()
-
-	replace(s, protocol.LocalDeviceID, local)
-
-	c1 := ldb.Committed()
-	if c1 <= c0 {
-		t.Errorf("committed data didn't increase; %d <= %d", c1, c0)
-	}
-
-	// Updating with something identical should not do anything
-
-	s.Update(protocol.LocalDeviceID, local)
-
-	c2 := ldb.Committed()
-	if c2 > c1 {
-		t.Errorf("replace with same contents should do nothing but %d > %d", c2, c1)
-	}
-}
-
 func BenchmarkUpdateOneFile(b *testing.B) {
 	local0 := fileList{
 		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
@@ -729,10 +697,11 @@ func BenchmarkUpdateOneFile(b *testing.B) {
 		protocol.FileInfo{Name: "zajksdhaskjdh/askjdhaskjdashkajshd/kasjdhaskjdhaskdjhaskdjash/dkjashdaksjdhaskdjahskdjh", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
 	}
 
-	ldb, err := db.Open("testdata/benchmarkupdate.db", db.TuningAuto)
+	be, err := backend.Open("testdata/benchmarkupdate.db", backend.TuningAuto)
 	if err != nil {
 		b.Fatal(err)
 	}
+	ldb := db.NewLowlevel(be)
 	defer func() {
 		ldb.Close()
 		os.RemoveAll("testdata/benchmarkupdate.db")
@@ -751,7 +720,7 @@ func BenchmarkUpdateOneFile(b *testing.B) {
 }
 
 func TestIndexID(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -783,7 +752,7 @@ func TestIndexID(t *testing.T) {
 }
 
 func TestDropFiles(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -846,7 +815,7 @@ func TestDropFiles(t *testing.T) {
 }
 
 func TestIssue4701(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -887,7 +856,7 @@ func TestIssue4701(t *testing.T) {
 }
 
 func TestWithHaveSequence(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -915,14 +884,14 @@ func TestWithHaveSequence(t *testing.T) {
 
 func TestStressWithHaveSequence(t *testing.T) {
 	// This races two loops against each other: one that contiously does
-	// updates, and one that continously does sequence walks. The test fails
+	// updates, and one that continuously does sequence walks. The test fails
 	// if the sequence walker sees a discontinuity.
 
 	if testing.Short() {
 		t.Skip("Takes a long time")
 	}
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -945,7 +914,7 @@ func TestStressWithHaveSequence(t *testing.T) {
 		close(done)
 	}()
 
-	var prevSeq int64 = 0
+	var prevSeq int64
 loop:
 	for {
 		select {
@@ -964,7 +933,7 @@ loop:
 }
 
 func TestIssue4925(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -990,7 +959,7 @@ func TestIssue4925(t *testing.T) {
 }
 
 func TestMoveGlobalBack(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	file := "foo"
@@ -1054,7 +1023,7 @@ func TestMoveGlobalBack(t *testing.T) {
 // needed files.
 // https://github.com/syncthing/syncthing/issues/5007
 func TestIssue5007(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	file := "foo"
@@ -1081,7 +1050,7 @@ func TestIssue5007(t *testing.T) {
 // TestNeedDeleted checks that a file that doesn't exist locally isn't needed
 // when the global file is deleted.
 func TestNeedDeleted(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	file := "foo"
@@ -1115,7 +1084,7 @@ func TestNeedDeleted(t *testing.T) {
 }
 
 func TestReceiveOnlyAccounting(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -1219,7 +1188,7 @@ func TestReceiveOnlyAccounting(t *testing.T) {
 }
 
 func TestNeedAfterUnignore(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	folder := "test"
 	file := "foo"
@@ -1251,7 +1220,7 @@ func TestNeedAfterUnignore(t *testing.T) {
 
 func TestRemoteInvalidNotAccounted(t *testing.T) {
 	// Remote files with the invalid bit should not count.
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
 	files := []protocol.FileInfo{
@@ -1270,7 +1239,7 @@ func TestRemoteInvalidNotAccounted(t *testing.T) {
 }
 
 func TestNeedWithNewerInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	s := db.NewFileSet("default", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
@@ -1308,7 +1277,7 @@ func TestNeedWithNewerInvalid(t *testing.T) {
 }
 
 func TestNeedAfterDeviceRemove(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	file := "foo"
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -1335,7 +1304,7 @@ func TestNeedAfterDeviceRemove(t *testing.T) {
 
 func TestCaseSensitive(t *testing.T) {
 	// Normal case sensitive lookup should work
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
 	local := []protocol.FileInfo{
@@ -1372,7 +1341,7 @@ func TestSequenceIndex(t *testing.T) {
 	// Set up a db and a few files that we will manipulate.
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 
 	local := []protocol.FileInfo{
@@ -1463,7 +1432,7 @@ func TestSequenceIndex(t *testing.T) {
 }
 
 func TestIgnoreAfterReceiveOnly(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 
 	file := "foo"
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
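
With db.OpenMemory gone, every test above bootstraps the database by opening the in-memory backend and wrapping it in a Lowlevel. A minimal sketch of the pattern, assuming the packages shown (TestExample itself is hypothetical, not part of the patch):

    package db_test

    import (
        "testing"

        "github.com/syncthing/syncthing/lib/db"
        "github.com/syncthing/syncthing/lib/db/backend"
        "github.com/syncthing/syncthing/lib/fs"
    )

    func TestExample(t *testing.T) {
        // The backend is opened first; db.NewLowlevel adds the keyed
        // database layer on top of it.
        ldb := db.NewLowlevel(backend.OpenMemory())
        defer ldb.Close()

        s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
        _ = s
    }
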
diff --git a/lib/db/smallindex.go b/lib/db/smallindex.go
index 9ef00f5fd..b426d906b 100644
--- a/lib/db/smallindex.go
+++ b/lib/db/smallindex.go
@@ -10,16 +10,15 @@ import (
 	"encoding/binary"
 	"sort"
 
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/sync"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 // A smallIndex is an in memory bidirectional []byte to uint32 map. It gives
 // fast lookups in both directions and persists to the database. Don't use for
 // storing more items than fit comfortably in RAM.
 type smallIndex struct {
-	db     *leveldb.DB
+	db     backend.Backend
 	prefix []byte
 	id2val map[uint32]string
 	val2id map[string]uint32
@@ -27,7 +26,7 @@ type smallIndex struct {
 	mut    sync.Mutex
 }
 
-func newSmallIndex(db *leveldb.DB, prefix []byte) *smallIndex {
+func newSmallIndex(db backend.Backend, prefix []byte) *smallIndex {
 	idx := &smallIndex{
 		db:     db,
 		prefix: prefix,
@@ -42,7 +41,10 @@ func newSmallIndex(db *leveldb.DB, prefix []byte) *smallIndex {
 // load iterates over the prefix space in the database and populates the in
 // memory maps.
 func (i *smallIndex) load() {
-	it := i.db.NewIterator(util.BytesPrefix(i.prefix), nil)
+	it, err := i.db.NewPrefixIterator(i.prefix)
+	if err != nil {
+		panic("loading small index: " + err.Error())
+	}
 	defer it.Release()
 	for it.Next() {
 		val := string(it.Value())
@@ -60,7 +62,7 @@ func (i *smallIndex) load() {
 
 // ID returns the index number for the given byte slice, allocating a new one
 // and persisting this to the database if necessary.
-func (i *smallIndex) ID(val []byte) uint32 {
+func (i *smallIndex) ID(val []byte) (uint32, error) {
 	i.mut.Lock()
 	// intentionally avoiding defer here as we want this call to be as fast as
 	// possible in the general case (folder ID already exists). The map lookup
@@ -69,7 +71,7 @@ func (i *smallIndex) ID(val []byte) uint32 {
 	// here.
 	if id, ok := i.val2id[string(val)]; ok {
 		i.mut.Unlock()
-		return id
+		return id, nil
 	}
 
 	id := i.nextID
@@ -82,10 +84,13 @@ func (i *smallIndex) ID(val []byte) uint32 {
 	key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
 	copy(key, i.prefix)
 	binary.BigEndian.PutUint32(key[len(i.prefix):], id)
-	i.db.Put(key, val, nil)
+	if err := i.db.Put(key, val); err != nil {
+		i.mut.Unlock()
+		return 0, err
+	}
 
 	i.mut.Unlock()
-	return id
+	return id, nil
 }
 
 // Val returns the value for the given index number, or (nil, false) if there
@@ -101,7 +106,7 @@ func (i *smallIndex) Val(id uint32) ([]byte, bool) {
 	return []byte(val), true
 }
 
-func (i *smallIndex) Delete(val []byte) {
+func (i *smallIndex) Delete(val []byte) error {
 	i.mut.Lock()
 	defer i.mut.Unlock()
 
@@ -115,7 +120,9 @@ func (i *smallIndex) Delete(val []byte) {
 		// Put an empty value into the database. This indicates that the
 		// entry does not exist any more and prevents the ID from being
 		// reused in the future.
-		i.db.Put(key, []byte{}, nil)
+		if err := i.db.Put(key, []byte{}); err != nil {
+			return err
+		}
 
 		// Delete reverse mapping.
 		delete(i.id2val, id)
@@ -123,6 +130,7 @@ func (i *smallIndex) Delete(val []byte) {
 
 	// Delete forward mapping.
 	delete(i.val2id, string(val))
+	return nil
 }
 
 // Values returns the set of values in the index
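
smallIndex.ID can now fail, because allocating a new number writes through to the backend. Callers must thread that error outward instead of assuming allocation always succeeds; a hypothetical call site (not from the patch):

    // folderID resolves a folder name to its small-index number,
    // propagating any backend write error from a first-time allocation.
    func folderID(idx *smallIndex, folder string) (uint32, error) {
        id, err := idx.ID([]byte(folder))
        if err != nil {
            return 0, err
        }
        return id, nil
    }

Lookups that hit the in-memory maps (Val, and ID for already-known values) still cannot fail, which is why Val keeps its error-free signature.
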
diff --git a/lib/db/smallindex_test.go b/lib/db/smallindex_test.go
index 60602cd35..e8150970c 100644
--- a/lib/db/smallindex_test.go
+++ b/lib/db/smallindex_test.go
@@ -6,11 +6,15 @@
 
 package db
 
-import "testing"
+import (
+	"testing"
+
+	"github.com/syncthing/syncthing/lib/db/backend"
+)
 
 func TestSmallIndex(t *testing.T) {
-	db := OpenMemory()
-	idx := newSmallIndex(db.DB, []byte{12, 34})
+	db := NewLowlevel(backend.OpenMemory())
+	idx := newSmallIndex(db, []byte{12, 34})
 
 	// ID zero should be unallocated
 	if val, ok := idx.Val(0); ok || val != nil {
@@ -18,7 +22,9 @@ func TestSmallIndex(t *testing.T) {
 	}
 
 	// A new key should get ID zero
-	if id := idx.ID([]byte("hello")); id != 0 {
+	if id, err := idx.ID([]byte("hello")); err != nil {
+		t.Fatal(err)
+	} else if id != 0 {
 		t.Fatal("Expected 0, not", id)
 	}
 	// Looking up ID zero should work
@@ -30,23 +36,29 @@ func TestSmallIndex(t *testing.T) {
 	idx.Delete([]byte("hello"))
 
 	// Next ID should be one
-	if id := idx.ID([]byte("key2")); id != 1 {
+	if id, err := idx.ID([]byte("key2")); err != nil {
+		t.Fatal(err)
+	} else if id != 1 {
 		t.Fatal("Expected 1, not", id)
 	}
 
 	// Now lets create a new index instance based on what's actually serialized to the database.
-	idx = newSmallIndex(db.DB, []byte{12, 34})
+	idx = newSmallIndex(db, []byte{12, 34})
 
 	// Status should be about the same as before.
 	if val, ok := idx.Val(0); ok || val != nil {
 		t.Fatal("Unexpected return for deleted ID 0")
 	}
-	if id := idx.ID([]byte("key2")); id != 1 {
+	if id, err := idx.ID([]byte("key2")); err != nil {
+		t.Fatal(err)
+	} else if id != 1 {
 		t.Fatal("Expected 1, not", id)
 	}
 
 	// Setting "hello" again should get us ID 2, not 0 as it was originally.
-	if id := idx.ID([]byte("hello")); id != 2 {
+	if id, err := idx.ID([]byte("hello")); err != nil {
+		t.Fatal(err)
+	} else if id != 2 {
 		t.Fatal("Expected 2, not", id)
 	}
 }
diff --git a/lib/db/structs.go b/lib/db/structs.go
index f417d002b..ca786a1b8 100644
--- a/lib/db/structs.go
+++ b/lib/db/structs.go
@@ -175,7 +175,7 @@ func (vl VersionList) String() string {
 // update brings the VersionList up to date with file. It returns the updated
 // VersionList, a potentially removed old FileVersion and its index, as well as
 // the index where the new FileVersion was inserted.
-func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (_ VersionList, removedFV FileVersion, removedAt int, insertedAt int) {
+func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (_ VersionList, removedFV FileVersion, removedAt int, insertedAt int, err error) {
 	vl, removedFV, removedAt = vl.pop(device)
 
 	nv := FileVersion{
@@ -198,7 +198,7 @@ func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t re
 			// The version at this point in the list is equal to or lesser
 			// ("older") than us. We insert ourselves in front of it.
 			vl = vl.insertAt(i, nv)
-			return vl, removedFV, removedAt, i
+			return vl, removedFV, removedAt, i, nil
 
 		case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
 			// The version at this point is in conflict with us. We must pull
@@ -209,9 +209,11 @@ func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t re
 			// to determine the winner.)
 			//
 			// A surprise missing file entry here is counted as a win for us.
-			if of, ok := t.getFile(folder, vl.Versions[i].Device, []byte(file.Name)); !ok || file.WinsConflict(of) {
+			if of, ok, err := t.getFile(folder, vl.Versions[i].Device, []byte(file.Name)); err != nil {
+				return vl, removedFV, removedAt, i, err
+			} else if !ok || file.WinsConflict(of) {
 				vl = vl.insertAt(i, nv)
-				return vl, removedFV, removedAt, i
+				return vl, removedFV, removedAt, i, nil
 			}
 		}
 	}
@@ -219,7 +221,7 @@ func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t re
 
 	// We didn't find a position for an insert above, so append to the end.
 	vl.Versions = append(vl.Versions, nv)
-	return vl, removedFV, removedAt, len(vl.Versions) - 1
+	return vl, removedFV, removedAt, len(vl.Versions) - 1, nil
 }
 
 func (vl VersionList) insertAt(i int, v FileVersion) VersionList {
diff --git a/lib/db/transactions.go b/lib/db/transactions.go
index b19453057..d4bba1bc0 100644
--- a/lib/db/transactions.go
+++ b/lib/db/transactions.go
@@ -7,111 +7,146 @@
 package db
 
 import (
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 // A readOnlyTransaction represents a database snapshot.
 type readOnlyTransaction struct {
-	snapshot
+	backend.ReadTransaction
 	keyer keyer
 }
 
-func (db *instance) newReadOnlyTransaction() readOnlyTransaction {
-	return readOnlyTransaction{
-		snapshot: db.GetSnapshot(),
-		keyer:    db.keyer,
+func (db *instance) newReadOnlyTransaction() (readOnlyTransaction, error) {
+	tran, err := db.NewReadTransaction()
+	if err != nil {
+		return readOnlyTransaction{}, err
 	}
+	return readOnlyTransaction{
+		ReadTransaction: tran,
+		keyer:           db.keyer,
+	}, nil
 }
 
 func (t readOnlyTransaction) close() {
 	t.Release()
 }
 
-func (t readOnlyTransaction) getFile(folder, device, file []byte) (protocol.FileInfo, bool) {
-	return t.getFileByKey(t.keyer.GenerateDeviceFileKey(nil, folder, device, file))
-}
-
-func (t readOnlyTransaction) getFileByKey(key []byte) (protocol.FileInfo, bool) {
-	if f, ok := t.getFileTrunc(key, false); ok {
-		return f.(protocol.FileInfo), true
+func (t readOnlyTransaction) getFile(folder, device, file []byte) (protocol.FileInfo, bool, error) {
+	key, err := t.keyer.GenerateDeviceFileKey(nil, folder, device, file)
+	if err != nil {
+		return protocol.FileInfo{}, false, err
 	}
-	return protocol.FileInfo{}, false
+	return t.getFileByKey(key)
 }
 
-func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (FileIntf, bool) {
-	bs, err := t.Get(key, nil)
-	if err == leveldb.ErrNotFound {
-		return nil, false
+func (t readOnlyTransaction) getFileByKey(key []byte) (protocol.FileInfo, bool, error) {
+	f, ok, err := t.getFileTrunc(key, false)
+	if err != nil || !ok {
+		return protocol.FileInfo{}, false, err
+	}
+	return f.(protocol.FileInfo), true, nil
+}
+
+func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (FileIntf, bool, error) {
+	bs, err := t.Get(key)
+	if backend.IsNotFound(err) {
+		return nil, false, nil
 	}
 	if err != nil {
-		l.Debugln("surprise error:", err)
-		return nil, false
+		return nil, false, err
 	}
 
 	f, err := unmarshalTrunc(bs, trunc)
 	if err != nil {
-		l.Debugln("unmarshal error:", err)
-		return nil, false
+		return nil, false, err
 	}
-	return f, true
+	return f, true, nil
 }
 
-func (t readOnlyTransaction) getGlobal(keyBuf, folder, file []byte, truncate bool) ([]byte, FileIntf, bool) {
-	keyBuf = t.keyer.GenerateGlobalVersionKey(keyBuf, folder, file)
-
-	bs, err := t.Get(keyBuf, nil)
+func (t readOnlyTransaction) getGlobal(keyBuf, folder, file []byte, truncate bool) ([]byte, FileIntf, bool, error) {
+	var err error
+	keyBuf, err = t.keyer.GenerateGlobalVersionKey(keyBuf, folder, file)
 	if err != nil {
-		return keyBuf, nil, false
+		return nil, nil, false, err
+	}
+
+	bs, err := t.Get(keyBuf)
+	if backend.IsNotFound(err) {
+		return keyBuf, nil, false, nil
+	}
+	if err != nil {
+		return nil, nil, false, err
 	}
 
 	vl, ok := unmarshalVersionList(bs)
 	if !ok {
-		return keyBuf, nil, false
+		return keyBuf, nil, false, nil
 	}
 
-	keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, vl.Versions[0].Device, file)
-	if fi, ok := t.getFileTrunc(keyBuf, truncate); ok {
-		return keyBuf, fi, true
+	keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, vl.Versions[0].Device, file)
+	if err != nil {
+		return nil, nil, false, err
 	}
-
-	return keyBuf, nil, false
+	fi, ok, err := t.getFileTrunc(keyBuf, truncate)
+	if err != nil || !ok {
+		return keyBuf, nil, false, err
+	}
+	return keyBuf, fi, true, nil
 }
 
 // A readWriteTransaction is a readOnlyTransaction plus a batch for writes.
 // The batch will be committed on close() or by checkFlush() if it exceeds the
 // batch size.
 type readWriteTransaction struct {
+	backend.WriteTransaction
 	readOnlyTransaction
-	*batch
 }
 
-func (db *instance) newReadWriteTransaction() readWriteTransaction {
-	return readWriteTransaction{
-		readOnlyTransaction: db.newReadOnlyTransaction(),
-		batch:               db.newBatch(),
+func (db *instance) newReadWriteTransaction() (readWriteTransaction, error) {
+	tran, err := db.NewWriteTransaction()
+	if err != nil {
+		return readWriteTransaction{}, err
 	}
+	return readWriteTransaction{
+		WriteTransaction: tran,
+		readOnlyTransaction: readOnlyTransaction{
+			ReadTransaction: tran,
+			keyer:           db.keyer,
+		},
+	}, nil
+}
+
+func (t readWriteTransaction) commit() error {
+	t.readOnlyTransaction.close()
+	return t.WriteTransaction.Commit()
 }
 
 func (t readWriteTransaction) close() {
-	t.flush()
 	t.readOnlyTransaction.close()
+	t.WriteTransaction.Release()
 }
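
The same backend.WriteTransaction serves both halves of the readWriteTransaction: it is stored directly for writes and embedded as the ReadTransaction inside the readOnlyTransaction for reads. A hedged sketch of the resulting calling pattern (the function is hypothetical; the backend contract permits releasing an already committed transaction, so the deferred close is harmless):

    func example(db *instance) error {
        t, err := db.newReadWriteTransaction()
        if err != nil {
            return err
        }
        defer t.close() // a no-op release if commit already succeeded

        if err := t.Put([]byte("key"), []byte("val")); err != nil {
            return err
        }
        return t.commit()
    }
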
 
 // updateGlobal adds this device+version to the version list for the given
 // file. If the device is already present in the list, the version is updated.
 // If the file does not have an entry in the global list, it is created.
-func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, file protocol.FileInfo, meta *metadataTracker) ([]byte, bool) {
+func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, file protocol.FileInfo, meta *metadataTracker) ([]byte, bool, error) {
 	l.Debugf("update global; folder=%q device=%v file=%q version=%v invalid=%v", folder, protocol.DeviceIDFromBytes(device), file.Name, file.Version, file.IsInvalid())
 
 	var fl VersionList
-	if svl, err := t.Get(gk, nil); err == nil {
-		fl.Unmarshal(svl) // Ignore error, continue with empty fl
+	svl, err := t.Get(gk)
+	if err == nil {
+		_ = fl.Unmarshal(svl) // Ignore error, continue with empty fl
+	} else if !backend.IsNotFound(err) {
+		return nil, false, err
+	}
+
+	fl, removedFV, removedAt, insertedAt, err := fl.update(folder, device, file, t.readOnlyTransaction)
+	if err != nil {
+		return nil, false, err
 	}
 
-	fl, removedFV, removedAt, insertedAt := fl.update(folder, device, file, t.readOnlyTransaction)
 	if insertedAt == -1 {
 		l.Debugln("update global; same version, global unchanged")
-		return keyBuf, false
+		return keyBuf, false, nil
 	}
 
 	name := []byte(file.Name)
@@ -121,24 +156,29 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
 		// Inserted a new newest version
 		global = file
 	} else {
-		keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, name)
-		if new, ok := t.getFileByKey(keyBuf); ok {
-			global = new
-		} else {
-			// This file must exist in the db, so this must be caused
-			// by the db being closed - bail out.
-			l.Debugln("File should exist:", name)
-			return keyBuf, false
+		keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, name)
+		if err != nil {
+			return nil, false, err
 		}
+		new, ok, err := t.getFileByKey(keyBuf)
+		if err != nil || !ok {
+			return keyBuf, false, err
+		}
+		global = new
 	}
 
 	// Fixup the list of files we need.
-	keyBuf = t.updateLocalNeed(keyBuf, folder, name, fl, global)
+	keyBuf, err = t.updateLocalNeed(keyBuf, folder, name, fl, global)
+	if err != nil {
+		return nil, false, err
+	}
 
 	if removedAt != 0 && insertedAt != 0 {
 		l.Debugf(`new global for "%v" after update: %v`, file.Name, fl)
-		t.Put(gk, mustMarshal(&fl))
-		return keyBuf, true
+		if err := t.Put(gk, mustMarshal(&fl)); err != nil {
+			return nil, false, err
+		}
+		return keyBuf, true, nil
 	}
 
 	// Remove the old global from the global size counter
@@ -149,8 +189,15 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
 		// The previous newest version is now at index 1
 		oldGlobalFV = fl.Versions[1]
 	}
-	keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, oldGlobalFV.Device, name)
-	if oldFile, ok := t.getFileByKey(keyBuf); ok {
+	keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, oldGlobalFV.Device, name)
+	if err != nil {
+		return nil, false, err
+	}
+	oldFile, ok, err := t.getFileByKey(keyBuf)
+	if err != nil {
+		return nil, false, err
+	}
+	if ok {
 		// A failure to get the file here is surprising and our
 		// global size data will be incorrect until a restart...
 		meta.removeFile(protocol.GlobalDeviceID, oldFile)
@@ -160,27 +207,41 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
 	meta.addFile(protocol.GlobalDeviceID, global)
 
 	l.Debugf(`new global for "%v" after update: %v`, file.Name, fl)
-	t.Put(gk, mustMarshal(&fl))
+	if err := t.Put(gk, mustMarshal(&fl)); err != nil {
+		return nil, false, err
+	}
 
-	return keyBuf, true
+	return keyBuf, true, nil
 }
 
 // updateLocalNeed checks whether the given file is still needed on the local
 // device according to the version list and global FileInfo given and updates
 // the db accordingly.
-func (t readWriteTransaction) updateLocalNeed(keyBuf, folder, name []byte, fl VersionList, global protocol.FileInfo) []byte {
-	keyBuf = t.keyer.GenerateNeedFileKey(keyBuf, folder, name)
-	hasNeeded, _ := t.Has(keyBuf, nil)
+func (t readWriteTransaction) updateLocalNeed(keyBuf, folder, name []byte, fl VersionList, global protocol.FileInfo) ([]byte, error) {
+	var err error
+	keyBuf, err = t.keyer.GenerateNeedFileKey(keyBuf, folder, name)
+	if err != nil {
+		return nil, err
+	}
+	_, err = t.Get(keyBuf)
+	if err != nil && !backend.IsNotFound(err) {
+		return nil, err
+	}
+	hasNeeded := err == nil
 	if localFV, haveLocalFV := fl.Get(protocol.LocalDeviceID[:]); need(global, haveLocalFV, localFV.Version) {
 		if !hasNeeded {
 			l.Debugf("local need insert; folder=%q, name=%q", folder, name)
-			t.Put(keyBuf, nil)
+			if err := t.Put(keyBuf, nil); err != nil {
+				return nil, err
+			}
 		}
 	} else if hasNeeded {
 		l.Debugf("local need delete; folder=%q, name=%q", folder, name)
-		t.Delete(keyBuf)
+		if err := t.Delete(keyBuf); err != nil {
+			return nil, err
+		}
 	}
-	return keyBuf
+	return keyBuf, nil
 }
 
 func need(global FileIntf, haveLocal bool, localVersion protocol.Vector) bool {
@@ -202,71 +263,94 @@ func need(global FileIntf, haveLocal bool, localVersion protocol.Vector) bool {
 // removeFromGlobal removes the device from the global version list for the
 // given file. If the version list is empty after this, the file entry is
 // removed entirely.
-func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte, file []byte, meta *metadataTracker) []byte {
+func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte, file []byte, meta *metadataTracker) ([]byte, error) {
 	l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file)
 
-	svl, err := t.Get(gk, nil)
-	if err != nil {
+	svl, err := t.Get(gk)
+	if backend.IsNotFound(err) {
 		// We might be called to "remove" a global version that doesn't exist
 		// if the first update for the file is already marked invalid.
-		return keyBuf
+		return keyBuf, nil
+	} else if err != nil {
+		return nil, err
 	}
 
 	var fl VersionList
 	err = fl.Unmarshal(svl)
 	if err != nil {
-		l.Debugln("unmarshal error:", err)
-		return keyBuf
+		return nil, err
 	}
 
 	fl, _, removedAt := fl.pop(device)
 	if removedAt == -1 {
 		// There is no version for the given device
-		return keyBuf
+		return keyBuf, nil
 	}
 
 	if removedAt == 0 {
 		// A failure to get the file here is surprising and our
 		// global size data will be incorrect until a restart...
-		keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, device, file)
-		if f, ok := t.getFileByKey(keyBuf); ok {
+		keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, device, file)
+		if err != nil {
+			return nil, err
+		}
+		if f, ok, err := t.getFileByKey(keyBuf); err != nil {
+			return nil, err
+		} else if ok {
 			meta.removeFile(protocol.GlobalDeviceID, f)
 		}
 	}
 
 	if len(fl.Versions) == 0 {
-		keyBuf = t.keyer.GenerateNeedFileKey(keyBuf, folder, file)
-		t.Delete(keyBuf)
-		t.Delete(gk)
-		return keyBuf
+		keyBuf, err = t.keyer.GenerateNeedFileKey(keyBuf, folder, file)
+		if err != nil {
+			return nil, err
+		}
+		if err := t.Delete(keyBuf); err != nil {
+			return nil, err
+		}
+		if err := t.Delete(gk); err != nil {
+			return nil, err
+		}
+		return keyBuf, nil
 	}
 
 	if removedAt == 0 {
-		keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, file)
-		global, ok := t.getFileByKey(keyBuf)
-		if !ok {
-			// This file must exist in the db, so this must be caused
-			// by the db being closed - bail out.
-			l.Debugln("File should exist:", file)
-			return keyBuf
+		keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, file)
+		if err != nil {
+			return nil, err
+		}
+		global, ok, err := t.getFileByKey(keyBuf)
+		if err != nil || !ok {
+			return keyBuf, err
+		}
+		keyBuf, err = t.updateLocalNeed(keyBuf, folder, file, fl, global)
+		if err != nil {
+			return nil, err
+		}
-		keyBuf = t.updateLocalNeed(keyBuf, folder, file, fl, global)
 		meta.addFile(protocol.GlobalDeviceID, global)
 	}
 
 	l.Debugf("new global after remove: %v", fl)
-	t.Put(gk, mustMarshal(&fl))
+	if err := t.Put(gk, mustMarshal(&fl)); err != nil {
+		return nil, err
+	}
 
-	return keyBuf
+	return keyBuf, nil
 }
 
-func (t readWriteTransaction) deleteKeyPrefix(prefix []byte) {
-	dbi := t.NewIterator(util.BytesPrefix(prefix), nil)
-	for dbi.Next() {
-		t.Delete(dbi.Key())
-		t.checkFlush()
+func (t readWriteTransaction) deleteKeyPrefix(prefix []byte) error {
+	dbi, err := t.NewPrefixIterator(prefix)
+	if err != nil {
+		return err
 	}
-	dbi.Release()
+	defer dbi.Release()
+	for dbi.Next() {
+		if err := t.Delete(dbi.Key()); err != nil {
+			return err
+		}
+	}
+	return dbi.Error()
 }
 
 type marshaller interface {
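
updateLocalNeed above emulates a "has" check with Get, since the backend Reader interface exposes no Has method: a nil error means the key exists, a not-found error means it cleanly does not, and anything else is a real failure. The same idea as a standalone sketch (the helper is hypothetical, not part of the patch):

    func has(r backend.Reader, key []byte) (bool, error) {
        _, err := r.Get(key)
        if err == nil {
            return true, nil
        }
        if backend.IsNotFound(err) {
            return false, nil
        }
        return false, err
    }
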
diff --git a/lib/db/util_test.go b/lib/db/util_test.go
index aa5595e91..e7e5d5413 100644
--- a/lib/db/util_test.go
+++ b/lib/db/util_test.go
@@ -11,22 +11,26 @@ import (
 	"io"
 	"os"
 
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
-	"github.com/syndtr/goleveldb/leveldb/util"
+	"github.com/syncthing/syncthing/lib/db/backend"
 )
 
 // writeJSONS serializes the database to a JSON stream that can be checked
 // in to the repo and used for tests.
-func writeJSONS(w io.Writer, db *leveldb.DB) {
-	it := db.NewIterator(&util.Range{}, nil)
+func writeJSONS(w io.Writer, db backend.Backend) {
+	it, err := db.NewPrefixIterator(nil)
+	if err != nil {
+		panic(err)
+	}
 	defer it.Release()
 	enc := json.NewEncoder(w)
 	for it.Next() {
-		enc.Encode(map[string][]byte{
+		err := enc.Encode(map[string][]byte{
 			"k": it.Key(),
 			"v": it.Value(),
 		})
+		if err != nil {
+			panic(err)
+		}
 	}
 }
 
@@ -34,15 +38,15 @@ func writeJSONS(w io.Writer, db *leveldb.DB) {
 // here and the linter to not complain.
 var _ = writeJSONS
 
-// openJSONS reads a JSON stream file into a leveldb.DB
-func openJSONS(file string) (*leveldb.DB, error) {
+// openJSONS reads a JSON stream file into a backend DB
+func openJSONS(file string) (backend.Backend, error) {
 	fd, err := os.Open(file)
 	if err != nil {
 		return nil, err
 	}
 
 	dec := json.NewDecoder(fd)
 
-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := backend.OpenMemory()
 
 	for {
 		var row map[string][]byte
@@ -54,7 +58,9 @@ func openJSONS(file string) (*leveldb.DB, error) {
 			return nil, err
 		}
 
-		db.Put(row["k"], row["v"], nil)
+		if err := db.Put(row["k"], row["v"]); err != nil {
+			return nil, err
+		}
 	}
 
 	return db, nil
diff --git a/lib/fs/mtimefs.go b/lib/fs/mtimefs.go
index 199fb8acb..a1a1a33f4 100644
--- a/lib/fs/mtimefs.go
+++ b/lib/fs/mtimefs.go
@@ -13,8 +13,8 @@ import (
 // The database is where we store the virtual mtimes
 type database interface {
 	Bytes(key string) (data []byte, ok bool)
-	PutBytes(key string, data []byte)
-	Delete(key string)
+	PutBytes(key string, data []byte) error
+	Delete(key string) error
 }
 
 // The MtimeFS is a filesystem with nanosecond mtime precision, regardless
diff --git a/lib/fs/mtimefs_test.go b/lib/fs/mtimefs_test.go
index 1c002d25e..5100bea3e 100644
--- a/lib/fs/mtimefs_test.go
+++ b/lib/fs/mtimefs_test.go
@@ -236,8 +236,9 @@ func TestMtimeFSInsensitive(t *testing.T) {
 
 type mapStore map[string][]byte
 
-func (s mapStore) PutBytes(key string, data []byte) {
+func (s mapStore) PutBytes(key string, data []byte) error {
 	s[key] = data
+	return nil
 }
 
 func (s mapStore) Bytes(key string) (data []byte, ok bool) {
@@ -245,8 +246,9 @@ func (s mapStore) Bytes(key string) (data []byte, ok bool) {
 	return
 }
 
-func (s mapStore) Delete(key string) {
+func (s mapStore) Delete(key string) error {
 	delete(s, key)
+	return nil
 }
 
 // failChtimes does nothing, and fails
diff --git a/lib/model/folder_recvonly_test.go b/lib/model/folder_recvonly_test.go
index 3fbe3a2cb..dd9e81014 100644
--- a/lib/model/folder_recvonly_test.go
+++ b/lib/model/folder_recvonly_test.go
@@ -16,6 +16,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/scanner"
@@ -315,8 +316,8 @@ func setupROFolder() (*model, *sendOnlyFolder) {
 	fcfg.ID = "ro"
 	fcfg.Type = config.FolderTypeReceiveOnly
 	w.SetFolder(fcfg)
-
-	m := newModel(w, myID, "syncthing", "dev", db.OpenMemory(), nil)
+
+	m := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil)
 
 	m.ServeBackground()
@@ -335,4 +336,4 @@ func setupROFolder() (*model, *sendOnlyFolder) {
 	m.fmut.RUnlock()
 
 	return m, f
-}
+}
\ No newline at end of file
diff --git a/lib/model/folder_sendrecv_test.go b/lib/model/folder_sendrecv_test.go
index b3eab1c9e..2e5a81b17 100644
--- a/lib/model/folder_sendrecv_test.go
+++ b/lib/model/folder_sendrecv_test.go
@@ -20,6 +20,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/ignore"
@@ -91,7 +92,7 @@ func createFile(t *testing.T, name string, fs fs.Filesystem) protocol.FileInfo {
 
 func setupSendReceiveFolder(files ...protocol.FileInfo) (*model, *sendReceiveFolder) {
 	w := createTmpWrapper(defaultCfg)
-	model := newModel(w, myID, "syncthing", "dev", db.OpenMemory(), nil)
+	model := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil)
 	fcfg := testFolderConfigTmp()
 	model.addFolder(fcfg)
diff --git a/lib/model/model_test.go b/lib/model/model_test.go
index 5376cc68e..732d5aac2 100644
--- a/lib/model/model_test.go
+++ b/lib/model/model_test.go
@@ -27,6 +27,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/ignore"
@@ -306,7 +307,7 @@ func TestDeviceRename(t *testing.T) {
 	}
 	cfg := config.Wrap("testdata/tmpconfig.xml", rawCfg, events.NoopLogger)
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(cfg, myID, "syncthing", "dev", db, nil)
 
 	if cfg.Devices()[device1].Name != "" {
@@ -402,7 +403,7 @@ func TestClusterConfig(t *testing.T) {
 		},
 	}
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 
 	wrapper := createTmpWrapper(cfg)
 	m := newModel(wrapper, myID, "syncthing", "dev", db, nil)
@@ -1533,7 +1534,7 @@ func waitForState(t *testing.T, m *model, folder, status string) {
 func TestROScanRecovery(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	set := db.NewFileSet("default", defaultFs, ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
@@ -1584,7 +1585,7 @@ func TestROScanRecovery(t *testing.T) {
 func TestRWScanRecovery(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	set := db.NewFileSet("default", defaultFs, ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
@@ -1633,7 +1634,7 @@ func TestRWScanRecovery(t *testing.T) {
 }
 
 func TestGlobalDirectoryTree(t *testing.T) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 	m.removeFolder(defaultFolderConfig)
@@ -1886,7 +1887,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
 }
 
 func TestGlobalDirectorySelfFixing(t *testing.T) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 	m.removeFolder(defaultFolderConfig)
@@ -2063,7 +2064,7 @@ func BenchmarkTree_100_10(b *testing.B) {
 }
 
 func benchmarkTree(b *testing.B, n1, n2 int) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 	m.removeFolder(defaultFolderConfig)
@@ -2128,7 +2129,7 @@ func TestIssue3028(t *testing.T) {
 }
 
 func TestIssue4357(t *testing.T) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	cfg := defaultCfgWrapper.RawCopy()
 	// Create a separate wrapper not to pollute other tests.
 	wrapper := createTmpWrapper(config.Configuration{})
@@ -2251,7 +2252,7 @@ func TestIssue2782(t *testing.T) {
 }
 
 func TestIndexesForUnknownDevicesDropped(t *testing.T) {
-	dbi := db.OpenMemory()
+	dbi := db.NewLowlevel(backend.OpenMemory())
 
 	files := db.NewFileSet("default", defaultFs, dbi)
 	files.Drop(device1)
@@ -2677,7 +2678,7 @@ func TestInternalScan(t *testing.T) {
 func TestCustomMarkerName(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	set := db.NewFileSet("default", defaultFs, ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile"},
@@ -3052,7 +3053,7 @@ func TestPausedFolders(t *testing.T) {
 func TestIssue4094(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	// Create a separate wrapper not to pollute other tests.
 	wrapper := createTmpWrapper(config.Configuration{})
 	m := newModel(wrapper, myID, "syncthing", "dev", db, nil)
@@ -3088,7 +3089,7 @@ func TestIssue4094(t *testing.T) {
 func TestIssue4903(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	// Create a separate wrapper not to pollute other tests.
 	wrapper := createTmpWrapper(config.Configuration{})
 	m := newModel(wrapper, myID, "syncthing", "dev", db, nil)
diff --git a/lib/model/testutils_test.go b/lib/model/testutils_test.go
index 109c3fea4..44181087f 100644
--- a/lib/model/testutils_test.go
+++ b/lib/model/testutils_test.go
@@ -13,6 +13,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
@@ -102,7 +103,7 @@ func setupModelWithConnectionFromWrapper(w config.Wrapper) (*model, *fakeConnection) {
 }
 
 func setupModel(w config.Wrapper) *model {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(w, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 
diff --git a/lib/syncthing/utils.go b/lib/syncthing/utils.go
index 2070e7864..841a483e6 100644
--- a/lib/syncthing/utils.go
+++ b/lib/syncthing/utils.go
@@ -17,6 +17,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/locations"
@@ -124,5 +125,9 @@ func copyFile(src, dst string) error {
 }
 
 func OpenGoleveldb(path string, tuning config.Tuning) (*db.Lowlevel, error) {
-	return db.Open(path, db.Tuning(tuning))
+	ldb, err := backend.Open(path, backend.Tuning(tuning))
+	if err != nil {
+		return nil, err
+	}
+	return db.NewLowlevel(ldb), nil
 }
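
OpenGoleveldb keeps its old signature but now composes the two layers explicitly: the leveldb backend is opened first and the db.Lowlevel layer is wrapped around it. A hypothetical caller, sketched for illustration (the path and tuning value are assumptions):

    package main

    import (
        "log"

        "github.com/syncthing/syncthing/lib/config"
        "github.com/syncthing/syncthing/lib/syncthing"
    )

    func main() {
        // ldb is a *db.Lowlevel wrapping a leveldb-backed backend.Backend.
        ldb, err := syncthing.OpenGoleveldb("index-v0.14.0.db", config.TuningAuto)
        if err != nil {
            log.Fatal(err)
        }
        defer ldb.Close()
    }
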