This PR does two things, because one led to the other:

- Move the leveldb-specific stuff into a small "backend" package that defines a backend interface and the leveldb implementation. This allows us, potentially, to switch the db implementation to another KV store in the future, should we wish to do so.

- Add proper error handling all along the way. The db and backend packages are now errcheck clean. However, I drew the line at modifying the FileSet API in order to keep this manageable and not continue refactoring all of the rest of Syncthing. As such, the FileSet methods still panic on database errors, except for the "database is closed" error, which is instead handled by silently returning as quickly as possible, on the assumption that we're on the way out anyway.
This commit is contained in:
parent a5bbc12625
commit c71116ee94
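
Before the diff itself: the error-handling policy described above reduces to a small pattern at the FileSet boundary. The following sketch is illustrative only — the helper name is hypothetical and not part of this commit — but it shows how a caller can treat "database is closed" as a quiet exit while panicking on anything else, using the backend.IsClosed predicate added in this change:

package db

import "github.com/syncthing/syncthing/lib/db/backend"

// shouldAbort is a hypothetical helper encoding the FileSet policy from the
// description: "database is closed" means we are shutting down, so the
// caller returns quietly; any other database error is a panic.
func shouldAbort(err error) bool {
	if err == nil {
		return false
	}
	if backend.IsClosed(err) {
		return true // on the way out anyway; return as quickly as possible
	}
	panic(err)
}
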
@@ -13,11 +13,15 @@ import (
 	"time"
 
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
 
-func dump(ldb *db.Lowlevel) {
-	it := ldb.NewIterator(nil, nil)
+func dump(ldb backend.Backend) {
+	it, err := ldb.NewPrefixIterator(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
 	for it.Next() {
 		key := it.Key()
 		switch key[0] {
@@ -10,8 +10,10 @@ import (
 	"container/heap"
 	"encoding/binary"
 	"fmt"
+	"log"
 
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 )
 
 type SizedElement struct {
@@ -37,11 +39,14 @@ func (h *ElementHeap) Pop() interface{} {
 	return x
 }
 
-func dumpsize(ldb *db.Lowlevel) {
+func dumpsize(ldb backend.Backend) {
 	h := &ElementHeap{}
 	heap.Init(h)
 
-	it := ldb.NewIterator(nil, nil)
+	it, err := ldb.NewPrefixIterator(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
 	var ele SizedElement
 	for it.Next() {
 		key := it.Key()
@@ -10,8 +10,10 @@ import (
 	"bytes"
 	"encoding/binary"
 	"fmt"
+	"log"
 
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
 
@@ -31,7 +33,7 @@ type sequenceKey struct {
 	sequence uint64
 }
 
-func idxck(ldb *db.Lowlevel) (success bool) {
+func idxck(ldb backend.Backend) (success bool) {
 	folders := make(map[uint32]string)
 	devices := make(map[uint32]string)
 	deviceToIDs := make(map[string]uint32)
@@ -42,7 +44,10 @@ func idxck(ldb *db.Lowlevel) (success bool) {
 	var localDeviceKey uint32
 	success = true
 
-	it := ldb.NewIterator(nil, nil)
+	it, err := ldb.NewPrefixIterator(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
 	for it.Next() {
 		key := it.Key()
 		switch key[0] {
@@ -13,7 +13,7 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 )
 
 func main() {
@@ -30,7 +30,7 @@ func main() {
 		path = filepath.Join(defaultConfigDir(), "index-v0.14.0.db")
 	}
 
-	ldb, err := db.OpenRO(path)
+	ldb, err := backend.OpenLevelDBRO(path)
 	if err != nil {
 		log.Fatal(err)
 	}
@@ -10,17 +10,17 @@ import (
 	"testing"
 
 	"github.com/syncthing/syncthing/lib/config"
-	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 )
 
 func TestTuningMatches(t *testing.T) {
-	if int(config.TuningAuto) != int(db.TuningAuto) {
+	if int(config.TuningAuto) != int(backend.TuningAuto) {
 		t.Error("mismatch for TuningAuto")
 	}
-	if int(config.TuningSmall) != int(db.TuningSmall) {
+	if int(config.TuningSmall) != int(backend.TuningSmall) {
 		t.Error("mismatch for TuningSmall")
 	}
-	if int(config.TuningLarge) != int(db.TuningLarge) {
+	if int(config.TuningLarge) != int(backend.TuningLarge) {
 		t.Error("mismatch for TuningLarge")
 	}
 }
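
The constants are compared by numeric value because the conversion between the two types is, presumably, a plain cast somewhere in the wiring. A hedged sketch of what such a conversion could look like (the function name is hypothetical, not part of this commit):

package example

import (
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db/backend"
)

// toBackendTuning shows why the constants above must stay numerically
// aligned: the conversion is a bare integer cast, with no lookup table.
func toBackendTuning(t config.Tuning) backend.Tuning {
	return backend.Tuning(int(t))
}
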
170	lib/db/backend/backend.go	Normal file
@@ -0,0 +1,170 @@
+// Copyright (C) 2019 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package backend
+
+import (
+	"sync"
+)
+
+// The Reader interface specifies the read-only operations available on the
+// main database and on read-only transactions (snapshots). Note that when
+// called directly on the database handle these operations may take implicit
+// transactions and performance may suffer.
+type Reader interface {
+	Get(key []byte) ([]byte, error)
+	NewPrefixIterator(prefix []byte) (Iterator, error)
+	NewRangeIterator(first, last []byte) (Iterator, error)
+}
+
+// The Writer interface specifies the mutating operations available on the
+// main database and on writable transactions. Note that when called
+// directly on the database handle these operations may take implicit
+// transactions and performance may suffer.
+type Writer interface {
+	Put(key, val []byte) error
+	Delete(key []byte) error
+}
+
+// The ReadTransaction interface specifies the operations on read-only
+// transactions. Every ReadTransaction must be released when no longer
+// required.
+type ReadTransaction interface {
+	Reader
+	Release()
+}
+
+// The WriteTransaction interface specifies the operations on writable
+// transactions. Every WriteTransaction must be either committed or released
+// (i.e., discarded) when no longer required. No further operations must be
+// performed after release or commit (regardless of whether commit succeeded),
+// with one exception -- it's fine to release an already committed or released
+// transaction.
+//
+// A Checkpoint is a potential partial commit of the transaction so far, for
+// purposes of saving memory when transactions are in-RAM. Note that
+// transactions may be checkpointed *anyway* even if this is not called, due to
+// resource constraints, but this gives you a chance to decide when.
+type WriteTransaction interface {
+	ReadTransaction
+	Writer
+	Checkpoint() error
+	Commit() error
+}
+
+// The Iterator interface specifies the operations available on iterators
+// returned by NewPrefixIterator and NewRangeIterator. The iterator pattern
+// is to loop while Next returns true, then check Error after the loop. Next
+// will return false when iteration is complete (Error() == nil) or when
+// there is an error preventing iteration, which is then returned by
+// Error(). For example:
+//
+//	it, err := db.NewPrefixIterator(nil)
+//	if err != nil {
+//		// problem preventing iteration
+//	}
+//	defer it.Release()
+//	for it.Next() {
+//		// ...
+//	}
+//	if err := it.Error(); err != nil {
+//		// there was a database problem while iterating
+//	}
+//
+// An iterator must be Released when no longer required. The Error method
+// can be called either before or after Release with the same results. If an
+// iterator was created in a transaction (whether read-only or write) it
+// must be released before the transaction is released (or committed).
+type Iterator interface {
+	Next() bool
+	Key() []byte
+	Value() []byte
+	Error() error
+	Release()
+}
+
+// The Backend interface represents the main database handle. It supports
+// both read/write operations and opening read-only or writable
+// transactions. Depending on the actual implementation, individual
+// read/write operations may be implicitly wrapped in transactions, making
+// them perform quite badly when used repeatedly. For bulk operations,
+// consider always using a transaction of the appropriate type. The
+// transaction isolation level is "read committed" - there are no dirty
+// reads.
+type Backend interface {
+	Reader
+	Writer
+	NewReadTransaction() (ReadTransaction, error)
+	NewWriteTransaction() (WriteTransaction, error)
+	Close() error
+}
+
+type Tuning int
+
+const (
+	// N.b. these constants must match those in lib/config.Tuning!
+	TuningAuto Tuning = iota
+	TuningSmall
+	TuningLarge
+)
+
+func Open(path string, tuning Tuning) (Backend, error) {
+	return OpenLevelDB(path, tuning)
+}
+
+func OpenMemory() Backend {
+	return OpenLevelDBMemory()
+}
+
+type errClosed struct{}
+
+func (errClosed) Error() string { return "database is closed" }
+
+type errNotFound struct{}
+
+func (errNotFound) Error() string { return "key not found" }
+
+func IsClosed(err error) bool {
+	if _, ok := err.(errClosed); ok {
+		return true
+	}
+	if _, ok := err.(*errClosed); ok {
+		return true
+	}
+	return false
+}
+
+func IsNotFound(err error) bool {
+	if _, ok := err.(errNotFound); ok {
+		return true
+	}
+	if _, ok := err.(*errNotFound); ok {
+		return true
+	}
+	return false
+}
+
+// releaser manages counting on top of a waitgroup
+type releaser struct {
+	wg   *sync.WaitGroup
+	once *sync.Once
+}
+
+func newReleaser(wg *sync.WaitGroup) *releaser {
+	wg.Add(1)
+	return &releaser{
+		wg:   wg,
+		once: new(sync.Once),
+	}
+}
+
+func (r releaser) Release() {
+	// We use the Once because we may get called multiple times from
+	// Commit() and deferred Release().
+	r.once.Do(func() {
+		r.wg.Done()
+	})
+}
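
The interfaces above imply a usage pattern for bulk writes. Here is a minimal sketch (not part of the commit) of a caller driving a WriteTransaction, using Checkpoint to allow partial flushes of a large in-RAM batch and relying on Release being documented as safe even after a successful Commit:

package example

import "github.com/syncthing/syncthing/lib/db/backend"

// bulkPut writes many key/value pairs in one transaction. Checkpoint gives
// the backend a chance to flush early; the deferred Release is a no-op
// after Commit succeeds, per the WriteTransaction contract above.
func bulkPut(db backend.Backend, kvs map[string][]byte) error {
	tx, err := db.NewWriteTransaction()
	if err != nil {
		return err
	}
	defer tx.Release()

	for k, v := range kvs {
		if err := tx.Put([]byte(k), v); err != nil {
			return err
		}
		if err := tx.Checkpoint(); err != nil {
			return err
		}
	}
	return tx.Commit()
}
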
53	lib/db/backend/backend_test.go	Normal file
@@ -0,0 +1,53 @@
+// Copyright (C) 2019 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package backend
+
+import "testing"
+
+// testBackendBehavior is the generic test suite that must be fulfilled by
+// every backend implementation. It should be called by each implementation
+// as (part of) their test suite.
+func testBackendBehavior(t *testing.T, open func() Backend) {
+	t.Run("WriteIsolation", func(t *testing.T) { testWriteIsolation(t, open) })
+	t.Run("DeleteNonexistent", func(t *testing.T) { testDeleteNonexistent(t, open) })
+}
+
+func testWriteIsolation(t *testing.T, open func() Backend) {
+	// Values written during a transaction should not be read back, our
+	// updateGlobal depends on this.
+
+	db := open()
+	defer db.Close()
+
+	// Sanity check
+	_ = db.Put([]byte("a"), []byte("a"))
+	v, _ := db.Get([]byte("a"))
+	if string(v) != "a" {
+		t.Fatal("read back should work")
+	}
+
+	// Now in a transaction we should still see the old value
+	tx, _ := db.NewWriteTransaction()
+	defer tx.Release()
+	_ = tx.Put([]byte("a"), []byte("b"))
+	v, _ = tx.Get([]byte("a"))
+	if string(v) != "a" {
+		t.Fatal("read in transaction should read the old value")
+	}
+}
+
+func testDeleteNonexistent(t *testing.T, open func() Backend) {
+	// Deleting a non-existent key is not an error
+
+	db := open()
+	defer db.Close()
+
+	err := db.Delete([]byte("a"))
+	if err != nil {
+		t.Error(err)
+	}
+}
15	lib/db/backend/debug.go	Normal file
@@ -0,0 +1,15 @@
+// Copyright (C) 2019 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package backend
+
+import (
+	"github.com/syncthing/syncthing/lib/logger"
+)
+
+var (
+	l = logger.DefaultLogger.NewFacility("backend", "The database backend")
+)
173	lib/db/backend/leveldb_backend.go	Normal file
@@ -0,0 +1,173 @@
+// Copyright (C) 2018 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package backend
+
+import (
+	"sync"
+
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+const (
+	// Never flush transactions smaller than this, even on Checkpoint()
+	dbFlushBatchMin = 1 << MiB
+	// Once a transaction reaches this size, flush it unconditionally.
+	dbFlushBatchMax = 128 << MiB
+)
+
+// leveldbBackend implements Backend on top of a leveldb
+type leveldbBackend struct {
+	ldb     *leveldb.DB
+	closeWG sync.WaitGroup
+}
+
+func (b *leveldbBackend) NewReadTransaction() (ReadTransaction, error) {
+	return b.newSnapshot()
+}
+
+func (b *leveldbBackend) newSnapshot() (leveldbSnapshot, error) {
+	snap, err := b.ldb.GetSnapshot()
+	if err != nil {
+		return leveldbSnapshot{}, wrapLeveldbErr(err)
+	}
+	return leveldbSnapshot{
+		snap: snap,
+		rel:  newReleaser(&b.closeWG),
+	}, nil
+}
+
+func (b *leveldbBackend) NewWriteTransaction() (WriteTransaction, error) {
+	snap, err := b.newSnapshot()
+	if err != nil {
+		return nil, err // already wrapped
+	}
+	return &leveldbTransaction{
+		leveldbSnapshot: snap,
+		ldb:             b.ldb,
+		batch:           new(leveldb.Batch),
+		rel:             newReleaser(&b.closeWG),
+	}, nil
+}
+
+func (b *leveldbBackend) Close() error {
+	b.closeWG.Wait()
+	return wrapLeveldbErr(b.ldb.Close())
+}
+
+func (b *leveldbBackend) Get(key []byte) ([]byte, error) {
+	val, err := b.ldb.Get(key, nil)
+	return val, wrapLeveldbErr(err)
+}
+
+func (b *leveldbBackend) NewPrefixIterator(prefix []byte) (Iterator, error) {
+	return b.ldb.NewIterator(util.BytesPrefix(prefix), nil), nil
+}
+
+func (b *leveldbBackend) NewRangeIterator(first, last []byte) (Iterator, error) {
+	return b.ldb.NewIterator(&util.Range{Start: first, Limit: last}, nil), nil
+}
+
+func (b *leveldbBackend) Put(key, val []byte) error {
+	return wrapLeveldbErr(b.ldb.Put(key, val, nil))
+}
+
+func (b *leveldbBackend) Delete(key []byte) error {
+	return wrapLeveldbErr(b.ldb.Delete(key, nil))
+}
+
+// leveldbSnapshot implements backend.ReadTransaction
+type leveldbSnapshot struct {
+	snap *leveldb.Snapshot
+	rel  *releaser
+}
+
+func (l leveldbSnapshot) Get(key []byte) ([]byte, error) {
+	val, err := l.snap.Get(key, nil)
+	return val, wrapLeveldbErr(err)
+}
+
+func (l leveldbSnapshot) NewPrefixIterator(prefix []byte) (Iterator, error) {
+	return l.snap.NewIterator(util.BytesPrefix(prefix), nil), nil
+}
+
+func (l leveldbSnapshot) NewRangeIterator(first, last []byte) (Iterator, error) {
+	return l.snap.NewIterator(&util.Range{Start: first, Limit: last}, nil), nil
+}
+
+func (l leveldbSnapshot) Release() {
+	l.snap.Release()
+	l.rel.Release()
+}
+
+// leveldbTransaction implements backend.WriteTransaction using a batch (not
+// an actual leveldb transaction)
+type leveldbTransaction struct {
+	leveldbSnapshot
+	ldb   *leveldb.DB
+	batch *leveldb.Batch
+	rel   *releaser
+}
+
+func (t *leveldbTransaction) Delete(key []byte) error {
+	t.batch.Delete(key)
+	return t.checkFlush(dbFlushBatchMax)
+}
+
+func (t *leveldbTransaction) Put(key, val []byte) error {
+	t.batch.Put(key, val)
+	return t.checkFlush(dbFlushBatchMax)
+}
+
+func (t *leveldbTransaction) Checkpoint() error {
+	return t.checkFlush(dbFlushBatchMin)
+}
+
+func (t *leveldbTransaction) Commit() error {
+	err := wrapLeveldbErr(t.flush())
+	t.leveldbSnapshot.Release()
+	t.rel.Release()
+	return err
+}
+
+func (t *leveldbTransaction) Release() {
+	t.leveldbSnapshot.Release()
+	t.rel.Release()
+}
+
+// checkFlush flushes and resets the batch if its size exceeds the given size.
+func (t *leveldbTransaction) checkFlush(size int) error {
+	if len(t.batch.Dump()) < size {
+		return nil
+	}
+	return t.flush()
+}
+
+func (t *leveldbTransaction) flush() error {
+	if t.batch.Len() == 0 {
+		return nil
+	}
+	if err := t.ldb.Write(t.batch, nil); err != nil {
+		return wrapLeveldbErr(err)
+	}
+	t.batch.Reset()
+	return nil
+}
+
+// wrapLeveldbErr wraps errors so that the backend package can recognize them
+func wrapLeveldbErr(err error) error {
+	if err == nil {
+		return nil
+	}
+	if err == leveldb.ErrClosed {
+		return errClosed{}
+	}
+	if err == leveldb.ErrNotFound {
+		return errNotFound{}
+	}
+	return err
+}
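
Because wrapLeveldbErr converts leveldb's sentinel errors into package-local types, callers can classify failures without importing goleveldb at all. A sketch of an assumed caller (not from the commit):

package example

import "github.com/syncthing/syncthing/lib/db/backend"

// getOrDefault treats a missing key as a non-error and returns a default
// value instead; a closed database or any other failure is propagated.
func getOrDefault(db backend.Backend, key, def []byte) ([]byte, error) {
	val, err := db.Get(key)
	if backend.IsNotFound(err) {
		return def, nil
	}
	if err != nil {
		return nil, err // includes errClosed during shutdown
	}
	return val, nil
}
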
226	lib/db/backend/leveldb_open.go	Normal file
@@ -0,0 +1,226 @@
+// Copyright (C) 2018 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package backend
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/errors"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+const (
+	dbMaxOpenFiles = 100
+
+	// A large database is > 200 MiB. It's a mostly arbitrary value, but
+	// it's also the case that each file is 2 MiB by default and when we
+	// have dbMaxOpenFiles of them we will need to start thrashing fd:s.
+	// Switching to large database settings causes larger files to be used
+	// when compacting, reducing the number.
+	dbLargeThreshold = dbMaxOpenFiles * (2 << MiB)
+
+	KiB = 10
+	MiB = 20
+)
+
+// OpenLevelDB attempts to open the database at the given location, and runs
+// recovery on it if opening fails. Worst case, if recovery is not possible,
+// the database is erased and created from scratch.
+func OpenLevelDB(location string, tuning Tuning) (Backend, error) {
+	opts := optsFor(location, tuning)
+	ldb, err := open(location, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &leveldbBackend{ldb: ldb}, nil
+}
+
+// OpenLevelDBRO attempts to open the database at the given location, read only.
+func OpenLevelDBRO(location string) (Backend, error) {
+	opts := &opt.Options{
+		OpenFilesCacheCapacity: dbMaxOpenFiles,
+		ReadOnly:               true,
+	}
+	ldb, err := open(location, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &leveldbBackend{ldb: ldb}, nil
+}
+
+// OpenLevelDBMemory returns a Backend referencing an in-memory database.
+func OpenLevelDBMemory() Backend {
+	ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	return &leveldbBackend{ldb: ldb}
+}
+
+// optsFor returns the database options to use when opening a database with
+// the given location and tuning. Settings can be overridden by debug
+// environment variables.
+func optsFor(location string, tuning Tuning) *opt.Options {
+	large := false
+	switch tuning {
+	case TuningLarge:
+		large = true
+	case TuningAuto:
+		large = dbIsLarge(location)
+	}
+
+	var (
+		// Set defaults used for small databases.
+		defaultBlockCacheCapacity            = 0 // 0 means let leveldb use default
+		defaultBlockSize                     = 0
+		defaultCompactionTableSize           = 0
+		defaultCompactionTableSizeMultiplier = 0
+		defaultWriteBuffer                   = 16 << MiB                     // increased from leveldb default of 4 MiB
+		defaultCompactionL0Trigger           = opt.DefaultCompactionL0Trigger // explicit because we use it as base for other stuff
+	)
+
+	if large {
+		// Change the parameters for better throughput at the price of some
+		// RAM and larger files. This results in larger batches of writes
+		// and compaction at a lower frequency.
+		l.Infoln("Using large-database tuning")
+
+		defaultBlockCacheCapacity = 64 << MiB
+		defaultBlockSize = 64 << KiB
+		defaultCompactionTableSize = 16 << MiB
+		defaultCompactionTableSizeMultiplier = 20 // 2.0 after division by ten
+		defaultWriteBuffer = 64 << MiB
+		defaultCompactionL0Trigger = 8 // number of l0 files
+	}
+
+	opts := &opt.Options{
+		BlockCacheCapacity:            debugEnvValue("BlockCacheCapacity", defaultBlockCacheCapacity),
+		BlockCacheEvictRemoved:        debugEnvValue("BlockCacheEvictRemoved", 0) != 0,
+		BlockRestartInterval:          debugEnvValue("BlockRestartInterval", 0),
+		BlockSize:                     debugEnvValue("BlockSize", defaultBlockSize),
+		CompactionExpandLimitFactor:   debugEnvValue("CompactionExpandLimitFactor", 0),
+		CompactionGPOverlapsFactor:    debugEnvValue("CompactionGPOverlapsFactor", 0),
+		CompactionL0Trigger:           debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger),
+		CompactionSourceLimitFactor:   debugEnvValue("CompactionSourceLimitFactor", 0),
+		CompactionTableSize:           debugEnvValue("CompactionTableSize", defaultCompactionTableSize),
+		CompactionTableSizeMultiplier: float64(debugEnvValue("CompactionTableSizeMultiplier", defaultCompactionTableSizeMultiplier)) / 10.0,
+		CompactionTotalSize:           debugEnvValue("CompactionTotalSize", 0),
+		CompactionTotalSizeMultiplier: float64(debugEnvValue("CompactionTotalSizeMultiplier", 0)) / 10.0,
+		DisableBufferPool:             debugEnvValue("DisableBufferPool", 0) != 0,
+		DisableBlockCache:             debugEnvValue("DisableBlockCache", 0) != 0,
+		DisableCompactionBackoff:      debugEnvValue("DisableCompactionBackoff", 0) != 0,
+		DisableLargeBatchTransaction:  debugEnvValue("DisableLargeBatchTransaction", 0) != 0,
+		NoSync:                        debugEnvValue("NoSync", 0) != 0,
+		NoWriteMerge:                  debugEnvValue("NoWriteMerge", 0) != 0,
+		OpenFilesCacheCapacity:        debugEnvValue("OpenFilesCacheCapacity", dbMaxOpenFiles),
+		WriteBuffer:                   debugEnvValue("WriteBuffer", defaultWriteBuffer),
+		// The write slowdown and pause can be overridden, but even if they
+		// are not and the compaction trigger is overridden we need to
+		// adjust so that we don't pause writes for L0 compaction before we
+		// even *start* L0 compaction...
+		WriteL0SlowdownTrigger: debugEnvValue("WriteL0SlowdownTrigger", 2*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
+		WriteL0PauseTrigger:    debugEnvValue("WriteL0SlowdownTrigger", 3*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
+	}
+
+	return opts
+}
+
+func open(location string, opts *opt.Options) (*leveldb.DB, error) {
+	db, err := leveldb.OpenFile(location, opts)
+	if leveldbIsCorrupted(err) {
+		db, err = leveldb.RecoverFile(location, opts)
+	}
+	if leveldbIsCorrupted(err) {
+		// The database is corrupted, and we've tried to recover it but it
+		// didn't work. At this point there isn't much to do beyond dropping
+		// the database and reindexing...
+		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
+		if err := os.RemoveAll(location); err != nil {
+			return nil, errorSuggestion{err, "failed to delete corrupted database"}
+		}
+		db, err = leveldb.OpenFile(location, opts)
+	}
+	if err != nil {
+		return nil, errorSuggestion{err, "is another instance of Syncthing running?"}
+	}
+
+	if debugEnvValue("CompactEverything", 0) != 0 {
+		if err := db.CompactRange(util.Range{}); err != nil {
+			l.Warnln("Compacting database:", err)
+		}
+	}
+
+	return db, nil
+}
+
+func debugEnvValue(key string, def int) int {
+	v, err := strconv.ParseInt(os.Getenv("STDEBUG_"+key), 10, 63)
+	if err != nil {
+		return def
+	}
+	return int(v)
+}
+
+// A "better" version of leveldb's errors.IsCorrupted.
+func leveldbIsCorrupted(err error) bool {
+	switch {
+	case err == nil:
+		return false
+
+	case errors.IsCorrupted(err):
+		return true
+
+	case strings.Contains(err.Error(), "corrupted"):
+		return true
+	}
+
+	return false
+}
+
+// dbIsLarge returns whether the estimated size of the database at location
+// is large enough to warrant optimization for large databases.
+func dbIsLarge(location string) bool {
+	if ^uint(0)>>63 == 0 {
+		// We're compiled for a 32 bit architecture. We've seen trouble with
+		// large settings there.
+		// (https://forum.syncthing.net/t/many-small-ldb-files-with-database-tuning/13842)
+		return false
+	}
+
+	dir, err := os.Open(location)
+	if err != nil {
+		return false
+	}
+
+	fis, err := dir.Readdir(-1)
+	if err != nil {
+		return false
+	}
+
+	var size int64
+	for _, fi := range fis {
+		if fi.Name() == "LOG" {
+			// don't count the size
+			continue
+		}
+		size += fi.Size()
+	}
+
+	return size > dbLargeThreshold
+}
+
+type errorSuggestion struct {
+	inner      error
+	suggestion string
+}
+
+func (e errorSuggestion) Error() string {
+	return fmt.Sprintf("%s (%s)", e.inner.Error(), e.suggestion)
+}
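
Note that KiB and MiB in this file are shift amounts (10 and 20), not byte counts, so 64 << MiB means 64 * 2^20 bytes; and each option can be overridden at runtime through an STDEBUG_-prefixed environment variable (for example STDEBUG_WriteBuffer=33554432) via debugEnvValue. A standalone sketch of the arithmetic, mirroring the constants above:

package main

import "fmt"

// Shift amounts, as in leveldb_open.go: x << MiB is x * 2^20.
const (
	KiB = 10
	MiB = 20
)

func main() {
	fmt.Println(64 << KiB)        // 65536 bytes, i.e. 64 KiB
	fmt.Println(64 << MiB)        // 67108864 bytes, i.e. 64 MiB
	fmt.Println(100 * (2 << MiB)) // 209715200, the ~200 MiB dbLargeThreshold
}
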
13	lib/db/backend/leveldb_test.go	Normal file
@@ -0,0 +1,13 @@
+// Copyright (C) 2019 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package backend
+
+import "testing"
+
+func TestLevelDBBackendBehavior(t *testing.T) {
+	testBackendBehavior(t, OpenLevelDBMemory)
+}
@@ -11,6 +11,7 @@ import (
 	"testing"
 
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -40,7 +41,7 @@ func lazyInitBenchFiles() {
 func getBenchFileSet() (*db.Lowlevel, *db.FileSet) {
 	lazyInitBenchFiles()
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	benchS := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	replace(benchS, remoteDevice0, files)
 	replace(benchS, protocol.LocalDeviceID, firstHalf)
@@ -49,7 +50,7 @@ func getBenchFileSet() (*db.Lowlevel, *db.FileSet) {
 }
 
 func BenchmarkReplaceAll(b *testing.B) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	defer ldb.Close()
 
 	b.ResetTimer()
@@ -157,7 +158,7 @@ func BenchmarkNeedHalf(b *testing.B) {
 }
 
 func BenchmarkNeedHalfRemote(b *testing.B) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	defer ldb.Close()
 	fset := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	replace(fset, remoteDevice0, firstHalf)
@@ -11,8 +11,6 @@ import (
 	"fmt"
 
 	"github.com/syncthing/syncthing/lib/osutil"
-
-	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 var blockFinder *BlockFinder
@@ -41,13 +39,22 @@ func (f *BlockFinder) String() string {
 // reason. The iterator finally returns the result, whether or not a
 // satisfying block was eventually found.
 func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
-	t := f.db.newReadOnlyTransaction()
+	t, err := f.db.newReadOnlyTransaction()
+	if err != nil {
+		return false
+	}
 	defer t.close()
 
 	var key []byte
 	for _, folder := range folders {
-		key = f.db.keyer.GenerateBlockMapKey(key, []byte(folder), hash, nil)
-		iter := t.NewIterator(util.BytesPrefix(key), nil)
+		key, err = f.db.keyer.GenerateBlockMapKey(key, []byte(folder), hash, nil)
+		if err != nil {
+			return false
+		}
+		iter, err := t.NewPrefixIterator(key)
+		if err != nil {
+			return false
+		}
 
 		for iter.Next() && iter.Error() == nil {
 			file := string(f.db.keyer.NameFromBlockMapKey(iter.Key()))
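
Iterate now swallows database errors and simply reports "not found", which fits its boolean contract. For completeness, here is a sketch (not from the commit) of the release-ordering rule the backend package documents: an iterator opened inside a transaction must be released before the transaction itself.

package example

import "github.com/syncthing/syncthing/lib/db/backend"

// countKeys counts keys under a prefix inside a read transaction. Deferred
// calls run LIFO, so the iterator is released before the transaction, as
// the backend package requires.
func countKeys(db backend.Backend, prefix []byte) (int, error) {
	tx, err := db.NewReadTransaction()
	if err != nil {
		return 0, err
	}
	defer tx.Release()

	it, err := tx.NewPrefixIterator(prefix)
	if err != nil {
		return 0, err
	}
	defer it.Release()

	n := 0
	for it.Next() {
		n++
	}
	return n, it.Error()
}
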
@@ -10,23 +10,10 @@ import (
 	"encoding/binary"
 	"testing"
 
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-func genBlocks(n int) []protocol.BlockInfo {
-	b := make([]protocol.BlockInfo, n)
-	for i := range b {
-		h := make([]byte, 32)
-		for j := range h {
-			h[j] = byte(i + j)
-		}
-		b[i].Size = int32(i)
-		b[i].Hash = h
-	}
-	return b
-}
-
 var f1, f2, f3 protocol.FileInfo
 var folders = []string{"folder1", "folder2"}
 
@@ -52,18 +39,24 @@ func init() {
 func setup() (*instance, *BlockFinder) {
 	// Setup
 
-	db := OpenMemory()
+	db := NewLowlevel(backend.OpenMemory())
 	return newInstance(db), NewBlockFinder(db)
 }
 
 func dbEmpty(db *instance) bool {
-	iter := db.NewIterator(util.BytesPrefix([]byte{KeyTypeBlock}), nil)
+	iter, err := db.NewPrefixIterator([]byte{KeyTypeBlock})
+	if err != nil {
+		panic(err)
+	}
 	defer iter.Release()
 	return !iter.Next()
}
 
-func addToBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) {
-	t := db.newReadWriteTransaction()
+func addToBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()
 
 	var keyBuf []byte
@@ -73,15 +66,24 @@ func addToBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) error {
 		name := []byte(f.Name)
 		for i, block := range f.Blocks {
 			binary.BigEndian.PutUint32(blockBuf, uint32(i))
-			keyBuf = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
-			t.Put(keyBuf, blockBuf)
+			keyBuf, err = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
+			if err != nil {
+				return err
+			}
+			if err := t.Put(keyBuf, blockBuf); err != nil {
+				return err
+			}
 		}
 	}
+	return t.commit()
 }
 
-func discardFromBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) {
-	t := db.newReadWriteTransaction()
+func discardFromBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()
 
 	var keyBuf []byte
@@ -89,11 +91,17 @@ func discardFromBlockMap(db *instance, folder []byte, fs []protocol.FileInfo) error {
 	if !ef.IsDirectory() && !ef.IsDeleted() && !ef.IsInvalid() {
 			name := []byte(ef.Name)
 			for _, block := range ef.Blocks {
-				keyBuf = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
-				t.Delete(keyBuf)
+				keyBuf, err = t.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
+				if err != nil {
+					return err
+				}
+				if err := t.Delete(keyBuf); err != nil {
+					return err
+				}
 			}
 		}
 	}
+	return t.commit()
 }
 
 func TestBlockMapAddUpdateWipe(t *testing.T) {
@@ -107,7 +115,9 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
 
 	f3.Type = protocol.FileInfoTypeDirectory
 
-	addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3})
+	if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil {
+		t.Fatal(err)
+	}
 
 	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
 		if folder != "folder1" || file != "f1" || index != 0 {
@@ -128,12 +138,16 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
 		return true
 	})
 
-	discardFromBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3})
+	if err := discardFromBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil {
+		t.Fatal(err)
+	}
 
 	f1.Deleted = true
 	f2.LocalFlags = protocol.FlagLocalMustRescan // one of the invalid markers
 
-	addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3})
+	if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2, f3}); err != nil {
+		t.Fatal(err)
+	}
 
 	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
 		t.Fatal("Unexpected block")
@@ -152,14 +166,18 @@ func TestBlockMapAddUpdateWipe(t *testing.T) {
 		return true
 	})
 
-	db.dropFolder(folder)
+	if err := db.dropFolder(folder); err != nil {
+		t.Fatal(err)
+	}
 
 	if !dbEmpty(db) {
 		t.Fatal("db not empty")
 	}
 
 	// Should not add
-	addToBlockMap(db, folder, []protocol.FileInfo{f1, f2})
+	if err := addToBlockMap(db, folder, []protocol.FileInfo{f1, f2}); err != nil {
+		t.Fatal(err)
+	}
 
 	if !dbEmpty(db) {
 		t.Fatal("db not empty")
@@ -179,8 +197,12 @@ func TestBlockFinderLookup(t *testing.T) {
 	folder1 := []byte("folder1")
 	folder2 := []byte("folder2")
 
-	addToBlockMap(db, folder1, []protocol.FileInfo{f1})
-	addToBlockMap(db, folder2, []protocol.FileInfo{f1})
+	if err := addToBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil {
+		t.Fatal(err)
+	}
+	if err := addToBlockMap(db, folder2, []protocol.FileInfo{f1}); err != nil {
+		t.Fatal(err)
+	}
 
 	counter := 0
 	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
@@ -204,11 +226,15 @@ func TestBlockFinderLookup(t *testing.T) {
 		t.Fatal("Incorrect count", counter)
 	}
 
-	discardFromBlockMap(db, folder1, []protocol.FileInfo{f1})
+	if err := discardFromBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil {
+		t.Fatal(err)
+	}
 
 	f1.Deleted = true
 
-	addToBlockMap(db, folder1, []protocol.FileInfo{f1})
+	if err := addToBlockMap(db, folder1, []protocol.FileInfo{f1}); err != nil {
+		t.Fatal(err)
+	}
 
 	counter = 0
 	f.Iterate(folders, f1.Blocks[0].Hash, func(folder, file string, index int32) bool {
@@ -1,237 +0,0 @@
-// Copyright (C) 2014 The Syncthing Authors.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at https://mozilla.org/MPL/2.0/.
-
-// this is a really tedious test for an old issue
-// +build ignore
-
-package db_test
-
-import (
-	"crypto/rand"
-	"log"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/syncthing/syncthing/lib/sync"
-
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/opt"
-	"github.com/syndtr/goleveldb/leveldb/util"
-)
-
-var keys [][]byte
-
-func init() {
-	for i := 0; i < nItems; i++ {
-		keys = append(keys, randomData(1))
-	}
-}
-
-const nItems = 10000
-
-func randomData(prefix byte) []byte {
-	data := make([]byte, 1+32+64+32)
-	_, err := rand.Reader.Read(data)
-	if err != nil {
-		panic(err)
-	}
-	return append([]byte{prefix}, data...)
-}
-
-func setItems(db *leveldb.DB) error {
-	batch := new(leveldb.Batch)
-	for _, k1 := range keys {
-		k2 := randomData(2)
-		// k2 -> data
-		batch.Put(k2, randomData(42))
-		// k1 -> k2
-		batch.Put(k1, k2)
-	}
-	if testing.Verbose() {
-		log.Printf("batch write (set) %p", batch)
-	}
-	return db.Write(batch, nil)
-}
-
-func clearItems(db *leveldb.DB) error {
-	snap, err := db.GetSnapshot()
-	if err != nil {
-		return err
-	}
-	defer snap.Release()
-
-	// Iterate over k2
-
-	it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
-	defer it.Release()
-
-	batch := new(leveldb.Batch)
-	for it.Next() {
-		k1 := it.Key()
-		k2 := it.Value()
-
-		// k2 should exist
-		_, err := snap.Get(k2, nil)
-		if err != nil {
-			return err
-		}
-
-		// Delete the k1 => k2 mapping first
-		batch.Delete(k1)
-		// Then the k2 => data mapping
-		batch.Delete(k2)
-	}
-	if testing.Verbose() {
-		log.Printf("batch write (clear) %p", batch)
-	}
-	return db.Write(batch, nil)
-}
-
-func scanItems(db *leveldb.DB) error {
-	snap, err := db.GetSnapshot()
-	if testing.Verbose() {
-		log.Printf("snap create %p", snap)
-	}
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if testing.Verbose() {
-			log.Printf("snap release %p", snap)
-		}
-		snap.Release()
-	}()
-
-	// Iterate from the start of k2 space to the end
-	it := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
-	defer it.Release()
-
-	i := 0
-	for it.Next() {
-		// k2 => k1 => data
-		k1 := it.Key()
-		k2 := it.Value()
-		_, err := snap.Get(k2, nil)
-		if err != nil {
-			log.Printf("k1: %x", k1)
-			log.Printf("k2: %x (missing)", k2)
-			return err
-		}
-		i++
-	}
-	if testing.Verbose() {
-		log.Println("scanned", i)
-	}
-	return nil
-}
-
-func TestConcurrentSetClear(t *testing.T) {
-	if testing.Short() {
-		return
-	}
-
-	dur := 30 * time.Second
-	t0 := time.Now()
-	wg := sync.NewWaitGroup()
-
-	os.RemoveAll("testdata/concurrent-set-clear.db")
-	db, err := leveldb.OpenFile("testdata/concurrent-set-clear.db", &opt.Options{OpenFilesCacheCapacity: 10})
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll("testdata/concurrent-set-clear.db")
-
-	errChan := make(chan error, 3)
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for time.Since(t0) < dur {
-			if err := setItems(db); err != nil {
-				errChan <- err
-				return
-			}
-			if err := clearItems(db); err != nil {
-				errChan <- err
-				return
-			}
-		}
-	}()
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for time.Since(t0) < dur {
-			if err := scanItems(db); err != nil {
-				errChan <- err
-				return
-			}
-		}
-	}()
-
-	go func() {
-		wg.Wait()
-		errChan <- nil
-	}()
-
-	err = <-errChan
-	if err != nil {
-		t.Error(err)
-	}
-	db.Close()
-}
-
-func TestConcurrentSetOnly(t *testing.T) {
-	if testing.Short() {
-		return
-	}
-
-	dur := 30 * time.Second
-	t0 := time.Now()
-	wg := sync.NewWaitGroup()
-
-	os.RemoveAll("testdata/concurrent-set-only.db")
-	db, err := leveldb.OpenFile("testdata/concurrent-set-only.db", &opt.Options{OpenFilesCacheCapacity: 10})
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll("testdata/concurrent-set-only.db")
-
-	errChan := make(chan error, 3)
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for time.Since(t0) < dur {
-			if err := setItems(db); err != nil {
-				errChan <- err
-				return
-			}
-		}
-	}()
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for time.Since(t0) < dur {
-			if err := scanItems(db); err != nil {
-				errChan <- err
-				return
-			}
-		}
-	}()
-
-	go func() {
-		wg.Wait()
-		errChan <- nil
-	}()
-
-	err = <-errChan
-	if err != nil {
-		t.Error(err)
-	}
-}
@@ -9,17 +9,33 @@ package db
 import (
 	"testing"
 
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
 
+func genBlocks(n int) []protocol.BlockInfo {
+	b := make([]protocol.BlockInfo, n)
+	for i := range b {
+		h := make([]byte, 32)
+		for j := range h {
+			h[j] = byte(i + j)
+		}
+		b[i].Size = int32(i)
+		b[i].Hash = h
+	}
+	return b
+}
+
 func TestIgnoredFiles(t *testing.T) {
 	ldb, err := openJSONS("testdata/v0.14.48-ignoredfiles.db.jsons")
 	if err != nil {
 		t.Fatal(err)
 	}
-	db := NewLowlevel(ldb, "<memory>")
-	UpdateSchema(db)
+	db := NewLowlevel(ldb)
+	if err := UpdateSchema(db); err != nil {
+		t.Fatal(err)
+	}
 
 	fs := NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), db)
 
@@ -142,25 +158,35 @@ func TestUpdate0to3(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	db := newInstance(NewLowlevel(ldb, "<memory>"))
+	db := newInstance(NewLowlevel(ldb))
 	updater := schemaUpdater{db}
 
 	folder := []byte(update0to3Folder)
 
-	updater.updateSchema0to1()
+	if err := updater.updateSchema0to1(); err != nil {
+		t.Fatal(err)
+	}
 
-	if _, ok := db.getFileDirty(folder, protocol.LocalDeviceID[:], []byte(slashPrefixed)); ok {
+	if _, ok, err := db.getFileDirty(folder, protocol.LocalDeviceID[:], []byte(slashPrefixed)); err != nil {
+		t.Fatal(err)
+	} else if ok {
 		t.Error("File prefixed by '/' was not removed during transition to schema 1")
 	}
 
-	if _, err := db.Get(db.keyer.GenerateGlobalVersionKey(nil, folder, []byte(invalid)), nil); err != nil {
+	key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, []byte(invalid))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := db.Get(key); err != nil {
 		t.Error("Invalid file wasn't added to global list")
 	}
 
-	updater.updateSchema1to2()
+	if err := updater.updateSchema1to2(); err != nil {
+		t.Fatal(err)
+	}
 
 	found := false
-	db.withHaveSequence(folder, 0, func(fi FileIntf) bool {
+	_ = db.withHaveSequence(folder, 0, func(fi FileIntf) bool {
 		f := fi.(protocol.FileInfo)
 		l.Infoln(f)
 		if found {
@@ -178,14 +204,16 @@ func TestUpdate0to3(t *testing.T) {
 		t.Error("Local file wasn't added to sequence bucket", err)
 	}
 
-	updater.updateSchema2to3()
+	if err := updater.updateSchema2to3(); err != nil {
+		t.Fatal(err)
+	}
 
 	need := map[string]protocol.FileInfo{
 		haveUpdate0to3[remoteDevice0][0].Name: haveUpdate0to3[remoteDevice0][0],
 		haveUpdate0to3[remoteDevice1][0].Name: haveUpdate0to3[remoteDevice1][0],
 		haveUpdate0to3[remoteDevice0][2].Name: haveUpdate0to3[remoteDevice0][2],
 	}
-	db.withNeed(folder, protocol.LocalDeviceID[:], false, func(fi FileIntf) bool {
+	_ = db.withNeed(folder, protocol.LocalDeviceID[:], false, func(fi FileIntf) bool {
 		e, ok := need[fi.FileName()]
 		if !ok {
 			t.Error("Got unexpected needed file:", fi.FileName())
@@ -203,12 +231,17 @@ func TestUpdate0to3(t *testing.T) {
 }
 
 func TestDowngrade(t *testing.T) {
-	db := OpenMemory()
-	UpdateSchema(db) // sets the min version etc
+	db := NewLowlevel(backend.OpenMemory())
+	// sets the min version etc
+	if err := UpdateSchema(db); err != nil {
+		t.Fatal(err)
+	}
 
 	// Bump the database version to something newer than we actually support
 	miscDB := NewMiscDataNamespace(db)
-	miscDB.PutInt64("dbVersion", dbVersion+1)
+	if err := miscDB.PutInt64("dbVersion", dbVersion+1); err != nil {
+		t.Fatal(err)
+	}
 	l.Infoln(dbVersion)
 
 	// Pretend we just opened the DB and attempt to update it again
@@ -9,11 +9,9 @@ package db
 import (
 	"bytes"
 	"encoding/binary"
-	"fmt"
 
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 type instance struct {
@@ -30,17 +28,26 @@ func newInstance(ll *Lowlevel) *instance {
 
 // updateRemoteFiles adds a list of fileinfos to the database and updates the
 // global versionlist and metadata.
-func (db *instance) updateRemoteFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) {
-	t := db.newReadWriteTransaction()
+func (db *instance) updateRemoteFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()
 
 	var dk, gk, keyBuf []byte
 	devID := protocol.DeviceIDFromBytes(device)
 	for _, f := range fs {
 		name := []byte(f.Name)
-		dk = db.keyer.GenerateDeviceFileKey(dk, folder, device, name)
+		dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, device, name)
+		if err != nil {
+			return err
+		}
 
-		ef, ok := t.getFileTrunc(dk, true)
+		ef, ok, err := t.getFileTrunc(dk, true)
+		if err != nil {
+			return err
+		}
 		if ok && unchanged(f, ef) {
 			continue
 		}
@@ -51,28 +58,49 @@ func (db *instance) updateRemoteFiles(folder, device []byte, fs []protocol.FileInfo, meta *metadataTracker) error {
 		meta.addFile(devID, f)
 
 		l.Debugf("insert; folder=%q device=%v %v", folder, devID, f)
-		t.Put(dk, mustMarshal(&f))
-
-		gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
-		keyBuf, _ = t.updateGlobal(gk, keyBuf, folder, device, f, meta)
-
-		t.checkFlush()
-	}
+		if err := t.Put(dk, mustMarshal(&f)); err != nil {
+			return err
+		}
+
+		gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
+		if err != nil {
+			return err
+		}
+		keyBuf, _, err = t.updateGlobal(gk, keyBuf, folder, device, f, meta)
+		if err != nil {
+			return err
+		}
+
+		if err := t.Checkpoint(); err != nil {
+			return err
+		}
+	}
+
+	return t.commit()
 }
 
 // updateLocalFiles adds fileinfos to the db, and updates the global versionlist,
 // metadata, sequence and blockmap buckets.
-func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta *metadataTracker) {
-	t := db.newReadWriteTransaction()
+func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta *metadataTracker) error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()
 
 	var dk, gk, keyBuf []byte
 	blockBuf := make([]byte, 4)
 	for _, f := range fs {
|
||||||
name := []byte(f.Name)
|
name := []byte(f.Name)
|
||||||
dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name)
|
dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
ef, ok := t.getFileByKey(dk)
|
ef, ok, err := t.getFileByKey(dk)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if ok && unchanged(f, ef) {
|
if ok && unchanged(f, ef) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -80,13 +108,23 @@ func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta
|
|||||||
if ok {
|
if ok {
|
||||||
if !ef.IsDirectory() && !ef.IsDeleted() && !ef.IsInvalid() {
|
if !ef.IsDirectory() && !ef.IsDeleted() && !ef.IsInvalid() {
|
||||||
for _, block := range ef.Blocks {
|
for _, block := range ef.Blocks {
|
||||||
keyBuf = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
|
keyBuf, err = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
|
||||||
t.Delete(keyBuf)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.Delete(keyBuf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
keyBuf = db.keyer.GenerateSequenceKey(keyBuf, folder, ef.SequenceNo())
|
keyBuf, err = db.keyer.GenerateSequenceKey(keyBuf, folder, ef.SequenceNo())
|
||||||
t.Delete(keyBuf)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.Delete(keyBuf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
l.Debugf("removing sequence; folder=%q sequence=%v %v", folder, ef.SequenceNo(), ef.FileName())
|
l.Debugf("removing sequence; folder=%q sequence=%v %v", folder, ef.SequenceNo(), ef.FileName())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -98,29 +136,54 @@ func (db *instance) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta
|
|||||||
meta.addFile(protocol.LocalDeviceID, f)
|
meta.addFile(protocol.LocalDeviceID, f)
|
||||||
|
|
||||||
l.Debugf("insert (local); folder=%q %v", folder, f)
|
l.Debugf("insert (local); folder=%q %v", folder, f)
|
||||||
t.Put(dk, mustMarshal(&f))
|
if err := t.Put(dk, mustMarshal(&f)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
gk = db.keyer.GenerateGlobalVersionKey(gk, folder, []byte(f.Name))
|
gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, []byte(f.Name))
|
||||||
keyBuf, _ = t.updateGlobal(gk, keyBuf, folder, protocol.LocalDeviceID[:], f, meta)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
keyBuf, _, err = t.updateGlobal(gk, keyBuf, folder, protocol.LocalDeviceID[:], f, meta)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
keyBuf = db.keyer.GenerateSequenceKey(keyBuf, folder, f.Sequence)
|
keyBuf, err = db.keyer.GenerateSequenceKey(keyBuf, folder, f.Sequence)
|
||||||
t.Put(keyBuf, dk)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.Put(keyBuf, dk); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
l.Debugf("adding sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name)
|
l.Debugf("adding sequence; folder=%q sequence=%v %v", folder, f.Sequence, f.Name)
|
||||||
|
|
||||||
if !f.IsDirectory() && !f.IsDeleted() && !f.IsInvalid() {
|
if !f.IsDirectory() && !f.IsDeleted() && !f.IsInvalid() {
|
||||||
for i, block := range f.Blocks {
|
for i, block := range f.Blocks {
|
||||||
binary.BigEndian.PutUint32(blockBuf, uint32(i))
|
binary.BigEndian.PutUint32(blockBuf, uint32(i))
|
||||||
keyBuf = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
|
keyBuf, err = db.keyer.GenerateBlockMapKey(keyBuf, folder, block.Hash, name)
|
||||||
t.Put(keyBuf, blockBuf)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.Put(keyBuf, blockBuf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
t.checkFlush()
|
if err := t.Checkpoint(); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) {
|
func (db *instance) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) error {
|
||||||
t := db.newReadOnlyTransaction()
|
t, err := db.newReadOnlyTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
if len(prefix) > 0 {
|
if len(prefix) > 0 {
|
||||||
@ -131,18 +194,31 @@ func (db *instance) withHave(folder, device, prefix []byte, truncate bool, fn It
|
|||||||
prefix = append(prefix, '/')
|
prefix = append(prefix, '/')
|
||||||
}
|
}
|
||||||
|
|
||||||
if f, ok := t.getFileTrunc(db.keyer.GenerateDeviceFileKey(nil, folder, device, unslashedPrefix), true); ok && !fn(f) {
|
key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, unslashedPrefix)
|
||||||
return
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if f, ok, err := t.getFileTrunc(key, true); err != nil {
|
||||||
|
return err
|
||||||
|
} else if ok && !fn(f) {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateDeviceFileKey(nil, folder, device, prefix)), nil)
|
key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, prefix)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewPrefixIterator(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
|
|
||||||
for dbi.Next() {
|
for dbi.Next() {
|
||||||
name := db.keyer.NameFromDeviceFileKey(dbi.Key())
|
name := db.keyer.NameFromDeviceFileKey(dbi.Key())
|
||||||
if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
|
if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
f, err := unmarshalTrunc(dbi.Value(), truncate)
|
f, err := unmarshalTrunc(dbi.Value(), truncate)
|
||||||
@ -151,20 +227,38 @@ func (db *instance) withHave(folder, device, prefix []byte, truncate bool, fn It
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !fn(f) {
|
if !fn(f) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return dbi.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator) {
|
func (db *instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator) error {
|
||||||
t := db.newReadOnlyTransaction()
|
t, err := db.newReadOnlyTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
dbi := t.NewIterator(&util.Range{Start: db.keyer.GenerateSequenceKey(nil, folder, startSeq), Limit: db.keyer.GenerateSequenceKey(nil, folder, maxInt64)}, nil)
|
first, err := db.keyer.GenerateSequenceKey(nil, folder, startSeq)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
last, err := db.keyer.GenerateSequenceKey(nil, folder, maxInt64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewRangeIterator(first, last)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
|
|
||||||
for dbi.Next() {
|
for dbi.Next() {
|
||||||
f, ok := t.getFileByKey(dbi.Value())
|
f, ok, err := t.getFileByKey(dbi.Value())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
l.Debugln("missing file for sequence number", db.keyer.SequenceFromSequenceKey(dbi.Key()))
|
l.Debugln("missing file for sequence number", db.keyer.SequenceFromSequenceKey(dbi.Key()))
|
||||||
continue
|
continue
|
||||||
@ -177,16 +271,27 @@ func (db *instance) withHaveSequence(folder []byte, startSeq int64, fn Iterator)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !fn(f) {
|
if !fn(f) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return dbi.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
|
func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) error {
|
||||||
t := db.newReadWriteTransaction()
|
t, err := db.newReadWriteTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil).WithoutNameAndDevice()), nil)
|
key, err := db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewPrefixIterator(key.WithoutNameAndDevice())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
|
|
||||||
var gk, keyBuf []byte
|
var gk, keyBuf []byte
|
||||||
@ -194,8 +299,9 @@ func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte,
|
|||||||
device, ok := db.keyer.DeviceFromDeviceFileKey(dbi.Key())
|
device, ok := db.keyer.DeviceFromDeviceFileKey(dbi.Key())
|
||||||
if !ok {
|
if !ok {
|
||||||
// Not having the device in the index is bad. Clear it.
|
// Not having the device in the index is bad. Clear it.
|
||||||
t.Delete(dbi.Key())
|
if err := t.Delete(dbi.Key()); err != nil {
|
||||||
t.checkFlush()
|
return err
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
var f FileInfoTruncated
|
var f FileInfoTruncated
|
||||||
@ -205,42 +311,61 @@ func (db *instance) withAllFolderTruncated(folder []byte, fn func(device []byte,
|
|||||||
// we need to copy it.
|
// we need to copy it.
|
||||||
err := f.Unmarshal(append([]byte{}, dbi.Value()...))
|
err := f.Unmarshal(append([]byte{}, dbi.Value()...))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugln("unmarshal error:", err)
|
return err
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
|
|
||||||
switch f.Name {
|
switch f.Name {
|
||||||
case "", ".", "..", "/": // A few obviously invalid filenames
|
case "", ".", "..", "/": // A few obviously invalid filenames
|
||||||
l.Infof("Dropping invalid filename %q from database", f.Name)
|
l.Infof("Dropping invalid filename %q from database", f.Name)
|
||||||
name := []byte(f.Name)
|
name := []byte(f.Name)
|
||||||
gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
|
gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
|
||||||
keyBuf = t.removeFromGlobal(gk, keyBuf, folder, device, name, nil)
|
if err != nil {
|
||||||
t.Delete(dbi.Key())
|
return err
|
||||||
t.checkFlush()
|
}
|
||||||
|
keyBuf, err = t.removeFromGlobal(gk, keyBuf, folder, device, name, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.Delete(dbi.Key()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if !fn(device, f) {
|
if !fn(device, f) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if err := dbi.Error(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return t.commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) getFileDirty(folder, device, file []byte) (protocol.FileInfo, bool) {
|
func (db *instance) getFileDirty(folder, device, file []byte) (protocol.FileInfo, bool, error) {
|
||||||
t := db.newReadOnlyTransaction()
|
t, err := db.newReadOnlyTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return protocol.FileInfo{}, false, err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
return t.getFile(folder, device, file)
|
return t.getFile(folder, device, file)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) getGlobalDirty(folder, file []byte, truncate bool) (FileIntf, bool) {
|
func (db *instance) getGlobalDirty(folder, file []byte, truncate bool) (FileIntf, bool, error) {
|
||||||
t := db.newReadOnlyTransaction()
|
t, err := db.newReadOnlyTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return nil, false, err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
_, f, ok := t.getGlobal(nil, folder, file, truncate)
|
_, f, ok, err := t.getGlobal(nil, folder, file, truncate)
|
||||||
return f, ok
|
return f, ok, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) {
|
func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) error {
|
||||||
t := db.newReadOnlyTransaction()
|
t, err := db.newReadOnlyTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
if len(prefix) > 0 {
|
if len(prefix) > 0 {
|
||||||
@ -251,19 +376,28 @@ func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator
|
|||||||
prefix = append(prefix, '/')
|
prefix = append(prefix, '/')
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, f, ok := t.getGlobal(nil, folder, unslashedPrefix, truncate); ok && !fn(f) {
|
if _, f, ok, err := t.getGlobal(nil, folder, unslashedPrefix, truncate); err != nil {
|
||||||
return
|
return err
|
||||||
|
} else if ok && !fn(f) {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateGlobalVersionKey(nil, folder, prefix)), nil)
|
key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, prefix)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewPrefixIterator(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
|
|
||||||
var dk []byte
|
var dk []byte
|
||||||
for dbi.Next() {
|
for dbi.Next() {
|
||||||
name := db.keyer.NameFromGlobalVersionKey(dbi.Key())
|
name := db.keyer.NameFromGlobalVersionKey(dbi.Key())
|
||||||
if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
|
if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
vl, ok := unmarshalVersionList(dbi.Value())
|
vl, ok := unmarshalVersionList(dbi.Value())
|
||||||
@ -271,33 +405,45 @@ func (db *instance) withGlobal(folder, prefix []byte, truncate bool, fn Iterator
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
dk = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name)
|
dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
f, ok := t.getFileTrunc(dk, truncate)
|
f, ok, err := t.getFileTrunc(dk, truncate)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if !fn(f) {
|
if !fn(f) {
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *instance) availability(folder, file []byte) []protocol.DeviceID {
|
|
||||||
k := db.keyer.GenerateGlobalVersionKey(nil, folder, file)
|
|
||||||
bs, err := db.Get(k, nil)
|
|
||||||
if err == leveldb.ErrNotFound {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugln("surprise error:", err)
|
return err
|
||||||
return nil
|
}
|
||||||
|
return dbi.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (db *instance) availability(folder, file []byte) ([]protocol.DeviceID, error) {
|
||||||
|
k, err := db.keyer.GenerateGlobalVersionKey(nil, folder, file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
bs, err := db.Get(k)
|
||||||
|
if backend.IsNotFound(err) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
vl, ok := unmarshalVersionList(bs)
|
vl, ok := unmarshalVersionList(bs)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var devices []protocol.DeviceID
|
var devices []protocol.DeviceID
|
||||||
@ -312,19 +458,28 @@ func (db *instance) availability(folder, file []byte) []protocol.DeviceID {
|
|||||||
devices = append(devices, n)
|
devices = append(devices, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
return devices
|
return devices, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator) {
|
func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator) error {
|
||||||
if bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
if bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
||||||
db.withNeedLocal(folder, truncate, fn)
|
return db.withNeedLocal(folder, truncate, fn)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
t := db.newReadOnlyTransaction()
|
t, err := db.newReadOnlyTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateGlobalVersionKey(nil, folder, nil).WithoutName()), nil)
|
key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewPrefixIterator(key.WithoutName())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
|
|
||||||
var dk []byte
|
var dk []byte
|
||||||
@ -358,8 +513,14 @@ func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator)
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
dk = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[i].Device, name)
|
dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[i].Device, name)
|
||||||
gf, ok := t.getFileTrunc(dk, truncate)
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
gf, ok, err := t.getFileTrunc(dk, truncate)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -372,81 +533,171 @@ func (db *instance) withNeed(folder, device []byte, truncate bool, fn Iterator)
|
|||||||
l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, devID, name, have, haveFV.Invalid, haveFV.Version, needVersion, needDevice)
|
l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, devID, name, have, haveFV.Invalid, haveFV.Version, needVersion, needDevice)
|
||||||
|
|
||||||
if !fn(gf) {
|
if !fn(gf) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// This file is handled, no need to look further in the version list
|
// This file is handled, no need to look further in the version list
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return dbi.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) withNeedLocal(folder []byte, truncate bool, fn Iterator) {
|
func (db *instance) withNeedLocal(folder []byte, truncate bool, fn Iterator) error {
|
||||||
t := db.newReadOnlyTransaction()
|
t, err := db.newReadOnlyTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateNeedFileKey(nil, folder, nil).WithoutName()), nil)
|
key, err := db.keyer.GenerateNeedFileKey(nil, folder, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewPrefixIterator(key.WithoutName())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
|
|
||||||
var keyBuf []byte
|
var keyBuf []byte
|
||||||
var f FileIntf
|
var f FileIntf
|
||||||
var ok bool
|
var ok bool
|
||||||
for dbi.Next() {
|
for dbi.Next() {
|
||||||
keyBuf, f, ok = t.getGlobal(keyBuf, folder, db.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate)
|
keyBuf, f, ok, err = t.getGlobal(keyBuf, folder, db.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !fn(f) {
|
if !fn(f) {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return dbi.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) dropFolder(folder []byte) {
|
func (db *instance) dropFolder(folder []byte) error {
|
||||||
t := db.newReadWriteTransaction()
|
t, err := db.newReadWriteTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
for _, key := range [][]byte{
|
|
||||||
// Remove all items related to the given folder from the device->file bucket
|
// Remove all items related to the given folder from the device->file bucket
|
||||||
db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil).WithoutNameAndDevice(),
|
k0, err := db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil)
|
||||||
// Remove all sequences related to the folder
|
if err != nil {
|
||||||
db.keyer.GenerateSequenceKey(nil, []byte(folder), 0).WithoutSequence(),
|
return err
|
||||||
// Remove all items related to the given folder from the global bucket
|
|
||||||
db.keyer.GenerateGlobalVersionKey(nil, folder, nil).WithoutName(),
|
|
||||||
// Remove all needs related to the folder
|
|
||||||
db.keyer.GenerateNeedFileKey(nil, folder, nil).WithoutName(),
|
|
||||||
// Remove the blockmap of the folder
|
|
||||||
db.keyer.GenerateBlockMapKey(nil, folder, nil, nil).WithoutHashAndName(),
|
|
||||||
} {
|
|
||||||
t.deleteKeyPrefix(key)
|
|
||||||
}
|
}
|
||||||
|
if err := t.deleteKeyPrefix(k0.WithoutNameAndDevice()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove all sequences related to the folder
|
||||||
|
k1, err := db.keyer.GenerateSequenceKey(nil, folder, 0)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.deleteKeyPrefix(k1.WithoutSequence()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove all items related to the given folder from the global bucket
|
||||||
|
k2, err := db.keyer.GenerateGlobalVersionKey(nil, folder, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.deleteKeyPrefix(k2.WithoutName()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove all needs related to the folder
|
||||||
|
k3, err := db.keyer.GenerateNeedFileKey(nil, folder, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.deleteKeyPrefix(k3.WithoutName()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the blockmap of the folder
|
||||||
|
k4, err := db.keyer.GenerateBlockMapKey(nil, folder, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.deleteKeyPrefix(k4.WithoutHashAndName()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) {
|
func (db *instance) dropDeviceFolder(device, folder []byte, meta *metadataTracker) error {
|
||||||
t := db.newReadWriteTransaction()
|
t, err := db.newReadWriteTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateDeviceFileKey(nil, folder, device, nil)), nil)
|
key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, nil)
|
||||||
defer dbi.Release()
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewPrefixIterator(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
var gk, keyBuf []byte
|
var gk, keyBuf []byte
|
||||||
for dbi.Next() {
|
for dbi.Next() {
|
||||||
name := db.keyer.NameFromDeviceFileKey(dbi.Key())
|
name := db.keyer.NameFromDeviceFileKey(dbi.Key())
|
||||||
gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
|
gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
|
||||||
keyBuf = t.removeFromGlobal(gk, keyBuf, folder, device, name, meta)
|
if err != nil {
|
||||||
t.Delete(dbi.Key())
|
return err
|
||||||
t.checkFlush()
|
|
||||||
}
|
}
|
||||||
|
keyBuf, err = t.removeFromGlobal(gk, keyBuf, folder, device, name, meta)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.Delete(dbi.Key()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := t.Checkpoint(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := dbi.Error(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi.Release()
|
||||||
|
|
||||||
if bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
if bytes.Equal(device, protocol.LocalDeviceID[:]) {
|
||||||
t.deleteKeyPrefix(db.keyer.GenerateBlockMapKey(nil, folder, nil, nil).WithoutHashAndName())
|
key, err := db.keyer.GenerateBlockMapKey(nil, folder, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
if err := t.deleteKeyPrefix(key.WithoutHashAndName()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return t.commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) checkGlobals(folder []byte, meta *metadataTracker) {
|
func (db *instance) checkGlobals(folder []byte, meta *metadataTracker) error {
|
||||||
t := db.newReadWriteTransaction()
|
t, err := db.newReadWriteTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
dbi := t.NewIterator(util.BytesPrefix(db.keyer.GenerateGlobalVersionKey(nil, folder, nil).WithoutName()), nil)
|
key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dbi, err := t.NewPrefixIterator(key.WithoutName())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
|
|
||||||
var dk []byte
|
var dk []byte
|
||||||
@ -464,66 +715,98 @@ func (db *instance) checkGlobals(folder []byte, meta *metadataTracker) {
|
|||||||
name := db.keyer.NameFromGlobalVersionKey(dbi.Key())
|
name := db.keyer.NameFromGlobalVersionKey(dbi.Key())
|
||||||
var newVL VersionList
|
var newVL VersionList
|
||||||
for i, version := range vl.Versions {
|
for i, version := range vl.Versions {
|
||||||
dk = db.keyer.GenerateDeviceFileKey(dk, folder, version.Device, name)
|
dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, version.Device, name)
|
||||||
_, err := t.Get(dk, nil)
|
if err != nil {
|
||||||
if err == leveldb.ErrNotFound {
|
return err
|
||||||
|
}
|
||||||
|
_, err := t.Get(dk)
|
||||||
|
if backend.IsNotFound(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.Debugln("surprise error:", err)
|
return err
|
||||||
return
|
|
||||||
}
|
}
|
||||||
newVL.Versions = append(newVL.Versions, version)
|
newVL.Versions = append(newVL.Versions, version)
|
||||||
|
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
if fi, ok := t.getFileByKey(dk); ok {
|
if fi, ok, err := t.getFileByKey(dk); err != nil {
|
||||||
|
return err
|
||||||
|
} else if ok {
|
||||||
meta.addFile(protocol.GlobalDeviceID, fi)
|
meta.addFile(protocol.GlobalDeviceID, fi)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(newVL.Versions) != len(vl.Versions) {
|
if len(newVL.Versions) != len(vl.Versions) {
|
||||||
t.Put(dbi.Key(), mustMarshal(&newVL))
|
if err := t.Put(dbi.Key(), mustMarshal(&newVL)); err != nil {
|
||||||
t.checkFlush()
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
if err := dbi.Error(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
l.Debugf("db check completed for %q", folder)
|
l.Debugf("db check completed for %q", folder)
|
||||||
|
return t.commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) getIndexID(device, folder []byte) protocol.IndexID {
|
func (db *instance) getIndexID(device, folder []byte) (protocol.IndexID, error) {
|
||||||
cur, err := db.Get(db.keyer.GenerateIndexIDKey(nil, device, folder), nil)
|
key, err := db.keyer.GenerateIndexIDKey(nil, device, folder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0
|
return 0, err
|
||||||
|
}
|
||||||
|
cur, err := db.Get(key)
|
||||||
|
if backend.IsNotFound(err) {
|
||||||
|
return 0, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var id protocol.IndexID
|
var id protocol.IndexID
|
||||||
if err := id.Unmarshal(cur); err != nil {
|
if err := id.Unmarshal(cur); err != nil {
|
||||||
return 0
|
return 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return id
|
return id, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) setIndexID(device, folder []byte, id protocol.IndexID) {
|
func (db *instance) setIndexID(device, folder []byte, id protocol.IndexID) error {
|
||||||
bs, _ := id.Marshal() // marshalling can't fail
|
bs, _ := id.Marshal() // marshalling can't fail
|
||||||
if err := db.Put(db.keyer.GenerateIndexIDKey(nil, device, folder), bs, nil); err != nil && err != leveldb.ErrClosed {
|
key, err := db.keyer.GenerateIndexIDKey(nil, device, folder)
|
||||||
panic("storing index ID: " + err.Error())
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
return db.Put(key, bs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) dropMtimes(folder []byte) {
|
func (db *instance) dropMtimes(folder []byte) error {
|
||||||
db.dropPrefix(db.keyer.GenerateMtimesKey(nil, folder))
|
key, err := db.keyer.GenerateMtimesKey(nil, folder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return db.dropPrefix(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) dropFolderMeta(folder []byte) {
|
func (db *instance) dropFolderMeta(folder []byte) error {
|
||||||
db.dropPrefix(db.keyer.GenerateFolderMetaKey(nil, folder))
|
key, err := db.keyer.GenerateFolderMetaKey(nil, folder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return db.dropPrefix(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (db *instance) dropPrefix(prefix []byte) {
|
func (db *instance) dropPrefix(prefix []byte) error {
|
||||||
t := db.newReadWriteTransaction()
|
t, err := db.newReadWriteTransaction()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
defer t.close()
|
defer t.close()
|
||||||
|
|
||||||
t.deleteKeyPrefix(prefix)
|
if err := t.deleteKeyPrefix(prefix); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return t.commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
|
func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
|
||||||
@ -551,15 +834,6 @@ func unmarshalVersionList(data []byte) (VersionList, bool) {
|
|||||||
return vl, true
|
return vl, true
|
||||||
}
|
}
|
||||||
|
|
||||||
type errorSuggestion struct {
|
|
||||||
inner error
|
|
||||||
suggestion string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e errorSuggestion) Error() string {
|
|
||||||
return fmt.Sprintf("%s (%s)", e.inner.Error(), e.suggestion)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unchanged checks if two files are the same and thus don't need to be updated.
|
// unchanged checks if two files are the same and thus don't need to be updated.
|
||||||
// Local flags or the invalid bit might change without the version
|
// Local flags or the invalid bit might change without the version
|
||||||
// being bumped.
|
// being bumped.
|
||||||
|
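Every rewritten method in instance.go above follows the same shape: acquire a transaction, propagate errors from key generation and from each backend operation, and finish with commit (read-write paths) or dbi.Error() (iteration paths). A condensed sketch of that shape, using only calls that appear in the diff; the method name itself is hypothetical:

// Hypothetical method illustrating the recurring error-propagation shape
// applied throughout instance.go in this change.
func (db *instance) exampleDrop(folder, device, name []byte) error {
	t, err := db.newReadWriteTransaction()
	if err != nil {
		return err
	}
	defer t.close() // the real methods use the same defer-then-commit shape

	dk, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, name)
	if err != nil {
		return err // key generation can fail; see the keyer changes below
	}
	if err := t.Delete(dk); err != nil {
		return err
	}
	return t.commit()
}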
lib/db/keyer.go (108 changes)

@@ -63,36 +63,36 @@ const (

 type keyer interface {
 	// device file key stuff
-	GenerateDeviceFileKey(key, folder, device, name []byte) deviceFileKey
+	GenerateDeviceFileKey(key, folder, device, name []byte) (deviceFileKey, error)
 	NameFromDeviceFileKey(key []byte) []byte
 	DeviceFromDeviceFileKey(key []byte) ([]byte, bool)
 	FolderFromDeviceFileKey(key []byte) ([]byte, bool)

 	// global version key stuff
-	GenerateGlobalVersionKey(key, folder, name []byte) globalVersionKey
+	GenerateGlobalVersionKey(key, folder, name []byte) (globalVersionKey, error)
 	NameFromGlobalVersionKey(key []byte) []byte
 	FolderFromGlobalVersionKey(key []byte) ([]byte, bool)

 	// block map key stuff (former BlockMap)
-	GenerateBlockMapKey(key, folder, hash, name []byte) blockMapKey
+	GenerateBlockMapKey(key, folder, hash, name []byte) (blockMapKey, error)
 	NameFromBlockMapKey(key []byte) []byte

 	// file need index
-	GenerateNeedFileKey(key, folder, name []byte) needFileKey
+	GenerateNeedFileKey(key, folder, name []byte) (needFileKey, error)

 	// file sequence index
-	GenerateSequenceKey(key, folder []byte, seq int64) sequenceKey
+	GenerateSequenceKey(key, folder []byte, seq int64) (sequenceKey, error)
 	SequenceFromSequenceKey(key []byte) int64

 	// index IDs
-	GenerateIndexIDKey(key, device, folder []byte) indexIDKey
+	GenerateIndexIDKey(key, device, folder []byte) (indexIDKey, error)
 	DeviceFromIndexIDKey(key []byte) ([]byte, bool)

 	// Mtimes
-	GenerateMtimesKey(key, folder []byte) mtimesKey
+	GenerateMtimesKey(key, folder []byte) (mtimesKey, error)

 	// Folder metadata
-	GenerateFolderMetaKey(key, folder []byte) folderMetaKey
+	GenerateFolderMetaKey(key, folder []byte) (folderMetaKey, error)
 }

 // defaultKeyer implements our key scheme. It needs folder and device
@@ -115,13 +115,21 @@ func (k deviceFileKey) WithoutNameAndDevice() []byte {
 	return k[:keyPrefixLen+keyFolderLen]
 }

-func (k defaultKeyer) GenerateDeviceFileKey(key, folder, device, name []byte) deviceFileKey {
+func (k defaultKeyer) GenerateDeviceFileKey(key, folder, device, name []byte) (deviceFileKey, error) {
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
+	deviceID, err := k.deviceIdx.ID(device)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyFolderLen+keyDeviceLen+len(name))
 	key[0] = KeyTypeDevice
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder))
-	binary.BigEndian.PutUint32(key[keyPrefixLen+keyFolderLen:], k.deviceIdx.ID(device))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID)
+	binary.BigEndian.PutUint32(key[keyPrefixLen+keyFolderLen:], deviceID)
 	copy(key[keyPrefixLen+keyFolderLen+keyDeviceLen:], name)
-	return key
+	return key, nil
 }

 func (k defaultKeyer) NameFromDeviceFileKey(key []byte) []byte {
@@ -142,12 +150,16 @@ func (k globalVersionKey) WithoutName() []byte {
 	return k[:keyPrefixLen+keyFolderLen]
 }

-func (k defaultKeyer) GenerateGlobalVersionKey(key, folder, name []byte) globalVersionKey {
+func (k defaultKeyer) GenerateGlobalVersionKey(key, folder, name []byte) (globalVersionKey, error) {
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyFolderLen+len(name))
 	key[0] = KeyTypeGlobal
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID)
 	copy(key[keyPrefixLen+keyFolderLen:], name)
-	return key
+	return key, nil
 }

 func (k defaultKeyer) NameFromGlobalVersionKey(key []byte) []byte {
@@ -160,13 +172,17 @@ func (k defaultKeyer) FolderFromGlobalVersionKey(key []byte) ([]byte, bool) {

 type blockMapKey []byte

-func (k defaultKeyer) GenerateBlockMapKey(key, folder, hash, name []byte) blockMapKey {
+func (k defaultKeyer) GenerateBlockMapKey(key, folder, hash, name []byte) (blockMapKey, error) {
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyFolderLen+keyHashLen+len(name))
 	key[0] = KeyTypeBlock
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID)
 	copy(key[keyPrefixLen+keyFolderLen:], hash)
 	copy(key[keyPrefixLen+keyFolderLen+keyHashLen:], name)
-	return key
+	return key, nil
 }

 func (k defaultKeyer) NameFromBlockMapKey(key []byte) []byte {
@@ -183,12 +199,16 @@ func (k needFileKey) WithoutName() []byte {
 	return k[:keyPrefixLen+keyFolderLen]
 }

-func (k defaultKeyer) GenerateNeedFileKey(key, folder, name []byte) needFileKey {
+func (k defaultKeyer) GenerateNeedFileKey(key, folder, name []byte) (needFileKey, error) {
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyFolderLen+len(name))
 	key[0] = KeyTypeNeed
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID)
 	copy(key[keyPrefixLen+keyFolderLen:], name)
-	return key
+	return key, nil
 }

 type sequenceKey []byte
@@ -197,12 +217,16 @@ func (k sequenceKey) WithoutSequence() []byte {
 	return k[:keyPrefixLen+keyFolderLen]
 }

-func (k defaultKeyer) GenerateSequenceKey(key, folder []byte, seq int64) sequenceKey {
+func (k defaultKeyer) GenerateSequenceKey(key, folder []byte, seq int64) (sequenceKey, error) {
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyFolderLen+keySequenceLen)
 	key[0] = KeyTypeSequence
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID)
 	binary.BigEndian.PutUint64(key[keyPrefixLen+keyFolderLen:], uint64(seq))
-	return key
+	return key, nil
 }

 func (k defaultKeyer) SequenceFromSequenceKey(key []byte) int64 {
@@ -211,12 +235,20 @@ func (k defaultKeyer) SequenceFromSequenceKey(key []byte) int64 {

 type indexIDKey []byte

-func (k defaultKeyer) GenerateIndexIDKey(key, device, folder []byte) indexIDKey {
+func (k defaultKeyer) GenerateIndexIDKey(key, device, folder []byte) (indexIDKey, error) {
+	deviceID, err := k.deviceIdx.ID(device)
+	if err != nil {
+		return nil, err
+	}
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyDeviceLen+keyFolderLen)
 	key[0] = KeyTypeIndexID
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.deviceIdx.ID(device))
-	binary.BigEndian.PutUint32(key[keyPrefixLen+keyDeviceLen:], k.folderIdx.ID(folder))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], deviceID)
+	binary.BigEndian.PutUint32(key[keyPrefixLen+keyDeviceLen:], folderID)
-	return key
+	return key, nil
 }

 func (k defaultKeyer) DeviceFromIndexIDKey(key []byte) ([]byte, bool) {
@@ -225,20 +257,28 @@ func (k defaultKeyer) DeviceFromIndexIDKey(key []byte) ([]byte, bool) {

 type mtimesKey []byte

-func (k defaultKeyer) GenerateMtimesKey(key, folder []byte) mtimesKey {
+func (k defaultKeyer) GenerateMtimesKey(key, folder []byte) (mtimesKey, error) {
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyFolderLen)
 	key[0] = KeyTypeVirtualMtime
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID)
-	return key
+	return key, nil
 }

 type folderMetaKey []byte

-func (k defaultKeyer) GenerateFolderMetaKey(key, folder []byte) folderMetaKey {
+func (k defaultKeyer) GenerateFolderMetaKey(key, folder []byte) (folderMetaKey, error) {
+	folderID, err := k.folderIdx.ID(folder)
+	if err != nil {
+		return nil, err
+	}
 	key = resize(key, keyPrefixLen+keyFolderLen)
 	key[0] = KeyTypeFolderMeta
-	binary.BigEndian.PutUint32(key[keyPrefixLen:], k.folderIdx.ID(folder))
+	binary.BigEndian.PutUint32(key[keyPrefixLen:], folderID)
-	return key
+	return key, nil
 }

 // resize returns a byte slice of the specified size, reusing bs if possible
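Key generation can now fail because folderIdx and deviceIdx are database-backed indexes, so assigning an ID to a previously unseen folder or device involves storage access. The reverse accessors (NameFrom..., SequenceFrom...) stay infallible since they only slice or decode an existing key. A small usage sketch with a hypothetical helper name:

// Hypothetical helper showing the new two-value keyer API. Generation can
// fail because the folder index is database-backed; decoding back out of a
// key remains infallible.
func logSequenceKey(db *instance, folder []byte) error {
	key, err := db.keyer.GenerateSequenceKey(nil, folder, 42)
	if err != nil {
		return err // storage error from the folder index
	}
	l.Debugln("round-tripped sequence:", db.keyer.SequenceFromSequenceKey(key))
	return nil
}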
lib/db/keyer_test.go

@@ -9,6 +9,8 @@ package db
 import (
 	"bytes"
 	"testing"
+
+	"github.com/syncthing/syncthing/lib/db/backend"
 )

 func TestDeviceKey(t *testing.T) {
@@ -16,9 +18,12 @@ func TestDeviceKey(t *testing.T) {
 	dev := []byte("device67890123456789012345678901")
 	name := []byte("name")

-	db := newInstance(OpenMemory())
+	db := newInstance(NewLowlevel(backend.OpenMemory()))

-	key := db.keyer.GenerateDeviceFileKey(nil, fld, dev, name)
+	key, err := db.keyer.GenerateDeviceFileKey(nil, fld, dev, name)
+	if err != nil {
+		t.Fatal(err)
+	}

 	fld2, ok := db.keyer.FolderFromDeviceFileKey(key)
 	if !ok {
@@ -44,9 +49,12 @@ func TestGlobalKey(t *testing.T) {
 	fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")
 	name := []byte("name")

-	db := newInstance(OpenMemory())
+	db := newInstance(NewLowlevel(backend.OpenMemory()))

-	key := db.keyer.GenerateGlobalVersionKey(nil, fld, name)
+	key, err := db.keyer.GenerateGlobalVersionKey(nil, fld, name)
+	if err != nil {
+		t.Fatal(err)
+	}

 	fld2, ok := db.keyer.FolderFromGlobalVersionKey(key)
 	if !ok {
@@ -69,10 +77,13 @@ func TestGlobalKey(t *testing.T) {
 func TestSequenceKey(t *testing.T) {
 	fld := []byte("folder6789012345678901234567890123456789012345678901234567890123")

-	db := newInstance(OpenMemory())
+	db := newInstance(NewLowlevel(backend.OpenMemory()))

 	const seq = 1234567890
-	key := db.keyer.GenerateSequenceKey(nil, fld, seq)
+	key, err := db.keyer.GenerateSequenceKey(nil, fld, seq)
+	if err != nil {
+		t.Fatal(err)
+	}
 	outSeq := db.keyer.SequenceFromSequenceKey(key)
 	if outSeq != seq {
 		t.Errorf("sequence number mangled, %d != %d", outSeq, seq)
|
@ -7,431 +7,30 @@
|
|||||||
package db
|
package db
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"github.com/syncthing/syncthing/lib/db/backend"
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/syndtr/goleveldb/leveldb"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/errors"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
|
||||||
"github.com/syndtr/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
dbMaxOpenFiles = 100
|
|
||||||
dbFlushBatch = 4 << MiB
|
|
||||||
|
|
||||||
// A large database is > 200 MiB. It's a mostly arbitrary value, but
|
|
||||||
// it's also the case that each file is 2 MiB by default and when we
|
|
||||||
// have dbMaxOpenFiles of them we will need to start thrashing fd:s.
|
|
||||||
// Switching to large database settings causes larger files to be used
|
|
||||||
// when compacting, reducing the number.
|
|
||||||
dbLargeThreshold = dbMaxOpenFiles * (2 << MiB)
|
|
||||||
|
|
||||||
KiB = 10
|
|
||||||
MiB = 20
|
|
||||||
)
|
|
||||||
|
|
||||||
type Tuning int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// N.b. these constants must match those in lib/config.Tuning!
|
|
||||||
TuningAuto Tuning = iota
|
|
||||||
TuningSmall
|
|
||||||
TuningLarge
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Lowlevel is the lowest level database interface. It has a very simple
|
// Lowlevel is the lowest level database interface. It has a very simple
|
||||||
// purpose: hold the actual *leveldb.DB database, and the in-memory state
|
// purpose: hold the actual backend database, and the in-memory state
|
||||||
// that belong to that database. In the same way that a single on disk
|
// that belong to that database. In the same way that a single on disk
|
||||||
// database can only be opened once, there should be only one Lowlevel for
|
// database can only be opened once, there should be only one Lowlevel for
|
||||||
// any given *leveldb.DB.
|
// any given backend.
|
||||||
type Lowlevel struct {
|
type Lowlevel struct {
|
||||||
committed int64 // atomic, must come first
|
backend.Backend
|
||||||
*leveldb.DB
|
|
||||||
location string
|
|
||||||
folderIdx *smallIndex
|
folderIdx *smallIndex
|
||||||
deviceIdx *smallIndex
|
deviceIdx *smallIndex
|
||||||
closed bool
|
|
||||||
closeMut *sync.RWMutex
|
|
||||||
iterWG sync.WaitGroup
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Open attempts to open the database at the given location, and runs
|
// NewLowlevel wraps the given *leveldb.DB into a *lowlevel
|
||||||
// recovery on it if opening fails. Worst case, if recovery is not possible,
|
func NewLowlevel(db backend.Backend) *Lowlevel {
|
||||||
// the database is erased and created from scratch.
|
return &Lowlevel{
|
||||||
func Open(location string, tuning Tuning) (*Lowlevel, error) {
|
Backend: db,
|
||||||
opts := optsFor(location, tuning)
|
folderIdx: newSmallIndex(db, []byte{KeyTypeFolderIdx}),
|
||||||
return open(location, opts)
|
	deviceIdx: newSmallIndex(db, []byte{KeyTypeDeviceIdx}),
}

// optsFor returns the database options to use when opening a database with
// the given location and tuning. Settings can be overridden by debug
// environment variables.
func optsFor(location string, tuning Tuning) *opt.Options {
	large := false
	switch tuning {
	case TuningLarge:
		large = true
	case TuningAuto:
		large = dbIsLarge(location)
	}

	var (
		// Set defaults used for small databases.
		defaultBlockCacheCapacity            = 0 // 0 means let leveldb use default
		defaultBlockSize                     = 0
		defaultCompactionTableSize           = 0
		defaultCompactionTableSizeMultiplier = 0
		defaultWriteBuffer                   = 16 << MiB                      // increased from leveldb default of 4 MiB
		defaultCompactionL0Trigger           = opt.DefaultCompactionL0Trigger // explicit because we use it as base for other stuff
	)

	if large {
		// Change the parameters for better throughput at the price of some
		// RAM and larger files. This results in larger batches of writes
		// and compaction at a lower frequency.
		l.Infoln("Using large-database tuning")

		defaultBlockCacheCapacity = 64 << MiB
		defaultBlockSize = 64 << KiB
		defaultCompactionTableSize = 16 << MiB
		defaultCompactionTableSizeMultiplier = 20 // 2.0 after division by ten
		defaultWriteBuffer = 64 << MiB
		defaultCompactionL0Trigger = 8 // number of l0 files
	}

	opts := &opt.Options{
		BlockCacheCapacity:            debugEnvValue("BlockCacheCapacity", defaultBlockCacheCapacity),
		BlockCacheEvictRemoved:        debugEnvValue("BlockCacheEvictRemoved", 0) != 0,
		BlockRestartInterval:          debugEnvValue("BlockRestartInterval", 0),
		BlockSize:                     debugEnvValue("BlockSize", defaultBlockSize),
		CompactionExpandLimitFactor:   debugEnvValue("CompactionExpandLimitFactor", 0),
		CompactionGPOverlapsFactor:    debugEnvValue("CompactionGPOverlapsFactor", 0),
		CompactionL0Trigger:           debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger),
		CompactionSourceLimitFactor:   debugEnvValue("CompactionSourceLimitFactor", 0),
		CompactionTableSize:           debugEnvValue("CompactionTableSize", defaultCompactionTableSize),
		CompactionTableSizeMultiplier: float64(debugEnvValue("CompactionTableSizeMultiplier", defaultCompactionTableSizeMultiplier)) / 10.0,
		CompactionTotalSize:           debugEnvValue("CompactionTotalSize", 0),
		CompactionTotalSizeMultiplier: float64(debugEnvValue("CompactionTotalSizeMultiplier", 0)) / 10.0,
		DisableBufferPool:             debugEnvValue("DisableBufferPool", 0) != 0,
		DisableBlockCache:             debugEnvValue("DisableBlockCache", 0) != 0,
		DisableCompactionBackoff:      debugEnvValue("DisableCompactionBackoff", 0) != 0,
		DisableLargeBatchTransaction:  debugEnvValue("DisableLargeBatchTransaction", 0) != 0,
		NoSync:                        debugEnvValue("NoSync", 0) != 0,
		NoWriteMerge:                  debugEnvValue("NoWriteMerge", 0) != 0,
		OpenFilesCacheCapacity:        debugEnvValue("OpenFilesCacheCapacity", dbMaxOpenFiles),
		WriteBuffer:                   debugEnvValue("WriteBuffer", defaultWriteBuffer),
		// The write slowdown and pause can be overridden, but even if they
		// are not and the compaction trigger is overridden we need to
		// adjust so that we don't pause writes for L0 compaction before we
		// even *start* L0 compaction...
		WriteL0SlowdownTrigger: debugEnvValue("WriteL0SlowdownTrigger", 2*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
		WriteL0PauseTrigger:    debugEnvValue("WriteL0PauseTrigger", 3*debugEnvValue("CompactionL0Trigger", defaultCompactionL0Trigger)),
	}

	return opts
}
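All of these options funnel through debugEnvValue, so any of them can be forced at runtime without a rebuild. A minimal sketch of the override path, assuming the optsFor, Tuning and debugEnvValue definitions in this diff (the database path and value are made up):

	// Force a 128 MiB write buffer regardless of tuning. debugEnvValue
	// reads STDEBUG_<name> from the environment, falling back to the
	// given default when the variable is unset or unparseable.
	os.Setenv("STDEBUG_WriteBuffer", fmt.Sprint(128<<20))
	opts := optsFor("/var/lib/syncthing/index-v0.14.0.db", TuningAuto)
	fmt.Println(opts.WriteBuffer) // 134217728 rather than the 16/64 MiB default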
// OpenRO attempts to open the database at the given location, read only.
func OpenRO(location string) (*Lowlevel, error) {
	opts := &opt.Options{
		OpenFilesCacheCapacity: dbMaxOpenFiles,
		ReadOnly:               true,
	}
	return open(location, opts)
}

func open(location string, opts *opt.Options) (*Lowlevel, error) {
	db, err := leveldb.OpenFile(location, opts)
	if leveldbIsCorrupted(err) {
		db, err = leveldb.RecoverFile(location, opts)
	}
	if leveldbIsCorrupted(err) {
		// The database is corrupted, and we've tried to recover it but it
		// didn't work. At this point there isn't much to do beyond dropping
		// the database and reindexing...
		l.Infoln("Database corruption detected, unable to recover. Reinitializing...")
		if err := os.RemoveAll(location); err != nil {
			return nil, errorSuggestion{err, "failed to delete corrupted database"}
		}
		db, err = leveldb.OpenFile(location, opts)
	}
	if err != nil {
		return nil, errorSuggestion{err, "is another instance of Syncthing running?"}
	}

	if debugEnvValue("CompactEverything", 0) != 0 {
		if err := db.CompactRange(util.Range{}); err != nil {
			l.Warnln("Compacting database:", err)
		}
	}

	return NewLowlevel(db, location), nil
}

// OpenMemory returns a new Lowlevel referencing an in-memory database.
func OpenMemory() *Lowlevel {
	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
	return NewLowlevel(db, "<memory>")
}
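The read-only variant is what an offline inspection tool would use against an existing installation's index. A usage sketch, with a hypothetical path:

	ldb, err := OpenRO("/var/lib/syncthing/index-v0.14.0.db")
	if err != nil {
		log.Fatal(err) // e.g. another running instance holds the lock
	}
	defer ldb.Close()
	fmt.Println("folders in db:", ldb.ListFolders())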
// ListFolders returns the list of folders currently in the database
func (db *Lowlevel) ListFolders() []string {
	return db.folderIdx.Values()
}

// Committed returns the number of items committed to the database since startup
func (db *Lowlevel) Committed() int64 {
	return atomic.LoadInt64(&db.committed)
}

func (db *Lowlevel) Put(key, val []byte, wo *opt.WriteOptions) error {
	db.closeMut.RLock()
	defer db.closeMut.RUnlock()
	if db.closed {
		return leveldb.ErrClosed
	}
	atomic.AddInt64(&db.committed, 1)
	return db.DB.Put(key, val, wo)
}

func (db *Lowlevel) Write(batch *leveldb.Batch, wo *opt.WriteOptions) error {
	db.closeMut.RLock()
	defer db.closeMut.RUnlock()
	if db.closed {
		return leveldb.ErrClosed
	}
	return db.DB.Write(batch, wo)
}

func (db *Lowlevel) Delete(key []byte, wo *opt.WriteOptions) error {
	db.closeMut.RLock()
	defer db.closeMut.RUnlock()
	if db.closed {
		return leveldb.ErrClosed
	}
	atomic.AddInt64(&db.committed, 1)
	return db.DB.Delete(key, wo)
}

func (db *Lowlevel) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
	return db.newIterator(func() iterator.Iterator { return db.DB.NewIterator(slice, ro) })
}

// newIterator returns an iterator created with the given constructor only if db
// is not yet closed. If it is closed, a closedIter is returned instead.
func (db *Lowlevel) newIterator(constr func() iterator.Iterator) iterator.Iterator {
	db.closeMut.RLock()
	defer db.closeMut.RUnlock()
	if db.closed {
		return &closedIter{}
	}
	db.iterWG.Add(1)
	return &iter{
		Iterator: constr(),
		db:       db,
	}
}

func (db *Lowlevel) GetSnapshot() snapshot {
	s, err := db.DB.GetSnapshot()
	if err != nil {
		if err == leveldb.ErrClosed {
			return &closedSnap{}
		}
		panic(err)
	}
	return &snap{
		Snapshot: s,
		db:       db,
	}
}

func (db *Lowlevel) Close() {
	db.closeMut.Lock()
	if db.closed {
		db.closeMut.Unlock()
		return
	}
	db.closed = true
	db.closeMut.Unlock()
	db.iterWG.Wait()
	db.DB.Close()
}

// dbIsLarge returns whether the estimated size of the database at location
// is large enough to warrant optimization for large databases.
func dbIsLarge(location string) bool {
	if ^uint(0)>>63 == 0 {
		// We're compiled for a 32 bit architecture. We've seen trouble with
		// large settings there.
		// (https://forum.syncthing.net/t/many-small-ldb-files-with-database-tuning/13842)
		return false
	}

	dir, err := os.Open(location)
	if err != nil {
		return false
	}

	fis, err := dir.Readdir(-1)
	if err != nil {
		return false
	}

	var size int64
	for _, fi := range fis {
		if fi.Name() == "LOG" {
			// don't count the size
			continue
		}
		size += fi.Size()
	}

	return size > dbLargeThreshold
}
// NewLowlevel wraps the given *leveldb.DB into a *Lowlevel
func NewLowlevel(db *leveldb.DB, location string) *Lowlevel {
	return &Lowlevel{
		DB:        db,
		location:  location,
		folderIdx: newSmallIndex(db, []byte{KeyTypeFolderIdx}),
		deviceIdx: newSmallIndex(db, []byte{KeyTypeDeviceIdx}),
		closeMut:  &sync.RWMutex{},
		iterWG:    sync.WaitGroup{},
	}
}
// A "better" version of leveldb's errors.IsCorrupted.
func leveldbIsCorrupted(err error) bool {
	switch {
	case err == nil:
		return false

	case errors.IsCorrupted(err):
		return true

	case strings.Contains(err.Error(), "corrupted"):
		return true
	}

	return false
}

type batch struct {
	*leveldb.Batch
	db *Lowlevel
}

func (db *Lowlevel) newBatch() *batch {
	return &batch{
		Batch: new(leveldb.Batch),
		db:    db,
	}
}

// checkFlush flushes and resets the batch if its size exceeds dbFlushBatch.
func (b *batch) checkFlush() {
	if len(b.Dump()) > dbFlushBatch {
		b.flush()
		b.Reset()
	}
}

func (b *batch) flush() {
	if err := b.db.Write(b.Batch, nil); err != nil && err != leveldb.ErrClosed {
		panic(err)
	}
}
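Together, checkFlush and flush give callers a bounded-memory write loop: queue operations, flush whenever the serialized batch grows past dbFlushBatch, and flush once more at the end. A sketch of the calling pattern (keysToRemove is a hypothetical slice of database keys):

	b := ldb.newBatch()
	for _, key := range keysToRemove {
		b.Delete(key)  // queued in memory only
		b.checkFlush() // writes and resets once the batch exceeds dbFlushBatch
	}
	b.flush() // write whatever remains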
type closedIter struct{}

func (it *closedIter) Release()                           {}
func (it *closedIter) Key() []byte                        { return nil }
func (it *closedIter) Value() []byte                      { return nil }
func (it *closedIter) Next() bool                         { return false }
func (it *closedIter) Prev() bool                         { return false }
func (it *closedIter) First() bool                        { return false }
func (it *closedIter) Last() bool                         { return false }
func (it *closedIter) Seek(key []byte) bool               { return false }
func (it *closedIter) Valid() bool                        { return false }
func (it *closedIter) Error() error                       { return leveldb.ErrClosed }
func (it *closedIter) SetReleaser(releaser util.Releaser) {}

type snapshot interface {
	Get([]byte, *opt.ReadOptions) ([]byte, error)
	Has([]byte, *opt.ReadOptions) (bool, error)
	NewIterator(*util.Range, *opt.ReadOptions) iterator.Iterator
	Release()
}

type closedSnap struct{}

func (s *closedSnap) Get([]byte, *opt.ReadOptions) ([]byte, error) { return nil, leveldb.ErrClosed }
func (s *closedSnap) Has([]byte, *opt.ReadOptions) (bool, error)   { return false, leveldb.ErrClosed }
func (s *closedSnap) NewIterator(*util.Range, *opt.ReadOptions) iterator.Iterator {
	return &closedIter{}
}
func (s *closedSnap) Release() {}

type snap struct {
	*leveldb.Snapshot
	db *Lowlevel
}

func (s *snap) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
	return s.db.newIterator(func() iterator.Iterator { return s.Snapshot.NewIterator(slice, ro) })
}

// iter implements iterator.Iterator which allows tracking active iterators
// and aborts if the underlying database is being closed.
type iter struct {
	iterator.Iterator
	db *Lowlevel
}

func (it *iter) Release() {
	it.db.iterWG.Done()
	it.Iterator.Release()
}

func (it *iter) Next() bool {
	return it.execIfNotClosed(it.Iterator.Next)
}
func (it *iter) Prev() bool {
	return it.execIfNotClosed(it.Iterator.Prev)
}
func (it *iter) First() bool {
	return it.execIfNotClosed(it.Iterator.First)
}
func (it *iter) Last() bool {
	return it.execIfNotClosed(it.Iterator.Last)
}
func (it *iter) Seek(key []byte) bool {
	return it.execIfNotClosed(func() bool {
		return it.Iterator.Seek(key)
	})
}

func (it *iter) execIfNotClosed(fn func() bool) bool {
	it.db.closeMut.RLock()
	defer it.db.closeMut.RUnlock()
	if it.db.closed {
		return false
	}
	return fn()
}
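The closed flag, closeMut and iterWG combine so that Close cannot pull the database out from under a live iterator: Close first flips the flag, which makes every iterator method return false, then blocks in iterWG.Wait() until each iterator has been Released. A sketch of the interplay, assuming a *Lowlevel as defined above:

	it := ldb.NewIterator(nil, nil) // iterWG.Add(1) happens in newIterator
	go ldb.Close()                  // sets closed, then waits in iterWG.Wait()
	for it.Next() {                 // returns false as soon as closed is set
		// process it.Key(), it.Value()
	}
	it.Release() // iterWG.Done(); now Close can proceed to db.DB.Close()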
func debugEnvValue(key string, def int) int {
	v, err := strconv.ParseInt(os.Getenv("STDEBUG_"+key), 10, 63)
	if err != nil {
		return def
	}
	return int(v)
}
@@ -57,7 +57,10 @@ func (m *metadataTracker) Marshal() ([]byte, error) {
 // toDB saves the marshalled metadataTracker to the given db, under the key
 // corresponding to the given folder
 func (m *metadataTracker) toDB(db *instance, folder []byte) error {
-	key := db.keyer.GenerateFolderMetaKey(nil, folder)
+	key, err := db.keyer.GenerateFolderMetaKey(nil, folder)
+	if err != nil {
+		return err
+	}

 	m.mut.RLock()
 	defer m.mut.RUnlock()
@@ -70,7 +73,7 @@ func (m *metadataTracker) toDB(db *instance, folder []byte) error {
 	if err != nil {
 		return err
 	}
-	err = db.Put(key, bs, nil)
+	err = db.Put(key, bs)
 	if err == nil {
 		m.dirty = false
 	}
@@ -81,8 +84,11 @@ func (m *metadataTracker) toDB(db *instance, folder []byte) error {
 // fromDB initializes the metadataTracker from the marshalled data found in
 // the database under the key corresponding to the given folder
 func (m *metadataTracker) fromDB(db *instance, folder []byte) error {
-	key := db.keyer.GenerateFolderMetaKey(nil, folder)
-	bs, err := db.Get(key, nil)
+	key, err := db.keyer.GenerateFolderMetaKey(nil, folder)
+	if err != nil {
+		return err
+	}
+	bs, err := db.Get(key)
 	if err != nil {
 		return err
 	}
@@ -9,8 +9,6 @@ package db
 import (
 	"encoding/binary"
 	"time"
-
-	"github.com/syndtr/goleveldb/leveldb/util"
 )

 // NamespacedKV is a simple key-value store using a specific namespace within
@@ -34,30 +32,18 @@ func NewNamespacedKV(db *Lowlevel, prefix string) *NamespacedKV {
 	}
 }

-// Reset removes all entries in this namespace.
-func (n *NamespacedKV) Reset() {
-	it := n.db.NewIterator(util.BytesPrefix(n.prefix), nil)
-	defer it.Release()
-	batch := n.db.newBatch()
-	for it.Next() {
-		batch.Delete(it.Key())
-		batch.checkFlush()
-	}
-	batch.flush()
-}
-
 // PutInt64 stores a new int64. Any existing value (even if of another type)
 // is overwritten.
-func (n *NamespacedKV) PutInt64(key string, val int64) {
+func (n *NamespacedKV) PutInt64(key string, val int64) error {
 	var valBs [8]byte
 	binary.BigEndian.PutUint64(valBs[:], uint64(val))
-	n.db.Put(n.prefixedKey(key), valBs[:], nil)
+	return n.db.Put(n.prefixedKey(key), valBs[:])
 }

 // Int64 returns the stored value interpreted as an int64 and a boolean that
 // is false if no value was stored at the key.
 func (n *NamespacedKV) Int64(key string) (int64, bool) {
-	valBs, err := n.db.Get(n.prefixedKey(key), nil)
+	valBs, err := n.db.Get(n.prefixedKey(key))
 	if err != nil {
 		return 0, false
 	}
@@ -67,16 +53,16 @@ func (n *NamespacedKV) Int64(key string) (int64, bool) {

 // PutTime stores a new time.Time. Any existing value (even if of another
 // type) is overwritten.
-func (n *NamespacedKV) PutTime(key string, val time.Time) {
+func (n *NamespacedKV) PutTime(key string, val time.Time) error {
 	valBs, _ := val.MarshalBinary() // never returns an error
-	n.db.Put(n.prefixedKey(key), valBs, nil)
+	return n.db.Put(n.prefixedKey(key), valBs)
 }

 // Time returns the stored value interpreted as a time.Time and a boolean
 // that is false if no value was stored at the key.
 func (n NamespacedKV) Time(key string) (time.Time, bool) {
 	var t time.Time
-	valBs, err := n.db.Get(n.prefixedKey(key), nil)
+	valBs, err := n.db.Get(n.prefixedKey(key))
 	if err != nil {
 		return t, false
 	}
@@ -86,14 +72,14 @@ func (n NamespacedKV) Time(key string) (time.Time, bool) {

 // PutString stores a new string. Any existing value (even if of another type)
 // is overwritten.
-func (n *NamespacedKV) PutString(key, val string) {
-	n.db.Put(n.prefixedKey(key), []byte(val), nil)
+func (n *NamespacedKV) PutString(key, val string) error {
+	return n.db.Put(n.prefixedKey(key), []byte(val))
 }

 // String returns the stored value interpreted as a string and a boolean that
 // is false if no value was stored at the key.
 func (n NamespacedKV) String(key string) (string, bool) {
-	valBs, err := n.db.Get(n.prefixedKey(key), nil)
+	valBs, err := n.db.Get(n.prefixedKey(key))
 	if err != nil {
 		return "", false
 	}
@@ -102,14 +88,14 @@ func (n NamespacedKV) String(key string) (string, bool) {

 // PutBytes stores a new byte slice. Any existing value (even if of another type)
 // is overwritten.
-func (n *NamespacedKV) PutBytes(key string, val []byte) {
-	n.db.Put(n.prefixedKey(key), val, nil)
+func (n *NamespacedKV) PutBytes(key string, val []byte) error {
+	return n.db.Put(n.prefixedKey(key), val)
 }

 // Bytes returns the stored value as a raw byte slice and a boolean that
 // is false if no value was stored at the key.
 func (n NamespacedKV) Bytes(key string) ([]byte, bool) {
-	valBs, err := n.db.Get(n.prefixedKey(key), nil)
+	valBs, err := n.db.Get(n.prefixedKey(key))
 	if err != nil {
 		return nil, false
 	}
@@ -118,18 +104,17 @@ func (n NamespacedKV) Bytes(key string) ([]byte, bool) {

 // PutBool stores a new boolean. Any existing value (even if of another type)
 // is overwritten.
-func (n *NamespacedKV) PutBool(key string, val bool) {
+func (n *NamespacedKV) PutBool(key string, val bool) error {
 	if val {
-		n.db.Put(n.prefixedKey(key), []byte{0x0}, nil)
-	} else {
-		n.db.Put(n.prefixedKey(key), []byte{0x1}, nil)
+		return n.db.Put(n.prefixedKey(key), []byte{0x0})
 	}
+	return n.db.Put(n.prefixedKey(key), []byte{0x1})
 }

 // Bool returns the stored value as a boolean and a boolean that
 // is false if no value was stored at the key.
 func (n NamespacedKV) Bool(key string) (bool, bool) {
-	valBs, err := n.db.Get(n.prefixedKey(key), nil)
+	valBs, err := n.db.Get(n.prefixedKey(key))
 	if err != nil {
 		return false, false
 	}
@@ -138,8 +123,8 @@ func (n NamespacedKV) Bool(key string) (bool, bool) {

 // Delete deletes the specified key. It is allowed to delete a nonexistent
 // key.
-func (n NamespacedKV) Delete(key string) {
-	n.db.Delete(n.prefixedKey(key), nil)
+func (n NamespacedKV) Delete(key string) error {
+	return n.db.Delete(n.prefixedKey(key))
 }

 func (n NamespacedKV) prefixedKey(key string) []byte {
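All of the Put* setters and Delete now surface the backend error instead of swallowing it, while the getters keep their (value, ok) shape. A short usage sketch of the new API (ldb and the key names are hypothetical):

	kv := NewNamespacedKV(ldb, "miscdata")
	if err := kv.PutString("deviceName", "nas-01"); err != nil {
		log.Fatal(err) // e.g. write failure or database closed
	}
	if name, ok := kv.String("deviceName"); ok {
		fmt.Println("stored:", name)
	}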
@@ -9,10 +9,12 @@ package db
 import (
 	"testing"
 	"time"
+
+	"github.com/syncthing/syncthing/lib/db/backend"
 )

 func TestNamespacedInt(t *testing.T) {
-	ldb := OpenMemory()
+	ldb := NewLowlevel(backend.OpenMemory())

 	n1 := NewNamespacedKV(ldb, "foo")
 	n2 := NewNamespacedKV(ldb, "bar")
@@ -23,7 +25,9 @@ func TestNamespacedInt(t *testing.T) {
 		t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
 	}

-	n1.PutInt64("test", 42)
+	if err := n1.PutInt64("test", 42); err != nil {
+		t.Fatal(err)
+	}

 	// It should now exist in n1

@@ -37,7 +41,9 @@ func TestNamespacedInt(t *testing.T) {
 		t.Errorf("Incorrect return v %v != 0 || ok %v != false", v, ok)
 	}

-	n1.Delete("test")
+	if err := n1.Delete("test"); err != nil {
+		t.Fatal(err)
+	}

 	// It should no longer exist

@@ -47,7 +53,7 @@ func TestNamespacedInt(t *testing.T) {
 }

 func TestNamespacedTime(t *testing.T) {
-	ldb := OpenMemory()
+	ldb := NewLowlevel(backend.OpenMemory())

 	n1 := NewNamespacedKV(ldb, "foo")

@@ -56,7 +62,9 @@ func TestNamespacedTime(t *testing.T) {
 	}

 	now := time.Now()
-	n1.PutTime("test", now)
+	if err := n1.PutTime("test", now); err != nil {
+		t.Fatal(err)
+	}

 	if v, ok := n1.Time("test"); !v.Equal(now) || !ok {
 		t.Errorf("Incorrect return v %v != %v || ok %v != true", v, now, ok)
@@ -64,7 +72,7 @@ func TestNamespacedTime(t *testing.T) {
 }

 func TestNamespacedString(t *testing.T) {
-	ldb := OpenMemory()
+	ldb := NewLowlevel(backend.OpenMemory())

 	n1 := NewNamespacedKV(ldb, "foo")

@@ -72,7 +80,9 @@ func TestNamespacedString(t *testing.T) {
 		t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
 	}

-	n1.PutString("test", "yo")
+	if err := n1.PutString("test", "yo"); err != nil {
+		t.Fatal(err)
+	}

 	if v, ok := n1.String("test"); v != "yo" || !ok {
 		t.Errorf("Incorrect return v %q != \"yo\" || ok %v != true", v, ok)
@@ -80,13 +90,19 @@ func TestNamespacedString(t *testing.T) {
 }

 func TestNamespacedReset(t *testing.T) {
-	ldb := OpenMemory()
+	ldb := NewLowlevel(backend.OpenMemory())

 	n1 := NewNamespacedKV(ldb, "foo")

-	n1.PutString("test1", "yo1")
-	n1.PutString("test2", "yo2")
-	n1.PutString("test3", "yo3")
+	if err := n1.PutString("test1", "yo1"); err != nil {
+		t.Fatal(err)
+	}
+	if err := n1.PutString("test2", "yo2"); err != nil {
+		t.Fatal(err)
+	}
+	if err := n1.PutString("test3", "yo3"); err != nil {
+		t.Fatal(err)
+	}

 	if v, ok := n1.String("test1"); v != "yo1" || !ok {
 		t.Errorf("Incorrect return v %q != \"yo1\" || ok %v != true", v, ok)
@@ -98,7 +114,7 @@ func TestNamespacedReset(t *testing.T) {
 		t.Errorf("Incorrect return v %q != \"yo3\" || ok %v != true", v, ok)
 	}

-	n1.Reset()
+	reset(n1)

 	if v, ok := n1.String("test1"); v != "" || ok {
 		t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
@@ -110,3 +126,22 @@ func TestNamespacedReset(t *testing.T) {
 		t.Errorf("Incorrect return v %q != \"\" || ok %v != false", v, ok)
 	}
 }
+
+// reset removes all entries in this namespace.
+func reset(n *NamespacedKV) {
+	tr, err := n.db.NewWriteTransaction()
+	if err != nil {
+		return
+	}
+	defer tr.Release()
+
+	it, err := tr.NewPrefixIterator(n.prefix)
+	if err != nil {
+		return
+	}
+	for it.Next() {
+		_ = tr.Delete(it.Key())
+	}
+	it.Release()
+	_ = tr.Commit()
+}
@@ -11,7 +11,6 @@ import (
 	"strings"

 	"github.com/syncthing/syncthing/lib/protocol"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )

 // List of all dbVersion to dbMinSyncthingVersion pairs for convenience
@@ -65,36 +64,58 @@ func (db *schemaUpdater) updateSchema() error {
 	}

 	if prevVersion < 1 {
-		db.updateSchema0to1()
+		if err := db.updateSchema0to1(); err != nil {
+			return err
+		}
 	}
 	if prevVersion < 2 {
-		db.updateSchema1to2()
+		if err := db.updateSchema1to2(); err != nil {
+			return err
+		}
 	}
 	if prevVersion < 3 {
-		db.updateSchema2to3()
+		if err := db.updateSchema2to3(); err != nil {
+			return err
+		}
 	}
 	// This update fixes problems existing in versions 3 and 4
 	if prevVersion == 3 || prevVersion == 4 {
-		db.updateSchemaTo5()
+		if err := db.updateSchemaTo5(); err != nil {
+			return err
+		}
 	}
 	if prevVersion < 6 {
-		db.updateSchema5to6()
+		if err := db.updateSchema5to6(); err != nil {
+			return err
+		}
 	}
 	if prevVersion < 7 {
-		db.updateSchema6to7()
+		if err := db.updateSchema6to7(); err != nil {
+			return err
+		}
 	}

-	miscDB.PutInt64("dbVersion", dbVersion)
-	miscDB.PutString("dbMinSyncthingVersion", dbMinSyncthingVersion)
+	if err := miscDB.PutInt64("dbVersion", dbVersion); err != nil {
+		return err
+	}
+	if err := miscDB.PutString("dbMinSyncthingVersion", dbMinSyncthingVersion); err != nil {
+		return err
+	}

 	return nil
 }
-func (db *schemaUpdater) updateSchema0to1() {
-	t := db.newReadWriteTransaction()
+func (db *schemaUpdater) updateSchema0to1() error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()

-	dbi := t.NewIterator(util.BytesPrefix([]byte{KeyTypeDevice}), nil)
+	dbi, err := t.NewPrefixIterator([]byte{KeyTypeDevice})
+	if err != nil {
+		return err
+	}
 	defer dbi.Release()

 	symlinkConv := 0
@@ -104,18 +125,20 @@ func (db *schemaUpdater) updateSchema0to1() {
 	var gk, buf []byte

 	for dbi.Next() {
-		t.checkFlush()
-
 		folder, ok := db.keyer.FolderFromDeviceFileKey(dbi.Key())
 		if !ok {
 			// not having the folder in the index is bad; delete and continue
-			t.Delete(dbi.Key())
+			if err := t.Delete(dbi.Key()); err != nil {
+				return err
+			}
 			continue
 		}
 		device, ok := db.keyer.DeviceFromDeviceFileKey(dbi.Key())
 		if !ok {
 			// not having the device in the index is bad; delete and continue
-			t.Delete(dbi.Key())
+			if err := t.Delete(dbi.Key()); err != nil {
+				return err
+			}
 			continue
 		}
 		name := db.keyer.NameFromDeviceFileKey(dbi.Key())
@@ -125,9 +148,17 @@ func (db *schemaUpdater) updateSchema0to1() {
 		if _, ok := changedFolders[string(folder)]; !ok {
 			changedFolders[string(folder)] = struct{}{}
 		}
-		gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
-		buf = t.removeFromGlobal(gk, buf, folder, device, nil, nil)
-		t.Delete(dbi.Key())
+		gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
+		if err != nil {
+			return err
+		}
+		buf, err = t.removeFromGlobal(gk, buf, folder, device, nil, nil)
+		if err != nil {
+			return err
+		}
+		if err := t.Delete(dbi.Key()); err != nil {
+			return err
+		}
 		continue
 	}
@@ -147,14 +178,21 @@ func (db *schemaUpdater) updateSchema0to1() {
 			if err != nil {
 				panic("can't happen: " + err.Error())
 			}
-			t.Put(dbi.Key(), bs)
+			if err := t.Put(dbi.Key(), bs); err != nil {
+				return err
+			}
 			symlinkConv++
 		}

 		// Add invalid files to global list
 		if f.IsInvalid() {
-			gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
-			if buf, ok = t.updateGlobal(gk, buf, folder, device, f, meta); ok {
+			gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
+			if err != nil {
+				return err
+			}
+			if buf, ok, err = t.updateGlobal(gk, buf, folder, device, f, meta); err != nil {
+				return err
+			} else if ok {
 				if _, ok = changedFolders[string(folder)]; !ok {
 					changedFolders[string(folder)] = struct{}{}
 				}
@@ -164,86 +202,139 @@ func (db *schemaUpdater) updateSchema0to1() {
 	}

 	for folder := range changedFolders {
-		db.dropFolderMeta([]byte(folder))
+		if err := db.dropFolderMeta([]byte(folder)); err != nil {
+			return err
+		}
 	}
+	return t.commit()
 }

 // updateSchema1to2 introduces a sequenceKey->deviceKey bucket for local items
 // to allow iteration in sequence order (simplifies sending indexes).
-func (db *schemaUpdater) updateSchema1to2() {
-	t := db.newReadWriteTransaction()
+func (db *schemaUpdater) updateSchema1to2() error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()

 	var sk []byte
 	var dk []byte
 	for _, folderStr := range db.ListFolders() {
 		folder := []byte(folderStr)
-		db.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool {
-			sk = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo())
-			dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(f.FileName()))
-			t.Put(sk, dk)
-			t.checkFlush()
-			return true
-		})
-	}
+		var putErr error
+		err := db.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool {
+			sk, putErr = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo())
+			if putErr != nil {
+				return false
+			}
+			dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(f.FileName()))
+			if putErr != nil {
+				return false
+			}
+			putErr = t.Put(sk, dk)
+			return putErr == nil
+		})
+		if putErr != nil {
+			return putErr
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return t.commit()
 }

 // updateSchema2to3 introduces a needKey->nil bucket for locally needed files.
-func (db *schemaUpdater) updateSchema2to3() {
-	t := db.newReadWriteTransaction()
+func (db *schemaUpdater) updateSchema2to3() error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()

 	var nk []byte
 	var dk []byte
 	for _, folderStr := range db.ListFolders() {
 		folder := []byte(folderStr)
-		db.withGlobal(folder, nil, true, func(f FileIntf) bool {
+		var putErr error
+		err := db.withGlobal(folder, nil, true, func(f FileIntf) bool {
 			name := []byte(f.FileName())
-			dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name)
+			dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name)
+			if putErr != nil {
+				return false
+			}
 			var v protocol.Vector
-			haveFile, ok := t.getFileTrunc(dk, true)
+			haveFile, ok, err := t.getFileTrunc(dk, true)
+			if err != nil {
+				putErr = err
+				return false
+			}
 			if ok {
 				v = haveFile.FileVersion()
 			}
 			if !need(f, ok, v) {
 				return true
 			}
-			nk = t.keyer.GenerateNeedFileKey(nk, folder, []byte(f.FileName()))
-			t.Put(nk, nil)
-			t.checkFlush()
-			return true
-		})
-	}
+			nk, putErr = t.keyer.GenerateNeedFileKey(nk, folder, []byte(f.FileName()))
+			if putErr != nil {
+				return false
+			}
+			putErr = t.Put(nk, nil)
+			return putErr == nil
+		})
+		if putErr != nil {
+			return putErr
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return t.commit()
 }

 // updateSchemaTo5 resets the need bucket due to bugs existing in the v0.14.49
 // release candidates (dbVersion 3 and 4)
 // https://github.com/syncthing/syncthing/issues/5007
 // https://github.com/syncthing/syncthing/issues/5053
-func (db *schemaUpdater) updateSchemaTo5() {
-	t := db.newReadWriteTransaction()
+func (db *schemaUpdater) updateSchemaTo5() error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	var nk []byte
 	for _, folderStr := range db.ListFolders() {
-		nk = db.keyer.GenerateNeedFileKey(nk, []byte(folderStr), nil)
-		t.deleteKeyPrefix(nk[:keyPrefixLen+keyFolderLen])
+		nk, err = db.keyer.GenerateNeedFileKey(nk, []byte(folderStr), nil)
+		if err != nil {
+			return err
+		}
+		if err := t.deleteKeyPrefix(nk[:keyPrefixLen+keyFolderLen]); err != nil {
+			return err
+		}
 	}
-	t.close()
+	if err := t.commit(); err != nil {
+		return err
+	}

-	db.updateSchema2to3()
+	return db.updateSchema2to3()
 }

-func (db *schemaUpdater) updateSchema5to6() {
+func (db *schemaUpdater) updateSchema5to6() error {
 	// For every local file with the Invalid bit set, clear the Invalid bit and
 	// set LocalFlags = FlagLocalIgnored.

-	t := db.newReadWriteTransaction()
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()

 	var dk []byte

 	for _, folderStr := range db.ListFolders() {
 		folder := []byte(folderStr)
-		db.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f FileIntf) bool {
+		var putErr error
+		err := db.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f FileIntf) bool {
 			if !f.IsInvalid() {
 				return true
 			}
@@ -253,19 +344,31 @@ func (db *schemaUpdater) updateSchema5to6() {
 			fi.LocalFlags = protocol.FlagLocalIgnored
 			bs, _ := fi.Marshal()

-			dk = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(fi.Name))
-			t.Put(dk, bs)
-
-			t.checkFlush()
-			return true
-		})
-	}
+			dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(fi.Name))
+			if putErr != nil {
+				return false
+			}
+			putErr = t.Put(dk, bs)
+
+			return putErr == nil
+		})
+		if putErr != nil {
+			return putErr
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return t.commit()
 }

 // updateSchema6to7 checks whether all currently locally needed files are really
 // needed and removes them if not.
-func (db *schemaUpdater) updateSchema6to7() {
-	t := db.newReadWriteTransaction()
+func (db *schemaUpdater) updateSchema6to7() error {
+	t, err := db.newReadWriteTransaction()
+	if err != nil {
+		return err
+	}
 	defer t.close()

 	var gk []byte
@@ -273,15 +376,24 @@ func (db *schemaUpdater) updateSchema6to7() {

 	for _, folderStr := range db.ListFolders() {
 		folder := []byte(folderStr)
-		db.withNeedLocal(folder, false, func(f FileIntf) bool {
+		var delErr error
+		err := db.withNeedLocal(folder, false, func(f FileIntf) bool {
 			name := []byte(f.FileName())
 			global := f.(protocol.FileInfo)
-			gk = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
-			svl, err := t.Get(gk, nil)
+			gk, delErr = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
+			if delErr != nil {
+				return false
+			}
+			svl, err := t.Get(gk)
 			if err != nil {
 				// If there is no global list, we hardly need it.
-				t.Delete(t.keyer.GenerateNeedFileKey(nk, folder, name))
-				return true
+				key, err := t.keyer.GenerateNeedFileKey(nk, folder, name)
+				if err != nil {
+					delErr = err
+					return false
+				}
+				delErr = t.Delete(key)
+				return delErr == nil
 			}
 			var fl VersionList
 			err = fl.Unmarshal(svl)
@@ -291,9 +403,18 @@ func (db *schemaUpdater) updateSchema6to7() {
 				return true
 			}
 			if localFV, haveLocalFV := fl.Get(protocol.LocalDeviceID[:]); !need(global, haveLocalFV, localFV.Version) {
-				t.Delete(t.keyer.GenerateNeedFileKey(nk, folder, name))
+				key, err := t.keyer.GenerateNeedFileKey(nk, folder, name)
+				if err != nil {
+					delErr = err
+					return false
+				}
+				delErr = t.Delete(key)
 			}
-			return true
+			return delErr == nil
 		})
+		if err != nil {
+			return err
+		}
 	}
+	return t.commit()
 }
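A recurring wrinkle in the migrations above: the with* iteration helpers take a callback that returns only a bool, so an error raised inside the callback is captured in an outer variable and iteration is aborted by returning false. The pattern in isolation, with hypothetical iterate and process helpers:

	var innerErr error
	err := iterate(func(item Item) bool {
		innerErr = process(item) // hypothetical per-item work
		return innerErr == nil   // returning false aborts the iteration
	})
	if innerErr != nil {
		return innerErr // failure raised inside the callback
	}
	if err != nil {
		return err // failure of the iteration itself
	}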
176	lib/db/set.go
@@ -16,11 +16,11 @@ import (
 	"os"
 	"time"

+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/osutil"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/sync"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )

 type FileSet struct {
@@ -83,29 +83,42 @@ func NewFileSet(folder string, fs fs.Filesystem, ll *Lowlevel) *FileSet {

 	if err := s.meta.fromDB(db, []byte(folder)); err != nil {
 		l.Infof("No stored folder metadata for %q: recalculating", folder)
-		s.recalcCounts()
+		if err := s.recalcCounts(); backend.IsClosed(err) {
+			return nil
+		} else if err != nil {
+			panic(err)
+		}
 	} else if age := time.Since(s.meta.Created()); age > databaseRecheckInterval {
 		l.Infof("Stored folder metadata for %q is %v old; recalculating", folder, age)
-		s.recalcCounts()
+		if err := s.recalcCounts(); backend.IsClosed(err) {
+			return nil
+		} else if err != nil {
+			panic(err)
+		}
 	}

 	return &s
 }

-func (s *FileSet) recalcCounts() {
+func (s *FileSet) recalcCounts() error {
 	s.meta = newMetadataTracker()

-	s.db.checkGlobals([]byte(s.folder), s.meta)
+	if err := s.db.checkGlobals([]byte(s.folder), s.meta); err != nil {
+		return err
+	}

 	var deviceID protocol.DeviceID
-	s.db.withAllFolderTruncated([]byte(s.folder), func(device []byte, f FileInfoTruncated) bool {
+	err := s.db.withAllFolderTruncated([]byte(s.folder), func(device []byte, f FileInfoTruncated) bool {
 		copy(deviceID[:], device)
 		s.meta.addFile(deviceID, f)
 		return true
 	})
+	if err != nil {
+		return err
+	}

 	s.meta.SetCreated()
-	s.meta.toDB(s.db, []byte(s.folder))
+	return s.meta.toDB(s.db, []byte(s.folder))
 }

 func (s *FileSet) Drop(device protocol.DeviceID) {
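The guard that repeats through the rest of this file treats "database is closed" as a soft stop and anything else as fatal. Distilled into a standalone sketch (doDatabaseThing stands in for the various s.db helpers):

	if err := doDatabaseThing(); backend.IsClosed(err) {
		return // the database went away under us; bail out quietly
	} else if err != nil {
		panic(err) // a real database error keeps the panicking contract
	}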
@@ -114,7 +127,11 @@ func (s *FileSet) Drop(device protocol.DeviceID) {
 	s.updateMutex.Lock()
 	defer s.updateMutex.Unlock()

-	s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta)
+	if err := s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta); backend.IsClosed(err) {
+		return
+	} else if err != nil {
+		panic(err)
+	}

 	if device == protocol.LocalDeviceID {
 		s.meta.resetCounts(device)
@@ -131,7 +148,11 @@ func (s *FileSet) Drop(device protocol.DeviceID) {
 		s.meta.resetAll(device)
 	}

-	s.meta.toDB(s.db, []byte(s.folder))
+	if err := s.meta.toDB(s.db, []byte(s.folder)); backend.IsClosed(err) {
+		return
+	} else if err != nil {
+		panic(err)
+	}
 }

 func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
@@ -145,73 +166,110 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
 	s.updateMutex.Lock()
 	defer s.updateMutex.Unlock()

-	defer s.meta.toDB(s.db, []byte(s.folder))
+	defer func() {
+		if err := s.meta.toDB(s.db, []byte(s.folder)); err != nil && !backend.IsClosed(err) {
+			panic(err)
+		}
+	}()

 	if device == protocol.LocalDeviceID {
 		// For the local device we have a bunch of metadata to track.
-		s.db.updateLocalFiles([]byte(s.folder), fs, s.meta)
+		if err := s.db.updateLocalFiles([]byte(s.folder), fs, s.meta); err != nil && !backend.IsClosed(err) {
+			panic(err)
+		}
 		return
 	}
 	// Easy case, just update the files and we're done.
-	s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta)
+	if err := s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) WithNeed(device protocol.DeviceID, fn Iterator) {
 	l.Debugf("%s WithNeed(%v)", s.folder, device)
-	s.db.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn))
+	if err := s.db.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
 	l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
-	s.db.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn))
+	if err := s.db.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) WithHave(device protocol.DeviceID, fn Iterator) {
 	l.Debugf("%s WithHave(%v)", s.folder, device)
-	s.db.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn))
+	if err := s.db.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
 	l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
-	s.db.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn))
+	if err := s.db.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) WithHaveSequence(startSeq int64, fn Iterator) {
 	l.Debugf("%s WithHaveSequence(%v)", s.folder, startSeq)
-	s.db.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn))
+	if err := s.db.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 // Except for an item with a path equal to prefix, only children of prefix are iterated.
 // E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
 func (s *FileSet) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix string, fn Iterator) {
 	l.Debugf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix)
-	s.db.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn))
+	if err := s.db.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) WithGlobal(fn Iterator) {
 	l.Debugf("%s WithGlobal()", s.folder)
-	s.db.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn))
+	if err := s.db.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) WithGlobalTruncated(fn Iterator) {
 	l.Debugf("%s WithGlobalTruncated()", s.folder)
-	s.db.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn))
+	if err := s.db.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 // Except for an item with a path equal to prefix, only children of prefix are iterated.
 // E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
 func (s *FileSet) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
 	l.Debugf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix)
-	s.db.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn))
+	if err := s.db.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
+		panic(err)
+	}
 }

 func (s *FileSet) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
-	f, ok := s.db.getFileDirty([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file)))
+	f, ok, err := s.db.getFileDirty([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file)))
+	if backend.IsClosed(err) {
+		return protocol.FileInfo{}, false
+	} else if err != nil {
+		panic(err)
+	}
 	f.Name = osutil.NativeFilename(f.Name)
 	return f, ok
 }

 func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) {
-	fi, ok := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), false)
+	fi, ok, err := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), false)
+	if backend.IsClosed(err) {
+		return protocol.FileInfo{}, false
+	} else if err != nil {
+		panic(err)
+	}
 	if !ok {
 		return protocol.FileInfo{}, false
 	}
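Note the filename round trip in Get and GetGlobal: the lookup key goes through osutil.NormalizedFilename and the result comes back through osutil.NativeFilename, so callers always speak the platform's native form. A usage sketch with made-up values:

	// s is a *FileSet, device a protocol.DeviceID; the file name is hypothetical.
	if f, ok := s.Get(device, "docs/report.txt"); ok {
		fmt.Println(f.Name, f.Size) // f.Name is in native form again
	}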
@@ -221,7 +279,12 @@ func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) {
 }

 func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
-	fi, ok := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), true)
+	fi, ok, err := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), true)
+	if backend.IsClosed(err) {
+		return FileInfoTruncated{}, false
+	} else if err != nil {
+		panic(err)
+	}
 	if !ok {
 		return FileInfoTruncated{}, false
 	}
@@ -231,7 +294,13 @@ func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
 }

 func (s *FileSet) Availability(file string) []protocol.DeviceID {
-	return s.db.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file)))
+	av, err := s.db.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file)))
+	if backend.IsClosed(err) {
+		return nil
+	} else if err != nil {
+		panic(err)
+	}
+	return av
 }

 func (s *FileSet) Sequence(device protocol.DeviceID) int64 {
@ -255,11 +324,21 @@ func (s *FileSet) GlobalSize() Counts {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
|
func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
|
||||||
id := s.db.getIndexID(device[:], []byte(s.folder))
|
id, err := s.db.getIndexID(device[:], []byte(s.folder))
|
||||||
|
if backend.IsClosed(err) {
|
||||||
|
return 0
|
||||||
|
} else if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
if id == 0 && device == protocol.LocalDeviceID {
|
if id == 0 && device == protocol.LocalDeviceID {
|
||||||
// No index ID set yet. We create one now.
|
// No index ID set yet. We create one now.
|
||||||
id = protocol.NewIndexID()
|
id = protocol.NewIndexID()
|
||||||
s.db.setIndexID(device[:], []byte(s.folder), id)
|
err := s.db.setIndexID(device[:], []byte(s.folder), id)
|
||||||
|
if backend.IsClosed(err) {
|
||||||
|
return 0
|
||||||
|
} else if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return id
|
return id
|
||||||
}
|
}
|
||||||
@ -268,11 +347,18 @@ func (s *FileSet) SetIndexID(device protocol.DeviceID, id protocol.IndexID) {
|
|||||||
if device == protocol.LocalDeviceID {
|
if device == protocol.LocalDeviceID {
|
||||||
panic("do not explicitly set index ID for local device")
|
panic("do not explicitly set index ID for local device")
|
||||||
}
|
}
|
||||||
s.db.setIndexID(device[:], []byte(s.folder), id)
|
if err := s.db.setIndexID(device[:], []byte(s.folder), id); err != nil && !backend.IsClosed(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *FileSet) MtimeFS() *fs.MtimeFS {
|
func (s *FileSet) MtimeFS() *fs.MtimeFS {
|
||||||
prefix := s.db.keyer.GenerateMtimesKey(nil, []byte(s.folder))
|
prefix, err := s.db.keyer.GenerateMtimesKey(nil, []byte(s.folder))
|
||||||
|
if backend.IsClosed(err) {
|
||||||
|
return nil
|
||||||
|
} else if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
kv := NewNamespacedKV(s.db.Lowlevel, string(prefix))
|
kv := NewNamespacedKV(s.db.Lowlevel, string(prefix))
|
||||||
return fs.NewMtimeFS(s.fs, kv)
|
return fs.NewMtimeFS(s.fs, kv)
|
||||||
}
|
}
|
||||||
@ -285,21 +371,39 @@ func (s *FileSet) ListDevices() []protocol.DeviceID {
|
|||||||
// database.
|
// database.
|
||||||
func DropFolder(ll *Lowlevel, folder string) {
|
func DropFolder(ll *Lowlevel, folder string) {
|
||||||
db := newInstance(ll)
|
db := newInstance(ll)
|
||||||
db.dropFolder([]byte(folder))
|
|
||||||
db.dropMtimes([]byte(folder))
|
|
||||||
db.dropFolderMeta([]byte(folder))
|
|
||||||
|
|
||||||
// Also clean out the folder ID mapping.
|
droppers := []func([]byte) error{
|
||||||
db.folderIdx.Delete([]byte(folder))
|
db.dropFolder,
|
||||||
|
db.dropMtimes,
|
||||||
|
db.dropFolderMeta,
|
||||||
|
db.folderIdx.Delete,
|
||||||
|
}
|
||||||
|
for _, drop := range droppers {
|
||||||
|
if err := drop([]byte(folder)); backend.IsClosed(err) {
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DropDeltaIndexIDs removes all delta index IDs from the database.
|
// DropDeltaIndexIDs removes all delta index IDs from the database.
|
||||||
// This will cause a full index transmission on the next connection.
|
// This will cause a full index transmission on the next connection.
|
||||||
func DropDeltaIndexIDs(db *Lowlevel) {
|
func DropDeltaIndexIDs(db *Lowlevel) {
|
||||||
dbi := db.NewIterator(util.BytesPrefix([]byte{KeyTypeIndexID}), nil)
|
dbi, err := db.NewPrefixIterator([]byte{KeyTypeIndexID})
|
||||||
|
if backend.IsClosed(err) {
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
defer dbi.Release()
|
defer dbi.Release()
|
||||||
for dbi.Next() {
|
for dbi.Next() {
|
||||||
db.Delete(dbi.Key(), nil)
|
if err := db.Delete(dbi.Key()); err != nil && !backend.IsClosed(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := dbi.Error(); err != nil && !backend.IsClosed(err) {
|
||||||
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
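All the FileSet methods above apply the same policy to database errors: a "database is closed" error makes the method return a zero value as quickly as possible, while anything else panics. A minimal self-contained sketch of that policy; backend.IsClosed is the real helper used throughout this diff, while the query callback is purely illustrative:

package example

import "github.com/syncthing/syncthing/lib/db/backend"

// get demonstrates the convention: "closed" means we're shutting down,
// so fail quietly; any other database error is fatal at this layer.
func get(query func() ([]byte, error)) ([]byte, bool) {
	val, err := query() // illustrative stand-in for a db accessor
	if backend.IsClosed(err) {
		return nil, false // on the way out; return as quickly as possible
	} else if err != nil {
		panic(err)
	}
	return val, true
}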
@@ -17,6 +17,7 @@ import (

 	"github.com/d4l3k/messagediff"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
@@ -117,7 +118,7 @@ func (l fileList) String() string {
 }

 func TestGlobalSet(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -332,7 +333,7 @@ func TestGlobalSet(t *testing.T) {
 }

 func TestNeedWithInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -369,7 +370,7 @@ func TestNeedWithInvalid(t *testing.T) {
 }

 func TestUpdateToInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -425,7 +426,7 @@ func TestUpdateToInvalid(t *testing.T) {
 }

 func TestInvalidAvailability(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -463,7 +464,7 @@ func TestInvalidAvailability(t *testing.T) {
 }

 func TestGlobalReset(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -501,7 +502,7 @@ func TestGlobalReset(t *testing.T) {
 }

 func TestNeed(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -539,7 +540,7 @@ func TestNeed(t *testing.T) {
 }

 func TestSequence(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -569,7 +570,7 @@ func TestSequence(t *testing.T) {
 }

 func TestListDropFolder(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s0 := db.NewFileSet("test0", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
 	local1 := []protocol.FileInfo{
@@ -619,7 +620,7 @@ func TestListDropFolder(t *testing.T) {
 }

 func TestGlobalNeedWithInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s := db.NewFileSet("test1", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -660,7 +661,7 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
 }

 func TestLongPath(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -671,7 +672,7 @@ func TestLongPath(t *testing.T) {
 	name := b.String() // 5000 characters

 	local := []protocol.FileInfo{
-		{Name: string(name), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
+		{Name: name, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
 	}

 	replace(s, protocol.LocalDeviceID, local)
@@ -686,39 +687,6 @@ func TestLongPath(t *testing.T) {
 	}
 }

-func TestCommitted(t *testing.T) {
-	// Verify that the Committed counter increases when we change things and
-	// doesn't increase when we don't.
-
-	ldb := db.OpenMemory()
-
-	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
-
-	local := []protocol.FileInfo{
-		{Name: string("file"), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
-	}
-
-	// Adding a file should increase the counter
-
-	c0 := ldb.Committed()
-
-	replace(s, protocol.LocalDeviceID, local)
-
-	c1 := ldb.Committed()
-	if c1 <= c0 {
-		t.Errorf("committed data didn't increase; %d <= %d", c1, c0)
-	}
-
-	// Updating with something identical should not do anything
-
-	s.Update(protocol.LocalDeviceID, local)
-
-	c2 := ldb.Committed()
-	if c2 > c1 {
-		t.Errorf("replace with same contents should do nothing but %d > %d", c2, c1)
-	}
-}
-
 func BenchmarkUpdateOneFile(b *testing.B) {
 	local0 := fileList{
 		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
@@ -729,10 +697,11 @@ func BenchmarkUpdateOneFile(b *testing.B) {
 		protocol.FileInfo{Name: "zajksdhaskjdh/askjdhaskjdashkajshd/kasjdhaskjdhaskdjhaskdjash/dkjashdaksjdhaskdjahskdjh", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
 	}

-	ldb, err := db.Open("testdata/benchmarkupdate.db", db.TuningAuto)
+	be, err := backend.Open("testdata/benchmarkupdate.db", backend.TuningAuto)
 	if err != nil {
 		b.Fatal(err)
 	}
+	ldb := db.NewLowlevel(be)
 	defer func() {
 		ldb.Close()
 		os.RemoveAll("testdata/benchmarkupdate.db")
@@ -751,7 +720,7 @@ func BenchmarkUpdateOneFile(b *testing.B) {
 }

 func TestIndexID(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -783,7 +752,7 @@ func TestIndexID(t *testing.T) {
 }

 func TestDropFiles(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	m := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -846,7 +815,7 @@ func TestDropFiles(t *testing.T) {
 }

 func TestIssue4701(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -887,7 +856,7 @@ func TestIssue4701(t *testing.T) {
 }

 func TestWithHaveSequence(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -915,14 +884,14 @@ func TestWithHaveSequence(t *testing.T) {

 func TestStressWithHaveSequence(t *testing.T) {
 	// This races two loops against each other: one that contiously does
-	// updates, and one that continously does sequence walks. The test fails
+	// updates, and one that continuously does sequence walks. The test fails
 	// if the sequence walker sees a discontinuity.

 	if testing.Short() {
 		t.Skip("Takes a long time")
 	}

-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -945,7 +914,7 @@ func TestStressWithHaveSequence(t *testing.T) {
 		close(done)
 	}()

-	var prevSeq int64 = 0
+	var prevSeq int64
 loop:
 	for {
 		select {
@@ -964,7 +933,7 @@ loop:
 }

 func TestIssue4925(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -990,7 +959,7 @@ func TestIssue4925(t *testing.T) {
 }

 func TestMoveGlobalBack(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	file := "foo"
@@ -1054,7 +1023,7 @@ func TestMoveGlobalBack(t *testing.T) {
 // needed files.
 // https://github.com/syncthing/syncthing/issues/5007
 func TestIssue5007(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	file := "foo"
@@ -1081,7 +1050,7 @@ func TestIssue5007(t *testing.T) {
 // TestNeedDeleted checks that a file that doesn't exist locally isn't needed
 // when the global file is deleted.
 func TestNeedDeleted(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	file := "foo"
@@ -1115,7 +1084,7 @@ func TestNeedDeleted(t *testing.T) {
 }

 func TestReceiveOnlyAccounting(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	s := db.NewFileSet(folder, fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -1219,7 +1188,7 @@ func TestReceiveOnlyAccounting(t *testing.T) {
 }

 func TestNeedAfterUnignore(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	folder := "test"
 	file := "foo"
@@ -1251,7 +1220,7 @@ func TestNeedAfterUnignore(t *testing.T) {
 func TestRemoteInvalidNotAccounted(t *testing.T) {
 	// Remote files with the invalid bit should not count.

-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

 	files := []protocol.FileInfo{
@@ -1270,7 +1239,7 @@ func TestRemoteInvalidNotAccounted(t *testing.T) {
 }

 func TestNeedWithNewerInvalid(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	s := db.NewFileSet("default", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

@@ -1308,7 +1277,7 @@ func TestNeedWithNewerInvalid(t *testing.T) {
 }

 func TestNeedAfterDeviceRemove(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	file := "foo"
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
@@ -1335,7 +1304,7 @@ func TestNeedAfterDeviceRemove(t *testing.T) {
 func TestCaseSensitive(t *testing.T) {
 	// Normal case sensitive lookup should work

-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

 	local := []protocol.FileInfo{
@@ -1372,7 +1341,7 @@ func TestSequenceIndex(t *testing.T) {

 	// Set up a db and a few files that we will manipulate.

-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)

 	local := []protocol.FileInfo{
@@ -1463,7 +1432,7 @@ func TestSequenceIndex(t *testing.T) {
 }

 func TestIgnoreAfterReceiveOnly(t *testing.T) {
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())

 	file := "foo"
 	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
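The test changes above are mechanical: every db.OpenMemory() becomes an in-memory backend wrapped in a Lowlevel. A small self-contained sketch of the new setup, using only calls that appear in this diff (the test name is illustrative):

package example

import (
	"testing"

	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/db/backend"
	"github.com/syncthing/syncthing/lib/fs"
)

func TestSetupSketch(t *testing.T) {
	// An in-memory backend wrapped in a Lowlevel, as the tests above do.
	ldb := db.NewLowlevel(backend.OpenMemory())
	defer ldb.Close()

	s := db.NewFileSet("test", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
	if s == nil {
		t.Fatal("expected a FileSet")
	}
}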
@@ -10,16 +10,15 @@ import (
 	"encoding/binary"
 	"sort"

+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/sync"
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )

 // A smallIndex is an in memory bidirectional []byte to uint32 map. It gives
 // fast lookups in both directions and persists to the database. Don't use for
 // storing more items than fit comfortably in RAM.
 type smallIndex struct {
-	db     *leveldb.DB
+	db     backend.Backend
 	prefix []byte
 	id2val map[uint32]string
 	val2id map[string]uint32
@@ -27,7 +26,7 @@ type smallIndex struct {
 	mut    sync.Mutex
 }

-func newSmallIndex(db *leveldb.DB, prefix []byte) *smallIndex {
+func newSmallIndex(db backend.Backend, prefix []byte) *smallIndex {
 	idx := &smallIndex{
 		db:     db,
 		prefix: prefix,
@@ -42,7 +41,10 @@ func newSmallIndex(db *leveldb.DB, prefix []byte) *smallIndex {
 // load iterates over the prefix space in the database and populates the in
 // memory maps.
 func (i *smallIndex) load() {
-	it := i.db.NewIterator(util.BytesPrefix(i.prefix), nil)
+	it, err := i.db.NewPrefixIterator(i.prefix)
+	if err != nil {
+		panic("loading small index: " + err.Error())
+	}
 	defer it.Release()
 	for it.Next() {
 		val := string(it.Value())
@@ -60,7 +62,7 @@ func (i *smallIndex) load() {

 // ID returns the index number for the given byte slice, allocating a new one
 // and persisting this to the database if necessary.
-func (i *smallIndex) ID(val []byte) uint32 {
+func (i *smallIndex) ID(val []byte) (uint32, error) {
 	i.mut.Lock()
 	// intentionally avoiding defer here as we want this call to be as fast as
 	// possible in the general case (folder ID already exists). The map lookup
@@ -69,7 +71,7 @@ func (i *smallIndex) ID(val []byte) uint32 {
 	// here.
 	if id, ok := i.val2id[string(val)]; ok {
 		i.mut.Unlock()
-		return id
+		return id, nil
 	}

 	id := i.nextID
@@ -82,10 +84,13 @@ func (i *smallIndex) ID(val []byte) uint32 {
 	key := make([]byte, len(i.prefix)+8) // prefix plus uint32 id
 	copy(key, i.prefix)
 	binary.BigEndian.PutUint32(key[len(i.prefix):], id)
-	i.db.Put(key, val, nil)
+	if err := i.db.Put(key, val); err != nil {
+		i.mut.Unlock()
+		return 0, err
+	}

 	i.mut.Unlock()
-	return id
+	return id, nil
 }

 // Val returns the value for the given index number, or (nil, false) if there
@@ -101,7 +106,7 @@ func (i *smallIndex) Val(id uint32) ([]byte, bool) {
 	return []byte(val), true
 }

-func (i *smallIndex) Delete(val []byte) {
+func (i *smallIndex) Delete(val []byte) error {
 	i.mut.Lock()
 	defer i.mut.Unlock()

@@ -115,7 +120,9 @@ func (i *smallIndex) Delete(val []byte) {
 		// Put an empty value into the database. This indicates that the
 		// entry does not exist any more and prevents the ID from being
 		// reused in the future.
-		i.db.Put(key, []byte{}, nil)
+		if err := i.db.Put(key, []byte{}); err != nil {
+			return err
+		}

 		// Delete reverse mapping.
 		delete(i.id2val, id)
@@ -123,6 +130,7 @@ func (i *smallIndex) Delete(val []byte) {

 	// Delete forward mapping.
 	delete(i.val2id, string(val))
+	return nil
 }

 // Values returns the set of values in the index
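smallIndex.ID now persists newly allocated IDs through the backend and returns an error instead of ignoring write failures. A hypothetical caller inside package db might thread it like this (folderID is illustrative and not part of the diff):

// folderID is a hypothetical caller of the new error-returning ID: the
// caller, not the index, decides what a database failure should mean.
func folderID(idx *smallIndex, folder string) (uint32, error) {
	id, err := idx.ID([]byte(folder))
	if err != nil {
		return 0, err // e.g. propagated up to the FileSet layer
	}
	return id, nil
}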
@@ -6,11 +6,15 @@

 package db

-import "testing"
+import (
+	"testing"
+
+	"github.com/syncthing/syncthing/lib/db/backend"
+)

 func TestSmallIndex(t *testing.T) {
-	db := OpenMemory()
-	idx := newSmallIndex(db.DB, []byte{12, 34})
+	db := NewLowlevel(backend.OpenMemory())
+	idx := newSmallIndex(db, []byte{12, 34})

 	// ID zero should be unallocated
 	if val, ok := idx.Val(0); ok || val != nil {
@@ -18,7 +22,9 @@ func TestSmallIndex(t *testing.T) {
 	}

 	// A new key should get ID zero
-	if id := idx.ID([]byte("hello")); id != 0 {
+	if id, err := idx.ID([]byte("hello")); err != nil {
+		t.Fatal(err)
+	} else if id != 0 {
 		t.Fatal("Expected 0, not", id)
 	}
 	// Looking up ID zero should work
@@ -30,23 +36,29 @@ func TestSmallIndex(t *testing.T) {
 	idx.Delete([]byte("hello"))

 	// Next ID should be one
-	if id := idx.ID([]byte("key2")); id != 1 {
+	if id, err := idx.ID([]byte("key2")); err != nil {
+		t.Fatal(err)
+	} else if id != 1 {
 		t.Fatal("Expected 1, not", id)
 	}

 	// Now lets create a new index instance based on what's actually serialized to the database.
-	idx = newSmallIndex(db.DB, []byte{12, 34})
+	idx = newSmallIndex(db, []byte{12, 34})

 	// Status should be about the same as before.
 	if val, ok := idx.Val(0); ok || val != nil {
 		t.Fatal("Unexpected return for deleted ID 0")
 	}
-	if id := idx.ID([]byte("key2")); id != 1 {
+	if id, err := idx.ID([]byte("key2")); err != nil {
+		t.Fatal(err)
+	} else if id != 1 {
 		t.Fatal("Expected 1, not", id)
 	}

 	// Setting "hello" again should get us ID 2, not 0 as it was originally.
-	if id := idx.ID([]byte("hello")); id != 2 {
+	if id, err := idx.ID([]byte("hello")); err != nil {
+		t.Fatal(err)
+	} else if id != 2 {
 		t.Fatal("Expected 2, not", id)
 	}
 }
@@ -175,7 +175,7 @@ func (vl VersionList) String() string {
 // update brings the VersionList up to date with file. It returns the updated
 // VersionList, a potentially removed old FileVersion and its index, as well as
 // the index where the new FileVersion was inserted.
-func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (_ VersionList, removedFV FileVersion, removedAt int, insertedAt int) {
+func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (_ VersionList, removedFV FileVersion, removedAt int, insertedAt int, err error) {
 	vl, removedFV, removedAt = vl.pop(device)

 	nv := FileVersion{
@@ -198,7 +198,7 @@ func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t re
 			// The version at this point in the list is equal to or lesser
 			// ("older") than us. We insert ourselves in front of it.
 			vl = vl.insertAt(i, nv)
-			return vl, removedFV, removedAt, i
+			return vl, removedFV, removedAt, i, nil

 		case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
 			// The version at this point is in conflict with us. We must pull
@@ -209,9 +209,11 @@ func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t re
 			// to determine the winner.)
 			//
 			// A surprise missing file entry here is counted as a win for us.
-			if of, ok := t.getFile(folder, vl.Versions[i].Device, []byte(file.Name)); !ok || file.WinsConflict(of) {
+			if of, ok, err := t.getFile(folder, vl.Versions[i].Device, []byte(file.Name)); err != nil {
+				return vl, removedFV, removedAt, i, err
+			} else if !ok || file.WinsConflict(of) {
 				vl = vl.insertAt(i, nv)
-				return vl, removedFV, removedAt, i
+				return vl, removedFV, removedAt, i, nil
 			}
 		}
 	}
@@ -219,7 +221,7 @@ func (vl VersionList) update(folder, device []byte, file protocol.FileInfo, t re
 	// We didn't find a position for an insert above, so append to the end.
 	vl.Versions = append(vl.Versions, nv)

-	return vl, removedFV, removedAt, len(vl.Versions) - 1
+	return vl, removedFV, removedAt, len(vl.Versions) - 1, nil
 }

 func (vl VersionList) insertAt(i int, v FileVersion) VersionList {
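The extra err return lets the conflict-resolution lookup via t.getFile fail cleanly rather than counting a database error as a missing file. A hypothetical call site inside package db, showing the five-value return being consumed (applyUpdate is illustrative; the real consumer is updateGlobal in the next file):

// applyUpdate shows the new five-value return being threaded; any
// database error from the conflict lookup aborts the update.
func applyUpdate(vl VersionList, folder, device []byte, file protocol.FileInfo, t readOnlyTransaction) (VersionList, error) {
	vl, _, _, insertedAt, err := vl.update(folder, device, file, t)
	if err != nil {
		return vl, err
	}
	if insertedAt == -1 {
		// Same version was already present; the global list is unchanged.
		return vl, nil
	}
	return vl, nil
}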
@@ -7,111 +7,146 @@
 package db

 import (
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/protocol"
-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/util"
 )

 // A readOnlyTransaction represents a database snapshot.
 type readOnlyTransaction struct {
-	snapshot
+	backend.ReadTransaction
 	keyer keyer
 }

-func (db *instance) newReadOnlyTransaction() readOnlyTransaction {
-	return readOnlyTransaction{
-		snapshot: db.GetSnapshot(),
-		keyer:    db.keyer,
+func (db *instance) newReadOnlyTransaction() (readOnlyTransaction, error) {
+	tran, err := db.NewReadTransaction()
+	if err != nil {
+		return readOnlyTransaction{}, err
 	}
+	return readOnlyTransaction{
+		ReadTransaction: tran,
+		keyer:           db.keyer,
+	}, nil
 }

 func (t readOnlyTransaction) close() {
 	t.Release()
 }

-func (t readOnlyTransaction) getFile(folder, device, file []byte) (protocol.FileInfo, bool) {
-	return t.getFileByKey(t.keyer.GenerateDeviceFileKey(nil, folder, device, file))
-}
-
-func (t readOnlyTransaction) getFileByKey(key []byte) (protocol.FileInfo, bool) {
-	if f, ok := t.getFileTrunc(key, false); ok {
-		return f.(protocol.FileInfo), true
+func (t readOnlyTransaction) getFile(folder, device, file []byte) (protocol.FileInfo, bool, error) {
+	key, err := t.keyer.GenerateDeviceFileKey(nil, folder, device, file)
+	if err != nil {
+		return protocol.FileInfo{}, false, err
 	}
-	return protocol.FileInfo{}, false
+	return t.getFileByKey(key)
 }

-func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (FileIntf, bool) {
-	bs, err := t.Get(key, nil)
-	if err == leveldb.ErrNotFound {
-		return nil, false
+func (t readOnlyTransaction) getFileByKey(key []byte) (protocol.FileInfo, bool, error) {
+	f, ok, err := t.getFileTrunc(key, false)
+	if err != nil || !ok {
+		return protocol.FileInfo{}, false, err
+	}
+	return f.(protocol.FileInfo), true, nil
+}
+
+func (t readOnlyTransaction) getFileTrunc(key []byte, trunc bool) (FileIntf, bool, error) {
+	bs, err := t.Get(key)
+	if backend.IsNotFound(err) {
+		return nil, false, nil
 	}
 	if err != nil {
-		l.Debugln("surprise error:", err)
-		return nil, false
+		return nil, false, err
 	}
 	f, err := unmarshalTrunc(bs, trunc)
 	if err != nil {
-		l.Debugln("unmarshal error:", err)
-		return nil, false
+		return nil, false, err
 	}
-	return f, true
+	return f, true, nil
 }

-func (t readOnlyTransaction) getGlobal(keyBuf, folder, file []byte, truncate bool) ([]byte, FileIntf, bool) {
-	keyBuf = t.keyer.GenerateGlobalVersionKey(keyBuf, folder, file)
-
-	bs, err := t.Get(keyBuf, nil)
+func (t readOnlyTransaction) getGlobal(keyBuf, folder, file []byte, truncate bool) ([]byte, FileIntf, bool, error) {
+	var err error
+	keyBuf, err = t.keyer.GenerateGlobalVersionKey(keyBuf, folder, file)
 	if err != nil {
-		return keyBuf, nil, false
+		return nil, nil, false, err
+	}
+
+	bs, err := t.Get(keyBuf)
+	if backend.IsNotFound(err) {
+		return keyBuf, nil, false, nil
+	}
+	if err != nil {
+		return nil, nil, false, err
 	}

 	vl, ok := unmarshalVersionList(bs)
 	if !ok {
-		return keyBuf, nil, false
+		return keyBuf, nil, false, nil
 	}

-	keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, vl.Versions[0].Device, file)
-	if fi, ok := t.getFileTrunc(keyBuf, truncate); ok {
-		return keyBuf, fi, true
+	keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, vl.Versions[0].Device, file)
+	if err != nil {
+		return nil, nil, false, err
 	}
-
-	return keyBuf, nil, false
+	fi, ok, err := t.getFileTrunc(keyBuf, truncate)
+	if err != nil || !ok {
+		return keyBuf, nil, false, err
+	}
+	return keyBuf, fi, true, nil
 }

 // A readWriteTransaction is a readOnlyTransaction plus a batch for writes.
 // The batch will be committed on close() or by checkFlush() if it exceeds the
 // batch size.
 type readWriteTransaction struct {
+	backend.WriteTransaction
 	readOnlyTransaction
-	*batch
 }

-func (db *instance) newReadWriteTransaction() readWriteTransaction {
-	return readWriteTransaction{
-		readOnlyTransaction: db.newReadOnlyTransaction(),
-		batch:               db.newBatch(),
+func (db *instance) newReadWriteTransaction() (readWriteTransaction, error) {
+	tran, err := db.NewWriteTransaction()
+	if err != nil {
+		return readWriteTransaction{}, err
 	}
+	return readWriteTransaction{
+		WriteTransaction: tran,
+		readOnlyTransaction: readOnlyTransaction{
+			ReadTransaction: tran,
+			keyer:           db.keyer,
+		},
+	}, nil
+}
+
+func (t readWriteTransaction) commit() error {
+	t.readOnlyTransaction.close()
+	return t.WriteTransaction.Commit()
 }

 func (t readWriteTransaction) close() {
-	t.flush()
 	t.readOnlyTransaction.close()
+	t.WriteTransaction.Release()
 }

 // updateGlobal adds this device+version to the version list for the given
 // file. If the device is already present in the list, the version is updated.
 // If the file does not have an entry in the global list, it is created.
-func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, file protocol.FileInfo, meta *metadataTracker) ([]byte, bool) {
+func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, file protocol.FileInfo, meta *metadataTracker) ([]byte, bool, error) {
 	l.Debugf("update global; folder=%q device=%v file=%q version=%v invalid=%v", folder, protocol.DeviceIDFromBytes(device), file.Name, file.Version, file.IsInvalid())

 	var fl VersionList
-	if svl, err := t.Get(gk, nil); err == nil {
-		fl.Unmarshal(svl) // Ignore error, continue with empty fl
+	svl, err := t.Get(gk)
+	if err == nil {
+		_ = fl.Unmarshal(svl) // Ignore error, continue with empty fl
+	} else if !backend.IsNotFound(err) {
+		return nil, false, err
+	}
+
+	fl, removedFV, removedAt, insertedAt, err := fl.update(folder, device, file, t.readOnlyTransaction)
+	if err != nil {
+		return nil, false, err
 	}
-	fl, removedFV, removedAt, insertedAt := fl.update(folder, device, file, t.readOnlyTransaction)
 	if insertedAt == -1 {
 		l.Debugln("update global; same version, global unchanged")
-		return keyBuf, false
+		return keyBuf, false, nil
 	}

 	name := []byte(file.Name)
@@ -121,24 +156,29 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
 		// Inserted a new newest version
 		global = file
 	} else {
-		keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, name)
-		if new, ok := t.getFileByKey(keyBuf); ok {
-			global = new
-		} else {
-			// This file must exist in the db, so this must be caused
-			// by the db being closed - bail out.
-			l.Debugln("File should exist:", name)
-			return keyBuf, false
+		keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, name)
+		if err != nil {
+			return nil, false, err
 		}
+		new, ok, err := t.getFileByKey(keyBuf)
+		if err != nil || !ok {
+			return keyBuf, false, err
+		}
+		global = new
 	}

 	// Fixup the list of files we need.
-	keyBuf = t.updateLocalNeed(keyBuf, folder, name, fl, global)
+	keyBuf, err = t.updateLocalNeed(keyBuf, folder, name, fl, global)
+	if err != nil {
+		return nil, false, err
+	}

 	if removedAt != 0 && insertedAt != 0 {
 		l.Debugf(`new global for "%v" after update: %v`, file.Name, fl)
-		t.Put(gk, mustMarshal(&fl))
-		return keyBuf, true
+		if err := t.Put(gk, mustMarshal(&fl)); err != nil {
+			return nil, false, err
+		}
+		return keyBuf, true, nil
 	}

 	// Remove the old global from the global size counter
@@ -149,8 +189,15 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
 		// The previous newest version is now at index 1
 		oldGlobalFV = fl.Versions[1]
 	}
-	keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, oldGlobalFV.Device, name)
-	if oldFile, ok := t.getFileByKey(keyBuf); ok {
+	keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, oldGlobalFV.Device, name)
+	if err != nil {
+		return nil, false, err
+	}
+	oldFile, ok, err := t.getFileByKey(keyBuf)
+	if err != nil {
+		return nil, false, err
+	}
+	if ok {
 		// A failure to get the file here is surprising and our
 		// global size data will be incorrect until a restart...
 		meta.removeFile(protocol.GlobalDeviceID, oldFile)
@@ -160,27 +207,41 @@ func (t readWriteTransaction) updateGlobal(gk, keyBuf, folder, device []byte, fi
 	meta.addFile(protocol.GlobalDeviceID, global)

 	l.Debugf(`new global for "%v" after update: %v`, file.Name, fl)
-	t.Put(gk, mustMarshal(&fl))
+	if err := t.Put(gk, mustMarshal(&fl)); err != nil {
+		return nil, false, err
+	}

-	return keyBuf, true
+	return keyBuf, true, nil
 }

 // updateLocalNeed checks whether the given file is still needed on the local
 // device according to the version list and global FileInfo given and updates
 // the db accordingly.
-func (t readWriteTransaction) updateLocalNeed(keyBuf, folder, name []byte, fl VersionList, global protocol.FileInfo) []byte {
-	keyBuf = t.keyer.GenerateNeedFileKey(keyBuf, folder, name)
-	hasNeeded, _ := t.Has(keyBuf, nil)
+func (t readWriteTransaction) updateLocalNeed(keyBuf, folder, name []byte, fl VersionList, global protocol.FileInfo) ([]byte, error) {
+	var err error
+	keyBuf, err = t.keyer.GenerateNeedFileKey(keyBuf, folder, name)
+	if err != nil {
+		return nil, err
+	}
+	_, err = t.Get(keyBuf)
+	if err != nil && !backend.IsNotFound(err) {
+		return nil, err
+	}
+	hasNeeded := err == nil
 	if localFV, haveLocalFV := fl.Get(protocol.LocalDeviceID[:]); need(global, haveLocalFV, localFV.Version) {
 		if !hasNeeded {
 			l.Debugf("local need insert; folder=%q, name=%q", folder, name)
-			t.Put(keyBuf, nil)
+			if err := t.Put(keyBuf, nil); err != nil {
+				return nil, err
+			}
 		}
 	} else if hasNeeded {
 		l.Debugf("local need delete; folder=%q, name=%q", folder, name)
-		t.Delete(keyBuf)
+		if err := t.Delete(keyBuf); err != nil {
+			return nil, err
+		}
 	}
-	return keyBuf
+	return keyBuf, nil
 }

 func need(global FileIntf, haveLocal bool, localVersion protocol.Vector) bool {
@@ -202,71 +263,94 @@ func need(global FileIntf, haveLocal bool, localVersion protocol.Vector) bool {
 // removeFromGlobal removes the device from the global version list for the
 // given file. If the version list is empty after this, the file entry is
 // removed entirely.
-func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte, file []byte, meta *metadataTracker) []byte {
+func (t readWriteTransaction) removeFromGlobal(gk, keyBuf, folder, device []byte, file []byte, meta *metadataTracker) ([]byte, error) {
 	l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file)

-	svl, err := t.Get(gk, nil)
-	if err != nil {
+	svl, err := t.Get(gk)
+	if backend.IsNotFound(err) {
 		// We might be called to "remove" a global version that doesn't exist
 		// if the first update for the file is already marked invalid.
-		return keyBuf
+		return keyBuf, nil
+	} else if err != nil {
+		return nil, err
 	}

 	var fl VersionList
 	err = fl.Unmarshal(svl)
 	if err != nil {
-		l.Debugln("unmarshal error:", err)
-		return keyBuf
+		return nil, err
 	}

 	fl, _, removedAt := fl.pop(device)
 	if removedAt == -1 {
 		// There is no version for the given device
-		return keyBuf
+		return keyBuf, nil
 	}

 	if removedAt == 0 {
 		// A failure to get the file here is surprising and our
 		// global size data will be incorrect until a restart...
-		keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, device, file)
-		if f, ok := t.getFileByKey(keyBuf); ok {
+		keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, device, file)
+		if err != nil {
+			return nil, err
+		}
+		if f, ok, err := t.getFileByKey(keyBuf); err != nil {
+			return keyBuf, nil
+		} else if ok {
 			meta.removeFile(protocol.GlobalDeviceID, f)
 		}
 	}

 	if len(fl.Versions) == 0 {
-		keyBuf = t.keyer.GenerateNeedFileKey(keyBuf, folder, file)
-		t.Delete(keyBuf)
-		t.Delete(gk)
-		return keyBuf
+		keyBuf, err = t.keyer.GenerateNeedFileKey(keyBuf, folder, file)
+		if err != nil {
+			return nil, err
+		}
+		if err := t.Delete(keyBuf); err != nil {
+			return nil, err
+		}
+		if err := t.Delete(gk); err != nil {
+			return nil, err
+		}
+		return keyBuf, nil
 	}

 	if removedAt == 0 {
-		keyBuf = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, file)
-		global, ok := t.getFileByKey(keyBuf)
-		if !ok {
-			// This file must exist in the db, so this must be caused
-			// by the db being closed - bail out.
-			l.Debugln("File should exist:", file)
-			return keyBuf
+		keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, fl.Versions[0].Device, file)
+		if err != nil {
+			return nil, err
+		}
+		global, ok, err := t.getFileByKey(keyBuf)
+		if err != nil || !ok {
+			return keyBuf, err
+		}
+		keyBuf, err = t.updateLocalNeed(keyBuf, folder, file, fl, global)
+		if err != nil {
+			return nil, err
 		}
-		keyBuf = t.updateLocalNeed(keyBuf, folder, file, fl, global)
 		meta.addFile(protocol.GlobalDeviceID, global)
 	}

 	l.Debugf("new global after remove: %v", fl)
-	t.Put(gk, mustMarshal(&fl))
+	if err := t.Put(gk, mustMarshal(&fl)); err != nil {
+		return nil, err
+	}

-	return keyBuf
+	return keyBuf, nil
 }

-func (t readWriteTransaction) deleteKeyPrefix(prefix []byte) {
-	dbi := t.NewIterator(util.BytesPrefix(prefix), nil)
-	for dbi.Next() {
-		t.Delete(dbi.Key())
-		t.checkFlush()
+func (t readWriteTransaction) deleteKeyPrefix(prefix []byte) error {
+	dbi, err := t.NewPrefixIterator(prefix)
+	if err != nil {
+		return err
 	}
-	dbi.Release()
+	defer dbi.Release()
+	for dbi.Next() {
+		if err := t.Delete(dbi.Key()); err != nil {
+			return err
+		}
+	}
+	return dbi.Error()
 }

 type marshaller interface {
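The pair of transaction types above implies a fixed caller shape: open, defer close, write, commit. A sketch of that shape inside package db, using only the methods defined above; doWrite and its arguments are illustrative, and releasing after a successful commit is assumed to be a harmless no-op in the backend:

// doWrite sketches the write-transaction lifecycle: close() releases
// the transaction if we bail out early, commit() flushes it.
func (db *instance) doWrite(key, val []byte) error {
	t, err := db.newReadWriteTransaction()
	if err != nil {
		return err
	}
	defer t.close()

	if err := t.Put(key, val); err != nil {
		return err
	}
	return t.commit()
}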
@@ -11,22 +11,26 @@ import (
 	"io"
 	"os"

-	"github.com/syndtr/goleveldb/leveldb"
-	"github.com/syndtr/goleveldb/leveldb/storage"
-	"github.com/syndtr/goleveldb/leveldb/util"
+	"github.com/syncthing/syncthing/lib/db/backend"
 )

 // writeJSONS serializes the database to a JSON stream that can be checked
 // in to the repo and used for tests.
-func writeJSONS(w io.Writer, db *leveldb.DB) {
-	it := db.NewIterator(&util.Range{}, nil)
+func writeJSONS(w io.Writer, db backend.Backend) {
+	it, err := db.NewPrefixIterator(nil)
+	if err != nil {
+		panic(err)
+	}
 	defer it.Release()
 	enc := json.NewEncoder(w)
 	for it.Next() {
-		enc.Encode(map[string][]byte{
+		err := enc.Encode(map[string][]byte{
 			"k": it.Key(),
 			"v": it.Value(),
 		})
+		if err != nil {
+			panic(err)
+		}
 	}
 }

@@ -34,15 +38,15 @@ func writeJSONS(w io.Writer, db *leveldb.DB) {
 // here and the linter to not complain.
 var _ = writeJSONS

-// openJSONS reads a JSON stream file into a leveldb.DB
-func openJSONS(file string) (*leveldb.DB, error) {
+// openJSONS reads a JSON stream file into a backend DB
+func openJSONS(file string) (backend.Backend, error) {
 	fd, err := os.Open(file)
 	if err != nil {
 		return nil, err
 	}
 	dec := json.NewDecoder(fd)

-	db, _ := leveldb.Open(storage.NewMemStorage(), nil)
+	db := backend.OpenMemory()

 	for {
 		var row map[string][]byte
@@ -54,7 +58,9 @@ func openJSONS(file string) (*leveldb.DB, error) {
 			return nil, err
 		}

-		db.Put(row["k"], row["v"], nil)
+		if err := db.Put(row["k"], row["v"]); err != nil {
+			return nil, err
+		}
 	}

 	return db, nil
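writeJSONS and openJSONS form a round trip: one serializes every key/value pair of a backend to a JSON stream, the other rebuilds an in-memory backend from such a file. A sketch of the pairing inside package db; the file path is illustrative:

// dumpAndReload round-trips a backend through the JSON stream format.
func dumpAndReload(src backend.Backend) (backend.Backend, error) {
	fd, err := os.Create("testdata/dump.jsons")
	if err != nil {
		return nil, err
	}
	writeJSONS(fd, src) // panics on database errors, as above
	if err := fd.Close(); err != nil {
		return nil, err
	}
	return openJSONS("testdata/dump.jsons")
}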
@@ -13,8 +13,8 @@ import (
 // The database is where we store the virtual mtimes
 type database interface {
 	Bytes(key string) (data []byte, ok bool)
-	PutBytes(key string, data []byte)
-	Delete(key string)
+	PutBytes(key string, data []byte) error
+	Delete(key string) error
 }

 // The MtimeFS is a filesystem with nanosecond mtime precision, regardless
@@ -236,8 +236,9 @@ func TestMtimeFSInsensitive(t *testing.T) {
 
 type mapStore map[string][]byte
 
-func (s mapStore) PutBytes(key string, data []byte) {
+func (s mapStore) PutBytes(key string, data []byte) error {
 	s[key] = data
+	return nil
 }
 
 func (s mapStore) Bytes(key string) (data []byte, ok bool) {
@@ -245,8 +246,9 @@ func (s mapStore) Bytes(key string) (data []byte, ok bool) {
 	return
 }
 
-func (s mapStore) Delete(key string) {
+func (s mapStore) Delete(key string) error {
 	delete(s, key)
+	return nil
 }
 
 // failChtimes does nothing, and fails
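With both mutating methods now returning an error, every stub implementation has to change signature as well; mapStore is the test double here. A compile-time assertion is the usual guard for this kind of drift, sketched below with the interface copied from the hunk above (whether the real package includes such an assertion is an assumption):

package main

import "fmt"

// database matches the interface from the diff above.
type database interface {
	Bytes(key string) (data []byte, ok bool)
	PutBytes(key string, data []byte) error
	Delete(key string) error
}

type mapStore map[string][]byte

func (s mapStore) PutBytes(key string, data []byte) error  { s[key] = data; return nil }
func (s mapStore) Bytes(key string) (data []byte, ok bool) { data, ok = s[key]; return }
func (s mapStore) Delete(key string) error                 { delete(s, key); return nil }

// Compile-time check: mapStore implements database. If a signature
// drifts, this line fails the build instead of a test.
var _ database = mapStore{}

func main() {
	s := mapStore{}
	_ = s.PutBytes("a", []byte("1"))
	b, ok := s.Bytes("a")
	fmt.Println(string(b), ok) // 1 true
}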
@@ -16,6 +16,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/scanner"
@@ -316,7 +317,7 @@ func setupROFolder() (*model, *sendOnlyFolder) {
 	fcfg.Type = config.FolderTypeReceiveOnly
 	w.SetFolder(fcfg)
 
-	m := newModel(w, myID, "syncthing", "dev", db.OpenMemory(), nil)
+	m := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil)
 
 	m.ServeBackground()
 
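The remaining test-file changes are mechanical and repeat in every hunk below: where a test used to open the in-memory database directly, it now opens an in-memory backend and wraps it in the low-level db type. A sketch of the new construction, assuming the package layout this commit introduces:

package main

import (
	"fmt"

	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/db/backend"
)

func main() {
	// The backend owns the KV store; db.NewLowlevel wraps it into the
	// type the model and FileSet layers consume.
	ldb := db.NewLowlevel(backend.OpenMemory())
	fmt.Printf("%T\n", ldb)
}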
@@ -20,6 +20,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/ignore"
@@ -91,7 +92,7 @@ func createFile(t *testing.T, name string, fs fs.Filesystem) protocol.FileInfo {
 
 func setupSendReceiveFolder(files ...protocol.FileInfo) (*model, *sendReceiveFolder) {
 	w := createTmpWrapper(defaultCfg)
-	model := newModel(w, myID, "syncthing", "dev", db.OpenMemory(), nil)
+	model := newModel(w, myID, "syncthing", "dev", db.NewLowlevel(backend.OpenMemory()), nil)
 	fcfg := testFolderConfigTmp()
 	model.addFolder(fcfg)
 
@@ -27,6 +27,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/ignore"
@@ -306,7 +307,7 @@ func TestDeviceRename(t *testing.T) {
 	}
 	cfg := config.Wrap("testdata/tmpconfig.xml", rawCfg, events.NoopLogger)
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(cfg, myID, "syncthing", "dev", db, nil)
 
 	if cfg.Devices()[device1].Name != "" {
@@ -402,7 +403,7 @@ func TestClusterConfig(t *testing.T) {
 		},
 	}
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 
 	wrapper := createTmpWrapper(cfg)
 	m := newModel(wrapper, myID, "syncthing", "dev", db, nil)
@@ -1533,7 +1534,7 @@ func waitForState(t *testing.T, m *model, folder, status string) {
 func TestROScanRecovery(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	set := db.NewFileSet("default", defaultFs, ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
@@ -1584,7 +1585,7 @@ func TestROScanRecovery(t *testing.T) {
 func TestRWScanRecovery(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	set := db.NewFileSet("default", defaultFs, ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
@@ -1633,7 +1634,7 @@ func TestRWScanRecovery(t *testing.T) {
 }
 
 func TestGlobalDirectoryTree(t *testing.T) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 	m.removeFolder(defaultFolderConfig)
@@ -1886,7 +1887,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
 }
 
 func TestGlobalDirectorySelfFixing(t *testing.T) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 	m.removeFolder(defaultFolderConfig)
@@ -2063,7 +2064,7 @@ func BenchmarkTree_100_10(b *testing.B) {
 }
 
 func benchmarkTree(b *testing.B, n1, n2 int) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(defaultCfgWrapper, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 	m.removeFolder(defaultFolderConfig)
@@ -2128,7 +2129,7 @@ func TestIssue3028(t *testing.T) {
 }
 
 func TestIssue4357(t *testing.T) {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	cfg := defaultCfgWrapper.RawCopy()
 	// Create a separate wrapper not to pollute other tests.
 	wrapper := createTmpWrapper(config.Configuration{})
@@ -2251,7 +2252,7 @@ func TestIssue2782(t *testing.T) {
 }
 
 func TestIndexesForUnknownDevicesDropped(t *testing.T) {
-	dbi := db.OpenMemory()
+	dbi := db.NewLowlevel(backend.OpenMemory())
 
 	files := db.NewFileSet("default", defaultFs, dbi)
 	files.Drop(device1)
@@ -2677,7 +2678,7 @@ func TestInternalScan(t *testing.T) {
 func TestCustomMarkerName(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	ldb := db.OpenMemory()
+	ldb := db.NewLowlevel(backend.OpenMemory())
 	set := db.NewFileSet("default", defaultFs, ldb)
 	set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
 		{Name: "dummyfile"},
@@ -3052,7 +3053,7 @@ func TestPausedFolders(t *testing.T) {
 func TestIssue4094(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	// Create a separate wrapper not to pollute other tests.
 	wrapper := createTmpWrapper(config.Configuration{})
 	m := newModel(wrapper, myID, "syncthing", "dev", db, nil)
@@ -3088,7 +3089,7 @@ func TestIssue4094(t *testing.T) {
 func TestIssue4903(t *testing.T) {
 	testOs := &fatalOs{t}
 
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	// Create a separate wrapper not to pollute other tests.
 	wrapper := createTmpWrapper(config.Configuration{})
 	m := newModel(wrapper, myID, "syncthing", "dev", db, nil)
@@ -13,6 +13,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/protocol"
@@ -102,7 +103,7 @@ func setupModelWithConnectionFromWrapper(w config.Wrapper) (*model, *fakeConnect
 }
 
 func setupModel(w config.Wrapper) *model {
-	db := db.OpenMemory()
+	db := db.NewLowlevel(backend.OpenMemory())
 	m := newModel(w, myID, "syncthing", "dev", db, nil)
 	m.ServeBackground()
 
@@ -17,6 +17,7 @@ import (
 
 	"github.com/syncthing/syncthing/lib/config"
 	"github.com/syncthing/syncthing/lib/db"
+	"github.com/syncthing/syncthing/lib/db/backend"
 	"github.com/syncthing/syncthing/lib/events"
 	"github.com/syncthing/syncthing/lib/fs"
 	"github.com/syncthing/syncthing/lib/locations"
@@ -124,5 +125,9 @@ func copyFile(src, dst string) error {
 }
 
 func OpenGoleveldb(path string, tuning config.Tuning) (*db.Lowlevel, error) {
-	return db.Open(path, db.Tuning(tuning))
+	ldb, err := backend.Open(path, backend.Tuning(tuning))
+	if err != nil {
+		return nil, err
+	}
+	return db.NewLowlevel(ldb), nil
 }
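OpenGoleveldb keeps its public signature, so callers are unaffected; only the body now routes through the backend package and wraps the result. A hedged sketch of equivalent caller-side code (the path is arbitrary, config.TuningAuto is assumed to be the config package's default tuning constant, and cleanup is omitted):

package main

import (
	"log"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/db/backend"
)

func main() {
	// Open the on-disk database through the backend package, then wrap
	// it in db.Lowlevel, mirroring OpenGoleveldb's new body.
	be, err := backend.Open("/tmp/index.db", backend.Tuning(config.TuningAuto))
	if err != nil {
		log.Fatal(err)
	}
	ldb := db.NewLowlevel(be)
	_ = ldb
}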