Mirror of https://github.com/octoleo/syncthing.git, synced 2024-12-23 03:18:59 +00:00

This commit is contained in:
commit 08bebbe59b (parent c5c23ed10f)
@@ -109,6 +109,8 @@ type Iterator interface {
 // consider always using a transaction of the appropriate type. The
 // transaction isolation level is "read committed" - there are no dirty
 // reads.
+// Location returns the path to the database, as given to Open. The returned string
+// is empty for a db in memory.
 type Backend interface {
     Reader
     Writer
@@ -116,6 +118,7 @@ type Backend interface {
     NewWriteTransaction(hooks ...CommitHook) (WriteTransaction, error)
     Close() error
     Compact() error
+    Location() string
 }

 type Tuning int
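The two hunks above extend the Backend interface with Location(), which reports the on-disk path given to Open and an empty string for in-memory databases. A minimal sketch of how a caller might use it to derive a sibling file path next to the database; the helper name and suffix parameter are illustrative only, not part of this change:

package backend

import (
    "os"
    "strings"
)

// markerPath derives a file path next to the database from a Backend's
// Location(). It returns false for in-memory databases, which report an
// empty location and therefore have nothing on disk to mark.
func markerPath(b Backend, suffix string) (string, bool) {
    loc := b.Location()
    if loc == "" {
        return "", false
    }
    return strings.TrimRight(loc, string(os.PathSeparator)) + suffix, true
}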
@@ -23,7 +23,12 @@ func OpenBadger(path string) (Backend, error) {
     opts := badger.DefaultOptions(path)
     opts = opts.WithMaxCacheSize(maxCacheSize).WithCompactL0OnClose(false)
     opts.Logger = nil
-    return openBadger(opts)
+    backend, err := openBadger(opts)
+    if err != nil {
+        return nil, err
+    }
+    backend.location = path
+    return backend, nil
 }

 func OpenBadgerMemory() Backend {
@@ -38,7 +43,7 @@ func OpenBadgerMemory() Backend {
     return backend
 }

-func openBadger(opts badger.Options) (Backend, error) {
+func openBadger(opts badger.Options) (*badgerBackend, error) {
     // XXX: We should find good values for memory utilization in the "small"
     // and "large" cases we support for LevelDB. Some notes here:
     // https://github.com/dgraph-io/badger/tree/v2.0.3#memory-usage
@@ -54,8 +59,9 @@ func openBadger(opts badger.Options) (Backend, error) {

 // badgerBackend implements Backend on top of a badger
 type badgerBackend struct {
     bdb *badger.DB
     closeWG *closeWaitGroup
+    location string
 }

 func (b *badgerBackend) NewReadTransaction() (ReadTransaction, error) {
@@ -217,6 +223,10 @@ func (b *badgerBackend) Compact() error {
     return err
 }

+func (b *badgerBackend) Location() string {
+    return b.location
+}
+
 // badgerSnapshot implements backend.ReadTransaction
 type badgerSnapshot struct {
     txn *badger.Txn
@@ -28,14 +28,16 @@ const (

 // leveldbBackend implements Backend on top of a leveldb
 type leveldbBackend struct {
     ldb *leveldb.DB
     closeWG *closeWaitGroup
+    location string
 }

-func newLeveldbBackend(ldb *leveldb.DB) *leveldbBackend {
+func newLeveldbBackend(ldb *leveldb.DB, location string) *leveldbBackend {
     return &leveldbBackend{
         ldb: ldb,
         closeWG: &closeWaitGroup{},
+        location: location,
     }
 }

@@ -116,6 +118,10 @@ func (b *leveldbBackend) Compact() error {
     return wrapLeveldbErr(b.ldb.CompactRange(util.Range{}))
 }

+func (b *leveldbBackend) Location() string {
+    return b.location
+}
+
 // leveldbSnapshot implements backend.ReadTransaction
 type leveldbSnapshot struct {
     snap *leveldb.Snapshot
@@ -42,7 +42,7 @@ func OpenLevelDB(location string, tuning Tuning) (Backend, error) {
     if err != nil {
         return nil, err
     }
-    return newLeveldbBackend(ldb), nil
+    return newLeveldbBackend(ldb, location), nil
 }

 // OpenLevelDBAuto is OpenLevelDB with TuningAuto tuning.
@@ -61,13 +61,13 @@ func OpenLevelDBRO(location string) (Backend, error) {
     if err != nil {
         return nil, err
     }
-    return newLeveldbBackend(ldb), nil
+    return newLeveldbBackend(ldb, location), nil
 }

 // OpenMemory returns a new Backend referencing an in-memory database.
 func OpenLevelDBMemory() Backend {
     ldb, _ := leveldb.Open(storage.NewMemStorage(), nil)
-    return newLeveldbBackend(ldb)
+    return newLeveldbBackend(ldb, "")
 }

 // optsFor returns the database options to use when opening a database with
@@ -931,7 +931,7 @@ func TestDuplicateNeedCount(t *testing.T) {
     files[0].Version = files[0].Version.Update(remoteDevice0.Short())
     fs.Update(remoteDevice0, files)

-    db.CheckRepair()
+    db.checkRepair()

     fs = NewFileSet(folder, testFs, db)
     found := false
@@ -11,11 +11,13 @@ import (
     "context"
     "encoding/binary"
     "io"
+    "os"
     "time"

     "github.com/dchest/siphash"
     "github.com/greatroar/blobloom"
     "github.com/syncthing/syncthing/lib/db/backend"
+    "github.com/syncthing/syncthing/lib/fs"
     "github.com/syncthing/syncthing/lib/protocol"
     "github.com/syncthing/syncthing/lib/rand"
     "github.com/syncthing/syncthing/lib/sha256"
@@ -42,6 +44,8 @@ const (
     versionIndirectionCutoff = 10

     recheckDefaultInterval = 30 * 24 * time.Hour
+
+    needsRepairSuffix = ".needsrepair"
 )

 // Lowlevel is the lowest level database interface. It has a very simple
@@ -82,6 +86,13 @@ func NewLowlevel(backend backend.Backend, opts ...Option) *Lowlevel {
     }
     db.keyer = newDefaultKeyer(db.folderIdx, db.deviceIdx)
     db.Add(util.AsService(db.gcRunner, "db.Lowlevel/gcRunner"))
+    if path := db.needsRepairPath(); path != "" {
+        if _, err := os.Lstat(path); err == nil {
+            l.Infoln("Database was marked for repair - this may take a while")
+            db.checkRepair()
+            os.Remove(path)
+        }
+    }
     return db
 }

@@ -790,8 +801,8 @@ func (b *bloomFilter) hash(id []byte) uint64 {
     return siphash.Hash(b.k0, b.k1, id)
 }

-// CheckRepair checks folder metadata and sequences for miscellaneous errors.
-func (db *Lowlevel) CheckRepair() {
+// checkRepair checks folder metadata and sequences for miscellaneous errors.
+func (db *Lowlevel) checkRepair() {
     for _, folder := range db.ListFolders() {
         _ = db.getMetaAndCheck(folder)
     }
@@ -1129,6 +1140,17 @@ func (db *Lowlevel) checkLocalNeed(folder []byte) (int, error) {
     return repaired, nil
 }

+func (db *Lowlevel) needsRepairPath() string {
+    path := db.Location()
+    if path == "" {
+        return ""
+    }
+    if path[len(path)-1] == fs.PathSeparator {
+        path = path[:len(path)-1]
+    }
+    return path + needsRepairSuffix
+}
+
 // unchanged checks if two files are the same and thus don't need to be updated.
 // Local flags or the invalid bit might change without the version
 // being bumped.
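To make the behaviour of needsRepairPath concrete, here is a small self-contained sketch of the same logic with example inputs. The paths are hypothetical, and os.PathSeparator stands in for fs.PathSeparator so the snippet compiles on its own:

package main

import (
    "fmt"
    "os"
)

const needsRepairSuffix = ".needsrepair"

// needsRepairPath mirrors the method above: an empty location (in-memory
// database) is never marked, a single trailing path separator is trimmed,
// and the suffix is appended to the database path.
func needsRepairPath(location string) string {
    if location == "" {
        return ""
    }
    if location[len(location)-1] == os.PathSeparator {
        location = location[:len(location)-1]
    }
    return location + needsRepairSuffix
}

func main() {
    fmt.Println(needsRepairPath("/var/syncthing/index-v0.14.0.db"))  // /var/syncthing/index-v0.14.0.db.needsrepair
    fmt.Println(needsRepairPath("/var/syncthing/index-v0.14.0.db/")) // same result, trailing separator trimmed (on Unix)
    fmt.Println(needsRepairPath(""))                                 // empty: in-memory database, never marked
}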
@@ -13,7 +13,9 @@
 package db

 import (
+    "errors"
     "fmt"
+    "os"

     "github.com/syncthing/syncthing/lib/db/backend"
     "github.com/syncthing/syncthing/lib/fs"
@@ -56,7 +58,7 @@ func (s *FileSet) Drop(device protocol.DeviceID) {
     if err := s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta); backend.IsClosed(err) {
         return
     } else if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }

     if device == protocol.LocalDeviceID {
@@ -78,19 +80,19 @@ func (s *FileSet) Drop(device protocol.DeviceID) {
     if backend.IsClosed(err) {
         return
     } else if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
     defer t.close()

     if err := s.meta.toDB(t, []byte(s.folder)); backend.IsClosed(err) {
         return
     } else if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
     if err := t.Commit(); backend.IsClosed(err) {
         return
     } else if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
 }

@@ -115,20 +117,21 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
     if device == protocol.LocalDeviceID {
         // For the local device we have a bunch of metadata to track.
         if err := s.db.updateLocalFiles([]byte(s.folder), fs, s.meta); err != nil && !backend.IsClosed(err) {
-            fatalError(err, opStr)
+            fatalError(err, opStr, s.db)
         }
         return
     }
     // Easy case, just update the files and we're done.
     if err := s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
 }

 type Snapshot struct {
     folder string
     t readOnlyTransaction
     meta *countsMap
+    fatalError func(error, string)
 }

 func (s *FileSet) Snapshot() *Snapshot {
@@ -136,12 +139,15 @@ func (s *FileSet) Snapshot() *Snapshot {
     l.Debugf(opStr)
     t, err := s.db.newReadOnlyTransaction()
     if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
     return &Snapshot{
         folder: s.folder,
         t: t,
         meta: s.meta.Snapshot(),
+        fatalError: func(err error, opStr string) {
+            fatalError(err, opStr, s.db)
+        },
     }
 }
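The Snapshot now carries a fatalError closure injected by the FileSet that created it, so the read-only methods in the following hunks can report fatal errors without holding a back-reference to the FileSet or its *Lowlevel. A generic sketch of this injection pattern, with hypothetical names:

package main

import "log"

type database struct{ name string }

type snapshot struct {
    // fatalError is supplied by the owner; it already knows which database
    // to blame, so callers only pass the error and the operation string.
    fatalError func(err error, op string)
}

func newSnapshot(db *database) *snapshot {
    return &snapshot{
        fatalError: func(err error, op string) {
            log.Panicf("fatal error on %s during %s: %v", db.name, op, err)
        },
    }
}

func main() {
    s := newSnapshot(&database{name: "index-v0.14.0.db"})
    _ = s // in the real code, s.fatalError(err, "WithNeed") would mark the db and panic
}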
@@ -153,7 +159,7 @@ func (s *Snapshot) WithNeed(device protocol.DeviceID, fn Iterator) {
     opStr := fmt.Sprintf("%s WithNeed(%v)", s.folder, device)
     l.Debugf(opStr)
     if err := s.t.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -161,7 +167,7 @@ func (s *Snapshot) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
     opStr := fmt.Sprintf("%s WithNeedTruncated(%v)", s.folder, device)
     l.Debugf(opStr)
     if err := s.t.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -169,7 +175,7 @@ func (s *Snapshot) WithHave(device protocol.DeviceID, fn Iterator) {
     opStr := fmt.Sprintf("%s WithHave(%v)", s.folder, device)
     l.Debugf(opStr)
     if err := s.t.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -177,7 +183,7 @@ func (s *Snapshot) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
     opStr := fmt.Sprintf("%s WithHaveTruncated(%v)", s.folder, device)
     l.Debugf(opStr)
     if err := s.t.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -185,7 +191,7 @@ func (s *Snapshot) WithHaveSequence(startSeq int64, fn Iterator) {
     opStr := fmt.Sprintf("%s WithHaveSequence(%v)", s.folder, startSeq)
     l.Debugf(opStr)
     if err := s.t.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -195,7 +201,7 @@ func (s *Snapshot) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix st
     opStr := fmt.Sprintf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix)
     l.Debugf(opStr)
     if err := s.t.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -203,7 +209,7 @@ func (s *Snapshot) WithGlobal(fn Iterator) {
     opStr := fmt.Sprintf("%s WithGlobal()", s.folder)
     l.Debugf(opStr)
     if err := s.t.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -211,7 +217,7 @@ func (s *Snapshot) WithGlobalTruncated(fn Iterator) {
     opStr := fmt.Sprintf("%s WithGlobalTruncated()", s.folder)
     l.Debugf(opStr)
     if err := s.t.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -221,7 +227,7 @@ func (s *Snapshot) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
     opStr := fmt.Sprintf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix)
     l.Debugf(opStr)
     if err := s.t.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }

@@ -232,7 +238,7 @@ func (s *Snapshot) Get(device protocol.DeviceID, file string) (protocol.FileInfo
     if backend.IsClosed(err) {
         return protocol.FileInfo{}, false
     } else if err != nil {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
     f.Name = osutil.NativeFilename(f.Name)
     return f, ok
@@ -245,7 +251,7 @@ func (s *Snapshot) GetGlobal(file string) (protocol.FileInfo, bool) {
     if backend.IsClosed(err) {
         return protocol.FileInfo{}, false
     } else if err != nil {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
     if !ok {
         return protocol.FileInfo{}, false
@@ -262,7 +268,7 @@ func (s *Snapshot) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
     if backend.IsClosed(err) {
         return FileInfoTruncated{}, false
     } else if err != nil {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
     if !ok {
         return FileInfoTruncated{}, false
@@ -279,7 +285,7 @@ func (s *Snapshot) Availability(file string) []protocol.DeviceID {
     if backend.IsClosed(err) {
         return nil
     } else if err != nil {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
     return av
 }
@@ -369,7 +375,7 @@ func (s *Snapshot) WithBlocksHash(hash []byte, fn Iterator) {
     opStr := fmt.Sprintf(`%s WithBlocksHash("%x")`, s.folder, hash)
     l.Debugf(opStr)
     if err := s.t.withBlocksHash([]byte(s.folder), hash, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        s.fatalError(err, opStr)
     }
 }
@@ -384,7 +390,7 @@ func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
     if backend.IsClosed(err) {
         return 0
     } else if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
     if id == 0 && device == protocol.LocalDeviceID {
         // No index ID set yet. We create one now.
@@ -393,7 +399,7 @@ func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
         if backend.IsClosed(err) {
             return 0
         } else if err != nil {
-            fatalError(err, opStr)
+            fatalError(err, opStr, s.db)
         }
     }
     return id
@@ -406,7 +412,7 @@ func (s *FileSet) SetIndexID(device protocol.DeviceID, id protocol.IndexID) {
     opStr := fmt.Sprintf("%s SetIndexID(%v, %v)", s.folder, device, id)
     l.Debugf(opStr)
     if err := s.db.setIndexID(device[:], []byte(s.folder), id); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
 }

@@ -417,7 +423,7 @@ func (s *FileSet) MtimeFS() *fs.MtimeFS {
     if backend.IsClosed(err) {
         return nil
     } else if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, s.db)
     }
     kv := NewNamespacedKV(s.db, string(prefix))
     return fs.NewMtimeFS(s.fs, kv)
@@ -454,7 +460,7 @@ func DropFolder(db *Lowlevel, folder string) {
         if err := drop([]byte(folder)); backend.IsClosed(err) {
             return
         } else if err != nil {
-            fatalError(err, opStr)
+            fatalError(err, opStr, db)
         }
     }
 }
@@ -468,16 +474,16 @@ func DropDeltaIndexIDs(db *Lowlevel) {
     if backend.IsClosed(err) {
         return
     } else if err != nil {
-        fatalError(err, opStr)
+        fatalError(err, opStr, db)
     }
     defer dbi.Release()
     for dbi.Next() {
         if err := db.Delete(dbi.Key()); err != nil && !backend.IsClosed(err) {
-            fatalError(err, opStr)
+            fatalError(err, opStr, db)
         }
     }
     if err := dbi.Error(); err != nil && !backend.IsClosed(err) {
-        fatalError(err, opStr)
+        fatalError(err, opStr, db)
     }
 }
@@ -516,7 +522,15 @@ func nativeFileIterator(fn Iterator) Iterator {
     }
 }

-func fatalError(err error, opStr string) {
+func fatalError(err error, opStr string, db *Lowlevel) {
+    if errors.Is(err, errEntryFromGlobalMissing) || errors.Is(err, errEmptyGlobal) {
+        // Inconsistency error, mark db for repair on next start.
+        if path := db.needsRepairPath(); path != "" {
+            if fd, err := os.Create(path); err == nil {
+                fd.Close()
+            }
+        }
+    }
     l.Warnf("Fatal error: %v: %v", opStr, err)
     panic(err)
 }
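Combined with the NewLowlevel hunk earlier, this hunk completes the repair cycle: an inconsistency error (errEntryFromGlobalMissing or errEmptyGlobal) makes fatalError drop an empty <location>.needsrepair marker before panicking, and the next startup notices the marker, runs checkRepair, and deletes it. A minimal standalone sketch of that lifecycle; the function names are illustrative and the repair step is passed in as a callback:

package main

import (
    "fmt"
    "os"
)

// markForRepair creates an empty marker file next to the database,
// best-effort, mirroring the behaviour of fatalError above.
func markForRepair(markerPath string) {
    if markerPath == "" {
        return // in-memory database, nothing to mark
    }
    if fd, err := os.Create(markerPath); err == nil {
        fd.Close()
    }
}

// repairIfMarked is roughly what the startup path does: if the marker
// exists, run the repair and remove the marker again.
func repairIfMarked(markerPath string, repair func()) {
    if markerPath == "" {
        return
    }
    if _, err := os.Lstat(markerPath); err == nil {
        fmt.Println("Database was marked for repair - this may take a while")
        repair()
        os.Remove(markerPath)
    }
}

func main() {
    marker := "index-v0.14.0.db.needsrepair" // hypothetical example path
    markForRepair(marker)
    repairIfMarked(marker, func() { fmt.Println("running checkRepair()") })
}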
@@ -4028,9 +4028,13 @@ func TestIssue6961(t *testing.T) {
     fcfg.Type = config.FolderTypeReceiveOnly
     fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2})
     wcfg.SetFolder(fcfg)
-    m := setupModel(wcfg)
-    // defer cleanupModelAndRemoveDir(m, tfs.URI())
-    defer cleanupModel(m)
+    // Always recalc/repair when opening a fileset.
+    // db := db.NewLowlevel(backend.OpenMemory(), db.WithRecheckInterval(time.Millisecond))
+    db := db.NewLowlevel(backend.OpenMemory())
+    m := newModel(wcfg, myID, "syncthing", "dev", db, nil)
+    m.ServeBackground()
+    defer cleanupModelAndRemoveDir(m, tfs.URI())
+    m.ScanFolders()

     name := "foo"
     version := protocol.Vector{}.Update(device1.Short())
@@ -4066,14 +4070,13 @@ func TestIssue6961(t *testing.T) {
     // Drop ther remote index, add some other file.
     m.Index(device2, fcfg.ID, []protocol.FileInfo{{Name: "bar", RawInvalid: true, Sequence: 1}})

-    // Recalculate everything
+    // Pause and unpause folder to create new db.FileSet and thus recalculate everything
     fcfg.Paused = true
     waiter, err = wcfg.SetFolder(fcfg)
     if err != nil {
         t.Fatal(err)
     }
     waiter.Wait()
-    m.db.CheckRepair()
     fcfg.Paused = false
     waiter, err = wcfg.SetFolder(fcfg)
     if err != nil {
@@ -245,14 +245,6 @@ func (a *App) startup() error {
         db.DropDeltaIndexIDs(a.ll)
     }

-    // Check and repair metadata and sequences on every upgrade including RCs.
-    prevParts = strings.Split(prevVersion, "+")
-    curParts = strings.Split(build.Version, "+")
-    if rel := upgrade.CompareVersions(prevParts[0], curParts[0]); rel != upgrade.Equal {
-        l.Infoln("Checking db due to upgrade - this may take a while...")
-        a.ll.CheckRepair()
-    }
-
     if build.Version != prevVersion {
         // Remember the new version.
         miscDB.PutString("prevVersion", build.Version)