all: Transactionalize db.FileSet (fixes #5952) (#6239)

This commit is contained in:
Simon Frei 2020-01-21 18:23:08 +01:00 committed by Jakob Borg
parent d62a0cf692
commit 08f0e125ef
23 changed files with 957 additions and 891 deletions

View File

@ -350,6 +350,9 @@ angular.module('syncthing.core')
var debouncedFuncs = {}; var debouncedFuncs = {};
function refreshFolder(folder) { function refreshFolder(folder) {
if ($scope.folders[folder].paused) {
return;
}
var key = "refreshFolder" + folder; var key = "refreshFolder" + folder;
if (!debouncedFuncs[key]) { if (!debouncedFuncs[key]) {
debouncedFuncs[key] = debounce(function () { debouncedFuncs[key] = debounce(function () {
@ -780,10 +783,6 @@ angular.module('syncthing.core')
}; };
$scope.folderStatus = function (folderCfg) { $scope.folderStatus = function (folderCfg) {
if (typeof $scope.model[folderCfg.id] === 'undefined') {
return 'unknown';
}
if (folderCfg.paused) { if (folderCfg.paused) {
return 'paused'; return 'paused';
} }
@ -791,7 +790,7 @@ angular.module('syncthing.core')
var folderInfo = $scope.model[folderCfg.id]; var folderInfo = $scope.model[folderCfg.id];
// after restart syncthing process state may be empty // after restart syncthing process state may be empty
if (!folderInfo.state) { if (typeof folderInfo === 'undefined' || !folderInfo.state) {
return 'unknown'; return 'unknown';
} }

View File

@ -743,15 +743,18 @@ func (s *service) getDBRemoteNeed(w http.ResponseWriter, r *http.Request) {
page, perpage := getPagingParams(qs) page, perpage := getPagingParams(qs)
if files, err := s.model.RemoteNeedFolderFiles(deviceID, folder, page, perpage); err != nil { snap, err := s.model.DBSnapshot(folder)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound) http.Error(w, err.Error(), http.StatusNotFound)
} else { return
sendJSON(w, map[string]interface{}{
"files": toJsonFileInfoSlice(files),
"page": page,
"perpage": perpage,
})
} }
defer snap.Release()
files := snap.RemoteNeedFolderFiles(deviceID, page, perpage)
sendJSON(w, map[string]interface{}{
"files": toJsonFileInfoSlice(files),
"page": page,
"perpage": perpage,
})
} }
func (s *service) getDBLocalChanged(w http.ResponseWriter, r *http.Request) { func (s *service) getDBLocalChanged(w http.ResponseWriter, r *http.Request) {
@ -761,7 +764,13 @@ func (s *service) getDBLocalChanged(w http.ResponseWriter, r *http.Request) {
page, perpage := getPagingParams(qs) page, perpage := getPagingParams(qs)
files := s.model.LocalChangedFiles(folder, page, perpage) snap, err := s.model.DBSnapshot(folder)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
defer snap.Release()
files := snap.LocalChangedFiles(page, perpage)
sendJSON(w, map[string]interface{}{ sendJSON(w, map[string]interface{}{
"files": toJsonFileInfoSlice(files), "files": toJsonFileInfoSlice(files),

View File

@ -29,7 +29,6 @@ import (
"github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/fs" "github.com/syncthing/syncthing/lib/fs"
"github.com/syncthing/syncthing/lib/locations" "github.com/syncthing/syncthing/lib/locations"
"github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/sync" "github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/tlsutil" "github.com/syncthing/syncthing/lib/tlsutil"
@ -528,8 +527,7 @@ func startHTTP(cfg *mockedConfig) (string, *suture.Supervisor, error) {
// Instantiate the API service // Instantiate the API service
urService := ur.New(cfg, m, connections, false) urService := ur.New(cfg, m, connections, false)
summaryService := model.NewFolderSummaryService(cfg, m, protocol.LocalDeviceID, events.NoopLogger) svc := New(protocol.LocalDeviceID, cfg, assetDir, "syncthing", m, eventSub, diskEventSub, events.NoopLogger, discoverer, connections, urService, &mockedFolderSummaryService{}, errorLog, systemLog, cpu, nil, false).(*service)
svc := New(protocol.LocalDeviceID, cfg, assetDir, "syncthing", m, eventSub, diskEventSub, events.NoopLogger, discoverer, connections, urService, summaryService, errorLog, systemLog, cpu, nil, false).(*service)
defer os.Remove(token) defer os.Remove(token)
svc.started = addrChan svc.started = addrChan

View File

@ -36,12 +36,8 @@ func (m *mockedModel) NeedFolderFiles(folder string, page, perpage int) ([]db.Fi
return nil, nil, nil return nil, nil, nil
} }
func (m *mockedModel) RemoteNeedFolderFiles(device protocol.DeviceID, folder string, page, perpage int) ([]db.FileInfoTruncated, error) { func (m *mockedModel) FolderProgressBytesCompleted(_ string) int64 {
return nil, nil return 0
}
func (m *mockedModel) NeedSize(folder string) db.Counts {
return db.Counts{}
} }
func (m *mockedModel) ConnectionStats() map[string]interface{} { func (m *mockedModel) ConnectionStats() map[string]interface{} {
@ -112,26 +108,6 @@ func (m *mockedModel) Connection(deviceID protocol.DeviceID) (connections.Connec
return nil, false return nil, false
} }
func (m *mockedModel) GlobalSize(folder string) db.Counts {
return db.Counts{}
}
func (m *mockedModel) LocalSize(folder string) db.Counts {
return db.Counts{}
}
func (m *mockedModel) ReceiveOnlyChangedSize(folder string) db.Counts {
return db.Counts{}
}
func (m *mockedModel) CurrentSequence(folder string) (int64, bool) {
return 0, false
}
func (m *mockedModel) RemoteSequence(folder string) (int64, bool) {
return 0, false
}
func (m *mockedModel) State(folder string) (string, time.Time, error) { func (m *mockedModel) State(folder string) (string, time.Time, error) {
return "", time.Time{}, nil return "", time.Time{}, nil
} }
@ -148,10 +124,6 @@ func (m *mockedModel) WatchError(folder string) error {
return nil return nil
} }
func (m *mockedModel) LocalChangedFiles(folder string, page, perpage int) []db.FileInfoTruncated {
return nil
}
func (m *mockedModel) Serve() {} func (m *mockedModel) Serve() {}
func (m *mockedModel) Stop() {} func (m *mockedModel) Stop() {}
@ -188,3 +160,19 @@ func (m *mockedModel) GetHello(protocol.DeviceID) protocol.HelloIntf {
} }
func (m *mockedModel) StartDeadlockDetector(timeout time.Duration) {} func (m *mockedModel) StartDeadlockDetector(timeout time.Duration) {}
// DBSnapshot implements the corresponding Model interface method on the
// mocked model. The API tests never dereference the returned snapshot, so
// returning a nil *db.Snapshot with a nil error is sufficient here.
func (m *mockedModel) DBSnapshot(_ string) (*db.Snapshot, error) {
	return nil, nil
}
// mockedFolderSummaryService is a no-op stand-in for the real folder
// summary service, used when wiring up the API service in tests.
type mockedFolderSummaryService struct{}

func (m *mockedFolderSummaryService) Serve() {}

func (m *mockedFolderSummaryService) Stop() {}

// Summary returns a fixed placeholder summary for any folder name.
func (m *mockedFolderSummaryService) Summary(folder string) (map[string]interface{}, error) {
	summary := make(map[string]interface{}, 1)
	summary["mocked"] = true
	return summary, nil
}

func (m *mockedFolderSummaryService) OnEventRequest() {}

View File

@ -145,10 +145,12 @@ func BenchmarkNeedHalf(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
count := 0 count := 0
benchS.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool { snap := benchS.Snapshot()
snap.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
count++ count++
return true return true
}) })
snap.Release()
if count != len(secondHalf) { if count != len(secondHalf) {
b.Errorf("wrong length %d != %d", count, len(secondHalf)) b.Errorf("wrong length %d != %d", count, len(secondHalf))
} }
@ -167,10 +169,12 @@ func BenchmarkNeedHalfRemote(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
count := 0 count := 0
fset.WithNeed(remoteDevice0, func(fi db.FileIntf) bool { snap := fset.Snapshot()
snap.WithNeed(remoteDevice0, func(fi db.FileIntf) bool {
count++ count++
return true return true
}) })
snap.Release()
if count != len(secondHalf) { if count != len(secondHalf) {
b.Errorf("wrong length %d != %d", count, len(secondHalf)) b.Errorf("wrong length %d != %d", count, len(secondHalf))
} }
@ -186,10 +190,12 @@ func BenchmarkHave(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
count := 0 count := 0
benchS.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool { snap := benchS.Snapshot()
snap.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
count++ count++
return true return true
}) })
snap.Release()
if count != len(firstHalf) { if count != len(firstHalf) {
b.Errorf("wrong length %d != %d", count, len(firstHalf)) b.Errorf("wrong length %d != %d", count, len(firstHalf))
} }
@ -205,10 +211,12 @@ func BenchmarkGlobal(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
count := 0 count := 0
benchS.WithGlobal(func(fi db.FileIntf) bool { snap := benchS.Snapshot()
snap.WithGlobal(func(fi db.FileIntf) bool {
count++ count++
return true return true
}) })
snap.Release()
if count != len(files) { if count != len(files) {
b.Errorf("wrong length %d != %d", count, len(files)) b.Errorf("wrong length %d != %d", count, len(files))
} }
@ -224,10 +232,12 @@ func BenchmarkNeedHalfTruncated(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
count := 0 count := 0
benchS.WithNeedTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool { snap := benchS.Snapshot()
snap.WithNeedTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
count++ count++
return true return true
}) })
snap.Release()
if count != len(secondHalf) { if count != len(secondHalf) {
b.Errorf("wrong length %d != %d", count, len(secondHalf)) b.Errorf("wrong length %d != %d", count, len(secondHalf))
} }
@ -243,10 +253,12 @@ func BenchmarkHaveTruncated(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
count := 0 count := 0
benchS.WithHaveTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool { snap := benchS.Snapshot()
snap.WithHaveTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
count++ count++
return true return true
}) })
snap.Release()
if count != len(firstHalf) { if count != len(firstHalf) {
b.Errorf("wrong length %d != %d", count, len(firstHalf)) b.Errorf("wrong length %d != %d", count, len(firstHalf))
} }
@ -262,10 +274,12 @@ func BenchmarkGlobalTruncated(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
count := 0 count := 0
benchS.WithGlobalTruncated(func(fi db.FileIntf) bool { snap := benchS.Snapshot()
snap.WithGlobalTruncated(func(fi db.FileIntf) bool {
count++ count++
return true return true
}) })
snap.Release()
if count != len(files) { if count != len(files) {
b.Errorf("wrong length %d != %d", count, len(files)) b.Errorf("wrong length %d != %d", count, len(files))
} }

View File

@ -72,7 +72,9 @@ func TestIgnoredFiles(t *testing.T) {
// Local files should have the "ignored" bit in addition to just being // Local files should have the "ignored" bit in addition to just being
// generally invalid if we want to look at the simulation of that bit. // generally invalid if we want to look at the simulation of that bit.
fi, ok := fs.Get(protocol.LocalDeviceID, "foo") snap := fs.Snapshot()
defer snap.Release()
fi, ok := snap.Get(protocol.LocalDeviceID, "foo")
if !ok { if !ok {
t.Fatal("foo should exist") t.Fatal("foo should exist")
} }
@ -83,7 +85,7 @@ func TestIgnoredFiles(t *testing.T) {
t.Error("foo should be ignored") t.Error("foo should be ignored")
} }
fi, ok = fs.Get(protocol.LocalDeviceID, "bar") fi, ok = snap.Get(protocol.LocalDeviceID, "bar")
if !ok { if !ok {
t.Fatal("bar should exist") t.Fatal("bar should exist")
} }
@ -97,7 +99,7 @@ func TestIgnoredFiles(t *testing.T) {
// Remote files have the invalid bit as usual, and the IsInvalid() method // Remote files have the invalid bit as usual, and the IsInvalid() method
// should pick this up too. // should pick this up too.
fi, ok = fs.Get(protocol.DeviceID{42}, "baz") fi, ok = snap.Get(protocol.DeviceID{42}, "baz")
if !ok { if !ok {
t.Fatal("baz should exist") t.Fatal("baz should exist")
} }
@ -108,7 +110,7 @@ func TestIgnoredFiles(t *testing.T) {
t.Error("baz should be invalid") t.Error("baz should be invalid")
} }
fi, ok = fs.Get(protocol.DeviceID{42}, "quux") fi, ok = snap.Get(protocol.DeviceID{42}, "quux")
if !ok { if !ok {
t.Fatal("quux should exist") t.Fatal("quux should exist")
} }
@ -167,7 +169,11 @@ func TestUpdate0to3(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if _, ok, err := db.getFileDirty(folder, protocol.LocalDeviceID[:], []byte(slashPrefixed)); err != nil { trans, err := db.newReadOnlyTransaction()
if err != nil {
t.Fatal(err)
}
if _, ok, err := trans.getFile(folder, protocol.LocalDeviceID[:], []byte(slashPrefixed)); err != nil {
t.Fatal(err) t.Fatal(err)
} else if ok { } else if ok {
t.Error("File prefixed by '/' was not removed during transition to schema 1") t.Error("File prefixed by '/' was not removed during transition to schema 1")
@ -186,7 +192,11 @@ func TestUpdate0to3(t *testing.T) {
} }
found := false found := false
_ = db.withHaveSequence(folder, 0, func(fi FileIntf) bool { trans, err = db.newReadOnlyTransaction()
if err != nil {
t.Fatal(err)
}
_ = trans.withHaveSequence(folder, 0, func(fi FileIntf) bool {
f := fi.(protocol.FileInfo) f := fi.(protocol.FileInfo)
l.Infoln(f) l.Infoln(f)
if found { if found {
@ -213,7 +223,11 @@ func TestUpdate0to3(t *testing.T) {
haveUpdate0to3[remoteDevice1][0].Name: haveUpdate0to3[remoteDevice1][0], haveUpdate0to3[remoteDevice1][0].Name: haveUpdate0to3[remoteDevice1][0],
haveUpdate0to3[remoteDevice0][2].Name: haveUpdate0to3[remoteDevice0][2], haveUpdate0to3[remoteDevice0][2].Name: haveUpdate0to3[remoteDevice0][2],
} }
_ = db.withNeed(folder, protocol.LocalDeviceID[:], false, func(fi FileIntf) bool { trans, err = db.newReadOnlyTransaction()
if err != nil {
t.Fatal(err)
}
_ = trans.withNeed(folder, protocol.LocalDeviceID[:], false, func(fi FileIntf) bool {
e, ok := need[fi.FileName()] e, ok := need[fi.FileName()]
if !ok { if !ok {
t.Error("Got unexpected needed file:", fi.FileName()) t.Error("Got unexpected needed file:", fi.FileName())

View File

@ -194,405 +194,6 @@ func (db *Lowlevel) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta
return t.commit() return t.commit()
} }
// withHave iterates over the file entries the given device announces for
// the given folder, calling fn for each. If prefix is non-empty, only the
// entry named exactly prefix and entries under "prefix/" are visited.
// When truncate is true, entries are unmarshalled in truncated form.
// Iteration stops early (returning nil) when fn returns false.
func (db *Lowlevel) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) error {
	t, err := db.newReadOnlyTransaction()
	if err != nil {
		return err
	}
	defer t.close()
	if len(prefix) > 0 {
		// The prefix acts as a directory boundary: "dir" must match
		// "dir" itself and "dir/…" but not "dir.file". We look up the
		// unslashed name once, then iterate with a trailing slash.
		unslashedPrefix := prefix
		if bytes.HasSuffix(prefix, []byte{'/'}) {
			unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
		} else {
			prefix = append(prefix, '/')
		}
		key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, unslashedPrefix)
		if err != nil {
			return err
		}
		// Visit the entry whose name equals the prefix itself, if any.
		if f, ok, err := t.getFileTrunc(key, true); err != nil {
			return err
		} else if ok && !fn(f) {
			return nil
		}
	}
	key, err := db.keyer.GenerateDeviceFileKey(nil, folder, device, prefix)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key)
	if err != nil {
		return err
	}
	defer dbi.Release()
	for dbi.Next() {
		name := db.keyer.NameFromDeviceFileKey(dbi.Key())
		// The key-level prefix iterator matches raw bytes; re-check the
		// name so "dir.file" is not treated as being under "dir/".
		if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
			return nil
		}
		f, err := unmarshalTrunc(dbi.Value(), truncate)
		if err != nil {
			// A corrupt entry is skipped rather than aborting the whole
			// iteration.
			l.Debugln("unmarshal error:", err)
			continue
		}
		if !fn(f) {
			return nil
		}
	}
	return dbi.Error()
}
// withHaveSequence iterates over the local file entries of the folder in
// sequence-number order, starting at startSeq (inclusive), calling fn for
// each full FileInfo. Iteration stops early when fn returns false.
func (db *Lowlevel) withHaveSequence(folder []byte, startSeq int64, fn Iterator) error {
	t, err := db.newReadOnlyTransaction()
	if err != nil {
		return err
	}
	defer t.close()
	first, err := db.keyer.GenerateSequenceKey(nil, folder, startSeq)
	if err != nil {
		return err
	}
	last, err := db.keyer.GenerateSequenceKey(nil, folder, maxInt64)
	if err != nil {
		return err
	}
	dbi, err := t.NewRangeIterator(first, last)
	if err != nil {
		return err
	}
	defer dbi.Release()
	for dbi.Next() {
		// The sequence index stores device-file keys as values; resolve
		// each one to the actual file entry.
		f, ok, err := t.getFileByKey(dbi.Value())
		if err != nil {
			return err
		}
		if !ok {
			// Dangling sequence entry; skip it rather than failing.
			l.Debugln("missing file for sequence number", db.keyer.SequenceFromSequenceKey(dbi.Key()))
			continue
		}
		if shouldDebug() {
			// Debug-only consistency check: the sequence stored in the
			// file entry must match the index key it was found under.
			if seq := db.keyer.SequenceFromSequenceKey(dbi.Key()); f.Sequence != seq {
				l.Warnf("Sequence index corruption (folder %v, file %v): sequence %d != expected %d", string(folder), f.Name, f.Sequence, seq)
				panic("sequence index corruption")
			}
		}
		if !fn(f) {
			return nil
		}
	}
	return dbi.Error()
}
// withAllFolderTruncated iterates over every device's file entries in the
// folder, calling fn with the raw device ID bytes and the truncated entry.
// It uses a read-write transaction because it repairs the database as it
// goes: entries with an unindexed device or an obviously invalid name are
// deleted. Iteration stops early (without committing) when fn returns
// false; otherwise the accumulated deletes are committed at the end.
func (db *Lowlevel) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) error {
	t, err := db.newReadWriteTransaction()
	if err != nil {
		return err
	}
	defer t.close()
	key, err := db.keyer.GenerateDeviceFileKey(nil, folder, nil, nil)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key.WithoutNameAndDevice())
	if err != nil {
		return err
	}
	defer dbi.Release()
	var gk, keyBuf []byte
	for dbi.Next() {
		device, ok := db.keyer.DeviceFromDeviceFileKey(dbi.Key())
		if !ok {
			// Not having the device in the index is bad. Clear it.
			if err := t.Delete(dbi.Key()); err != nil {
				return err
			}
			continue
		}
		var f FileInfoTruncated
		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		err := f.Unmarshal(append([]byte{}, dbi.Value()...))
		if err != nil {
			return err
		}
		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			// Drop the entry from both the global version list and the
			// device-file index.
			l.Infof("Dropping invalid filename %q from database", f.Name)
			name := []byte(f.Name)
			gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
			if err != nil {
				return err
			}
			keyBuf, err = t.removeFromGlobal(gk, keyBuf, folder, device, name, nil)
			if err != nil {
				return err
			}
			if err := t.Delete(dbi.Key()); err != nil {
				return err
			}
			continue
		}
		if !fn(device, f) {
			return nil
		}
	}
	if err := dbi.Error(); err != nil {
		return err
	}
	return t.commit()
}
// getFileDirty looks up a single file entry for the given folder, device
// and file name in a fresh read-only transaction, i.e. without any
// snapshot isolation relative to concurrent updates (hence "dirty").
func (db *Lowlevel) getFileDirty(folder, device, file []byte) (protocol.FileInfo, bool, error) {
	trans, err := db.newReadOnlyTransaction()
	if err != nil {
		return protocol.FileInfo{}, false, err
	}
	defer trans.close()
	return trans.getFile(folder, device, file)
}
// getGlobalDirty looks up the global (highest-version) entry for the given
// folder and file name in a fresh read-only transaction, optionally in
// truncated form. As with getFileDirty, no snapshot isolation is implied.
func (db *Lowlevel) getGlobalDirty(folder, file []byte, truncate bool) (FileIntf, bool, error) {
	trans, err := db.newReadOnlyTransaction()
	if err != nil {
		return nil, false, err
	}
	defer trans.close()
	_, f, ok, err := trans.getGlobal(nil, folder, file, truncate)
	return f, ok, err
}
// withGlobal iterates over the global (highest-version) entries of the
// folder, calling fn for each. If prefix is non-empty, only the entry
// named exactly prefix and entries under "prefix/" are visited. When
// truncate is true, entries are delivered in truncated form. Iteration
// stops early (returning nil) when fn returns false.
func (db *Lowlevel) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) error {
	t, err := db.newReadOnlyTransaction()
	if err != nil {
		return err
	}
	defer t.close()
	if len(prefix) > 0 {
		// Same directory-boundary handling as withHave: match the name
		// equal to prefix once, then iterate with a trailing slash.
		unslashedPrefix := prefix
		if bytes.HasSuffix(prefix, []byte{'/'}) {
			unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
		} else {
			prefix = append(prefix, '/')
		}
		if _, f, ok, err := t.getGlobal(nil, folder, unslashedPrefix, truncate); err != nil {
			return err
		} else if ok && !fn(f) {
			return nil
		}
	}
	key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, prefix)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key)
	if err != nil {
		return err
	}
	defer dbi.Release()
	var dk []byte
	for dbi.Next() {
		name := db.keyer.NameFromGlobalVersionKey(dbi.Key())
		if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
			return nil
		}
		vl, ok := unmarshalVersionList(dbi.Value())
		if !ok {
			continue
		}
		// Versions[0] holds the global version; fetch that device's
		// actual file entry.
		dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name)
		if err != nil {
			return err
		}
		f, ok, err := t.getFileTrunc(dk, truncate)
		if err != nil {
			return err
		}
		if !ok {
			continue
		}
		if !fn(f) {
			return nil
		}
	}
	// NOTE(review): err is always nil when the loop exits normally (every
	// assignment inside the loop is followed by a return on error), so
	// this check is dead code; kept as-is for a documentation-only change.
	if err != nil {
		return err
	}
	return dbi.Error()
}
// availability returns the IDs of the devices that announce the current
// global version of the given file, excluding entries marked invalid.
// A missing global version entry yields (nil, nil), not an error.
func (db *Lowlevel) availability(folder, file []byte) ([]protocol.DeviceID, error) {
	k, err := db.keyer.GenerateGlobalVersionKey(nil, folder, file)
	if err != nil {
		return nil, err
	}
	bs, err := db.Get(k)
	if backend.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	vl, ok := unmarshalVersionList(bs)
	if !ok {
		return nil, nil
	}
	var devices []protocol.DeviceID
	for _, v := range vl.Versions {
		// Versions[0] holds the global version; stop at the first entry
		// with a different version.
		if !v.Version.Equal(vl.Versions[0].Version) {
			break
		}
		if v.Invalid {
			continue
		}
		n := protocol.DeviceIDFromBytes(v.Device)
		devices = append(devices, n)
	}
	return devices, nil
}
// withNeed iterates over the files the given device needs in the folder,
// i.e. those where the device's announced version is behind the global
// version. For the local device it delegates to withNeedLocal, which uses
// the dedicated need index. When truncate is true, entries are delivered
// in truncated form. Iteration stops early when fn returns false.
func (db *Lowlevel) withNeed(folder, device []byte, truncate bool, fn Iterator) error {
	if bytes.Equal(device, protocol.LocalDeviceID[:]) {
		return db.withNeedLocal(folder, truncate, fn)
	}
	t, err := db.newReadOnlyTransaction()
	if err != nil {
		return err
	}
	defer t.close()
	key, err := db.keyer.GenerateGlobalVersionKey(nil, folder, nil)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key.WithoutName())
	if err != nil {
		return err
	}
	defer dbi.Release()
	var dk []byte
	devID := protocol.DeviceIDFromBytes(device)
	for dbi.Next() {
		vl, ok := unmarshalVersionList(dbi.Value())
		if !ok {
			continue
		}
		haveFV, have := vl.Get(device)
		// XXX: This marks Concurrent (i.e. conflicting) changes as
		// needs. Maybe we should do that, but it needs special
		// handling in the puller.
		if have && haveFV.Version.GreaterEqual(vl.Versions[0].Version) {
			// The device is already at (or ahead of) the global
			// version; nothing needed for this file.
			continue
		}
		name := db.keyer.NameFromGlobalVersionKey(dbi.Key())
		needVersion := vl.Versions[0].Version
		needDevice := protocol.DeviceIDFromBytes(vl.Versions[0].Device)
		// Walk the version list looking for a valid copy of the file at
		// the needed (global) version.
		for i := range vl.Versions {
			if !vl.Versions[i].Version.Equal(needVersion) {
				// We haven't found a valid copy of the file with the needed version.
				break
			}
			if vl.Versions[i].Invalid {
				// The file is marked invalid, don't use it.
				continue
			}
			dk, err = db.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[i].Device, name)
			if err != nil {
				return err
			}
			gf, ok, err := t.getFileTrunc(dk, truncate)
			if err != nil {
				return err
			}
			if !ok {
				continue
			}
			if gf.IsDeleted() && !have {
				// We don't need deleted files that we don't have
				break
			}
			l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, devID, name, have, haveFV.Invalid, haveFV.Version, needVersion, needDevice)
			if !fn(gf) {
				return nil
			}
			// This file is handled, no need to look further in the version list
			break
		}
	}
	return dbi.Error()
}
// withNeedLocal iterates over the files the local device needs in the
// folder, using the dedicated need index rather than scanning all global
// version lists. Each needed name is resolved to its global entry before
// being passed to fn. Iteration stops early when fn returns false.
func (db *Lowlevel) withNeedLocal(folder []byte, truncate bool, fn Iterator) error {
	t, err := db.newReadOnlyTransaction()
	if err != nil {
		return err
	}
	defer t.close()
	key, err := db.keyer.GenerateNeedFileKey(nil, folder, nil)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key.WithoutName())
	if err != nil {
		return err
	}
	defer dbi.Release()
	// keyBuf is reused across getGlobal calls to avoid re-allocating the
	// key on every iteration.
	var keyBuf []byte
	var f FileIntf
	var ok bool
	for dbi.Next() {
		keyBuf, f, ok, err = t.getGlobal(keyBuf, folder, db.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate)
		if err != nil {
			return err
		}
		if !ok {
			continue
		}
		if !fn(f) {
			return nil
		}
	}
	return dbi.Error()
}
func (db *Lowlevel) dropFolder(folder []byte) error { func (db *Lowlevel) dropFolder(folder []byte) error {
t, err := db.newReadWriteTransaction() t, err := db.newReadWriteTransaction()
if err != nil { if err != nil {

View File

@ -15,12 +15,16 @@ import (
"github.com/syncthing/syncthing/lib/sync" "github.com/syncthing/syncthing/lib/sync"
) )
// metadataTracker keeps metadata on a per device, per local flag basis. type countsMap struct {
type metadataTracker struct {
mut sync.RWMutex
counts CountsSet counts CountsSet
indexes map[metaKey]int // device ID + local flags -> index in counts indexes map[metaKey]int // device ID + local flags -> index in counts
dirty bool }
// metadataTracker keeps metadata on a per device, per local flag basis.
type metadataTracker struct {
countsMap
mut sync.RWMutex
dirty bool
} }
type metaKey struct { type metaKey struct {
@ -30,8 +34,10 @@ type metaKey struct {
func newMetadataTracker() *metadataTracker { func newMetadataTracker() *metadataTracker {
return &metadataTracker{ return &metadataTracker{
mut: sync.NewRWMutex(), mut: sync.NewRWMutex(),
indexes: make(map[metaKey]int), countsMap: countsMap{
indexes: make(map[metaKey]int),
},
} }
} }
@ -49,7 +55,7 @@ func (m *metadataTracker) Unmarshal(bs []byte) error {
return nil return nil
} }
// Unmarshal returns the protobuf representation of the metadataTracker // Marshal returns the protobuf representation of the metadataTracker
func (m *metadataTracker) Marshal() ([]byte, error) { func (m *metadataTracker) Marshal() ([]byte, error) {
return m.counts.Marshal() return m.counts.Marshal()
} }
@ -260,16 +266,11 @@ func (m *metadataTracker) resetCounts(dev protocol.DeviceID) {
m.mut.Unlock() m.mut.Unlock()
} }
// Counts returns the counts for the given device ID and flag. `flag` should func (m *countsMap) Counts(dev protocol.DeviceID, flag uint32) Counts {
// be zero or have exactly one bit set.
func (m *metadataTracker) Counts(dev protocol.DeviceID, flag uint32) Counts {
if bits.OnesCount32(flag) > 1 { if bits.OnesCount32(flag) > 1 {
panic("incorrect usage: set at most one bit in flag") panic("incorrect usage: set at most one bit in flag")
} }
m.mut.RLock()
defer m.mut.RUnlock()
idx, ok := m.indexes[metaKey{dev, flag}] idx, ok := m.indexes[metaKey{dev, flag}]
if !ok { if !ok {
return Counts{} return Counts{}
@ -278,6 +279,37 @@ func (m *metadataTracker) Counts(dev protocol.DeviceID, flag uint32) Counts {
return m.counts.Counts[idx] return m.counts.Counts[idx]
} }
// Counts returns the counts for the given device ID and flag. `flag` should
// be zero or have exactly one bit set.
func (m *metadataTracker) Counts(dev protocol.DeviceID, flag uint32) Counts {
	// Lock-taking wrapper around the embedded countsMap implementation,
	// which is shared with read-only snapshots (see Snapshot).
	m.mut.RLock()
	defer m.mut.RUnlock()
	return m.countsMap.Counts(dev, flag)
}
// Snapshot returns a copy of the metadata for reading. The copy is
// detached from the tracker: both the counts slice and the index map are
// duplicated under the read lock, so later mutations of the tracker do
// not affect the returned countsMap.
func (m *metadataTracker) Snapshot() *countsMap {
	m.mut.RLock()
	defer m.mut.RUnlock()

	snap := &countsMap{
		counts: CountsSet{
			Counts:  make([]Counts, len(m.counts.Counts)),
			Created: m.counts.Created,
		},
		indexes: make(map[metaKey]int, len(m.indexes)),
	}
	copy(snap.counts.Counts, m.counts.Counts)
	for key, idx := range m.indexes {
		snap.indexes[key] = idx
	}
	return snap
}
// nextLocalSeq allocates a new local sequence number // nextLocalSeq allocates a new local sequence number
func (m *metadataTracker) nextLocalSeq() int64 { func (m *metadataTracker) nextLocalSeq() int64 {
m.mut.Lock() m.mut.Lock()
@ -291,26 +323,25 @@ func (m *metadataTracker) nextLocalSeq() int64 {
// devices returns the list of devices tracked, excluding the local device // devices returns the list of devices tracked, excluding the local device
// (which we don't know the ID of) // (which we don't know the ID of)
func (m *metadataTracker) devices() []protocol.DeviceID { func (m *metadataTracker) devices() []protocol.DeviceID {
devs := make(map[protocol.DeviceID]struct{}, len(m.counts.Counts))
m.mut.RLock() m.mut.RLock()
defer m.mut.RUnlock()
return m.countsMap.devices()
}
func (m *countsMap) devices() []protocol.DeviceID {
devs := make([]protocol.DeviceID, 0, len(m.counts.Counts))
for _, dev := range m.counts.Counts { for _, dev := range m.counts.Counts {
if dev.Sequence > 0 { if dev.Sequence > 0 {
id := protocol.DeviceIDFromBytes(dev.DeviceID) id := protocol.DeviceIDFromBytes(dev.DeviceID)
if id == protocol.GlobalDeviceID || id == protocol.LocalDeviceID { if id == protocol.GlobalDeviceID || id == protocol.LocalDeviceID {
continue continue
} }
devs[id] = struct{}{} devs = append(devs, id)
} }
} }
m.mut.RUnlock()
devList := make([]protocol.DeviceID, 0, len(devs)) return devs
for dev := range devs {
devList = append(devList, dev)
}
return devList
} }
func (m *metadataTracker) Created() time.Time { func (m *metadataTracker) Created() time.Time {

View File

@ -228,7 +228,7 @@ func (db *schemaUpdater) updateSchema1to2() error {
for _, folderStr := range db.ListFolders() { for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr) folder := []byte(folderStr)
var putErr error var putErr error
err := db.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool { err := t.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f FileIntf) bool {
sk, putErr = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo()) sk, putErr = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo())
if putErr != nil { if putErr != nil {
return false return false
@ -263,7 +263,7 @@ func (db *schemaUpdater) updateSchema2to3() error {
for _, folderStr := range db.ListFolders() { for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr) folder := []byte(folderStr)
var putErr error var putErr error
err := db.withGlobal(folder, nil, true, func(f FileIntf) bool { err := t.withGlobal(folder, nil, true, func(f FileIntf) bool {
name := []byte(f.FileName()) name := []byte(f.FileName())
dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name) dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name)
if putErr != nil { if putErr != nil {
@ -339,7 +339,7 @@ func (db *schemaUpdater) updateSchema5to6() error {
for _, folderStr := range db.ListFolders() { for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr) folder := []byte(folderStr)
var putErr error var putErr error
err := db.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f FileIntf) bool { err := t.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f FileIntf) bool {
if !f.IsInvalid() { if !f.IsInvalid() {
return true return true
} }
@ -382,7 +382,7 @@ func (db *schemaUpdater) updateSchema6to7() error {
for _, folderStr := range db.ListFolders() { for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr) folder := []byte(folderStr)
var delErr error var delErr error
err := db.withNeedLocal(folder, false, func(f FileIntf) bool { err := t.withNeedLocal(folder, false, func(f FileIntf) bool {
name := []byte(f.FileName()) name := []byte(f.FileName())
global := f.(protocol.FileInfo) global := f.(protocol.FileInfo)
gk, delErr = db.keyer.GenerateGlobalVersionKey(gk, folder, name) gk, delErr = db.keyer.GenerateGlobalVersionKey(gk, folder, name)

View File

@ -105,8 +105,12 @@ func (s *FileSet) recalcCounts() error {
return err return err
} }
t, err := s.db.newReadWriteTransaction()
if err != nil {
return err
}
var deviceID protocol.DeviceID var deviceID protocol.DeviceID
err := s.db.withAllFolderTruncated([]byte(s.folder), func(device []byte, f FileInfoTruncated) bool { err = t.withAllFolderTruncated([]byte(s.folder), func(device []byte, f FileInfoTruncated) bool {
copy(deviceID[:], device) copy(deviceID[:], device)
s.meta.addFile(deviceID, f) s.meta.addFile(deviceID, f)
return true return true
@ -183,75 +187,97 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
} }
} }
func (s *FileSet) WithNeed(device protocol.DeviceID, fn Iterator) { type Snapshot struct {
folder string
t readOnlyTransaction
meta *countsMap
}
// Snapshot returns a consistent read-only view of the set: a read-only
// database transaction paired with a copy of the current metadata. The
// caller must call Release on the returned Snapshot when done with it.
func (s *FileSet) Snapshot() *Snapshot {
	trans, err := s.db.newReadOnlyTransaction()
	if err != nil {
		panic(err)
	}
	return &Snapshot{
		folder: s.folder,
		t:      trans,
		meta:   s.meta.Snapshot(),
	}
}
// Release frees the resources held by the Snapshot by closing its
// underlying read-only transaction. It must be called exactly once when
// the caller is done with the Snapshot.
func (s *Snapshot) Release() {
	s.t.close()
}
func (s *Snapshot) WithNeed(device protocol.DeviceID, fn Iterator) {
l.Debugf("%s WithNeed(%v)", s.folder, device) l.Debugf("%s WithNeed(%v)", s.folder, device)
if err := s.db.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
func (s *FileSet) WithNeedTruncated(device protocol.DeviceID, fn Iterator) { func (s *Snapshot) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
l.Debugf("%s WithNeedTruncated(%v)", s.folder, device) l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
if err := s.db.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
func (s *FileSet) WithHave(device protocol.DeviceID, fn Iterator) { func (s *Snapshot) WithHave(device protocol.DeviceID, fn Iterator) {
l.Debugf("%s WithHave(%v)", s.folder, device) l.Debugf("%s WithHave(%v)", s.folder, device)
if err := s.db.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
func (s *FileSet) WithHaveTruncated(device protocol.DeviceID, fn Iterator) { func (s *Snapshot) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
l.Debugf("%s WithHaveTruncated(%v)", s.folder, device) l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
if err := s.db.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
func (s *FileSet) WithHaveSequence(startSeq int64, fn Iterator) { func (s *Snapshot) WithHaveSequence(startSeq int64, fn Iterator) {
l.Debugf("%s WithHaveSequence(%v)", s.folder, startSeq) l.Debugf("%s WithHaveSequence(%v)", s.folder, startSeq)
if err := s.db.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
// Except for an item with a path equal to prefix, only children of prefix are iterated. // Except for an item with a path equal to prefix, only children of prefix are iterated.
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not. // E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
func (s *FileSet) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix string, fn Iterator) { func (s *Snapshot) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix string, fn Iterator) {
l.Debugf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix) l.Debugf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix)
if err := s.db.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
func (s *FileSet) WithGlobal(fn Iterator) { func (s *Snapshot) WithGlobal(fn Iterator) {
l.Debugf("%s WithGlobal()", s.folder) l.Debugf("%s WithGlobal()", s.folder)
if err := s.db.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
func (s *FileSet) WithGlobalTruncated(fn Iterator) { func (s *Snapshot) WithGlobalTruncated(fn Iterator) {
l.Debugf("%s WithGlobalTruncated()", s.folder) l.Debugf("%s WithGlobalTruncated()", s.folder)
if err := s.db.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
// Except for an item with a path equal to prefix, only children of prefix are iterated. // Except for an item with a path equal to prefix, only children of prefix are iterated.
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not. // E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
func (s *FileSet) WithPrefixedGlobalTruncated(prefix string, fn Iterator) { func (s *Snapshot) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
l.Debugf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix) l.Debugf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix)
if err := s.db.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) { if err := s.t.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
panic(err) panic(err)
} }
} }
func (s *FileSet) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) { func (s *Snapshot) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
f, ok, err := s.db.getFileDirty([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file))) f, ok, err := s.t.getFile([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file)))
if backend.IsClosed(err) { if backend.IsClosed(err) {
return protocol.FileInfo{}, false return protocol.FileInfo{}, false
} else if err != nil { } else if err != nil {
@ -261,8 +287,8 @@ func (s *FileSet) Get(device protocol.DeviceID, file string) (protocol.FileInfo,
return f, ok return f, ok
} }
func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) { func (s *Snapshot) GetGlobal(file string) (protocol.FileInfo, bool) {
fi, ok, err := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), false) _, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), false)
if backend.IsClosed(err) { if backend.IsClosed(err) {
return protocol.FileInfo{}, false return protocol.FileInfo{}, false
} else if err != nil { } else if err != nil {
@ -276,8 +302,8 @@ func (s *FileSet) GetGlobal(file string) (protocol.FileInfo, bool) {
return f, true return f, true
} }
func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) { func (s *Snapshot) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
fi, ok, err := s.db.getGlobalDirty([]byte(s.folder), []byte(osutil.NormalizedFilename(file)), true) _, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), true)
if backend.IsClosed(err) { if backend.IsClosed(err) {
return FileInfoTruncated{}, false return FileInfoTruncated{}, false
} else if err != nil { } else if err != nil {
@ -291,8 +317,8 @@ func (s *FileSet) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
return f, true return f, true
} }
func (s *FileSet) Availability(file string) []protocol.DeviceID { func (s *Snapshot) Availability(file string) []protocol.DeviceID {
av, err := s.db.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file))) av, err := s.t.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file)))
if backend.IsClosed(err) { if backend.IsClosed(err) {
return nil return nil
} else if err != nil { } else if err != nil {
@ -301,26 +327,110 @@ func (s *FileSet) Availability(file string) []protocol.DeviceID {
return av return av
} }
func (s *FileSet) Sequence(device protocol.DeviceID) int64 { func (s *Snapshot) Sequence(device protocol.DeviceID) int64 {
return s.meta.Sequence(device) return s.meta.Counts(device, 0).Sequence
} }
func (s *FileSet) LocalSize() Counts { // RemoteSequence returns the change version for the given folder, as
// sent by remote peers. This is guaranteed to increment if the contents of
// the remote or global folder has changed.
// RemoteSequence sums the per-device sequence numbers tracked in the
// metadata. It is guaranteed to increase whenever remote or global
// folder content changes.
func (s *Snapshot) RemoteSequence() int64 {
	var ver int64
	// NOTE(review): assumes s.meta.devices() lists remote devices only
	// (i.e. excludes the local device) — confirm against countsMap.
	for _, device := range s.meta.devices() {
		ver += s.Sequence(device)
	}
	return ver
}
func (s *Snapshot) LocalSize() Counts {
local := s.meta.Counts(protocol.LocalDeviceID, 0) local := s.meta.Counts(protocol.LocalDeviceID, 0)
recvOnlyChanged := s.meta.Counts(protocol.LocalDeviceID, protocol.FlagLocalReceiveOnly) return local.Add(s.ReceiveOnlyChangedSize())
return local.Add(recvOnlyChanged)
} }
func (s *FileSet) ReceiveOnlyChangedSize() Counts { func (s *Snapshot) ReceiveOnlyChangedSize() Counts {
return s.meta.Counts(protocol.LocalDeviceID, protocol.FlagLocalReceiveOnly) return s.meta.Counts(protocol.LocalDeviceID, protocol.FlagLocalReceiveOnly)
} }
func (s *FileSet) GlobalSize() Counts { func (s *Snapshot) GlobalSize() Counts {
global := s.meta.Counts(protocol.GlobalDeviceID, 0) global := s.meta.Counts(protocol.GlobalDeviceID, 0)
recvOnlyChanged := s.meta.Counts(protocol.GlobalDeviceID, protocol.FlagLocalReceiveOnly) recvOnlyChanged := s.meta.Counts(protocol.GlobalDeviceID, protocol.FlagLocalReceiveOnly)
return global.Add(recvOnlyChanged) return global.Add(recvOnlyChanged)
} }
// NeedSize returns counts of the items the local device needs,
// categorized into deleted items, directories, symlinks and regular
// files; Bytes accumulates the sizes of the needed regular files only.
func (s *Snapshot) NeedSize() Counts {
	var result Counts
	s.WithNeedTruncated(protocol.LocalDeviceID, func(f FileIntf) bool {
		switch {
		case f.IsDeleted():
			result.Deleted++
		case f.IsDirectory():
			result.Directories++
		case f.IsSymlink():
			result.Symlinks++
		default:
			result.Files++
			result.Bytes += f.FileSize()
		}
		return true
	})
	return result
}
// LocalChangedFiles returns a paginated list of files that have been
// changed locally, i.e. files carrying the receive-only-changed local
// flag. Pages are 1-based; at most perpage entries are returned.
func (s *Snapshot) LocalChangedFiles(page, perpage int) []FileInfoTruncated {
	// Fast path: nothing has been changed locally.
	if s.ReceiveOnlyChangedSize().TotalItems() == 0 {
		return nil
	}
	files := make([]FileInfoTruncated, 0, perpage)
	skip := (page - 1) * perpage
	get := perpage
	s.WithHaveTruncated(protocol.LocalDeviceID, func(f FileIntf) bool {
		if !f.IsReceiveOnlyChanged() {
			return true
		}
		// Skip entries belonging to earlier pages.
		if skip > 0 {
			skip--
			return true
		}
		ft := f.(FileInfoTruncated)
		files = append(files, ft)
		get--
		return get > 0
	})
	return files
}
// RemoteNeedFolderFiles returns a paginated list of the files the
// given remote device needs from this folder. Pages are 1-based; at
// most perpage entries are returned.
func (s *Snapshot) RemoteNeedFolderFiles(device protocol.DeviceID, page, perpage int) []FileInfoTruncated {
	files := make([]FileInfoTruncated, 0, perpage)
	skip := (page - 1) * perpage
	get := perpage
	s.WithNeedTruncated(device, func(f FileIntf) bool {
		// Skip entries belonging to earlier pages.
		if skip > 0 {
			skip--
			return true
		}
		files = append(files, f.(FileInfoTruncated))
		get--
		return get > 0
	})
	return files
}
// Sequence returns the latest known sequence number for the given
// device, as tracked in the set's metadata.
func (s *FileSet) Sequence(device protocol.DeviceID) int64 {
	return s.meta.Sequence(device)
}
func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID { func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
id, err := s.db.getIndexID(device[:], []byte(s.folder)) id, err := s.db.getIndexID(device[:], []byte(s.folder))
if backend.IsClosed(err) { if backend.IsClosed(err) {

View File

@ -46,7 +46,9 @@ func genBlocks(n int) []protocol.BlockInfo {
func globalList(s *db.FileSet) []protocol.FileInfo { func globalList(s *db.FileSet) []protocol.FileInfo {
var fs []protocol.FileInfo var fs []protocol.FileInfo
s.WithGlobal(func(fi db.FileIntf) bool { snap := s.Snapshot()
defer snap.Release()
snap.WithGlobal(func(fi db.FileIntf) bool {
f := fi.(protocol.FileInfo) f := fi.(protocol.FileInfo)
fs = append(fs, f) fs = append(fs, f)
return true return true
@ -55,7 +57,9 @@ func globalList(s *db.FileSet) []protocol.FileInfo {
} }
func globalListPrefixed(s *db.FileSet, prefix string) []db.FileInfoTruncated { func globalListPrefixed(s *db.FileSet, prefix string) []db.FileInfoTruncated {
var fs []db.FileInfoTruncated var fs []db.FileInfoTruncated
s.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool { snap := s.Snapshot()
defer snap.Release()
snap.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
f := fi.(db.FileInfoTruncated) f := fi.(db.FileInfoTruncated)
fs = append(fs, f) fs = append(fs, f)
return true return true
@ -65,7 +69,9 @@ func globalListPrefixed(s *db.FileSet, prefix string) []db.FileInfoTruncated {
func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo { func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo var fs []protocol.FileInfo
s.WithHave(n, func(fi db.FileIntf) bool { snap := s.Snapshot()
defer snap.Release()
snap.WithHave(n, func(fi db.FileIntf) bool {
f := fi.(protocol.FileInfo) f := fi.(protocol.FileInfo)
fs = append(fs, f) fs = append(fs, f)
return true return true
@ -75,7 +81,9 @@ func haveList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
func haveListPrefixed(s *db.FileSet, n protocol.DeviceID, prefix string) []db.FileInfoTruncated { func haveListPrefixed(s *db.FileSet, n protocol.DeviceID, prefix string) []db.FileInfoTruncated {
var fs []db.FileInfoTruncated var fs []db.FileInfoTruncated
s.WithPrefixedHaveTruncated(n, prefix, func(fi db.FileIntf) bool { snap := s.Snapshot()
defer snap.Release()
snap.WithPrefixedHaveTruncated(n, prefix, func(fi db.FileIntf) bool {
f := fi.(db.FileInfoTruncated) f := fi.(db.FileInfoTruncated)
fs = append(fs, f) fs = append(fs, f)
return true return true
@ -85,7 +93,9 @@ func haveListPrefixed(s *db.FileSet, n protocol.DeviceID, prefix string) []db.Fi
func needList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo { func needList(s *db.FileSet, n protocol.DeviceID) []protocol.FileInfo {
var fs []protocol.FileInfo var fs []protocol.FileInfo
s.WithNeed(n, func(fi db.FileIntf) bool { snap := s.Snapshot()
defer snap.Release()
snap.WithNeed(n, func(fi db.FileIntf) bool {
f := fi.(protocol.FileInfo) f := fi.(protocol.FileInfo)
fs = append(fs, f) fs = append(fs, f)
return true return true
@ -206,7 +216,7 @@ func TestGlobalSet(t *testing.T) {
} }
globalBytes += f.FileSize() globalBytes += f.FileSize()
} }
gs := m.GlobalSize() gs := globalSize(m)
if gs.Files != globalFiles { if gs.Files != globalFiles {
t.Errorf("Incorrect GlobalSize files; %d != %d", gs.Files, globalFiles) t.Errorf("Incorrect GlobalSize files; %d != %d", gs.Files, globalFiles)
} }
@ -242,7 +252,7 @@ func TestGlobalSet(t *testing.T) {
} }
haveBytes += f.FileSize() haveBytes += f.FileSize()
} }
ls := m.LocalSize() ls := localSize(m)
if ls.Files != haveFiles { if ls.Files != haveFiles {
t.Errorf("Incorrect LocalSize files; %d != %d", ls.Files, haveFiles) t.Errorf("Incorrect LocalSize files; %d != %d", ls.Files, haveFiles)
} }
@ -277,7 +287,9 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed) t.Errorf("Need incorrect;\n A: %v !=\n E: %v", n, expectedRemoteNeed)
} }
f, ok := m.Get(protocol.LocalDeviceID, "b") snap := m.Snapshot()
defer snap.Release()
f, ok := snap.Get(protocol.LocalDeviceID, "b")
if !ok { if !ok {
t.Error("Unexpectedly not OK") t.Error("Unexpectedly not OK")
} }
@ -285,7 +297,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1]) t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, localTot[1])
} }
f, ok = m.Get(remoteDevice0, "b") f, ok = snap.Get(remoteDevice0, "b")
if !ok { if !ok {
t.Error("Unexpectedly not OK") t.Error("Unexpectedly not OK")
} }
@ -293,7 +305,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0]) t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, remote1[0])
} }
f, ok = m.GetGlobal("b") f, ok = snap.GetGlobal("b")
if !ok { if !ok {
t.Error("Unexpectedly not OK") t.Error("Unexpectedly not OK")
} }
@ -301,7 +313,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, remote1[0]) t.Errorf("GetGlobal incorrect;\n A: %v !=\n E: %v", f, remote1[0])
} }
f, ok = m.Get(protocol.LocalDeviceID, "zz") f, ok = snap.Get(protocol.LocalDeviceID, "zz")
if ok { if ok {
t.Error("Unexpectedly OK") t.Error("Unexpectedly OK")
} }
@ -309,7 +321,7 @@ func TestGlobalSet(t *testing.T) {
t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{}) t.Errorf("Get incorrect;\n A: %v !=\n E: %v", f, protocol.FileInfo{})
} }
f, ok = m.GetGlobal("zz") f, ok = snap.GetGlobal("zz")
if ok { if ok {
t.Error("Unexpectedly OK") t.Error("Unexpectedly OK")
} }
@ -318,15 +330,15 @@ func TestGlobalSet(t *testing.T) {
} }
av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0} av := []protocol.DeviceID{protocol.LocalDeviceID, remoteDevice0}
a := m.Availability("a") a := snap.Availability("a")
if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) { if !(len(a) == 2 && (a[0] == av[0] && a[1] == av[1] || a[0] == av[1] && a[1] == av[0])) {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av) t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, av)
} }
a = m.Availability("b") a = snap.Availability("b")
if len(a) != 1 || a[0] != remoteDevice0 { if len(a) != 1 || a[0] != remoteDevice0 {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteDevice0) t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, remoteDevice0)
} }
a = m.Availability("d") a = snap.Availability("d")
if len(a) != 1 || a[0] != protocol.LocalDeviceID { if len(a) != 1 || a[0] != protocol.LocalDeviceID {
t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalDeviceID) t.Errorf("Availability incorrect;\n A: %v !=\n E: %v", a, protocol.LocalDeviceID)
} }
@ -446,19 +458,22 @@ func TestInvalidAvailability(t *testing.T) {
replace(s, remoteDevice0, remote0Have) replace(s, remoteDevice0, remote0Have)
replace(s, remoteDevice1, remote1Have) replace(s, remoteDevice1, remote1Have)
if av := s.Availability("both"); len(av) != 2 { snap := s.Snapshot()
defer snap.Release()
if av := snap.Availability("both"); len(av) != 2 {
t.Error("Incorrect availability for 'both':", av) t.Error("Incorrect availability for 'both':", av)
} }
if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteDevice0 { if av := snap.Availability("r0only"); len(av) != 1 || av[0] != remoteDevice0 {
t.Error("Incorrect availability for 'r0only':", av) t.Error("Incorrect availability for 'r0only':", av)
} }
if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteDevice1 { if av := snap.Availability("r1only"); len(av) != 1 || av[0] != remoteDevice1 {
t.Error("Incorrect availability for 'r1only':", av) t.Error("Incorrect availability for 'r1only':", av)
} }
if av := s.Availability("none"); len(av) != 0 { if av := snap.Availability("none"); len(av) != 0 {
t.Error("Incorrect availability for 'none':", av) t.Error("Incorrect availability for 'none':", av)
} }
} }
@ -826,20 +841,20 @@ func TestIssue4701(t *testing.T) {
s.Update(protocol.LocalDeviceID, localHave) s.Update(protocol.LocalDeviceID, localHave)
if c := s.LocalSize(); c.Files != 1 { if c := localSize(s); c.Files != 1 {
t.Errorf("Expected 1 local file, got %v", c.Files) t.Errorf("Expected 1 local file, got %v", c.Files)
} }
if c := s.GlobalSize(); c.Files != 1 { if c := globalSize(s); c.Files != 1 {
t.Errorf("Expected 1 global file, got %v", c.Files) t.Errorf("Expected 1 global file, got %v", c.Files)
} }
localHave[1].LocalFlags = 0 localHave[1].LocalFlags = 0
s.Update(protocol.LocalDeviceID, localHave) s.Update(protocol.LocalDeviceID, localHave)
if c := s.LocalSize(); c.Files != 2 { if c := localSize(s); c.Files != 2 {
t.Errorf("Expected 2 local files, got %v", c.Files) t.Errorf("Expected 2 local files, got %v", c.Files)
} }
if c := s.GlobalSize(); c.Files != 2 { if c := globalSize(s); c.Files != 2 {
t.Errorf("Expected 2 global files, got %v", c.Files) t.Errorf("Expected 2 global files, got %v", c.Files)
} }
@ -847,10 +862,10 @@ func TestIssue4701(t *testing.T) {
localHave[1].LocalFlags = protocol.FlagLocalIgnored localHave[1].LocalFlags = protocol.FlagLocalIgnored
s.Update(protocol.LocalDeviceID, localHave) s.Update(protocol.LocalDeviceID, localHave)
if c := s.LocalSize(); c.Files != 0 { if c := localSize(s); c.Files != 0 {
t.Errorf("Expected 0 local files, got %v", c.Files) t.Errorf("Expected 0 local files, got %v", c.Files)
} }
if c := s.GlobalSize(); c.Files != 0 { if c := globalSize(s); c.Files != 0 {
t.Errorf("Expected 0 global files, got %v", c.Files) t.Errorf("Expected 0 global files, got %v", c.Files)
} }
} }
@ -873,7 +888,9 @@ func TestWithHaveSequence(t *testing.T) {
replace(s, protocol.LocalDeviceID, localHave) replace(s, protocol.LocalDeviceID, localHave)
i := 2 i := 2
s.WithHaveSequence(int64(i), func(fi db.FileIntf) bool { snap := s.Snapshot()
defer snap.Release()
snap.WithHaveSequence(int64(i), func(fi db.FileIntf) bool {
if f := fi.(protocol.FileInfo); !f.IsEquivalent(localHave[i-1], 0) { if f := fi.(protocol.FileInfo); !f.IsEquivalent(localHave[i-1], 0) {
t.Fatalf("Got %v\nExpected %v", f, localHave[i-1]) t.Fatalf("Got %v\nExpected %v", f, localHave[i-1])
} }
@ -922,13 +939,15 @@ loop:
break loop break loop
default: default:
} }
s.WithHaveSequence(prevSeq+1, func(fi db.FileIntf) bool { snap := s.Snapshot()
snap.WithHaveSequence(prevSeq+1, func(fi db.FileIntf) bool {
if fi.SequenceNo() < prevSeq+1 { if fi.SequenceNo() < prevSeq+1 {
t.Fatal("Skipped ", prevSeq+1, fi.SequenceNo()) t.Fatal("Skipped ", prevSeq+1, fi.SequenceNo())
} }
prevSeq = fi.SequenceNo() prevSeq = fi.SequenceNo()
return true return true
}) })
snap.Release()
} }
} }
@ -981,12 +1000,12 @@ func TestMoveGlobalBack(t *testing.T) {
t.Error("Expected no need for remote 0, got", need) t.Error("Expected no need for remote 0, got", need)
} }
ls := s.LocalSize() ls := localSize(s)
if haveBytes := localHave[0].Size; ls.Bytes != haveBytes { if haveBytes := localHave[0].Size; ls.Bytes != haveBytes {
t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes) t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
} }
gs := s.GlobalSize() gs := globalSize(s)
if globalBytes := remote0Have[0].Size; gs.Bytes != globalBytes { if globalBytes := remote0Have[0].Size; gs.Bytes != globalBytes {
t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes) t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
} }
@ -1007,12 +1026,12 @@ func TestMoveGlobalBack(t *testing.T) {
t.Error("Expected no local need, got", need) t.Error("Expected no local need, got", need)
} }
ls = s.LocalSize() ls = localSize(s)
if haveBytes := localHave[0].Size; ls.Bytes != haveBytes { if haveBytes := localHave[0].Size; ls.Bytes != haveBytes {
t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes) t.Errorf("Incorrect LocalSize bytes; %d != %d", ls.Bytes, haveBytes)
} }
gs = s.GlobalSize() gs = globalSize(s)
if globalBytes := localHave[0].Size; gs.Bytes != globalBytes { if globalBytes := localHave[0].Size; gs.Bytes != globalBytes {
t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes) t.Errorf("Incorrect GlobalSize bytes; %d != %d", gs.Bytes, globalBytes)
} }
@ -1106,22 +1125,22 @@ func TestReceiveOnlyAccounting(t *testing.T) {
replace(s, protocol.LocalDeviceID, files) replace(s, protocol.LocalDeviceID, files)
replace(s, remote, files) replace(s, remote, files)
if n := s.LocalSize().Files; n != 3 { if n := localSize(s).Files; n != 3 {
t.Fatal("expected 3 local files initially, not", n) t.Fatal("expected 3 local files initially, not", n)
} }
if n := s.LocalSize().Bytes; n != 30 { if n := localSize(s).Bytes; n != 30 {
t.Fatal("expected 30 local bytes initially, not", n) t.Fatal("expected 30 local bytes initially, not", n)
} }
if n := s.GlobalSize().Files; n != 3 { if n := globalSize(s).Files; n != 3 {
t.Fatal("expected 3 global files initially, not", n) t.Fatal("expected 3 global files initially, not", n)
} }
if n := s.GlobalSize().Bytes; n != 30 { if n := globalSize(s).Bytes; n != 30 {
t.Fatal("expected 30 global bytes initially, not", n) t.Fatal("expected 30 global bytes initially, not", n)
} }
if n := s.ReceiveOnlyChangedSize().Files; n != 0 { if n := receiveOnlyChangedSize(s).Files; n != 0 {
t.Fatal("expected 0 receive only changed files initially, not", n) t.Fatal("expected 0 receive only changed files initially, not", n)
} }
if n := s.ReceiveOnlyChangedSize().Bytes; n != 0 { if n := receiveOnlyChangedSize(s).Bytes; n != 0 {
t.Fatal("expected 0 receive only changed bytes initially, not", n) t.Fatal("expected 0 receive only changed bytes initially, not", n)
} }
@ -1136,22 +1155,22 @@ func TestReceiveOnlyAccounting(t *testing.T) {
// Check that we see the files // Check that we see the files
if n := s.LocalSize().Files; n != 3 { if n := localSize(s).Files; n != 3 {
t.Fatal("expected 3 local files after local change, not", n) t.Fatal("expected 3 local files after local change, not", n)
} }
if n := s.LocalSize().Bytes; n != 120 { if n := localSize(s).Bytes; n != 120 {
t.Fatal("expected 120 local bytes after local change, not", n) t.Fatal("expected 120 local bytes after local change, not", n)
} }
if n := s.GlobalSize().Files; n != 3 { if n := globalSize(s).Files; n != 3 {
t.Fatal("expected 3 global files after local change, not", n) t.Fatal("expected 3 global files after local change, not", n)
} }
if n := s.GlobalSize().Bytes; n != 30 { if n := globalSize(s).Bytes; n != 30 {
t.Fatal("expected 30 global files after local change, not", n) t.Fatal("expected 30 global files after local change, not", n)
} }
if n := s.ReceiveOnlyChangedSize().Files; n != 1 { if n := receiveOnlyChangedSize(s).Files; n != 1 {
t.Fatal("expected 1 receive only changed file after local change, not", n) t.Fatal("expected 1 receive only changed file after local change, not", n)
} }
if n := s.ReceiveOnlyChangedSize().Bytes; n != 100 { if n := receiveOnlyChangedSize(s).Bytes; n != 100 {
t.Fatal("expected 100 receive only changed btyes after local change, not", n) t.Fatal("expected 100 receive only changed btyes after local change, not", n)
} }
@ -1167,22 +1186,22 @@ func TestReceiveOnlyAccounting(t *testing.T) {
// Check that we see the files, same data as initially // Check that we see the files, same data as initially
if n := s.LocalSize().Files; n != 3 { if n := localSize(s).Files; n != 3 {
t.Fatal("expected 3 local files after revert, not", n) t.Fatal("expected 3 local files after revert, not", n)
} }
if n := s.LocalSize().Bytes; n != 30 { if n := localSize(s).Bytes; n != 30 {
t.Fatal("expected 30 local bytes after revert, not", n) t.Fatal("expected 30 local bytes after revert, not", n)
} }
if n := s.GlobalSize().Files; n != 3 { if n := globalSize(s).Files; n != 3 {
t.Fatal("expected 3 global files after revert, not", n) t.Fatal("expected 3 global files after revert, not", n)
} }
if n := s.GlobalSize().Bytes; n != 30 { if n := globalSize(s).Bytes; n != 30 {
t.Fatal("expected 30 global bytes after revert, not", n) t.Fatal("expected 30 global bytes after revert, not", n)
} }
if n := s.ReceiveOnlyChangedSize().Files; n != 0 { if n := receiveOnlyChangedSize(s).Files; n != 0 {
t.Fatal("expected 0 receive only changed files after revert, not", n) t.Fatal("expected 0 receive only changed files after revert, not", n)
} }
if n := s.ReceiveOnlyChangedSize().Bytes; n != 0 { if n := receiveOnlyChangedSize(s).Bytes; n != 0 {
t.Fatal("expected 0 receive only changed bytes after revert, not", n) t.Fatal("expected 0 receive only changed bytes after revert, not", n)
} }
} }
@ -1229,7 +1248,7 @@ func TestRemoteInvalidNotAccounted(t *testing.T) {
} }
s.Update(remoteDevice0, files) s.Update(remoteDevice0, files)
global := s.GlobalSize() global := globalSize(s)
if global.Files != 1 { if global.Files != 1 {
t.Error("Expected one file in global size, not", global.Files) t.Error("Expected one file in global size, not", global.Files)
} }
@ -1386,12 +1405,14 @@ func TestSequenceIndex(t *testing.T) {
// a subset of those files if we manage to run before a complete // a subset of those files if we manage to run before a complete
// update has happened since our last iteration. // update has happened since our last iteration.
latest = latest[:0] latest = latest[:0]
s.WithHaveSequence(seq+1, func(f db.FileIntf) bool { snap := s.Snapshot()
snap.WithHaveSequence(seq+1, func(f db.FileIntf) bool {
seen[f.FileName()] = f seen[f.FileName()] = f
latest = append(latest, f) latest = append(latest, f)
seq = f.SequenceNo() seq = f.SequenceNo()
return true return true
}) })
snap.Release()
// Calculate the spread in sequence number. // Calculate the spread in sequence number.
var max, min int64 var max, min int64
@ -1449,7 +1470,9 @@ func TestIgnoreAfterReceiveOnly(t *testing.T) {
s.Update(protocol.LocalDeviceID, fs) s.Update(protocol.LocalDeviceID, fs)
if f, ok := s.Get(protocol.LocalDeviceID, file); !ok { snap := s.Snapshot()
defer snap.Release()
if f, ok := snap.Get(protocol.LocalDeviceID, file); !ok {
t.Error("File missing in db") t.Error("File missing in db")
} else if f.IsReceiveOnlyChanged() { } else if f.IsReceiveOnlyChanged() {
t.Error("File is still receive-only changed") t.Error("File is still receive-only changed")
@ -1462,3 +1485,21 @@ func replace(fs *db.FileSet, device protocol.DeviceID, files []protocol.FileInfo
fs.Drop(device) fs.Drop(device)
fs.Update(device, files) fs.Update(device, files)
} }
// localSize is a test helper returning the local counts via a
// temporary snapshot.
func localSize(fs *db.FileSet) db.Counts {
	snap := fs.Snapshot()
	defer snap.Release()
	return snap.LocalSize()
}
// globalSize is a test helper returning the global counts via a
// temporary snapshot.
func globalSize(fs *db.FileSet) db.Counts {
	snap := fs.Snapshot()
	defer snap.Release()
	return snap.GlobalSize()
}
// receiveOnlyChangedSize is a test helper returning the counts of
// receive-only-changed items via a temporary snapshot.
func receiveOnlyChangedSize(fs *db.FileSet) db.Counts {
	snap := fs.Snapshot()
	defer snap.Release()
	return snap.ReceiveOnlyChangedSize()
}

View File

@ -7,6 +7,8 @@
package db package db
import ( import (
"bytes"
"github.com/syncthing/syncthing/lib/db/backend" "github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
) )
@ -94,6 +96,291 @@ func (t readOnlyTransaction) getGlobal(keyBuf, folder, file []byte, truncate boo
return keyBuf, fi, true, nil return keyBuf, fi, true, nil
} }
// withHave iterates over the FileInfos the given device has announced
// for the folder, optionally restricted to names under prefix. An
// item whose name equals prefix exactly is also iterated; mere string
// prefixes that are not path children (e.g. "dir.file" for prefix
// "dir") are not. If truncate is set, truncated FileInfos are passed
// to fn. Iteration stops early when fn returns false.
func (t *readOnlyTransaction) withHave(folder, device, prefix []byte, truncate bool, fn Iterator) error {
	if len(prefix) > 0 {
		unslashedPrefix := prefix
		if bytes.HasSuffix(prefix, []byte{'/'}) {
			unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
		} else {
			prefix = append(prefix, '/')
		}

		// Handle an exact match on the prefix name itself separately;
		// the slash-terminated prefix used for the iterator below
		// would not match it.
		key, err := t.keyer.GenerateDeviceFileKey(nil, folder, device, unslashedPrefix)
		if err != nil {
			return err
		}
		// NOTE(review): this lookup always fetches truncated
		// (hard-coded true) regardless of the truncate parameter —
		// confirm this is intended for the exact-prefix item.
		if f, ok, err := t.getFileTrunc(key, true); err != nil {
			return err
		} else if ok && !fn(f) {
			return nil
		}
	}

	key, err := t.keyer.GenerateDeviceFileKey(nil, folder, device, prefix)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key)
	if err != nil {
		return err
	}
	defer dbi.Release()

	for dbi.Next() {
		name := t.keyer.NameFromDeviceFileKey(dbi.Key())
		// The raw key prefix can match names that are not path
		// children; stop once we leave the slash-terminated prefix.
		if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
			return nil
		}

		f, err := unmarshalTrunc(dbi.Value(), truncate)
		if err != nil {
			// Corrupt entry: log and skip rather than abort the
			// whole iteration.
			l.Debugln("unmarshal error:", err)
			continue
		}
		if !fn(f) {
			return nil
		}
	}
	return dbi.Error()
}
// withHaveSequence iterates, in ascending sequence order, over the
// folder's local FileInfos with sequence numbers >= startSeq.
// Iteration stops early when fn returns false.
func (t *readOnlyTransaction) withHaveSequence(folder []byte, startSeq int64, fn Iterator) error {
	first, err := t.keyer.GenerateSequenceKey(nil, folder, startSeq)
	if err != nil {
		return err
	}
	last, err := t.keyer.GenerateSequenceKey(nil, folder, maxInt64)
	if err != nil {
		return err
	}
	dbi, err := t.NewRangeIterator(first, last)
	if err != nil {
		return err
	}
	defer dbi.Release()

	for dbi.Next() {
		// The sequence index stores the device file key as its value;
		// resolve it to the actual FileInfo.
		f, ok, err := t.getFileByKey(dbi.Value())
		if err != nil {
			return err
		}
		if !ok {
			// Sequence entry points at a file record that no longer
			// exists; skip it.
			l.Debugln("missing file for sequence number", t.keyer.SequenceFromSequenceKey(dbi.Key()))
			continue
		}

		if shouldDebug() {
			// Debug-only consistency check: the sequence stored in
			// the file must match the index key it was found under.
			if seq := t.keyer.SequenceFromSequenceKey(dbi.Key()); f.Sequence != seq {
				l.Warnf("Sequence index corruption (folder %v, file %v): sequence %d != expected %d", string(folder), f.Name, f.Sequence, seq)
				panic("sequence index corruption")
			}
		}
		if !fn(f) {
			return nil
		}
	}
	return dbi.Error()
}
// withGlobal iterates over the global (newest) version of each file in
// the folder, optionally restricted to names under prefix. An item
// whose name equals prefix exactly is also iterated; mere string
// prefixes that are not path children (e.g. "dir.file" for prefix
// "dir") are not. If truncate is set, truncated FileInfos are passed
// to fn. Iteration stops early when fn returns false.
func (t *readOnlyTransaction) withGlobal(folder, prefix []byte, truncate bool, fn Iterator) error {
	if len(prefix) > 0 {
		unslashedPrefix := prefix
		if bytes.HasSuffix(prefix, []byte{'/'}) {
			unslashedPrefix = unslashedPrefix[:len(unslashedPrefix)-1]
		} else {
			prefix = append(prefix, '/')
		}

		// Handle an exact match on the prefix name itself separately;
		// the slash-terminated prefix used for the iterator below
		// would not match it.
		if _, f, ok, err := t.getGlobal(nil, folder, unslashedPrefix, truncate); err != nil {
			return err
		} else if ok && !fn(f) {
			return nil
		}
	}

	key, err := t.keyer.GenerateGlobalVersionKey(nil, folder, prefix)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key)
	if err != nil {
		return err
	}
	defer dbi.Release()

	var dk []byte
	for dbi.Next() {
		name := t.keyer.NameFromGlobalVersionKey(dbi.Key())
		// The raw key prefix can match names that are not path
		// children; stop once we leave the slash-terminated prefix.
		if len(prefix) > 0 && !bytes.HasPrefix(name, prefix) {
			return nil
		}

		vl, ok := unmarshalVersionList(dbi.Value())
		if !ok {
			// Corrupt version list: skip the entry.
			continue
		}

		// Versions[0] is the newest version; fetch the FileInfo from
		// a device announcing it. dk is reused across iterations to
		// avoid reallocating the key buffer.
		dk, err = t.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name)
		if err != nil {
			return err
		}

		f, ok, err := t.getFileTrunc(dk, truncate)
		if err != nil {
			return err
		}
		if !ok {
			continue
		}

		if !fn(f) {
			return nil
		}
	}
	// The post-loop "if err != nil" check that used to sit here was
	// dead code: every assignment to err inside the loop returns
	// immediately on failure (and getFileTrunc's err is shadowed).
	return dbi.Error()
}
// availability returns the IDs of the devices that hold the newest
// (global) version of the given file, skipping entries marked invalid.
// A missing file or an unparsable version list yields a nil slice and
// no error.
func (t *readOnlyTransaction) availability(folder, file []byte) ([]protocol.DeviceID, error) {
	key, err := t.keyer.GenerateGlobalVersionKey(nil, folder, file)
	if err != nil {
		return nil, err
	}
	raw, err := t.Get(key)
	if backend.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	vl, ok := unmarshalVersionList(raw)
	if !ok {
		return nil, nil
	}

	newest := vl.Versions[0].Version
	var devs []protocol.DeviceID
	for _, fv := range vl.Versions {
		if !fv.Version.Equal(newest) {
			// The list is ordered newest-first; once an older version
			// appears we are done.
			break
		}
		if fv.Invalid {
			continue
		}
		devs = append(devs, protocol.DeviceIDFromBytes(fv.Device))
	}
	return devs, nil
}
// withNeed calls fn for every file in the folder that the given device
// needs, i.e. where the device's own version is absent or older than the
// current global version. For the local device it delegates to the
// dedicated need index via withNeedLocal. Iteration stops early when fn
// returns false.
func (t *readOnlyTransaction) withNeed(folder, device []byte, truncate bool, fn Iterator) error {
	if bytes.Equal(device, protocol.LocalDeviceID[:]) {
		return t.withNeedLocal(folder, truncate, fn)
	}
	// For remote devices there is no need index; scan the folder's entire
	// global version list instead.
	key, err := t.keyer.GenerateGlobalVersionKey(nil, folder, nil)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key.WithoutName())
	if err != nil {
		return err
	}
	defer dbi.Release()
	var dk []byte
	devID := protocol.DeviceIDFromBytes(device)
	for dbi.Next() {
		vl, ok := unmarshalVersionList(dbi.Value())
		if !ok {
			continue
		}
		haveFV, have := vl.Get(device)
		// XXX: This marks Concurrent (i.e. conflicting) changes as
		// needs. Maybe we should do that, but it needs special
		// handling in the puller.
		if have && haveFV.Version.GreaterEqual(vl.Versions[0].Version) {
			// The device already has the newest version; nothing to do.
			continue
		}
		name := t.keyer.NameFromGlobalVersionKey(dbi.Key())
		needVersion := vl.Versions[0].Version
		needDevice := protocol.DeviceIDFromBytes(vl.Versions[0].Device)
		// Walk the (newest-first) version list to find a valid, present
		// copy of the file at the needed version.
		for i := range vl.Versions {
			if !vl.Versions[i].Version.Equal(needVersion) {
				// We haven't found a valid copy of the file with the needed version.
				break
			}
			if vl.Versions[i].Invalid {
				// The file is marked invalid, don't use it.
				continue
			}
			dk, err = t.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[i].Device, name)
			if err != nil {
				return err
			}
			gf, ok, err := t.getFileTrunc(dk, truncate)
			if err != nil {
				return err
			}
			if !ok {
				// Version list references a file record that is missing;
				// try the next device holding this version.
				continue
			}
			if gf.IsDeleted() && !have {
				// We don't need deleted files that we don't have
				break
			}
			l.Debugf("need folder=%q device=%v name=%q have=%v invalid=%v haveV=%v globalV=%v globalDev=%v", folder, devID, name, have, haveFV.Invalid, haveFV.Version, needVersion, needDevice)
			if !fn(gf) {
				return nil
			}
			// This file is handled, no need to look further in the version list
			break
		}
	}
	return dbi.Error()
}
// withNeedLocal calls fn for every file the local device needs, using
// the folder's dedicated need index. Iteration stops early when fn
// returns false.
func (t *readOnlyTransaction) withNeedLocal(folder []byte, truncate bool, fn Iterator) error {
	prefix, err := t.keyer.GenerateNeedFileKey(nil, folder, nil)
	if err != nil {
		return err
	}
	it, err := t.NewPrefixIterator(prefix.WithoutName())
	if err != nil {
		return err
	}
	defer it.Release()

	var (
		keyBuf []byte
		fi     FileIntf
		found  bool
	)
	for it.Next() {
		// Resolve the needed name to its current global file record.
		name := t.keyer.NameFromGlobalVersionKey(it.Key())
		keyBuf, fi, found, err = t.getGlobal(keyBuf, folder, name, truncate)
		if err != nil {
			return err
		}
		if !found {
			continue
		}
		if !fn(fi) {
			return nil
		}
	}
	return it.Error()
}
// A readWriteTransaction is a readOnlyTransaction plus a batch for writes. // A readWriteTransaction is a readOnlyTransaction plus a batch for writes.
// The batch will be committed on close() or by checkFlush() if it exceeds the // The batch will be committed on close() or by checkFlush() if it exceeds the
// batch size. // batch size.
@ -353,6 +640,65 @@ func (t readWriteTransaction) deleteKeyPrefix(prefix []byte) error {
return dbi.Error() return dbi.Error()
} }
// withAllFolderTruncated calls fn with the device ID and a truncated
// (metadata-only) FileInfo for every file entry of every device in the
// folder. As a side effect it repairs the database: entries whose key
// carries an unknown device, and entries with obviously invalid file
// names, are deleted (the latter also removed from the global version
// list). Accumulated writes are committed before a normal return;
// returning early because fn returned false skips the final commit.
func (t *readWriteTransaction) withAllFolderTruncated(folder []byte, fn func(device []byte, f FileInfoTruncated) bool) error {
	key, err := t.keyer.GenerateDeviceFileKey(nil, folder, nil, nil)
	if err != nil {
		return err
	}
	dbi, err := t.NewPrefixIterator(key.WithoutNameAndDevice())
	if err != nil {
		return err
	}
	defer dbi.Release()
	var gk, keyBuf []byte
	for dbi.Next() {
		device, ok := t.keyer.DeviceFromDeviceFileKey(dbi.Key())
		if !ok {
			// Not having the device in the index is bad. Clear it.
			if err := t.Delete(dbi.Key()); err != nil {
				return err
			}
			continue
		}
		var f FileInfoTruncated
		// The iterator function may keep a reference to the unmarshalled
		// struct, which in turn references the buffer it was unmarshalled
		// from. dbi.Value() just returns an internal slice that it reuses, so
		// we need to copy it.
		err := f.Unmarshal(append([]byte{}, dbi.Value()...))
		if err != nil {
			return err
		}
		switch f.Name {
		case "", ".", "..", "/": // A few obviously invalid filenames
			l.Infof("Dropping invalid filename %q from database", f.Name)
			name := []byte(f.Name)
			// Remove the bad entry from the global version list as well
			// as its device file record, then move on.
			gk, err = t.keyer.GenerateGlobalVersionKey(gk, folder, name)
			if err != nil {
				return err
			}
			keyBuf, err = t.removeFromGlobal(gk, keyBuf, folder, device, name, nil)
			if err != nil {
				return err
			}
			if err := t.Delete(dbi.Key()); err != nil {
				return err
			}
			continue
		}
		if !fn(device, f) {
			return nil
		}
	}
	if err := dbi.Error(); err != nil {
		return err
	}
	return t.commit()
}
type marshaller interface { type marshaller interface {
Marshal() ([]byte, error) Marshal() ([]byte, error)
} }

View File

@ -328,11 +328,16 @@ func (f *folder) scanSubdirs(subDirs []string) error {
subDirs[i] = sub subDirs[i] = sub
} }
snap := f.fset.Snapshot()
// We release explicitly later in this function, however we might exit early
// and it's ok to release twice.
defer snap.Release()
// Clean the list of subitems to ensure that we start at a known // Clean the list of subitems to ensure that we start at a known
// directory, and don't scan subdirectories of things we've already // directory, and don't scan subdirectories of things we've already
// scanned. // scanned.
subDirs = unifySubs(subDirs, func(file string) bool { subDirs = unifySubs(subDirs, func(file string) bool {
_, ok := f.fset.Get(protocol.LocalDeviceID, file) _, ok := snap.Get(protocol.LocalDeviceID, file)
return ok return ok
}) })
@ -344,7 +349,7 @@ func (f *folder) scanSubdirs(subDirs []string) error {
Subs: subDirs, Subs: subDirs,
Matcher: f.ignores, Matcher: f.ignores,
TempLifetime: time.Duration(f.model.cfg.Options().KeepTemporariesH) * time.Hour, TempLifetime: time.Duration(f.model.cfg.Options().KeepTemporariesH) * time.Hour,
CurrentFiler: cFiler{f.fset}, CurrentFiler: cFiler{snap},
Filesystem: mtimefs, Filesystem: mtimefs,
IgnorePerms: f.IgnorePerms, IgnorePerms: f.IgnorePerms,
AutoNormalize: f.AutoNormalize, AutoNormalize: f.AutoNormalize,
@ -369,7 +374,7 @@ func (f *folder) scanSubdirs(subDirs []string) error {
oldBatchFn := batchFn // can't reference batchFn directly (recursion) oldBatchFn := batchFn // can't reference batchFn directly (recursion)
batchFn = func(fs []protocol.FileInfo) error { batchFn = func(fs []protocol.FileInfo) error {
for i := range fs { for i := range fs {
switch gf, ok := f.fset.GetGlobal(fs[i].Name); { switch gf, ok := snap.GetGlobal(fs[i].Name); {
case !ok: case !ok:
continue continue
case gf.IsEquivalentOptional(fs[i], f.ModTimeWindow(), false, false, protocol.FlagLocalReceiveOnly): case gf.IsEquivalentOptional(fs[i], f.ModTimeWindow(), false, false, protocol.FlagLocalReceiveOnly):
@ -426,10 +431,15 @@ func (f *folder) scanSubdirs(subDirs []string) error {
// ignored files. // ignored files.
var toIgnore []db.FileInfoTruncated var toIgnore []db.FileInfoTruncated
ignoredParent := "" ignoredParent := ""
snap.Release()
snap = f.fset.Snapshot()
defer snap.Release()
for _, sub := range subDirs { for _, sub := range subDirs {
var iterError error var iterError error
f.fset.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool { snap.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi db.FileIntf) bool {
select { select {
case <-f.ctx.Done(): case <-f.ctx.Done():
return false return false
@ -891,7 +901,7 @@ func unifySubs(dirs []string, exists func(dir string) bool) []string {
} }
type cFiler struct { type cFiler struct {
*db.FileSet *db.Snapshot
} }
// Implements scanner.CurrentFiler // Implements scanner.CurrentFiler

View File

@ -79,7 +79,9 @@ func (f *receiveOnlyFolder) Revert() {
batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles) batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
batchSizeBytes := 0 batchSizeBytes := 0
f.fset.WithHave(protocol.LocalDeviceID, func(intf db.FileIntf) bool { snap := f.fset.Snapshot()
defer snap.Release()
snap.WithHave(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
fi := intf.(protocol.FileInfo) fi := intf.(protocol.FileInfo)
if !fi.IsReceiveOnlyChanged() { if !fi.IsReceiveOnlyChanged() {
// We're only interested in files that have changed locally in // We're only interested in files that have changed locally in
@ -93,7 +95,7 @@ func (f *receiveOnlyFolder) Revert() {
// We'll delete files directly, directories get queued and // We'll delete files directly, directories get queued and
// handled below. // handled below.
handled, err := delQueue.handle(fi) handled, err := delQueue.handle(fi, snap)
if err != nil { if err != nil {
l.Infof("Revert: deleting %s: %v\n", fi.Name, err) l.Infof("Revert: deleting %s: %v\n", fi.Name, err)
return true // continue return true // continue
@ -138,7 +140,7 @@ func (f *receiveOnlyFolder) Revert() {
batchSizeBytes = 0 batchSizeBytes = 0
// Handle any queued directories // Handle any queued directories
deleted, err := delQueue.flush() deleted, err := delQueue.flush(snap)
if err != nil { if err != nil {
l.Infoln("Revert:", err) l.Infoln("Revert:", err)
} }
@ -167,15 +169,15 @@ func (f *receiveOnlyFolder) Revert() {
// directories for last. // directories for last.
type deleteQueue struct { type deleteQueue struct {
handler interface { handler interface {
deleteItemOnDisk(item protocol.FileInfo, scanChan chan<- string) error deleteItemOnDisk(item protocol.FileInfo, snap *db.Snapshot, scanChan chan<- string) error
deleteDirOnDisk(dir string, scanChan chan<- string) error deleteDirOnDisk(dir string, snap *db.Snapshot, scanChan chan<- string) error
} }
ignores *ignore.Matcher ignores *ignore.Matcher
dirs []string dirs []string
scanChan chan<- string scanChan chan<- string
} }
func (q *deleteQueue) handle(fi protocol.FileInfo) (bool, error) { func (q *deleteQueue) handle(fi protocol.FileInfo, snap *db.Snapshot) (bool, error) {
// Things that are ignored but not marked deletable are not processed. // Things that are ignored but not marked deletable are not processed.
ign := q.ignores.Match(fi.Name) ign := q.ignores.Match(fi.Name)
if ign.IsIgnored() && !ign.IsDeletable() { if ign.IsIgnored() && !ign.IsDeletable() {
@ -189,11 +191,11 @@ func (q *deleteQueue) handle(fi protocol.FileInfo) (bool, error) {
} }
// Kill it. // Kill it.
err := q.handler.deleteItemOnDisk(fi, q.scanChan) err := q.handler.deleteItemOnDisk(fi, snap, q.scanChan)
return true, err return true, err
} }
func (q *deleteQueue) flush() ([]string, error) { func (q *deleteQueue) flush(snap *db.Snapshot) ([]string, error) {
// Process directories from the leaves inward. // Process directories from the leaves inward.
sort.Sort(sort.Reverse(sort.StringSlice(q.dirs))) sort.Sort(sort.Reverse(sort.StringSlice(q.dirs)))
@ -201,7 +203,7 @@ func (q *deleteQueue) flush() ([]string, error) {
var deleted []string var deleted []string
for _, dir := range q.dirs { for _, dir := range q.dirs {
if err := q.handler.deleteDirOnDisk(dir, q.scanChan); err == nil { if err := q.handler.deleteDirOnDisk(dir, snap, q.scanChan); err == nil {
deleted = append(deleted, dir) deleted = append(deleted, dir)
} else if err != nil && firstError == nil { } else if err != nil && firstError == nil {
firstError = err firstError = err

View File

@ -48,7 +48,7 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
m.Index(device1, "ro", knownFiles) m.Index(device1, "ro", knownFiles)
f.updateLocalsFromScanning(knownFiles) f.updateLocalsFromScanning(knownFiles)
size := m.GlobalSize("ro") size := globalSize(t, m, "ro")
if size.Files != 1 || size.Directories != 1 { if size.Files != 1 || size.Directories != 1 {
t.Fatalf("Global: expected 1 file and 1 directory: %+v", size) t.Fatalf("Global: expected 1 file and 1 directory: %+v", size)
} }
@ -60,15 +60,15 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
// We should now have two files and two directories. // We should now have two files and two directories.
size = m.GlobalSize("ro") size = globalSize(t, m, "ro")
if size.Files != 2 || size.Directories != 2 { if size.Files != 2 || size.Directories != 2 {
t.Fatalf("Global: expected 2 files and 2 directories: %+v", size) t.Fatalf("Global: expected 2 files and 2 directories: %+v", size)
} }
size = m.LocalSize("ro") size = localSize(t, m, "ro")
if size.Files != 2 || size.Directories != 2 { if size.Files != 2 || size.Directories != 2 {
t.Fatalf("Local: expected 2 files and 2 directories: %+v", size) t.Fatalf("Local: expected 2 files and 2 directories: %+v", size)
} }
size = m.ReceiveOnlyChangedSize("ro") size = receiveOnlyChangedSize(t, m, "ro")
if size.Files+size.Directories == 0 { if size.Files+size.Directories == 0 {
t.Fatalf("ROChanged: expected something: %+v", size) t.Fatalf("ROChanged: expected something: %+v", size)
} }
@ -93,11 +93,11 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
// We should now have one file and directory again. // We should now have one file and directory again.
size = m.GlobalSize("ro") size = globalSize(t, m, "ro")
if size.Files != 1 || size.Directories != 1 { if size.Files != 1 || size.Directories != 1 {
t.Fatalf("Global: expected 1 files and 1 directories: %+v", size) t.Fatalf("Global: expected 1 files and 1 directories: %+v", size)
} }
size = m.LocalSize("ro") size = localSize(t, m, "ro")
if size.Files != 1 || size.Directories != 1 { if size.Files != 1 || size.Directories != 1 {
t.Fatalf("Local: expected 1 files and 1 directories: %+v", size) t.Fatalf("Local: expected 1 files and 1 directories: %+v", size)
} }
@ -131,19 +131,19 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
// Everything should be in sync. // Everything should be in sync.
size := m.GlobalSize("ro") size := globalSize(t, m, "ro")
if size.Files != 1 || size.Directories != 1 { if size.Files != 1 || size.Directories != 1 {
t.Fatalf("Global: expected 1 file and 1 directory: %+v", size) t.Fatalf("Global: expected 1 file and 1 directory: %+v", size)
} }
size = m.LocalSize("ro") size = localSize(t, m, "ro")
if size.Files != 1 || size.Directories != 1 { if size.Files != 1 || size.Directories != 1 {
t.Fatalf("Local: expected 1 file and 1 directory: %+v", size) t.Fatalf("Local: expected 1 file and 1 directory: %+v", size)
} }
size = m.NeedSize("ro") size = needSize(t, m, "ro")
if size.Files+size.Directories > 0 { if size.Files+size.Directories > 0 {
t.Fatalf("Need: expected nothing: %+v", size) t.Fatalf("Need: expected nothing: %+v", size)
} }
size = m.ReceiveOnlyChangedSize("ro") size = receiveOnlyChangedSize(t, m, "ro")
if size.Files+size.Directories > 0 { if size.Files+size.Directories > 0 {
t.Fatalf("ROChanged: expected nothing: %+v", size) t.Fatalf("ROChanged: expected nothing: %+v", size)
} }
@ -159,20 +159,20 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
// We now have a newer file than the rest of the cluster. Global state should reflect this. // We now have a newer file than the rest of the cluster. Global state should reflect this.
size = m.GlobalSize("ro") size = globalSize(t, m, "ro")
const sizeOfDir = 128 const sizeOfDir = 128
if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(oldData)) { if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(oldData)) {
t.Fatalf("Global: expected no change due to the new file: %+v", size) t.Fatalf("Global: expected no change due to the new file: %+v", size)
} }
size = m.LocalSize("ro") size = localSize(t, m, "ro")
if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(newData)) { if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(newData)) {
t.Fatalf("Local: expected the new file to be reflected: %+v", size) t.Fatalf("Local: expected the new file to be reflected: %+v", size)
} }
size = m.NeedSize("ro") size = needSize(t, m, "ro")
if size.Files+size.Directories > 0 { if size.Files+size.Directories > 0 {
t.Fatalf("Need: expected nothing: %+v", size) t.Fatalf("Need: expected nothing: %+v", size)
} }
size = m.ReceiveOnlyChangedSize("ro") size = receiveOnlyChangedSize(t, m, "ro")
if size.Files+size.Directories == 0 { if size.Files+size.Directories == 0 {
t.Fatalf("ROChanged: expected something: %+v", size) t.Fatalf("ROChanged: expected something: %+v", size)
} }
@ -181,15 +181,15 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
m.Revert("ro") m.Revert("ro")
size = m.GlobalSize("ro") size = globalSize(t, m, "ro")
if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(oldData)) { if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(oldData)) {
t.Fatalf("Global: expected the global size to revert: %+v", size) t.Fatalf("Global: expected the global size to revert: %+v", size)
} }
size = m.LocalSize("ro") size = localSize(t, m, "ro")
if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(newData)) { if size.Files != 1 || size.Bytes != sizeOfDir+int64(len(newData)) {
t.Fatalf("Local: expected the local size to remain: %+v", size) t.Fatalf("Local: expected the local size to remain: %+v", size)
} }
size = m.NeedSize("ro") size = needSize(t, m, "ro")
if size.Files != 1 || size.Bytes != int64(len(oldData)) { if size.Files != 1 || size.Bytes != int64(len(oldData)) {
t.Fatalf("Local: expected to need the old file data: %+v", size) t.Fatalf("Local: expected to need the old file data: %+v", size)
} }
@ -227,19 +227,19 @@ func TestRecvOnlyUndoChanges(t *testing.T) {
// Everything should be in sync. // Everything should be in sync.
size := m.GlobalSize("ro") size := globalSize(t, m, "ro")
if size.Files != 1 || size.Directories != 1 { if size.Files != 1 || size.Directories != 1 {
t.Fatalf("Global: expected 1 file and 1 directory: %+v", size) t.Fatalf("Global: expected 1 file and 1 directory: %+v", size)
} }
size = m.LocalSize("ro") size = localSize(t, m, "ro")
if size.Files != 1 || size.Directories != 1 { if size.Files != 1 || size.Directories != 1 {
t.Fatalf("Local: expected 1 file and 1 directory: %+v", size) t.Fatalf("Local: expected 1 file and 1 directory: %+v", size)
} }
size = m.NeedSize("ro") size = needSize(t, m, "ro")
if size.Files+size.Directories > 0 { if size.Files+size.Directories > 0 {
t.Fatalf("Need: expected nothing: %+v", size) t.Fatalf("Need: expected nothing: %+v", size)
} }
size = m.ReceiveOnlyChangedSize("ro") size = receiveOnlyChangedSize(t, m, "ro")
if size.Files+size.Directories > 0 { if size.Files+size.Directories > 0 {
t.Fatalf("ROChanged: expected nothing: %+v", size) t.Fatalf("ROChanged: expected nothing: %+v", size)
} }
@ -253,7 +253,7 @@ func TestRecvOnlyUndoChanges(t *testing.T) {
m.ScanFolder("ro") m.ScanFolder("ro")
size = m.ReceiveOnlyChangedSize("ro") size = receiveOnlyChangedSize(t, m, "ro")
if size.Files != 2 { if size.Files != 2 {
t.Fatalf("Receive only: expected 2 files: %+v", size) t.Fatalf("Receive only: expected 2 files: %+v", size)
} }
@ -266,7 +266,7 @@ func TestRecvOnlyUndoChanges(t *testing.T) {
m.ScanFolder("ro") m.ScanFolder("ro")
size = m.ReceiveOnlyChangedSize("ro") size = receiveOnlyChangedSize(t, m, "ro")
if size.Files+size.Directories+size.Deleted != 0 { if size.Files+size.Directories+size.Deleted != 0 {
t.Fatalf("Receive only: expected all zero: %+v", size) t.Fatalf("Receive only: expected all zero: %+v", size)
} }

View File

@ -50,7 +50,9 @@ func (f *sendOnlyFolder) pull() bool {
batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles) batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
batchSizeBytes := 0 batchSizeBytes := 0
f.fset.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool { snap := f.fset.Snapshot()
defer snap.Release()
snap.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes { if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
f.updateLocalsFromPulling(batch) f.updateLocalsFromPulling(batch)
batch = batch[:0] batch = batch[:0]
@ -66,7 +68,7 @@ func (f *sendOnlyFolder) pull() bool {
return true return true
} }
curFile, ok := f.fset.Get(protocol.LocalDeviceID, intf.FileName()) curFile, ok := snap.Get(protocol.LocalDeviceID, intf.FileName())
if !ok { if !ok {
if intf.IsDeleted() { if intf.IsDeleted() {
l.Debugln("Should never get a deleted file as needed when we don't have it") l.Debugln("Should never get a deleted file as needed when we don't have it")
@ -98,7 +100,9 @@ func (f *sendOnlyFolder) Override() {
f.setState(FolderScanning) f.setState(FolderScanning)
batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles) batch := make([]protocol.FileInfo, 0, maxBatchSizeFiles)
batchSizeBytes := 0 batchSizeBytes := 0
f.fset.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool { snap := f.fset.Snapshot()
defer snap.Release()
snap.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
need := fi.(protocol.FileInfo) need := fi.(protocol.FileInfo)
if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes { if len(batch) == maxBatchSizeFiles || batchSizeBytes > maxBatchSizeBytes {
f.updateLocalsFromScanning(batch) f.updateLocalsFromScanning(batch)
@ -106,7 +110,7 @@ func (f *sendOnlyFolder) Override() {
batchSizeBytes = 0 batchSizeBytes = 0
} }
have, ok := f.fset.Get(protocol.LocalDeviceID, need.Name) have, ok := snap.Get(protocol.LocalDeviceID, need.Name)
// Don't override files that are in a bad state (ignored, // Don't override files that are in a bad state (ignored,
// unsupported, must rescan, ...). // unsupported, must rescan, ...).
if ok && have.IsInvalid() { if ok && have.IsInvalid() {

View File

@ -149,10 +149,12 @@ func (f *sendReceiveFolder) pull() bool {
// If there is nothing to do, don't even enter pulling state. // If there is nothing to do, don't even enter pulling state.
abort := true abort := true
f.fset.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool { snap := f.fset.Snapshot()
snap.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
abort = false abort = false
return false return false
}) })
snap.Release()
if abort { if abort {
return true return true
} }
@ -234,6 +236,9 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) int {
f.pullErrors = make(map[string]string) f.pullErrors = make(map[string]string)
f.pullErrorsMut.Unlock() f.pullErrorsMut.Unlock()
snap := f.fset.Snapshot()
defer snap.Release()
pullChan := make(chan pullBlockState) pullChan := make(chan pullBlockState)
copyChan := make(chan copyBlocksState) copyChan := make(chan copyBlocksState)
finisherChan := make(chan *sharedPullerState) finisherChan := make(chan *sharedPullerState)
@ -272,11 +277,11 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) int {
doneWg.Add(1) doneWg.Add(1)
// finisherRoutine finishes when finisherChan is closed // finisherRoutine finishes when finisherChan is closed
go func() { go func() {
f.finisherRoutine(finisherChan, dbUpdateChan, scanChan) f.finisherRoutine(snap, finisherChan, dbUpdateChan, scanChan)
doneWg.Done() doneWg.Done()
}() }()
changed, fileDeletions, dirDeletions, err := f.processNeeded(dbUpdateChan, copyChan, scanChan) changed, fileDeletions, dirDeletions, err := f.processNeeded(snap, dbUpdateChan, copyChan, scanChan)
// Signal copy and puller routines that we are done with the in data for // Signal copy and puller routines that we are done with the in data for
// this iteration. Wait for them to finish. // this iteration. Wait for them to finish.
@ -291,7 +296,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) int {
doneWg.Wait() doneWg.Wait()
if err == nil { if err == nil {
f.processDeletions(fileDeletions, dirDeletions, dbUpdateChan, scanChan) f.processDeletions(fileDeletions, dirDeletions, snap, dbUpdateChan, scanChan)
} }
// Wait for db updates and scan scheduling to complete // Wait for db updates and scan scheduling to complete
@ -307,7 +312,7 @@ func (f *sendReceiveFolder) pullerIteration(scanChan chan<- string) int {
return changed return changed
} }
func (f *sendReceiveFolder) processNeeded(dbUpdateChan chan<- dbUpdateJob, copyChan chan<- copyBlocksState, scanChan chan<- string) (int, map[string]protocol.FileInfo, []protocol.FileInfo, error) { func (f *sendReceiveFolder) processNeeded(snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, copyChan chan<- copyBlocksState, scanChan chan<- string) (int, map[string]protocol.FileInfo, []protocol.FileInfo, error) {
changed := 0 changed := 0
var dirDeletions []protocol.FileInfo var dirDeletions []protocol.FileInfo
fileDeletions := map[string]protocol.FileInfo{} fileDeletions := map[string]protocol.FileInfo{}
@ -317,7 +322,7 @@ func (f *sendReceiveFolder) processNeeded(dbUpdateChan chan<- dbUpdateJob, copyC
// Regular files to pull goes into the file queue, everything else // Regular files to pull goes into the file queue, everything else
// (directories, symlinks and deletes) goes into the "process directly" // (directories, symlinks and deletes) goes into the "process directly"
// pile. // pile.
f.fset.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool { snap.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {
select { select {
case <-f.ctx.Done(): case <-f.ctx.Done():
return false return false
@ -359,9 +364,9 @@ func (f *sendReceiveFolder) processNeeded(dbUpdateChan chan<- dbUpdateJob, copyC
// files to delete inside them before we get to that point. // files to delete inside them before we get to that point.
dirDeletions = append(dirDeletions, file) dirDeletions = append(dirDeletions, file)
} else if file.IsSymlink() { } else if file.IsSymlink() {
f.deleteFile(file, dbUpdateChan, scanChan) f.deleteFile(file, snap, dbUpdateChan, scanChan)
} else { } else {
df, ok := f.fset.Get(protocol.LocalDeviceID, file.Name) df, ok := snap.Get(protocol.LocalDeviceID, file.Name)
// Local file can be already deleted, but with a lower version // Local file can be already deleted, but with a lower version
// number, hence the deletion coming in again as part of // number, hence the deletion coming in again as part of
// WithNeed, furthermore, the file can simply be of the wrong // WithNeed, furthermore, the file can simply be of the wrong
@ -377,7 +382,7 @@ func (f *sendReceiveFolder) processNeeded(dbUpdateChan chan<- dbUpdateJob, copyC
} }
case file.Type == protocol.FileInfoTypeFile: case file.Type == protocol.FileInfoTypeFile:
curFile, hasCurFile := f.fset.Get(protocol.LocalDeviceID, file.Name) curFile, hasCurFile := snap.Get(protocol.LocalDeviceID, file.Name)
if _, need := blockDiff(curFile.Blocks, file.Blocks); hasCurFile && len(need) == 0 { if _, need := blockDiff(curFile.Blocks, file.Blocks); hasCurFile && len(need) == 0 {
// We are supposed to copy the entire file, and then fetch nothing. We // We are supposed to copy the entire file, and then fetch nothing. We
// are only updating metadata, so we don't actually *need* to make the // are only updating metadata, so we don't actually *need* to make the
@ -396,13 +401,13 @@ func (f *sendReceiveFolder) processNeeded(dbUpdateChan chan<- dbUpdateJob, copyC
case file.IsDirectory() && !file.IsSymlink(): case file.IsDirectory() && !file.IsSymlink():
l.Debugln(f, "Handling directory", file.Name) l.Debugln(f, "Handling directory", file.Name)
if f.checkParent(file.Name, scanChan) { if f.checkParent(file.Name, scanChan) {
f.handleDir(file, dbUpdateChan, scanChan) f.handleDir(file, snap, dbUpdateChan, scanChan)
} }
case file.IsSymlink(): case file.IsSymlink():
l.Debugln(f, "Handling symlink", file.Name) l.Debugln(f, "Handling symlink", file.Name)
if f.checkParent(file.Name, scanChan) { if f.checkParent(file.Name, scanChan) {
f.handleSymlink(file, dbUpdateChan, scanChan) f.handleSymlink(file, snap, dbUpdateChan, scanChan)
} }
default: default:
@ -451,7 +456,7 @@ nextFile:
break break
} }
fi, ok := f.fset.GetGlobal(fileName) fi, ok := snap.GetGlobal(fileName)
if !ok { if !ok {
// File is no longer in the index. Mark it as done and drop it. // File is no longer in the index. Mark it as done and drop it.
f.queue.Done(fileName) f.queue.Done(fileName)
@ -484,7 +489,7 @@ nextFile:
// desired state with the delete bit set is in the deletion // desired state with the delete bit set is in the deletion
// map. // map.
desired := fileDeletions[candidate.Name] desired := fileDeletions[candidate.Name]
if err := f.renameFile(candidate, desired, fi, dbUpdateChan, scanChan); err != nil { if err := f.renameFile(candidate, desired, fi, snap, dbUpdateChan, scanChan); err != nil {
// Failed to rename, try to handle files as separate // Failed to rename, try to handle files as separate
// deletions and updates. // deletions and updates.
break break
@ -498,11 +503,11 @@ nextFile:
} }
} }
devices := f.fset.Availability(fileName) devices := snap.Availability(fileName)
for _, dev := range devices { for _, dev := range devices {
if _, ok := f.model.Connection(dev); ok { if _, ok := f.model.Connection(dev); ok {
// Handle the file normally, by coping and pulling, etc. // Handle the file normally, by coping and pulling, etc.
f.handleFile(fi, copyChan, dbUpdateChan) f.handleFile(fi, snap, copyChan)
continue nextFile continue nextFile
} }
} }
@ -513,7 +518,7 @@ nextFile:
return changed, fileDeletions, dirDeletions, nil return changed, fileDeletions, dirDeletions, nil
} }
func (f *sendReceiveFolder) processDeletions(fileDeletions map[string]protocol.FileInfo, dirDeletions []protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) { func (f *sendReceiveFolder) processDeletions(fileDeletions map[string]protocol.FileInfo, dirDeletions []protocol.FileInfo, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
for _, file := range fileDeletions { for _, file := range fileDeletions {
select { select {
case <-f.ctx.Done(): case <-f.ctx.Done():
@ -521,7 +526,7 @@ func (f *sendReceiveFolder) processDeletions(fileDeletions map[string]protocol.F
default: default:
} }
f.deleteFile(file, dbUpdateChan, scanChan) f.deleteFile(file, snap, dbUpdateChan, scanChan)
} }
// Process in reverse order to delete depth first // Process in reverse order to delete depth first
@ -534,12 +539,12 @@ func (f *sendReceiveFolder) processDeletions(fileDeletions map[string]protocol.F
dir := dirDeletions[len(dirDeletions)-i-1] dir := dirDeletions[len(dirDeletions)-i-1]
l.Debugln(f, "Deleting dir", dir.Name) l.Debugln(f, "Deleting dir", dir.Name)
f.deleteDir(dir, dbUpdateChan, scanChan) f.deleteDir(dir, snap, dbUpdateChan, scanChan)
} }
} }
// handleDir creates or updates the given directory // handleDir creates or updates the given directory
func (f *sendReceiveFolder) handleDir(file protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) { func (f *sendReceiveFolder) handleDir(file protocol.FileInfo, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
// Used in the defer closure below, updated by the function body. Take // Used in the defer closure below, updated by the function body. Take
// care not declare another err. // care not declare another err.
var err error var err error
@ -567,7 +572,7 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo, dbUpdateChan chan<
} }
if shouldDebug() { if shouldDebug() {
curFile, _ := f.fset.Get(protocol.LocalDeviceID, file.Name) curFile, _ := snap.Get(protocol.LocalDeviceID, file.Name)
l.Debugf("need dir\n\t%v\n\t%v", file, curFile) l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
} }
@ -598,7 +603,7 @@ func (f *sendReceiveFolder) handleDir(file protocol.FileInfo, dbUpdateChan chan<
return f.moveForConflict(name, file.ModifiedBy.String(), scanChan) return f.moveForConflict(name, file.ModifiedBy.String(), scanChan)
}, curFile.Name) }, curFile.Name)
} else { } else {
err = f.deleteItemOnDisk(curFile, scanChan) err = f.deleteItemOnDisk(curFile, snap, scanChan)
} }
if err != nil { if err != nil {
f.newPullError(file.Name, err) f.newPullError(file.Name, err)
@ -693,7 +698,7 @@ func (f *sendReceiveFolder) checkParent(file string, scanChan chan<- string) boo
} }
// handleSymlink creates or updates the given symlink // handleSymlink creates or updates the given symlink
func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) { func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
// Used in the defer closure below, updated by the function body. Take // Used in the defer closure below, updated by the function body. Take
// care not declare another err. // care not declare another err.
var err error var err error
@ -716,7 +721,7 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo, dbUpdateChan c
}() }()
if shouldDebug() { if shouldDebug() {
curFile, _ := f.fset.Get(protocol.LocalDeviceID, file.Name) curFile, _ := snap.Get(protocol.LocalDeviceID, file.Name)
l.Debugf("need symlink\n\t%v\n\t%v", file, curFile) l.Debugf("need symlink\n\t%v\n\t%v", file, curFile)
} }
@ -750,7 +755,7 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo, dbUpdateChan c
return f.moveForConflict(name, file.ModifiedBy.String(), scanChan) return f.moveForConflict(name, file.ModifiedBy.String(), scanChan)
}, curFile.Name) }, curFile.Name)
} else { } else {
err = f.deleteItemOnDisk(curFile, scanChan) err = f.deleteItemOnDisk(curFile, snap, scanChan)
} }
if err != nil { if err != nil {
f.newPullError(file.Name, errors.Wrap(err, "symlink remove")) f.newPullError(file.Name, errors.Wrap(err, "symlink remove"))
@ -775,7 +780,7 @@ func (f *sendReceiveFolder) handleSymlink(file protocol.FileInfo, dbUpdateChan c
} }
// deleteDir attempts to remove a directory that was deleted on a remote // deleteDir attempts to remove a directory that was deleted on a remote
func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) { func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
// Used in the defer closure below, updated by the function body. Take // Used in the defer closure below, updated by the function body. Take
// care not declare another err. // care not declare another err.
var err error var err error
@ -797,7 +802,7 @@ func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, dbUpdateChan chan<
}) })
}() }()
if err = f.deleteDirOnDisk(file.Name, scanChan); err != nil { if err = f.deleteDirOnDisk(file.Name, snap, scanChan); err != nil {
f.newPullError(file.Name, errors.Wrap(err, "delete dir")) f.newPullError(file.Name, errors.Wrap(err, "delete dir"))
return return
} }
@ -806,8 +811,8 @@ func (f *sendReceiveFolder) deleteDir(file protocol.FileInfo, dbUpdateChan chan<
} }
// deleteFile attempts to delete the given file // deleteFile attempts to delete the given file
func (f *sendReceiveFolder) deleteFile(file protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) { func (f *sendReceiveFolder) deleteFile(file protocol.FileInfo, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
cur, hasCur := f.fset.Get(protocol.LocalDeviceID, file.Name) cur, hasCur := snap.Get(protocol.LocalDeviceID, file.Name)
f.deleteFileWithCurrent(file, cur, hasCur, dbUpdateChan, scanChan) f.deleteFileWithCurrent(file, cur, hasCur, dbUpdateChan, scanChan)
} }
@ -895,7 +900,7 @@ func (f *sendReceiveFolder) deleteFileWithCurrent(file, cur protocol.FileInfo, h
// renameFile attempts to rename an existing file to a destination // renameFile attempts to rename an existing file to a destination
// and set the right attributes on it. // and set the right attributes on it.
func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) error { func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) error {
// Used in the defer closure below, updated by the function body. Take // Used in the defer closure below, updated by the function body. Take
// care not declare another err. // care not declare another err.
var err error var err error
@ -937,7 +942,7 @@ func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, db
return err return err
} }
// Check that the target corresponds to what we have in the DB // Check that the target corresponds to what we have in the DB
curTarget, ok := f.fset.Get(protocol.LocalDeviceID, target.Name) curTarget, ok := snap.Get(protocol.LocalDeviceID, target.Name)
switch stat, serr := f.fs.Lstat(target.Name); { switch stat, serr := f.fs.Lstat(target.Name); {
case serr != nil && fs.IsNotExist(serr): case serr != nil && fs.IsNotExist(serr):
if !ok || curTarget.IsDeleted() { if !ok || curTarget.IsDeleted() {
@ -994,7 +999,7 @@ func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, db
// of the source and the creation of the target temp file. Fix-up the metadata, // of the source and the creation of the target temp file. Fix-up the metadata,
// update the local index of the target file and rename from temp to real name. // update the local index of the target file and rename from temp to real name.
if err = f.performFinish(target, curTarget, true, tempName, dbUpdateChan, scanChan); err != nil { if err = f.performFinish(target, curTarget, true, tempName, snap, dbUpdateChan, scanChan); err != nil {
return err return err
} }
@ -1039,8 +1044,8 @@ func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, db
// handleFile queues the copies and pulls as necessary for a single new or // handleFile queues the copies and pulls as necessary for a single new or
// changed file. // changed file.
func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, dbUpdateChan chan<- dbUpdateJob) { func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, snap *db.Snapshot, copyChan chan<- copyBlocksState) {
curFile, hasCurFile := f.fset.Get(protocol.LocalDeviceID, file.Name) curFile, hasCurFile := snap.Get(protocol.LocalDeviceID, file.Name)
have, _ := blockDiff(curFile.Blocks, file.Blocks) have, _ := blockDiff(curFile.Blocks, file.Blocks)
@ -1493,7 +1498,7 @@ func (f *sendReceiveFolder) pullBlock(state pullBlockState, out chan<- *sharedPu
out <- state.sharedPullerState out <- state.sharedPullerState
} }
func (f *sendReceiveFolder) performFinish(file, curFile protocol.FileInfo, hasCurFile bool, tempName string, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) error { func (f *sendReceiveFolder) performFinish(file, curFile protocol.FileInfo, hasCurFile bool, tempName string, snap *db.Snapshot, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) error {
// Set the correct permission bits on the new file // Set the correct permission bits on the new file
if !f.IgnorePerms && !file.NoPermissions { if !f.IgnorePerms && !file.NoPermissions {
if err := f.fs.Chmod(tempName, fs.FileMode(file.Permissions&0777)); err != nil { if err := f.fs.Chmod(tempName, fs.FileMode(file.Permissions&0777)); err != nil {
@ -1528,7 +1533,7 @@ func (f *sendReceiveFolder) performFinish(file, curFile protocol.FileInfo, hasCu
return f.moveForConflict(name, file.ModifiedBy.String(), scanChan) return f.moveForConflict(name, file.ModifiedBy.String(), scanChan)
}, curFile.Name) }, curFile.Name)
} else { } else {
err = f.deleteItemOnDisk(curFile, scanChan) err = f.deleteItemOnDisk(curFile, snap, scanChan)
} }
if err != nil { if err != nil {
return err return err
@ -1549,7 +1554,7 @@ func (f *sendReceiveFolder) performFinish(file, curFile protocol.FileInfo, hasCu
return nil return nil
} }
func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) { func (f *sendReceiveFolder) finisherRoutine(snap *db.Snapshot, in <-chan *sharedPullerState, dbUpdateChan chan<- dbUpdateJob, scanChan chan<- string) {
for state := range in { for state := range in {
if closed, err := state.finalClose(); closed { if closed, err := state.finalClose(); closed {
l.Debugln(f, "closing", state.file.Name) l.Debugln(f, "closing", state.file.Name)
@ -1557,7 +1562,7 @@ func (f *sendReceiveFolder) finisherRoutine(in <-chan *sharedPullerState, dbUpda
f.queue.Done(state.file.Name) f.queue.Done(state.file.Name)
if err == nil { if err == nil {
err = f.performFinish(state.file, state.curFile, state.hasCurFile, state.tempName, dbUpdateChan, scanChan) err = f.performFinish(state.file, state.curFile, state.hasCurFile, state.tempName, snap, dbUpdateChan, scanChan)
} }
if err != nil { if err != nil {
@ -1809,7 +1814,7 @@ func (f *sendReceiveFolder) Errors() []FileError {
} }
// deleteItemOnDisk deletes the file represented by old that is about to be replaced by new. // deleteItemOnDisk deletes the file represented by old that is about to be replaced by new.
func (f *sendReceiveFolder) deleteItemOnDisk(item protocol.FileInfo, scanChan chan<- string) (err error) { func (f *sendReceiveFolder) deleteItemOnDisk(item protocol.FileInfo, snap *db.Snapshot, scanChan chan<- string) (err error) {
defer func() { defer func() {
err = errors.Wrap(err, contextRemovingOldItem) err = errors.Wrap(err, contextRemovingOldItem)
}() }()
@ -1818,7 +1823,7 @@ func (f *sendReceiveFolder) deleteItemOnDisk(item protocol.FileInfo, scanChan ch
case item.IsDirectory(): case item.IsDirectory():
// Directories aren't archived and need special treatment due // Directories aren't archived and need special treatment due
// to potential children. // to potential children.
return f.deleteDirOnDisk(item.Name, scanChan) return f.deleteDirOnDisk(item.Name, snap, scanChan)
case !item.IsSymlink() && f.versioner != nil: case !item.IsSymlink() && f.versioner != nil:
// If we should use versioning, let the versioner archive the // If we should use versioning, let the versioner archive the
@ -1834,7 +1839,7 @@ func (f *sendReceiveFolder) deleteItemOnDisk(item protocol.FileInfo, scanChan ch
// deleteDirOnDisk attempts to delete a directory. It checks for files/dirs inside // deleteDirOnDisk attempts to delete a directory. It checks for files/dirs inside
// the directory and removes them if possible or returns an error if it fails // the directory and removes them if possible or returns an error if it fails
func (f *sendReceiveFolder) deleteDirOnDisk(dir string, scanChan chan<- string) error { func (f *sendReceiveFolder) deleteDirOnDisk(dir string, snap *db.Snapshot, scanChan chan<- string) error {
if err := osutil.TraversesSymlink(f.fs, filepath.Dir(dir)); err != nil { if err := osutil.TraversesSymlink(f.fs, filepath.Dir(dir)); err != nil {
return err return err
} }
@ -1853,7 +1858,7 @@ func (f *sendReceiveFolder) deleteDirOnDisk(dir string, scanChan chan<- string)
toBeDeleted = append(toBeDeleted, fullDirFile) toBeDeleted = append(toBeDeleted, fullDirFile)
} else if f.ignores != nil && f.ignores.Match(fullDirFile).IsIgnored() { } else if f.ignores != nil && f.ignores.Match(fullDirFile).IsIgnored() {
hasIgnored = true hasIgnored = true
} else if cf, ok := f.fset.Get(protocol.LocalDeviceID, fullDirFile); !ok || cf.IsDeleted() || cf.IsInvalid() { } else if cf, ok := snap.Get(protocol.LocalDeviceID, fullDirFile); !ok || cf.IsDeleted() || cf.IsInvalid() {
// Something appeared in the dir that we either are not aware of // Something appeared in the dir that we either are not aware of
// at all, that we think should be deleted or that is invalid, // at all, that we think should be deleted or that is invalid,
// but not currently ignored -> schedule scan. The scanChan // but not currently ignored -> schedule scan. The scanChan

View File

@ -148,9 +148,8 @@ func TestHandleFile(t *testing.T) {
defer cleanupSRFolder(f, m) defer cleanupSRFolder(f, m)
copyChan := make(chan copyBlocksState, 1) copyChan := make(chan copyBlocksState, 1)
dbUpdateChan := make(chan dbUpdateJob, 1)
f.handleFile(requiredFile, copyChan, dbUpdateChan) f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)
// Receive the results // Receive the results
toCopy := <-copyChan toCopy := <-copyChan
@ -195,9 +194,8 @@ func TestHandleFileWithTemp(t *testing.T) {
} }
copyChan := make(chan copyBlocksState, 1) copyChan := make(chan copyBlocksState, 1)
dbUpdateChan := make(chan dbUpdateJob, 1)
f.handleFile(requiredFile, copyChan, dbUpdateChan) f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)
// Receive the results // Receive the results
toCopy := <-copyChan toCopy := <-copyChan
@ -247,13 +245,12 @@ func TestCopierFinder(t *testing.T) {
copyChan := make(chan copyBlocksState) copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, 4) pullChan := make(chan pullBlockState, 4)
finisherChan := make(chan *sharedPullerState, 1) finisherChan := make(chan *sharedPullerState, 1)
dbUpdateChan := make(chan dbUpdateJob, 1)
// Run a single fetcher routine // Run a single fetcher routine
go f.copierRoutine(copyChan, pullChan, finisherChan) go f.copierRoutine(copyChan, pullChan, finisherChan)
defer close(copyChan) defer close(copyChan)
f.handleFile(requiredFile, copyChan, dbUpdateChan) f.handleFile(requiredFile, f.fset.Snapshot(), copyChan)
timeout := time.After(10 * time.Second) timeout := time.After(10 * time.Second)
pulls := make([]pullBlockState, 4) pulls := make([]pullBlockState, 4)
@ -378,7 +375,6 @@ func TestWeakHash(t *testing.T) {
copyChan := make(chan copyBlocksState) copyChan := make(chan copyBlocksState)
pullChan := make(chan pullBlockState, expectBlocks) pullChan := make(chan pullBlockState, expectBlocks)
finisherChan := make(chan *sharedPullerState, 1) finisherChan := make(chan *sharedPullerState, 1)
dbUpdateChan := make(chan dbUpdateJob, 1)
// Run a single fetcher routine // Run a single fetcher routine
go fo.copierRoutine(copyChan, pullChan, finisherChan) go fo.copierRoutine(copyChan, pullChan, finisherChan)
@ -386,7 +382,7 @@ func TestWeakHash(t *testing.T) {
// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls). // Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
fo.WeakHashThresholdPct = 101 fo.WeakHashThresholdPct = 101
fo.handleFile(desiredFile, copyChan, dbUpdateChan) fo.handleFile(desiredFile, fo.fset.Snapshot(), copyChan)
var pulls []pullBlockState var pulls []pullBlockState
timeout := time.After(10 * time.Second) timeout := time.After(10 * time.Second)
@ -415,7 +411,7 @@ func TestWeakHash(t *testing.T) {
// Test 2 - using weak hash, expectPulls blocks pulled. // Test 2 - using weak hash, expectPulls blocks pulled.
fo.WeakHashThresholdPct = -1 fo.WeakHashThresholdPct = -1
fo.handleFile(desiredFile, copyChan, dbUpdateChan) fo.handleFile(desiredFile, fo.fset.Snapshot(), copyChan)
pulls = pulls[:0] pulls = pulls[:0]
for len(pulls) < expectPulls { for len(pulls) < expectPulls {
@ -495,9 +491,10 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
finisherBufferChan := make(chan *sharedPullerState) finisherBufferChan := make(chan *sharedPullerState)
finisherChan := make(chan *sharedPullerState) finisherChan := make(chan *sharedPullerState)
dbUpdateChan := make(chan dbUpdateJob, 1) dbUpdateChan := make(chan dbUpdateJob, 1)
snap := f.fset.Snapshot()
copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan) copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string)) go f.finisherRoutine(snap, finisherChan, dbUpdateChan, make(chan string))
defer func() { defer func() {
close(copyChan) close(copyChan)
@ -507,7 +504,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {
close(finisherChan) close(finisherChan)
}() }()
f.handleFile(file, copyChan, dbUpdateChan) f.handleFile(file, snap, copyChan)
// Receive a block at puller, to indicate that at least a single copier // Receive a block at puller, to indicate that at least a single copier
// loop has been performed. // loop has been performed.
@ -589,6 +586,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
finisherBufferChan := make(chan *sharedPullerState) finisherBufferChan := make(chan *sharedPullerState)
finisherChan := make(chan *sharedPullerState) finisherChan := make(chan *sharedPullerState)
dbUpdateChan := make(chan dbUpdateJob, 1) dbUpdateChan := make(chan dbUpdateJob, 1)
snap := f.fset.Snapshot()
copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan) copyChan, copyWg := startCopier(f, pullChan, finisherBufferChan)
pullWg := sync.NewWaitGroup() pullWg := sync.NewWaitGroup()
@ -597,7 +595,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
f.pullerRoutine(pullChan, finisherBufferChan) f.pullerRoutine(pullChan, finisherBufferChan)
pullWg.Done() pullWg.Done()
}() }()
go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string)) go f.finisherRoutine(snap, finisherChan, dbUpdateChan, make(chan string))
defer func() { defer func() {
// Unblock copier and puller // Unblock copier and puller
go func() { go func() {
@ -612,7 +610,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
close(finisherChan) close(finisherChan)
}() }()
f.handleFile(file, copyChan, dbUpdateChan) f.handleFile(file, snap, copyChan)
// Receive at finisher, we should error out as puller has nowhere to pull // Receive at finisher, we should error out as puller has nowhere to pull
// from. // from.
@ -693,7 +691,7 @@ func TestIssue3164(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1) dbUpdateChan := make(chan dbUpdateJob, 1)
f.deleteDir(file, dbUpdateChan, make(chan string)) f.deleteDir(file, f.fset.Snapshot(), dbUpdateChan, make(chan string))
if _, err := ffs.Stat("issue3164"); !fs.IsNotExist(err) { if _, err := ffs.Stat("issue3164"); !fs.IsNotExist(err) {
t.Fatal(err) t.Fatal(err)
@ -832,7 +830,7 @@ func TestCopyOwner(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1) dbUpdateChan := make(chan dbUpdateJob, 1)
defer close(dbUpdateChan) defer close(dbUpdateChan)
f.handleDir(dir, dbUpdateChan, nil) f.handleDir(dir, f.fset.Snapshot(), dbUpdateChan, nil)
<-dbUpdateChan // empty the channel for later <-dbUpdateChan // empty the channel for later
info, err := f.fs.Lstat("foo/bar") info, err := f.fs.Lstat("foo/bar")
@ -858,16 +856,17 @@ func TestCopyOwner(t *testing.T) {
// but it's the way data is passed around. When the database update // but it's the way data is passed around. When the database update
// comes the finisher is done. // comes the finisher is done.
snap := f.fset.Snapshot()
finisherChan := make(chan *sharedPullerState) finisherChan := make(chan *sharedPullerState)
copierChan, copyWg := startCopier(f, nil, finisherChan) copierChan, copyWg := startCopier(f, nil, finisherChan)
go f.finisherRoutine(finisherChan, dbUpdateChan, nil) go f.finisherRoutine(snap, finisherChan, dbUpdateChan, nil)
defer func() { defer func() {
close(copierChan) close(copierChan)
copyWg.Wait() copyWg.Wait()
close(finisherChan) close(finisherChan)
}() }()
f.handleFile(file, copierChan, nil) f.handleFile(file, snap, copierChan)
<-dbUpdateChan <-dbUpdateChan
info, err = f.fs.Lstat("foo/bar/baz") info, err = f.fs.Lstat("foo/bar/baz")
@ -886,7 +885,7 @@ func TestCopyOwner(t *testing.T) {
SymlinkTarget: "over the rainbow", SymlinkTarget: "over the rainbow",
} }
f.handleSymlink(symlink, dbUpdateChan, nil) f.handleSymlink(symlink, snap, dbUpdateChan, nil)
<-dbUpdateChan <-dbUpdateChan
info, err = f.fs.Lstat("foo/bar/sym") info, err = f.fs.Lstat("foo/bar/sym")
@ -921,7 +920,7 @@ func TestSRConflictReplaceFileByDir(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1) dbUpdateChan := make(chan dbUpdateJob, 1)
scanChan := make(chan string, 1) scanChan := make(chan string, 1)
f.handleDir(file, dbUpdateChan, scanChan) f.handleDir(file, f.fset.Snapshot(), dbUpdateChan, scanChan)
if confls := existingConflicts(name, ffs); len(confls) != 1 { if confls := existingConflicts(name, ffs); len(confls) != 1 {
t.Fatal("Expected one conflict, got", len(confls)) t.Fatal("Expected one conflict, got", len(confls))
@ -954,7 +953,7 @@ func TestSRConflictReplaceFileByLink(t *testing.T) {
dbUpdateChan := make(chan dbUpdateJob, 1) dbUpdateChan := make(chan dbUpdateJob, 1)
scanChan := make(chan string, 1) scanChan := make(chan string, 1)
f.handleSymlink(file, dbUpdateChan, scanChan) f.handleSymlink(file, f.fset.Snapshot(), dbUpdateChan, scanChan)
if confls := existingConflicts(name, ffs); len(confls) != 1 { if confls := existingConflicts(name, ffs); len(confls) != 1 {
t.Fatal("Expected one conflict, got", len(confls)) t.Fatal("Expected one conflict, got", len(confls))
@ -996,7 +995,7 @@ func TestDeleteBehindSymlink(t *testing.T) {
fi.Version = fi.Version.Update(device1.Short()) fi.Version = fi.Version.Update(device1.Short())
scanChan := make(chan string, 1) scanChan := make(chan string, 1)
dbUpdateChan := make(chan dbUpdateJob, 1) dbUpdateChan := make(chan dbUpdateJob, 1)
f.deleteFile(fi, dbUpdateChan, scanChan) f.deleteFile(fi, f.fset.Snapshot(), dbUpdateChan, scanChan)
select { select {
case f := <-scanChan: case f := <-scanChan:
t.Fatalf("Received %v on scanChan", f) t.Fatalf("Received %v on scanChan", f)

View File

@ -77,6 +77,11 @@ func (c *folderSummaryService) String() string {
func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, error) { func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, error) {
var res = make(map[string]interface{}) var res = make(map[string]interface{})
snap, err := c.model.DBSnapshot(folder)
if err != nil {
return nil, err
}
errors, err := c.model.FolderErrors(folder) errors, err := c.model.FolderErrors(folder)
if err != nil && err != ErrFolderPaused && err != errFolderNotRunning { if err != nil && err != ErrFolderPaused && err != errFolderNotRunning {
// Stats from the db can still be obtained if the folder is just paused/being started // Stats from the db can still be obtained if the folder is just paused/being started
@ -87,19 +92,31 @@ func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, e
res["invalid"] = "" // Deprecated, retains external API for now res["invalid"] = "" // Deprecated, retains external API for now
global := c.model.GlobalSize(folder) global := snap.GlobalSize()
res["globalFiles"], res["globalDirectories"], res["globalSymlinks"], res["globalDeleted"], res["globalBytes"], res["globalTotalItems"] = global.Files, global.Directories, global.Symlinks, global.Deleted, global.Bytes, global.TotalItems() res["globalFiles"], res["globalDirectories"], res["globalSymlinks"], res["globalDeleted"], res["globalBytes"], res["globalTotalItems"] = global.Files, global.Directories, global.Symlinks, global.Deleted, global.Bytes, global.TotalItems()
local := c.model.LocalSize(folder) local := snap.LocalSize()
res["localFiles"], res["localDirectories"], res["localSymlinks"], res["localDeleted"], res["localBytes"], res["localTotalItems"] = local.Files, local.Directories, local.Symlinks, local.Deleted, local.Bytes, local.TotalItems() res["localFiles"], res["localDirectories"], res["localSymlinks"], res["localDeleted"], res["localBytes"], res["localTotalItems"] = local.Files, local.Directories, local.Symlinks, local.Deleted, local.Bytes, local.TotalItems()
need := c.model.NeedSize(folder) need := snap.NeedSize()
need.Bytes -= c.model.FolderProgressBytesCompleted(folder)
// This may happen if we are in progress of pulling files that were
// deleted globally after the pull started.
if need.Bytes < 0 {
need.Bytes = 0
}
res["needFiles"], res["needDirectories"], res["needSymlinks"], res["needDeletes"], res["needBytes"], res["needTotalItems"] = need.Files, need.Directories, need.Symlinks, need.Deleted, need.Bytes, need.TotalItems() res["needFiles"], res["needDirectories"], res["needSymlinks"], res["needDeletes"], res["needBytes"], res["needTotalItems"] = need.Files, need.Directories, need.Symlinks, need.Deleted, need.Bytes, need.TotalItems()
if c.cfg.Folders()[folder].Type == config.FolderTypeReceiveOnly { fcfg, ok := c.cfg.Folder(folder)
if ok && fcfg.IgnoreDelete {
res["needDeletes"] = 0
}
if ok && fcfg.Type == config.FolderTypeReceiveOnly {
// Add statistics for things that have changed locally in a receive // Add statistics for things that have changed locally in a receive
// only folder. // only folder.
ro := c.model.ReceiveOnlyChangedSize(folder) ro := snap.ReceiveOnlyChangedSize()
res["receiveOnlyChangedFiles"] = ro.Files res["receiveOnlyChangedFiles"] = ro.Files
res["receiveOnlyChangedDirectories"] = ro.Directories res["receiveOnlyChangedDirectories"] = ro.Directories
res["receiveOnlyChangedSymlinks"] = ro.Symlinks res["receiveOnlyChangedSymlinks"] = ro.Symlinks
@ -115,8 +132,8 @@ func (c *folderSummaryService) Summary(folder string) (map[string]interface{}, e
res["error"] = err.Error() res["error"] = err.Error()
} }
ourSeq, _ := c.model.CurrentSequence(folder) ourSeq := snap.Sequence(protocol.LocalDeviceID)
remoteSeq, _ := c.model.RemoteSequence(folder) remoteSeq := snap.Sequence(protocol.GlobalDeviceID)
res["version"] = ourSeq + remoteSeq // legacy res["version"] = ourSeq + remoteSeq // legacy
res["sequence"] = ourSeq + remoteSeq // new name res["sequence"] = ourSeq + remoteSeq // new name

View File

@ -90,21 +90,14 @@ type Model interface {
GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error) GetFolderVersions(folder string) (map[string][]versioner.FileVersion, error)
RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]string, error) RestoreFolderVersions(folder string, versions map[string]time.Time) (map[string]string, error)
LocalChangedFiles(folder string, page, perpage int) []db.FileInfoTruncated DBSnapshot(folder string) (*db.Snapshot, error)
NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated)
RemoteNeedFolderFiles(device protocol.DeviceID, folder string, page, perpage int) ([]db.FileInfoTruncated, error) FolderProgressBytesCompleted(folder string) int64
CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool)
CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool)
Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability Availability(folder string, file protocol.FileInfo, block protocol.BlockInfo) []Availability
GlobalSize(folder string) db.Counts
LocalSize(folder string) db.Counts
NeedSize(folder string) db.Counts
ReceiveOnlyChangedSize(folder string) db.Counts
CurrentSequence(folder string) (int64, bool)
RemoteSequence(folder string) (int64, bool)
Completion(device protocol.DeviceID, folder string) FolderCompletion Completion(device protocol.DeviceID, folder string) FolderCompletion
ConnectionStats() map[string]interface{} ConnectionStats() map[string]interface{}
DeviceStatistics() (map[string]stats.DeviceStatistics, error) DeviceStatistics() (map[string]stats.DeviceStatistics, error)
@ -764,7 +757,10 @@ func (m *model) Completion(device protocol.DeviceID, folder string) FolderComple
return FolderCompletion{} // Folder doesn't exist, so we hardly have any of it return FolderCompletion{} // Folder doesn't exist, so we hardly have any of it
} }
tot := rf.GlobalSize().Bytes snap := rf.Snapshot()
defer snap.Release()
tot := snap.GlobalSize().Bytes
if tot == 0 { if tot == 0 {
// Folder is empty, so we have all of it // Folder is empty, so we have all of it
return FolderCompletion{ return FolderCompletion{
@ -777,7 +773,7 @@ func (m *model) Completion(device protocol.DeviceID, folder string) FolderComple
m.pmut.RUnlock() m.pmut.RUnlock()
var need, items, fileNeed, downloaded, deletes int64 var need, items, fileNeed, downloaded, deletes int64
rf.WithNeedTruncated(device, func(f db.FileIntf) bool { snap.WithNeedTruncated(device, func(f db.FileIntf) bool {
ft := f.(db.FileInfoTruncated) ft := f.(db.FileInfoTruncated)
// If the file is deleted, we account it only in the deleted column. // If the file is deleted, we account it only in the deleted column.
@ -822,84 +818,19 @@ func (m *model) Completion(device protocol.DeviceID, folder string) FolderComple
} }
} }
func addSizeOfFile(s *db.Counts, f db.FileIntf) { // DBSnapshot returns a snapshot of the database content relevant to the given folder.
switch { func (m *model) DBSnapshot(folder string) (*db.Snapshot, error) {
case f.IsDeleted():
s.Deleted++
case f.IsDirectory():
s.Directories++
case f.IsSymlink():
s.Symlinks++
default:
s.Files++
}
s.Bytes += f.FileSize()
}
// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *model) GlobalSize(folder string) db.Counts {
m.fmut.RLock() m.fmut.RLock()
rf, ok := m.folderFiles[folder] rf, ok := m.folderFiles[folder]
m.fmut.RUnlock() m.fmut.RUnlock()
if ok { if !ok {
return rf.GlobalSize() return nil, errFolderMissing
} }
return db.Counts{} return rf.Snapshot(), nil
} }
// LocalSize returns the number of files, deleted files and total bytes for all func (m *model) FolderProgressBytesCompleted(folder string) int64 {
// files in the local folder. return m.progressEmitter.BytesCompleted(folder)
func (m *model) LocalSize(folder string) db.Counts {
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if ok {
return rf.LocalSize()
}
return db.Counts{}
}
// ReceiveOnlyChangedSize returns the number of files, deleted files and
// total bytes for all files that have changed locally in a receieve only
// folder.
func (m *model) ReceiveOnlyChangedSize(folder string) db.Counts {
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if ok {
return rf.ReceiveOnlyChangedSize()
}
return db.Counts{}
}
// NeedSize returns the number of currently needed files and their total size
// minus the amount that has already been downloaded.
func (m *model) NeedSize(folder string) db.Counts {
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
cfg := m.folderCfgs[folder]
m.fmut.RUnlock()
var result db.Counts
if ok {
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
if cfg.IgnoreDelete && f.IsDeleted() {
return true
}
addSizeOfFile(&result, f)
return true
})
}
result.Bytes -= m.progressEmitter.BytesCompleted(folder)
// This may happen if we are in progress of pulling files that were
// deleted globally after the pull started.
if result.Bytes < 0 {
result.Bytes = 0
}
l.Debugf("%v NeedSize(%q): %v", m, folder, result)
return result
} }
// NeedFolderFiles returns paginated list of currently needed files in // NeedFolderFiles returns paginated list of currently needed files in
@ -915,6 +846,8 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
return nil, nil, nil return nil, nil, nil
} }
snap := rf.Snapshot()
defer snap.Release()
var progress, queued, rest []db.FileInfoTruncated var progress, queued, rest []db.FileInfoTruncated
var seen map[string]struct{} var seen map[string]struct{}
@ -929,14 +862,14 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
seen = make(map[string]struct{}, len(progressNames)+len(queuedNames)) seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))
for i, name := range progressNames { for i, name := range progressNames {
if f, ok := rf.GetGlobalTruncated(name); ok { if f, ok := snap.GetGlobalTruncated(name); ok {
progress[i] = f progress[i] = f
seen[name] = struct{}{} seen[name] = struct{}{}
} }
} }
for i, name := range queuedNames { for i, name := range queuedNames {
if f, ok := rf.GetGlobalTruncated(name); ok { if f, ok := snap.GetGlobalTruncated(name); ok {
queued[i] = f queued[i] = f
seen[name] = struct{}{} seen[name] = struct{}{}
} }
@ -950,7 +883,7 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
} }
rest = make([]db.FileInfoTruncated, 0, perpage) rest = make([]db.FileInfoTruncated, 0, perpage)
rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool { snap.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
if cfg.IgnoreDelete && f.IsDeleted() { if cfg.IgnoreDelete && f.IsDeleted() {
return true return true
} }
@ -970,77 +903,6 @@ func (m *model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfo
return progress, queued, rest return progress, queued, rest
} }
// LocalChangedFiles returns a paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
func (m *model) LocalChangedFiles(folder string, page, perpage int) []db.FileInfoTruncated {
m.fmut.RLock()
rf, ok := m.folderFiles[folder]
fcfg := m.folderCfgs[folder]
m.fmut.RUnlock()
if !ok {
return nil
}
if fcfg.Type != config.FolderTypeReceiveOnly {
return nil
}
if rf.ReceiveOnlyChangedSize().TotalItems() == 0 {
return nil
}
files := make([]db.FileInfoTruncated, 0, perpage)
skip := (page - 1) * perpage
get := perpage
rf.WithHaveTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
if !f.IsReceiveOnlyChanged() {
return true
}
if skip > 0 {
skip--
return true
}
ft := f.(db.FileInfoTruncated)
files = append(files, ft)
get--
return get > 0
})
return files
}
// RemoteNeedFolderFiles returns paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
func (m *model) RemoteNeedFolderFiles(device protocol.DeviceID, folder string, page, perpage int) ([]db.FileInfoTruncated, error) {
m.fmut.RLock()
m.pmut.RLock()
err := m.checkDeviceFolderConnectedLocked(device, folder)
rf := m.folderFiles[folder]
m.pmut.RUnlock()
m.fmut.RUnlock()
if err != nil {
return nil, err
}
files := make([]db.FileInfoTruncated, 0, perpage)
skip := (page - 1) * perpage
get := perpage
rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
if skip > 0 {
skip--
return true
}
files = append(files, f.(db.FileInfoTruncated))
get--
return get > 0
})
return files, nil
}
// Index is called when a new device is connected and we receive their full index. // Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface. // Implements the protocol.Model interface.
func (m *model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) error { func (m *model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo) error {
@ -1752,7 +1614,9 @@ func (m *model) CurrentFolderFile(folder string, file string) (protocol.FileInfo
if !ok { if !ok {
return protocol.FileInfo{}, false return protocol.FileInfo{}, false
} }
return fs.Get(protocol.LocalDeviceID, file) snap := fs.Snapshot()
defer snap.Release()
return snap.Get(protocol.LocalDeviceID, file)
} }
func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) { func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) {
@ -1762,7 +1626,9 @@ func (m *model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo
if !ok { if !ok {
return protocol.FileInfo{}, false return protocol.FileInfo{}, false
} }
return fs.GetGlobal(file) snap := fs.Snapshot()
defer snap.Release()
return snap.GetGlobal(file)
} }
// Connection returns the current connection for device, and a boolean whether a connection was found. // Connection returns the current connection for device, and a boolean whether a connection was found.
@ -2075,7 +1941,9 @@ func (s *indexSender) sendIndexTo(ctx context.Context) error {
var err error var err error
var f protocol.FileInfo var f protocol.FileInfo
s.fset.WithHaveSequence(s.prevSequence+1, func(fi db.FileIntf) bool { snap := s.fset.Snapshot()
defer snap.Release()
snap.WithHaveSequence(s.prevSequence+1, func(fi db.FileIntf) bool {
if err = batch.flushIfFull(); err != nil { if err = batch.flushIfFull(); err != nil {
return false return false
} }
@ -2347,45 +2215,6 @@ func (m *model) Revert(folder string) {
runner.Revert() runner.Revert()
} }
// CurrentSequence returns the change version for the given folder.
// This is guaranteed to increment if the contents of the local folder has
// changed.
func (m *model) CurrentSequence(folder string) (int64, bool) {
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
m.fmut.RUnlock()
if !ok {
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
return 0, false
}
return fs.Sequence(protocol.LocalDeviceID), true
}
// RemoteSequence returns the change version for the given folder, as
// sent by remote peers. This is guaranteed to increment if the contents of
// the remote or global folder has changed.
func (m *model) RemoteSequence(folder string) (int64, bool) {
m.fmut.RLock()
fs, ok := m.folderFiles[folder]
cfg := m.folderCfgs[folder]
m.fmut.RUnlock()
if !ok {
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
return 0, false
}
var ver int64
for _, device := range cfg.Devices {
ver += fs.Sequence(device.DeviceID)
}
return ver, true
}
func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} { func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly bool) map[string]interface{} {
m.fmut.RLock() m.fmut.RLock()
files, ok := m.folderFiles[folder] files, ok := m.folderFiles[folder]
@ -2402,7 +2231,9 @@ func (m *model) GlobalDirectoryTree(folder, prefix string, levels int, dirsonly
prefix = prefix + sep prefix = prefix + sep
} }
files.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool { snap := files.Snapshot()
defer snap.Release()
snap.WithPrefixedGlobalTruncated(prefix, func(fi db.FileIntf) bool {
f := fi.(db.FileInfoTruncated) f := fi.(db.FileInfoTruncated)
// Don't include the prefix itself. // Don't include the prefix itself.
@ -2514,8 +2345,10 @@ func (m *model) Availability(folder string, file protocol.FileInfo, block protoc
} }
var availabilities []Availability var availabilities []Availability
snap := fs.Snapshot()
defer snap.Release()
next: next:
for _, device := range fs.Availability(file.Name) { for _, device := range snap.Availability(file.Name) {
for _, pausedFolder := range m.remotePausedFolders[device] { for _, pausedFolder := range m.remotePausedFolders[device] {
if pausedFolder == folder { if pausedFolder == folder {
continue next continue next

View File

@ -2146,8 +2146,8 @@ func TestIssue3028(t *testing.T) {
// Get a count of how many files are there now // Get a count of how many files are there now
locorigfiles := m.LocalSize("default").Files locorigfiles := localSize(t, m, "default").Files
globorigfiles := m.GlobalSize("default").Files globorigfiles := globalSize(t, m, "default").Files
// Delete and rescan specifically these two // Delete and rescan specifically these two
@ -2158,8 +2158,8 @@ func TestIssue3028(t *testing.T) {
// Verify that the number of files decreased by two and the number of // Verify that the number of files decreased by two and the number of
// deleted files increases by two // deleted files increases by two
loc := m.LocalSize("default") loc := localSize(t, m, "default")
glob := m.GlobalSize("default") glob := globalSize(t, m, "default")
if loc.Files != locorigfiles-2 { if loc.Files != locorigfiles-2 {
t.Errorf("Incorrect local accounting; got %d current files, expected %d", loc.Files, locorigfiles-2) t.Errorf("Incorrect local accounting; got %d current files, expected %d", loc.Files, locorigfiles-2)
} }
@ -2439,10 +2439,12 @@ func TestIssue3496(t *testing.T) {
fs := m.folderFiles["default"] fs := m.folderFiles["default"]
m.fmut.RUnlock() m.fmut.RUnlock()
var localFiles []protocol.FileInfo var localFiles []protocol.FileInfo
fs.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool { snap := fs.Snapshot()
snap.WithHave(protocol.LocalDeviceID, func(i db.FileIntf) bool {
localFiles = append(localFiles, i.(protocol.FileInfo)) localFiles = append(localFiles, i.(protocol.FileInfo))
return true return true
}) })
snap.Release()
// Mark all files as deleted and fake it as update from device1 // Mark all files as deleted and fake it as update from device1
@ -2482,7 +2484,7 @@ func TestIssue3496(t *testing.T) {
t.Log(comp) t.Log(comp)
// Check that NeedSize does the correct thing // Check that NeedSize does the correct thing
need := m.NeedSize("default") need := needSize(t, m, "default")
if need.Files != 1 || need.Bytes != 1234 { if need.Files != 1 || need.Bytes != 1234 {
// The one we added synthetically above // The one we added synthetically above
t.Errorf("Incorrect need size; %d, %d != 1, 1234", need.Files, need.Bytes) t.Errorf("Incorrect need size; %d, %d != 1, 1234", need.Files, need.Bytes)

View File

@ -9,6 +9,7 @@ package model
import ( import (
"io/ioutil" "io/ioutil"
"os" "os"
"testing"
"time" "time"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
@ -171,3 +172,40 @@ func (c *alwaysChanged) Seen(fs fs.Filesystem, name string) bool {
func (c *alwaysChanged) Changed() bool { func (c *alwaysChanged) Changed() bool {
return true return true
} }
func localSize(t *testing.T, m Model, folder string) db.Counts {
t.Helper()
snap := dbSnapshot(t, m, folder)
defer snap.Release()
return snap.LocalSize()
}
func globalSize(t *testing.T, m Model, folder string) db.Counts {
t.Helper()
snap := dbSnapshot(t, m, folder)
defer snap.Release()
return snap.GlobalSize()
}
func receiveOnlyChangedSize(t *testing.T, m Model, folder string) db.Counts {
t.Helper()
snap := dbSnapshot(t, m, folder)
defer snap.Release()
return snap.ReceiveOnlyChangedSize()
}
func needSize(t *testing.T, m Model, folder string) db.Counts {
t.Helper()
snap := dbSnapshot(t, m, folder)
defer snap.Release()
return snap.NeedSize()
}
func dbSnapshot(t *testing.T, m Model, folder string) *db.Snapshot {
t.Helper()
snap, err := m.DBSnapshot(folder)
if err != nil {
t.Fatal(err)
}
return snap
}

View File

@ -88,7 +88,12 @@ func (s *Service) reportData(urVersion int, preview bool) map[string]interface{}
var totFiles, maxFiles int var totFiles, maxFiles int
var totBytes, maxBytes int64 var totBytes, maxBytes int64
for folderID := range s.cfg.Folders() { for folderID := range s.cfg.Folders() {
global := s.model.GlobalSize(folderID) snap, err := s.model.DBSnapshot(folderID)
if err != nil {
continue
}
global := snap.GlobalSize()
snap.Release()
totFiles += int(global.Files) totFiles += int(global.Files)
totBytes += global.Bytes totBytes += global.Bytes
if int(global.Files) > maxFiles { if int(global.Files) > maxFiles {