// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

// Package db provides a set type to track local/remote files with newness
// checks. We must do a certain amount of normalization in here. We will get
// fed paths with either native or wire-format separators and encodings
// depending on who calls us. We transform paths to wire-format (NFC and
// slashes) on the way to the database, and transform to native format
// (varying separator and encoding) on the way back out.
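//
// For example (purely illustrative): a caller on Windows might hand us the
// native path `foo\bär.txt` with a decomposed (NFD) umlaut; we store it as
// `foo/bär.txt` in NFC, and convert back to the native separator and
// encoding on the way out again.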
package db

import (
	"time"

	"github.com/syncthing/syncthing/lib/db/backend"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
)
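
// A FileSet tracks the local and remote file lists of a single folder on top
// of the Lowlevel database, together with cached metadata such as counts and
// sequence numbers.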
type FileSet struct {
	folder string
	fs     fs.Filesystem
	db     *Lowlevel
	meta   *metadataTracker

	updateMutex sync.Mutex // protects database updates and the corresponding metadata changes
}

// FileIntf is the set of methods implemented by both protocol.FileInfo and
// FileInfoTruncated.
type FileIntf interface {
	FileSize() int64
	FileName() string
	FileLocalFlags() uint32
	IsDeleted() bool
	IsInvalid() bool
	IsIgnored() bool
	IsUnsupported() bool
	MustRescan() bool
	IsReceiveOnlyChanged() bool
	IsDirectory() bool
	IsSymlink() bool
	ShouldConflict() bool
	HasPermissionBits() bool
	SequenceNo() int64
	BlockSize() int
	FileVersion() protocol.Vector
	FileType() protocol.FileInfoType
	FilePermissions() uint32
	FileModifiedBy() protocol.ShortID
	ModTime() time.Time
}

// The Iterator is called with either a protocol.FileInfo or a
// FileInfoTruncated (depending on the method) and returns true to
// continue iteration, false to stop.
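//
// A minimal, purely illustrative callback that counts items and never stops
// early:
//
//	n := 0
//	var it Iterator = func(f FileIntf) bool {
//		n++
//		return true // false would stop the iteration
//	}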
type Iterator func(f FileIntf) bool

func NewFileSet(folder string, fs fs.Filesystem, db *Lowlevel) *FileSet {
	return &FileSet{
		folder:      folder,
		fs:          fs,
		db:          db,
		meta:        db.loadMetadataTracker(folder),
		updateMutex: sync.NewMutex(),
	}
}

func (s *FileSet) Drop(device protocol.DeviceID) {
	l.Debugf("%s Drop(%v)", s.folder, device)

	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()

	if err := s.db.dropDeviceFolder(device[:], []byte(s.folder), s.meta); backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}

	if device == protocol.LocalDeviceID {
		s.meta.resetCounts(device)
		// We deliberately do not reset the sequence number here. Dropping
		// all files for the local device ID only happens in testing - which
		// expects the sequence to be retained, like an old Replace() of all
		// files would do. However, if we ever did it "in production" we
		// would anyway want to retain the sequence for delta indexes to be
		// happy.
	} else {
		// Here, on the other hand, we want to make sure that any file
		// announced from the remote is newer than our current sequence
		// number.
		s.meta.resetAll(device)
	}

	t, err := s.db.newReadWriteTransaction()
	if backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
	defer t.close()

	if err := s.meta.toDB(t, []byte(s.folder)); backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
	if err := t.Commit(); backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
}

func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
	l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))

	// do not modify fs in place, it is still used in outer scope
	fs = append([]protocol.FileInfo(nil), fs...)

	// If one file info is present multiple times, only keep the last.
	// Updating the same file multiple times is problematic, because the
	// previous updates won't yet be represented in the db when we update it
	// again. Additionally, even if that problem were taken care of, it would
	// be pointless because we remove the previously added file info again
	// right away.
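	// For example (purely illustrative): if fs contains two infos for the
	// same name, only the later one survives the call below.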
	fs = normalizeFilenamesAndDropDuplicates(fs)

	s.updateMutex.Lock()
	defer s.updateMutex.Unlock()

	if device == protocol.LocalDeviceID {
		// For the local device we have a bunch of metadata to track.
		if err := s.db.updateLocalFiles([]byte(s.folder), fs, s.meta); err != nil && !backend.IsClosed(err) {
			panic(err)
		}
		return
	}
	// Easy case, just update the files and we're done.
	if err := s.db.updateRemoteFiles([]byte(s.folder), device[:], fs, s.meta); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

type Snapshot struct {
	folder string
	t      readOnlyTransaction
	meta   *countsMap
}

func (s *FileSet) Snapshot() *Snapshot {
	t, err := s.db.newReadOnlyTransaction()
	if err != nil {
		panic(err)
	}
	return &Snapshot{
		folder: s.folder,
		t:      t,
		meta:   s.meta.Snapshot(),
	}
}

func (s *Snapshot) Release() {
	s.t.close()
}
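
// A typical, purely illustrative usage pattern: take a snapshot, iterate over
// it, and always release it again afterwards:
//
//	snap := set.Snapshot()
//	defer snap.Release()
//	snap.WithHaveTruncated(protocol.LocalDeviceID, func(f FileIntf) bool {
//		// inspect f ...
//		return true // keep iterating
//	})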

func (s *Snapshot) WithNeed(device protocol.DeviceID, fn Iterator) {
	l.Debugf("%s WithNeed(%v)", s.folder, device)
	if err := s.t.withNeed([]byte(s.folder), device[:], false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *Snapshot) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
	l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
	if err := s.t.withNeed([]byte(s.folder), device[:], true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *Snapshot) WithHave(device protocol.DeviceID, fn Iterator) {
	l.Debugf("%s WithHave(%v)", s.folder, device)
	if err := s.t.withHave([]byte(s.folder), device[:], nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *Snapshot) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
	l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
	if err := s.t.withHave([]byte(s.folder), device[:], nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *Snapshot) WithHaveSequence(startSeq int64, fn Iterator) {
	l.Debugf("%s WithHaveSequence(%v)", s.folder, startSeq)
	if err := s.t.withHaveSequence([]byte(s.folder), startSeq, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

// Except for an item with a path equal to prefix, only children of prefix are iterated.
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
func (s *Snapshot) WithPrefixedHaveTruncated(device protocol.DeviceID, prefix string, fn Iterator) {
	l.Debugf(`%s WithPrefixedHaveTruncated(%v, "%v")`, s.folder, device, prefix)
	if err := s.t.withHave([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *Snapshot) WithGlobal(fn Iterator) {
	l.Debugf("%s WithGlobal()", s.folder)
	if err := s.t.withGlobal([]byte(s.folder), nil, false, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *Snapshot) WithGlobalTruncated(fn Iterator) {
	l.Debugf("%s WithGlobalTruncated()", s.folder)
	if err := s.t.withGlobal([]byte(s.folder), nil, true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

// Except for an item with a path equal to prefix, only children of prefix are iterated.
// E.g. for prefix "dir", "dir/file" is iterated, but "dir.file" is not.
func (s *Snapshot) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
	l.Debugf(`%s WithPrefixedGlobalTruncated("%v")`, s.folder, prefix)
	if err := s.t.withGlobal([]byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *Snapshot) Get(device protocol.DeviceID, file string) (protocol.FileInfo, bool) {
	f, ok, err := s.t.getFile([]byte(s.folder), device[:], []byte(osutil.NormalizedFilename(file)))
	if backend.IsClosed(err) {
		return protocol.FileInfo{}, false
	} else if err != nil {
		panic(err)
	}
	f.Name = osutil.NativeFilename(f.Name)
	return f, ok
}

func (s *Snapshot) GetGlobal(file string) (protocol.FileInfo, bool) {
	_, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), false)
	if backend.IsClosed(err) {
		return protocol.FileInfo{}, false
	} else if err != nil {
		panic(err)
	}
	if !ok {
		return protocol.FileInfo{}, false
	}
	f := fi.(protocol.FileInfo)
	f.Name = osutil.NativeFilename(f.Name)
	return f, true
}

func (s *Snapshot) GetGlobalTruncated(file string) (FileInfoTruncated, bool) {
	_, fi, ok, err := s.t.getGlobal(nil, []byte(s.folder), []byte(osutil.NormalizedFilename(file)), true)
	if backend.IsClosed(err) {
		return FileInfoTruncated{}, false
	} else if err != nil {
		panic(err)
	}
	if !ok {
		return FileInfoTruncated{}, false
	}
	f := fi.(FileInfoTruncated)
	f.Name = osutil.NativeFilename(f.Name)
	return f, true
}

func (s *Snapshot) Availability(file string) []protocol.DeviceID {
	av, err := s.t.availability([]byte(s.folder), []byte(osutil.NormalizedFilename(file)))
	if backend.IsClosed(err) {
		return nil
	} else if err != nil {
		panic(err)
	}
	return av
}

func (s *Snapshot) Sequence(device protocol.DeviceID) int64 {
	return s.meta.Counts(device, 0).Sequence
}

// RemoteSequence returns the change version for the given folder, as
// sent by remote peers. This is guaranteed to increment if the contents of
// the remote or global folder have changed.
func (s *Snapshot) RemoteSequence() int64 {
	var ver int64

	for _, device := range s.meta.devices() {
		ver += s.Sequence(device)
	}

	return ver
}
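
// LocalSize returns the aggregated counts (files, bytes, etc.) of the local
// folder contents, including items changed locally in a receive-only folder.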
func (s *Snapshot) LocalSize() Counts {
	local := s.meta.Counts(protocol.LocalDeviceID, 0)
	return local.Add(s.ReceiveOnlyChangedSize())
}

func (s *Snapshot) ReceiveOnlyChangedSize() Counts {
	return s.meta.Counts(protocol.LocalDeviceID, protocol.FlagLocalReceiveOnly)
}

func (s *Snapshot) GlobalSize() Counts {
	global := s.meta.Counts(protocol.GlobalDeviceID, 0)
	recvOnlyChanged := s.meta.Counts(protocol.GlobalDeviceID, protocol.FlagLocalReceiveOnly)
	return global.Add(recvOnlyChanged)
}

func (s *Snapshot) NeedSize(device protocol.DeviceID) Counts {
	return s.meta.Counts(device, needFlag)
}

// LocalChangedFiles returns a paginated list of files that were changed locally.
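// Pagination is one-based: for example, page 2 with perpage 50 skips the
// first 50 matching entries and returns at most the next 50.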
func (s *Snapshot) LocalChangedFiles(page, perpage int) []FileInfoTruncated {
	if s.ReceiveOnlyChangedSize().TotalItems() == 0 {
		return nil
	}

	files := make([]FileInfoTruncated, 0, perpage)

	skip := (page - 1) * perpage
	get := perpage

	s.WithHaveTruncated(protocol.LocalDeviceID, func(f FileIntf) bool {
		if !f.IsReceiveOnlyChanged() {
			return true
		}
		if skip > 0 {
			skip--
			return true
		}
		ft := f.(FileInfoTruncated)
		files = append(files, ft)
		get--
		return get > 0
	})

	return files
}

// RemoteNeedFolderFiles returns a paginated list of files currently needed by
// the given remote device, using the same one-based pagination as
// LocalChangedFiles.
func (s *Snapshot) RemoteNeedFolderFiles(device protocol.DeviceID, page, perpage int) []FileInfoTruncated {
	files := make([]FileInfoTruncated, 0, perpage)
	skip := (page - 1) * perpage
	get := perpage
	s.WithNeedTruncated(device, func(f FileIntf) bool {
		if skip > 0 {
			skip--
			return true
		}
		files = append(files, f.(FileInfoTruncated))
		get--
		return get > 0
	})
	return files
}

func (s *Snapshot) WithBlocksHash(hash []byte, fn Iterator) {
	l.Debugf(`%s WithBlocksHash("%x")`, s.folder, hash)
	if err := s.t.withBlocksHash([]byte(s.folder), hash, nativeFileIterator(fn)); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *FileSet) Sequence(device protocol.DeviceID) int64 {
	return s.meta.Sequence(device)
}
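
// IndexID returns the index ID for the given device and this folder. For the
// local device, a new index ID is generated and stored on first use.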
func (s *FileSet) IndexID(device protocol.DeviceID) protocol.IndexID {
	id, err := s.db.getIndexID(device[:], []byte(s.folder))
	if backend.IsClosed(err) {
		return 0
	} else if err != nil {
		panic(err)
	}
	if id == 0 && device == protocol.LocalDeviceID {
		// No index ID set yet. We create one now.
		id = protocol.NewIndexID()
		err := s.db.setIndexID(device[:], []byte(s.folder), id)
		if backend.IsClosed(err) {
			return 0
		} else if err != nil {
			panic(err)
		}
	}
	return id
}

func (s *FileSet) SetIndexID(device protocol.DeviceID, id protocol.IndexID) {
	if device == protocol.LocalDeviceID {
		panic("do not explicitly set index ID for local device")
	}
	if err := s.db.setIndexID(device[:], []byte(s.folder), id); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}

func (s *FileSet) MtimeFS() *fs.MtimeFS {
	prefix, err := s.db.keyer.GenerateMtimesKey(nil, []byte(s.folder))
	if backend.IsClosed(err) {
		return nil
	} else if err != nil {
		panic(err)
	}
	kv := NewNamespacedKV(s.db, string(prefix))
	return fs.NewMtimeFS(s.fs, kv)
}

func (s *FileSet) ListDevices() []protocol.DeviceID {
	return s.meta.devices()
}

func (s *FileSet) RepairSequence() (int, error) {
	s.updateAndGCMutexLock() // Ensures consistent locking order
	defer s.updateMutex.Unlock()
	defer s.db.gcMut.RUnlock()
	return s.db.repairSequenceGCLocked(s.folder, s.meta)
}

func (s *FileSet) updateAndGCMutexLock() {
	s.updateMutex.Lock()
	s.db.gcMut.RLock()
}

// DropFolder clears out all information related to the given folder from the
// database.
func DropFolder(db *Lowlevel, folder string) {
	droppers := []func([]byte) error{
		db.dropFolder,
		db.dropMtimes,
		db.dropFolderMeta,
		db.folderIdx.Delete,
	}
	for _, drop := range droppers {
		if err := drop([]byte(folder)); backend.IsClosed(err) {
			return
		} else if err != nil {
			panic(err)
		}
	}
}

// DropDeltaIndexIDs removes all delta index IDs from the database.
// This will cause a full index transmission on the next connection.
func DropDeltaIndexIDs(db *Lowlevel) {
	dbi, err := db.NewPrefixIterator([]byte{KeyTypeIndexID})
	if backend.IsClosed(err) {
		return
	} else if err != nil {
		panic(err)
	}
	defer dbi.Release()
	for dbi.Next() {
		if err := db.Delete(dbi.Key()); err != nil && !backend.IsClosed(err) {
			panic(err)
		}
	}
	if err := dbi.Error(); err != nil && !backend.IsClosed(err) {
		panic(err)
	}
}
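
// normalizeFilenamesAndDropDuplicates converts all names to wire format (NFC,
// forward slashes) and, where the same normalized name occurs more than once,
// keeps only the last occurrence.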
func normalizeFilenamesAndDropDuplicates(fs []protocol.FileInfo) []protocol.FileInfo {
	positions := make(map[string]int, len(fs))
	for i, f := range fs {
		norm := osutil.NormalizedFilename(f.Name)
		if pos, ok := positions[norm]; ok {
			fs[pos] = protocol.FileInfo{}
		}
		positions[norm] = i
		fs[i].Name = norm
	}
	for i := 0; i < len(fs); {
		if fs[i].Name == "" {
			fs = append(fs[:i], fs[i+1:]...)
			continue
		}
		i++
	}
	return fs
}

func nativeFileIterator(fn Iterator) Iterator {
	return func(fi FileIntf) bool {
		switch f := fi.(type) {
		case protocol.FileInfo:
			f.Name = osutil.NativeFilename(f.Name)
			return fn(f)
		case FileInfoTruncated:
			f.Name = osutil.NativeFilename(f.Name)
			return fn(f)
		default:
			panic("unknown interface type")
		}
	}
}