// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/thejerf/suture/v4"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/svcutil"
)
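// indexSender is a suture service that sends the index data for a single
// folder to a single remote device. When starting from sequence zero it
// sends one full Index message; after that it sends only incremental
// IndexUpdate messages.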
type indexSender struct {
	conn                     protocol.Connection
	folder                   string
	folderIsReceiveEncrypted bool
	dev                      string
	fset                     *db.FileSet
	prevSequence             int64
	evLogger                 events.Logger
	connClosed               chan struct{}
	done                     chan struct{}
	token                    suture.ServiceToken
	pauseChan                chan struct{}
	resumeChan               chan *db.FileSet
}
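// Serve sends an initial batch of index data and then loops, sending
// incremental updates whenever the local index for this folder advances
// past prevSequence, until the context is cancelled or the connection is
// closed.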
func (s *indexSender) Serve(ctx context.Context) (err error) {
	l.Debugf("Starting indexSender for %s to %s at %s (slv=%d)", s.folder, s.conn.ID(), s.conn, s.prevSequence)
	defer func() {
		close(s.done)
		err = svcutil.NoRestartErr(err)
		l.Debugf("Exiting indexSender for %s to %s at %s: %v", s.folder, s.conn.ID(), s.conn, err)
	}()

	// We need to send one index, regardless of whether there is something
	// to send or not.
	err = s.sendIndexTo(ctx)

	// Subscribe to LocalIndexUpdated (we have new information to send) and
	// DeviceDisconnected (it might be us who disconnected, so we should
	// exit).
	sub := s.evLogger.Subscribe(events.LocalIndexUpdated | events.DeviceDisconnected)
	defer sub.Unsubscribe()

	paused := false
	evChan := sub.C()
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	for err == nil {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-s.connClosed:
			return nil
		default:
		}

		// While we have sent a sequence at least equal to the one
		// currently in the database, wait for the local index to update.
		// The local index may update for folders other than the one we
		// are sending for.
		if s.fset.Sequence(protocol.LocalDeviceID) <= s.prevSequence {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-s.connClosed:
				return nil
			case <-evChan:
			case <-ticker.C:
			case <-s.pauseChan:
				paused = true
			case s.fset = <-s.resumeChan:
				paused = false
			}

			continue
		}

		if !paused {
			err = s.sendIndexTo(ctx)
		}

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		time.Sleep(250 * time.Millisecond)
	}

	return err
}
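// resume hands the index sender a (possibly new) FileSet and lets it
// continue sending. It is safe to call on an index sender that has
// already stopped.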
func (s *indexSender) resume(fset *db.FileSet) {
	select {
	case <-s.done:
	case s.resumeChan <- fset:
	}
}
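// pause makes the index sender stop sending until resume is called. It is
// safe to call on an index sender that has already stopped.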
func (s *indexSender) pause() {
	select {
	case <-s.done:
	case s.pauseChan <- struct{}{}:
	}
}
// sendIndexTo sends file infos with a sequence number higher than prevSequence
// and updates prevSequence to the highest sequence number that was sent.
func (s *indexSender) sendIndexTo(ctx context.Context) error {
	initial := s.prevSequence == 0
	batch := newFileInfoBatch(nil)
	// The first batch on a connection that starts from sequence zero goes
	// out as a full Index message; all later batches are IndexUpdates.
	batch.flushFn = func(fs []protocol.FileInfo) error {
		l.Debugf("%v: Sending %d files (<%d bytes)", s, len(batch.infos), batch.size)
		if initial {
			initial = false
			return s.conn.Index(ctx, s.folder, fs)
		}
		return s.conn.IndexUpdate(ctx, s.folder, fs)
	}

	var err error
	var f protocol.FileInfo
	snap := s.fset.Snapshot()
	defer snap.Release()
	previousWasDelete := false
	snap.WithHaveSequence(s.prevSequence+1, func(fi protocol.FileIntf) bool {
		// This is to make sure that renames (which are an add followed by
		// a delete) land in the same batch. Even if the batch is full, we
		// allow a last delete to slip in: we do this by making sure that
		// the batch ends with a non-delete, or that the last item in the
		// batch is already a delete.
		if batch.full() && (!fi.IsDeleted() || previousWasDelete) {
			if err = batch.flush(); err != nil {
				return false
			}
		}

		if shouldDebug() {
			if fi.SequenceNo() < s.prevSequence+1 {
				panic(fmt.Sprintln("sequence lower than requested, got:", fi.SequenceNo(), ", asked to start at:", s.prevSequence+1))
			}
		}

		if f.Sequence > 0 && fi.SequenceNo() <= f.Sequence {
			l.Warnln("Non-increasing sequence detected: Checking and repairing the db...")
			// Abort this round of index sending - the next one will pick
			// up from the last successful one with the repaired db.
			defer func() {
				if fixed, dbErr := s.fset.RepairSequence(); dbErr != nil {
					l.Warnln("Failed repairing sequence entries:", dbErr)
					panic("Failed repairing sequence entries")
				} else {
					s.evLogger.Log(events.Failure, "detected and repaired non-increasing sequence")
					l.Infof("Repaired %v sequence entries in database", fixed)
				}
			}()
			return false
		}

		f = fi.(protocol.FileInfo)

		// If this is a folder receiving encrypted files only, we
		// mustn't ever send locally changed file infos. Those aren't
		// encrypted and thus would be a protocol error at the remote.
		if s.folderIsReceiveEncrypted && fi.IsReceiveOnlyChanged() {
			return true
		}

		// Mark the file as invalid if any of the local bad stuff flags are set.
		f.RawInvalid = f.IsInvalid()
		// If the file is marked LocalReceive (i.e., changed locally on a
		// receive-only folder) we do not want it to ever become the
		// globally best version, invalid or not.
		if f.IsReceiveOnlyChanged() {
			f.Version = protocol.Vector{}
		}

		// never sent externally
		f.LocalFlags = 0
		f.VersionHash = nil

		previousWasDelete = f.IsDeleted()

		batch.append(f)
		return true
	})
	if err != nil {
		return err
	}

	err = batch.flush()

	// f.Sequence is still zero if there was nothing to send at all.
	if f.Sequence == 0 {
		return err
	}

	s.prevSequence = f.Sequence
	return err
}
func (s *indexSender) String() string {
	return fmt.Sprintf("indexSender@%p for %s to %s at %s", s, s.folder, s.conn.ID(), s.conn)
}
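// indexSenderRegistry tracks the index senders, running or pending, for
// all folders shared with a single remote device.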
type indexSenderRegistry struct {
	deviceID     protocol.DeviceID
	sup          *suture.Supervisor
	evLogger     events.Logger
	conn         protocol.Connection
	closed       chan struct{}
	indexSenders map[string]*indexSender
	startInfos   map[string]*indexSenderStartInfo
	mut          sync.Mutex
}
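// newIndexSenderRegistry returns a registry for the device at the remote
// end of conn. The closed channel signals connection shutdown to the
// senders, which are run under the given supervisor.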
func newIndexSenderRegistry(conn protocol.Connection, closed chan struct{}, sup *suture.Supervisor, evLogger events.Logger) *indexSenderRegistry {
	return &indexSenderRegistry{
		deviceID:     conn.ID(),
		conn:         conn,
		closed:       closed,
		sup:          sup,
		evLogger:     evLogger,
		indexSenders: make(map[string]*indexSender),
		startInfos:   make(map[string]*indexSenderStartInfo),
		mut:          sync.Mutex{},
	}
}
// add starts an index sender for the given folder.
// If an index sender is already running, it will be stopped first.
func (r *indexSenderRegistry) add(folder config.FolderConfiguration, fset *db.FileSet, startInfo *indexSenderStartInfo) {
	r.mut.Lock()
	r.addLocked(folder, fset, startInfo)
	r.mut.Unlock()
}
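// addLocked starts an index sender for the given folder, choosing between
// a delta or full index transfer based on the exchanged index IDs and
// sequence numbers. The caller must hold r.mut.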
func (r *indexSenderRegistry) addLocked(folder config.FolderConfiguration, fset *db.FileSet, startInfo *indexSenderStartInfo) {
	myIndexID := fset.IndexID(protocol.LocalDeviceID)
	mySequence := fset.Sequence(protocol.LocalDeviceID)
	var startSequence int64

	// This is the other side's description of what it knows
	// about us. Let's check to see if we can start sending index
	// updates directly or need to send the index from the start...

	if startInfo.local.IndexID == myIndexID {
		// They say they've seen our index ID before, so we can
		// send a delta update only.

		if startInfo.local.MaxSequence > mySequence {
			// Safety check. They claim to have more or newer
			// index data than we have - either we have lost
			// index data, or reset the index without resetting
			// the IndexID, or something else weird has
			// happened. We send a full index to reset the
			// situation.
			l.Infof("Device %v folder %s is delta index compatible, but seems out of sync with reality", r.deviceID, folder.Description())
			startSequence = 0
		} else {
			l.Debugf("Device %v folder %s is delta index compatible (mlv=%d)", r.deviceID, folder.Description(), startInfo.local.MaxSequence)
			startSequence = startInfo.local.MaxSequence
		}
	} else if startInfo.local.IndexID != 0 {
		// They say they've seen an index ID from us, but it's
		// not the right one. Either they are confused or we
		// must have reset our database since last talking to
		// them. We'll start with a full index transfer.
		l.Infof("Device %v folder %s has mismatching index ID for us (%v != %v)", r.deviceID, folder.Description(), startInfo.local.IndexID, myIndexID)
		startSequence = 0
	} else {
		l.Debugf("Device %v folder %s has no index ID for us", r.deviceID, folder.Description())
	}

	// This is the other side's description of themselves. We
	// check to see that it matches the IndexID we have on file,
	// otherwise we drop our old index data and expect to get a
	// completely new set.

	theirIndexID := fset.IndexID(r.deviceID)
	if startInfo.remote.IndexID == 0 {
		// They're not announcing an index ID. This means they
		// do not support delta indexes and we should clear any
		// information we have from them before accepting their
		// index, which will presumably be a full index.
		l.Debugf("Device %v folder %s does not announce an index ID", r.deviceID, folder.Description())
		fset.Drop(r.deviceID)
	} else if startInfo.remote.IndexID != theirIndexID {
		// The index ID we have on file is not what they're
		// announcing. They must have reset their database and
		// will probably send us a full index. We drop any
		// information we have and remember this new index ID
		// instead.
		l.Infof("Device %v folder %s has a new index ID (%v)", r.deviceID, folder.Description(), startInfo.remote.IndexID)
		fset.Drop(r.deviceID)
		fset.SetIndexID(r.deviceID, startInfo.remote.IndexID)
	}

	if is, ok := r.indexSenders[folder.ID]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexSenders, folder.ID)
	}
	if _, ok := r.startInfos[folder.ID]; ok {
		delete(r.startInfos, folder.ID)
	}

	is := &indexSender{
		conn:                     r.conn,
		connClosed:               r.closed,
		done:                     make(chan struct{}),
		folder:                   folder.ID,
		folderIsReceiveEncrypted: folder.Type == config.FolderTypeReceiveEncrypted,
		fset:                     fset,
		prevSequence:             startSequence,
		evLogger:                 r.evLogger,
		pauseChan:                make(chan struct{}),
		resumeChan:               make(chan *db.FileSet),
	}
	is.token = r.sup.Add(is)
	r.indexSenders[folder.ID] = is
}
// addPending stores the given info to start an index sender once resume is
// called for this folder.
// If an index sender is already running, it will be stopped.
func (r *indexSenderRegistry) addPending(folder config.FolderConfiguration, startInfo *indexSenderStartInfo) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if is, ok := r.indexSenders[folder.ID]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexSenders, folder.ID)
	}
	r.startInfos[folder.ID] = startInfo
}
// remove stops a running index sender or removes one pending to be started.
// It is a noop if the folder isn't known.
func (r *indexSenderRegistry) remove(folder string) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if is, ok := r.indexSenders[folder]; ok {
		r.sup.RemoveAndWait(is.token, 0)
		delete(r.indexSenders, folder)
	}
	delete(r.startInfos, folder)
}
// removeAllExcept stops all running index senders and removes those
// pending to be started, except for the given folders.
func (r *indexSenderRegistry) removeAllExcept(except map[string]struct{}) {
	r.mut.Lock()
	defer r.mut.Unlock()

	for folder, is := range r.indexSenders {
		if _, ok := except[folder]; !ok {
			r.sup.RemoveAndWait(is.token, 0)
			delete(r.indexSenders, folder)
		}
	}
	for folder := range r.startInfos {
		if _, ok := except[folder]; !ok {
			delete(r.startInfos, folder)
		}
	}
}
// pause stops a running index sender.
// It is a noop if the folder isn't known or has not been started yet.
func (r *indexSenderRegistry) pause(folder string) {
	r.mut.Lock()
	defer r.mut.Unlock()

	if is, ok := r.indexSenders[folder]; ok {
		is.pause()
	}
}
// resume unpauses an already running index sender or starts it, if it was
// added while paused.
// It is a noop if the folder isn't known.
func (r *indexSenderRegistry) resume(folder config.FolderConfiguration, fset *db.FileSet) {
	r.mut.Lock()
	defer r.mut.Unlock()

	is, isOk := r.indexSenders[folder.ID]
	if info, ok := r.startInfos[folder.ID]; ok {
		if isOk {
			r.sup.RemoveAndWait(is.token, 0)
			delete(r.indexSenders, folder.ID)
		}
		r.addLocked(folder, fset, info)
		delete(r.startInfos, folder.ID)
	} else if isOk {
		is.resume(fset)
	}
}
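// indexSenderStartInfo carries the remote device's description of us
// (local) and of itself (remote) for a folder, which determines whether
// index sending can resume from a previous sequence or must restart from
// zero.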
type indexSenderStartInfo struct {
	local, remote protocol.Device
}