2014-12-08 13:02:27 +00:00
|
|
|
// Copyright (C) 2014 The Syncthing Authors.
|
|
|
|
//
|
2015-03-07 20:36:35 +00:00
|
|
|
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
2017-02-09 06:52:18 +00:00
|
|
|
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
2014-12-08 13:02:27 +00:00
|
|
|
|
2014-11-16 23:18:59 +00:00
|
|
|
package model
|
|
|
|
|
|
|
|
import (
|
2019-11-21 07:41:15 +00:00
|
|
|
"context"
|
2015-06-03 07:47:39 +00:00
|
|
|
"fmt"
|
2014-11-16 23:18:59 +00:00
|
|
|
"time"
|
|
|
|
|
2015-08-06 09:29:25 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/config"
|
|
|
|
"github.com/syncthing/syncthing/lib/events"
|
2016-04-15 10:59:41 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/protocol"
|
2015-08-06 09:29:25 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/sync"
|
2014-11-16 23:18:59 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// ProgressEmitter tracks the progress of all ongoing pulls, periodically
// emits DownloadProgress events, and reports block availability of temp
// files to connected peers via DownloadProgress messages.
type ProgressEmitter struct {
	cfg      config.Wrapper
	registry map[string]map[string]*sharedPullerState // folder: name: puller
	interval time.Duration
	// minBlocks is the minimum block count a file must have before its
	// progress is advertised to peers (set from TempIndexMinBlocks).
	minBlocks          int
	sentDownloadStates map[protocol.DeviceID]*sentDownloadState // States representing what we've sent to the other peer via DownloadProgress messages.
	connections        map[protocol.DeviceID]protocol.Connection
	foldersByConns     map[protocol.DeviceID][]string // which folders are shared with each connected device
	disabled           bool                           // true when ProgressUpdateIntervalS <= 0
	evLogger           events.Logger
	mut                sync.Mutex // guards all fields above

	// timer drives the periodic emission loop in Serve; it is reset only
	// while pullers are registered.
	timer *time.Timer
}
|
|
|
|
|
2020-05-01 07:54:15 +00:00
|
|
|
// progressUpdate is a queued DownloadProgress message: the connection to
// send it on, the folder it concerns, and the per-file updates to deliver.
type progressUpdate struct {
	conn    protocol.Connection
	folder  string
	updates []protocol.FileDownloadProgressUpdate
}
|
|
|
|
|
|
|
|
func (p progressUpdate) send(ctx context.Context) {
|
|
|
|
p.conn.DownloadProgress(ctx, p.folder, p.updates)
|
|
|
|
}
|
|
|
|
|
2015-04-28 20:32:10 +00:00
|
|
|
// NewProgressEmitter creates a new progress emitter which emits
|
|
|
|
// DownloadProgress events every interval.
|
2019-08-15 14:29:37 +00:00
|
|
|
func NewProgressEmitter(cfg config.Wrapper, evLogger events.Logger) *ProgressEmitter {
|
2014-11-16 23:18:59 +00:00
|
|
|
t := &ProgressEmitter{
|
2019-12-04 06:15:00 +00:00
|
|
|
cfg: cfg,
|
2019-04-13 12:20:51 +00:00
|
|
|
registry: make(map[string]map[string]*sharedPullerState),
|
2016-04-15 10:59:41 +00:00
|
|
|
timer: time.NewTimer(time.Millisecond),
|
|
|
|
sentDownloadStates: make(map[protocol.DeviceID]*sentDownloadState),
|
2019-04-13 12:20:51 +00:00
|
|
|
connections: make(map[protocol.DeviceID]protocol.Connection),
|
|
|
|
foldersByConns: make(map[protocol.DeviceID][]string),
|
2019-08-15 14:29:37 +00:00
|
|
|
evLogger: evLogger,
|
2016-04-15 10:59:41 +00:00
|
|
|
mut: sync.NewMutex(),
|
2014-11-16 23:18:59 +00:00
|
|
|
}
|
2015-06-03 07:47:39 +00:00
|
|
|
|
2016-11-12 09:34:18 +00:00
|
|
|
t.CommitConfiguration(config.Configuration{}, cfg.RawCopy())
|
2015-06-03 07:47:39 +00:00
|
|
|
|
2014-11-16 23:18:59 +00:00
|
|
|
return t
|
|
|
|
}
|
|
|
|
|
2019-07-09 09:40:30 +00:00
|
|
|
// Serve starts the progress emitter which starts emitting DownloadProgress
// events as the progress happens. It blocks until ctx is cancelled and
// always returns nil.
func (t *ProgressEmitter) Serve(ctx context.Context) error {
	t.cfg.Subscribe(t)
	defer t.cfg.Unsubscribe(t)

	var lastUpdate time.Time
	var lastCount, newCount int
	for {
		select {
		case <-ctx.Done():
			l.Debugln("progress emitter: stopping")
			return nil
		case <-t.timer.C:
			t.mut.Lock()
			l.Debugln("progress emitter: timer - looking after", len(t.registry))

			// Find the newest puller update time and the current puller
			// count; only emit when something changed since last tick.
			newLastUpdated := lastUpdate
			newCount = t.lenRegistryLocked()
			var progressUpdates []progressUpdate
			for _, pullers := range t.registry {
				for _, puller := range pullers {
					if updated := puller.Updated(); updated.After(newLastUpdated) {
						newLastUpdated = updated
					}
				}
			}

			if !newLastUpdated.Equal(lastUpdate) || newCount != lastCount {
				lastUpdate = newLastUpdated
				lastCount = newCount
				t.sendDownloadProgressEventLocked()
				progressUpdates = t.computeProgressUpdates()
			} else {
				l.Debugln("progress emitter: nothing new")
			}

			// Only keep ticking while there is something to report; the
			// timer is re-armed by Register when a new puller appears.
			if newCount != 0 {
				t.timer.Reset(t.interval)
			}
			t.mut.Unlock()

			// Do the sending outside of the lock.
			// If these send block, the whole process of reporting progress to others stops, but that's probably fine.
			// It's better to stop this component from working under back-pressure than causing other components that
			// rely on this component to be waiting for locks.
			//
			// This might leave remote peers in some funky state where we are unable to communicate the fact that we
			// no longer have something, but there is not much we can do here.
			for _, update := range progressUpdates {
				update.send(ctx)
			}
		}
	}
}
|
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
func (t *ProgressEmitter) sendDownloadProgressEventLocked() {
|
2016-04-15 10:59:41 +00:00
|
|
|
output := make(map[string]map[string]*pullerProgress)
|
2019-04-13 12:20:51 +00:00
|
|
|
for folder, pullers := range t.registry {
|
|
|
|
if len(pullers) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
output[folder] = make(map[string]*pullerProgress)
|
|
|
|
for name, puller := range pullers {
|
|
|
|
output[folder][name] = puller.Progress()
|
2016-04-15 10:59:41 +00:00
|
|
|
}
|
|
|
|
}
|
2019-08-15 14:29:37 +00:00
|
|
|
t.evLogger.Log(events.DownloadProgress, output)
|
2016-04-15 10:59:41 +00:00
|
|
|
l.Debugf("progress emitter: emitting %#v", output)
|
|
|
|
}
|
|
|
|
|
2020-05-01 07:54:15 +00:00
|
|
|
// computeProgressUpdates builds the list of DownloadProgress messages that
// should be sent to each connected peer, based on the diff between what we
// previously told them and the current puller state. It also prunes sent
// state for disconnected devices and unshared folders. Caller must hold
// t.mut; the returned updates are intended to be sent outside the lock.
func (t *ProgressEmitter) computeProgressUpdates() []progressUpdate {
	var progressUpdates []progressUpdate
	for id, conn := range t.connections {
		for _, folder := range t.foldersByConns[id] {
			pullers, ok := t.registry[folder]
			if !ok {
				// There's never been any puller registered for this folder yet
				continue
			}

			// Lazily create the per-device record of what we've sent.
			state, ok := t.sentDownloadStates[id]
			if !ok {
				state = &sentDownloadState{
					folderStates: make(map[string]*sentFolderDownloadState),
				}
				t.sentDownloadStates[id] = state
			}

			// Keep only pullers worth advertising: files (not symlinks or
			// directories) in this folder with more than minBlocks blocks.
			activePullers := make([]*sharedPullerState, 0, len(pullers))
			for _, puller := range pullers {
				if puller.folder != folder || puller.file.IsSymlink() || puller.file.IsDirectory() || len(puller.file.Blocks) <= t.minBlocks {
					continue
				}
				activePullers = append(activePullers, puller)
			}

			// For every new puller that hasn't yet been seen, it will send all the blocks the puller has available
			// For every existing puller, it will check for new blocks, and send update for the new blocks only
			// For every puller that we've seen before but is no longer there, we will send a forget message
			updates := state.update(folder, activePullers)

			if len(updates) > 0 {
				progressUpdates = append(progressUpdates, progressUpdate{
					conn:    conn,
					folder:  folder,
					updates: updates,
				})
			}
		}
	}

	// Clean up sentDownloadStates for devices which we are no longer connected to.
	for id := range t.sentDownloadStates {
		_, ok := t.connections[id]
		if !ok {
			// Null out outstanding entries for device
			delete(t.sentDownloadStates, id)
		}
	}

	// If a folder was unshared from some device, tell it that all temp files
	// are now gone.
	for id, state := range t.sentDownloadStates {
		// For each of the folders that the state is aware of,
		// try to match it with a shared folder we've discovered above,
	nextFolder:
		for _, folder := range state.folders() {
			for _, existingFolder := range t.foldersByConns[id] {
				if existingFolder == folder {
					continue nextFolder
				}
			}

			// If we fail to find that folder, we tell the state to forget about it
			// and return us a list of updates which would clean up the state
			// on the remote end.
			state.cleanup(folder)
			// updates := state.cleanup(folder)
			// if len(updates) > 0 {
			// XXX: Don't send this now, as the only way we've unshared a folder
			// is by breaking the connection and reconnecting, hence sending
			// forget messages for some random folder currently makes no sense.
			// deviceConns[id].DownloadProgress(folder, updates, 0, nil)
			// }
		}
	}

	return progressUpdates
}
|
|
|
|
|
2015-06-03 07:47:39 +00:00
|
|
|
// VerifyConfiguration implements the config.Committer interface. Any
// configuration is acceptable to the progress emitter, so it always
// returns nil.
func (t *ProgressEmitter) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}
|
|
|
|
|
|
|
|
// CommitConfiguration implements the config.Committer interface
|
2020-01-20 20:14:29 +00:00
|
|
|
func (t *ProgressEmitter) CommitConfiguration(_, to config.Configuration) bool {
|
2014-11-16 23:18:59 +00:00
|
|
|
t.mut.Lock()
|
|
|
|
defer t.mut.Unlock()
|
|
|
|
|
2020-01-20 20:14:29 +00:00
|
|
|
newInterval := time.Duration(to.Options.ProgressUpdateIntervalS) * time.Second
|
|
|
|
if newInterval > 0 {
|
|
|
|
if t.disabled {
|
|
|
|
t.disabled = false
|
|
|
|
l.Debugln("progress emitter: enabled")
|
2019-04-13 12:20:51 +00:00
|
|
|
}
|
2020-01-20 20:14:29 +00:00
|
|
|
if t.interval != newInterval {
|
|
|
|
t.interval = newInterval
|
|
|
|
l.Debugln("progress emitter: updated interval", t.interval)
|
|
|
|
}
|
|
|
|
} else if !t.disabled {
|
2019-04-13 12:20:51 +00:00
|
|
|
t.clearLocked()
|
|
|
|
t.disabled = true
|
|
|
|
l.Debugln("progress emitter: disabled")
|
2016-08-16 18:22:01 +00:00
|
|
|
}
|
2016-04-15 10:59:41 +00:00
|
|
|
t.minBlocks = to.Options.TempIndexMinBlocks
|
2020-01-20 20:14:29 +00:00
|
|
|
if t.interval < time.Second {
|
|
|
|
// can't happen when we're not disabled, but better safe than sorry.
|
|
|
|
t.interval = time.Second
|
|
|
|
}
|
2015-06-03 07:47:39 +00:00
|
|
|
|
|
|
|
return true
|
2014-11-16 23:18:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Register a puller with the emitter which will start broadcasting pullers
|
|
|
|
// progress.
|
|
|
|
func (t *ProgressEmitter) Register(s *sharedPullerState) {
|
|
|
|
t.mut.Lock()
|
|
|
|
defer t.mut.Unlock()
|
2019-04-13 12:20:51 +00:00
|
|
|
if t.disabled {
|
|
|
|
l.Debugln("progress emitter: disabled, skip registering")
|
|
|
|
return
|
|
|
|
}
|
2015-10-03 15:25:21 +00:00
|
|
|
l.Debugln("progress emitter: registering", s.folder, s.file.Name)
|
2019-04-13 12:20:51 +00:00
|
|
|
if t.emptyLocked() {
|
2014-11-16 23:18:59 +00:00
|
|
|
t.timer.Reset(t.interval)
|
|
|
|
}
|
2019-04-13 12:20:51 +00:00
|
|
|
if _, ok := t.registry[s.folder]; !ok {
|
|
|
|
t.registry[s.folder] = make(map[string]*sharedPullerState)
|
|
|
|
}
|
|
|
|
t.registry[s.folder][s.file.Name] = s
|
2014-11-16 23:18:59 +00:00
|
|
|
}
|
|
|
|
|
2015-04-28 15:34:55 +00:00
|
|
|
// Deregister a puller which will stop broadcasting pullers state.
|
2014-11-16 23:18:59 +00:00
|
|
|
func (t *ProgressEmitter) Deregister(s *sharedPullerState) {
|
|
|
|
t.mut.Lock()
|
|
|
|
defer t.mut.Unlock()
|
2016-04-15 10:59:41 +00:00
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
if t.disabled {
|
|
|
|
l.Debugln("progress emitter: disabled, skip deregistering")
|
|
|
|
return
|
|
|
|
}
|
2016-04-15 10:59:41 +00:00
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
l.Debugln("progress emitter: deregistering", s.folder, s.file.Name)
|
|
|
|
delete(t.registry[s.folder], s.file.Name)
|
2014-11-16 23:18:59 +00:00
|
|
|
}
|
|
|
|
|
2015-04-28 20:32:10 +00:00
|
|
|
// BytesCompleted returns the number of bytes completed in the given folder.
|
2014-11-16 23:18:59 +00:00
|
|
|
func (t *ProgressEmitter) BytesCompleted(folder string) (bytes int64) {
|
|
|
|
t.mut.Lock()
|
|
|
|
defer t.mut.Unlock()
|
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
for _, s := range t.registry[folder] {
|
|
|
|
bytes += s.Progress().BytesDone
|
2014-11-16 23:18:59 +00:00
|
|
|
}
|
2015-10-03 15:25:21 +00:00
|
|
|
l.Debugf("progress emitter: bytes completed for %s: %d", folder, bytes)
|
2014-11-16 23:18:59 +00:00
|
|
|
return
|
|
|
|
}
|
2015-06-03 07:47:39 +00:00
|
|
|
|
|
|
|
// String returns a unique, human-readable identifier for this emitter
// instance, used in service and debug logging.
func (t *ProgressEmitter) String() string {
	return fmt.Sprintf("ProgressEmitter@%p", t)
}
|
2015-10-20 06:51:14 +00:00
|
|
|
|
|
|
|
// lenRegistry returns the total number of registered pullers across all
// folders, taking the lock.
func (t *ProgressEmitter) lenRegistry() int {
	t.mut.Lock()
	defer t.mut.Unlock()
	return t.lenRegistryLocked()
}
|
2016-04-15 10:59:41 +00:00
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
func (t *ProgressEmitter) lenRegistryLocked() (out int) {
|
|
|
|
for _, pullers := range t.registry {
|
|
|
|
out += len(pullers)
|
2016-04-15 10:59:41 +00:00
|
|
|
}
|
2019-04-13 12:20:51 +00:00
|
|
|
return out
|
2016-04-15 10:59:41 +00:00
|
|
|
}
|
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
func (t *ProgressEmitter) emptyLocked() bool {
|
|
|
|
for _, pullers := range t.registry {
|
|
|
|
if len(pullers) != 0 {
|
|
|
|
return false
|
2016-04-15 10:59:41 +00:00
|
|
|
}
|
|
|
|
}
|
2019-04-13 12:20:51 +00:00
|
|
|
return true
|
2016-04-15 10:59:41 +00:00
|
|
|
}
|
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
func (t *ProgressEmitter) temporaryIndexSubscribe(conn protocol.Connection, folders []string) {
|
|
|
|
t.mut.Lock()
|
|
|
|
defer t.mut.Unlock()
|
|
|
|
t.connections[conn.ID()] = conn
|
|
|
|
t.foldersByConns[conn.ID()] = folders
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *ProgressEmitter) temporaryIndexUnsubscribe(conn protocol.Connection) {
|
|
|
|
t.mut.Lock()
|
|
|
|
defer t.mut.Unlock()
|
|
|
|
delete(t.connections, conn.ID())
|
|
|
|
delete(t.foldersByConns, conn.ID())
|
|
|
|
}
|
2016-04-15 10:59:41 +00:00
|
|
|
|
2019-04-13 12:20:51 +00:00
|
|
|
// clearLocked resets all emitter state. For every still-connected device we
// have previously sent progress to, a final cleanup DownloadProgress message
// is sent per folder so the remote forgets our temp file availability; then
// all registries and tracking maps are reinitialized. Caller must hold t.mut.
// NOTE(review): the cleanup sends happen while holding the lock — acceptable
// here since this only runs when the emitter is being disabled.
func (t *ProgressEmitter) clearLocked() {
	for id, state := range t.sentDownloadStates {
		conn, ok := t.connections[id]
		if !ok {
			// Device no longer connected; nothing to notify.
			continue
		}
		for _, folder := range state.folders() {
			if updates := state.cleanup(folder); len(updates) > 0 {
				conn.DownloadProgress(context.Background(), folder, updates)
			}
		}
	}
	t.registry = make(map[string]map[string]*sharedPullerState)
	t.sentDownloadStates = make(map[protocol.DeviceID]*sentDownloadState)
	t.connections = make(map[protocol.DeviceID]protocol.Connection)
	t.foldersByConns = make(map[protocol.DeviceID][]string)
}
|