syncthing/lib/model/progressemitter.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"fmt"
	"time"

	"github.com/thejerf/suture"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/util"
)
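
// ProgressEmitter tracks the progress of active pullers per folder and
// periodically emits DownloadProgress events and, for subscribed
// connections, DownloadProgress protocol messages.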
type ProgressEmitter struct {
	suture.Service

	registry           map[string]map[string]*sharedPullerState // folder: name: puller
	interval           time.Duration
	minBlocks          int
	sentDownloadStates map[protocol.DeviceID]*sentDownloadState // States representing what we've sent to the other peer via DownloadProgress messages.
	connections        map[protocol.DeviceID]protocol.Connection
	foldersByConns     map[protocol.DeviceID][]string
	disabled           bool
	mut                sync.Mutex
	timer              *time.Timer
}

// NewProgressEmitter creates a new progress emitter which emits
// DownloadProgress events every interval.
func NewProgressEmitter(cfg config.Wrapper) *ProgressEmitter {
	t := &ProgressEmitter{
		registry:           make(map[string]map[string]*sharedPullerState),
		timer:              time.NewTimer(time.Millisecond),
		sentDownloadStates: make(map[protocol.DeviceID]*sentDownloadState),
		connections:        make(map[protocol.DeviceID]protocol.Connection),
		foldersByConns:     make(map[protocol.DeviceID][]string),
		mut:                sync.NewMutex(),
	}
	t.Service = util.AsService(t.serve)

	t.CommitConfiguration(config.Configuration{}, cfg.RawCopy())
	cfg.Subscribe(t)

	return t
}

// serve starts the progress emitter which emits DownloadProgress events as
// progress happens.
func (t *ProgressEmitter) serve(stop chan struct{}) {
	var lastUpdate time.Time
	var lastCount, newCount int
	for {
		select {
		case <-stop:
			l.Debugln("progress emitter: stopping")
			return

		case <-t.timer.C:
			t.mut.Lock()
			l.Debugln("progress emitter: timer - looking after", len(t.registry))

			newLastUpdated := lastUpdate
			newCount = t.lenRegistryLocked()
			for _, pullers := range t.registry {
				for _, puller := range pullers {
					if updated := puller.Updated(); updated.After(newLastUpdated) {
						newLastUpdated = updated
					}
				}
			}

			if !newLastUpdated.Equal(lastUpdate) || newCount != lastCount {
				lastUpdate = newLastUpdated
				lastCount = newCount
				t.sendDownloadProgressEventLocked()
				if len(t.connections) > 0 {
					t.sendDownloadProgressMessagesLocked()
				}
			} else {
				l.Debugln("progress emitter: nothing new")
			}

			if newCount != 0 {
				t.timer.Reset(t.interval)
			}
			t.mut.Unlock()
		}
	}
}
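
// sendDownloadProgressEventLocked emits a DownloadProgress event containing
// a snapshot of the progress of all registered pullers, grouped by folder.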
func (t *ProgressEmitter) sendDownloadProgressEventLocked() {
	output := make(map[string]map[string]*pullerProgress)
	for folder, pullers := range t.registry {
		if len(pullers) == 0 {
			continue
		}
		output[folder] = make(map[string]*pullerProgress)
		for name, puller := range pullers {
			output[folder][name] = puller.Progress()
		}
	}
	events.Default.Log(events.DownloadProgress, output)
	l.Debugf("progress emitter: emitting %#v", output)
}
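
// sendDownloadProgressMessagesLocked sends DownloadProgress messages to all
// subscribed connections, describing the blocks available from active
// pullers, and cleans up state for devices and folders that are no longer
// connected or shared.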
func (t *ProgressEmitter) sendDownloadProgressMessagesLocked() {
	for id, conn := range t.connections {
		for _, folder := range t.foldersByConns[id] {
			pullers, ok := t.registry[folder]
			if !ok {
				// There's never been any puller registered for this folder yet
				continue
			}

			state, ok := t.sentDownloadStates[id]
			if !ok {
				state = &sentDownloadState{
					folderStates: make(map[string]*sentFolderDownloadState),
				}
				t.sentDownloadStates[id] = state
			}

			activePullers := make([]*sharedPullerState, 0, len(pullers))
			for _, puller := range pullers {
				if puller.folder != folder || puller.file.IsSymlink() || puller.file.IsDirectory() || len(puller.file.Blocks) <= t.minBlocks {
					continue
				}
				activePullers = append(activePullers, puller)
			}

			// For every new puller that hasn't yet been seen, send all the
			// blocks the puller has available. For every existing puller,
			// check for new blocks and send updates for the new blocks only.
			// For every puller that we've seen before but is no longer
			// there, send a forget message.
			updates := state.update(folder, activePullers)

			if len(updates) > 0 {
				conn.DownloadProgress(folder, updates)
			}
		}
	}

	// Clean up sentDownloadStates for devices which we are no longer
	// connected to.
	for id := range t.sentDownloadStates {
		_, ok := t.connections[id]
		if !ok {
			// Null out outstanding entries for device
			delete(t.sentDownloadStates, id)
		}
	}

	// If a folder was unshared from some device, tell it that all temp files
	// are now gone.
	for id, state := range t.sentDownloadStates {
		// For each of the folders that the state is aware of, try to match
		// it with a folder that is still shared with the device.
	nextFolder:
		for _, folder := range state.folders() {
			for _, existingFolder := range t.foldersByConns[id] {
				if existingFolder == folder {
					continue nextFolder
				}
			}

			// If we fail to find that folder, we tell the state to forget
			// about it and return us a list of updates which would clean up
			// the state on the remote end.
			state.cleanup(folder)
			// updates := state.cleanup(folder)
			// if len(updates) > 0 {
			// XXX: Don't send this now, as the only way we've unshared a
			// folder is by breaking the connection and reconnecting, hence
			// sending forget messages for some random folder currently
			// makes no sense.
			// deviceConns[id].DownloadProgress(folder, updates, 0, nil)
			// }
		}
	}
}

// VerifyConfiguration implements the config.Committer interface
func (t *ProgressEmitter) VerifyConfiguration(from, to config.Configuration) error {
	return nil
}

// CommitConfiguration implements the config.Committer interface
func (t *ProgressEmitter) CommitConfiguration(from, to config.Configuration) bool {
	t.mut.Lock()
	defer t.mut.Unlock()

	switch {
	case t.disabled && to.Options.ProgressUpdateIntervalS >= 0:
		t.disabled = false
		l.Debugln("progress emitter: enabled")
		fallthrough
	case !t.disabled && from.Options.ProgressUpdateIntervalS != to.Options.ProgressUpdateIntervalS:
		t.interval = time.Duration(to.Options.ProgressUpdateIntervalS) * time.Second
		if t.interval < time.Second {
			t.interval = time.Second
		}
		l.Debugln("progress emitter: updated interval", t.interval)
	case !t.disabled && to.Options.ProgressUpdateIntervalS < 0:
		t.clearLocked()
		t.disabled = true
		l.Debugln("progress emitter: disabled")
	}
	t.minBlocks = to.Options.TempIndexMinBlocks

	return true
}

// Register a puller with the emitter, which will start broadcasting the
// puller's progress.
func (t *ProgressEmitter) Register(s *sharedPullerState) {
	t.mut.Lock()
	defer t.mut.Unlock()

	if t.disabled {
		l.Debugln("progress emitter: disabled, skip registering")
		return
	}

	l.Debugln("progress emitter: registering", s.folder, s.file.Name)
	if t.emptyLocked() {
		t.timer.Reset(t.interval)
	}
	if _, ok := t.registry[s.folder]; !ok {
		t.registry[s.folder] = make(map[string]*sharedPullerState)
	}
	t.registry[s.folder][s.file.Name] = s
}

// Deregister a puller, which will stop broadcasting the puller's state.
func (t *ProgressEmitter) Deregister(s *sharedPullerState) {
	t.mut.Lock()
	defer t.mut.Unlock()

	if t.disabled {
		l.Debugln("progress emitter: disabled, skip deregistering")
		return
	}

	l.Debugln("progress emitter: deregistering", s.folder, s.file.Name)
	delete(t.registry[s.folder], s.file.Name)
}

// BytesCompleted returns the number of bytes completed in the given folder.
func (t *ProgressEmitter) BytesCompleted(folder string) (bytes int64) {
	t.mut.Lock()
	defer t.mut.Unlock()

	for _, s := range t.registry[folder] {
		bytes += s.Progress().BytesDone
	}
	l.Debugf("progress emitter: bytes completed for %s: %d", folder, bytes)
	return
}
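
// String implements the fmt.Stringer interface.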
func (t *ProgressEmitter) String() string {
	return fmt.Sprintf("ProgressEmitter@%p", t)
}
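
// lenRegistry returns the total number of registered pullers across all
// folders, taking the lock.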
func (t *ProgressEmitter) lenRegistry() int {
	t.mut.Lock()
	defer t.mut.Unlock()
	return t.lenRegistryLocked()
}

func (t *ProgressEmitter) lenRegistryLocked() (out int) {
	for _, pullers := range t.registry {
		out += len(pullers)
	}
	return out
}
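
// emptyLocked returns true when no pullers at all are currently registered.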
func (t *ProgressEmitter) emptyLocked() bool {
	for _, pullers := range t.registry {
		if len(pullers) != 0 {
			return false
		}
	}
	return true
}
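
// temporaryIndexSubscribe registers the given connection as interested in
// DownloadProgress messages for the given folders.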
func (t *ProgressEmitter) temporaryIndexSubscribe(conn protocol.Connection, folders []string) {
	t.mut.Lock()
	defer t.mut.Unlock()
	t.connections[conn.ID()] = conn
	t.foldersByConns[conn.ID()] = folders
}
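
// temporaryIndexUnsubscribe removes the given connection from the set of
// connections receiving DownloadProgress messages.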
func (t *ProgressEmitter) temporaryIndexUnsubscribe(conn protocol.Connection) {
	t.mut.Lock()
	defer t.mut.Unlock()
	delete(t.connections, conn.ID())
	delete(t.foldersByConns, conn.ID())
}
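
// clearLocked sends cleanup (forget) updates for all download progress state
// previously sent to connected devices and resets the emitter's internal
// registries.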
func (t *ProgressEmitter) clearLocked() {
	for id, state := range t.sentDownloadStates {
		conn, ok := t.connections[id]
		if !ok {
			continue
		}
		for _, folder := range state.folders() {
			if updates := state.cleanup(folder); len(updates) > 0 {
				conn.DownloadProgress(folder, updates)
			}
		}
	}
	t.registry = make(map[string]map[string]*sharedPullerState)
	t.sentDownloadStates = make(map[protocol.DeviceID]*sentDownloadState)
	t.connections = make(map[protocol.DeviceID]protocol.Connection)
	t.foldersByConns = make(map[protocol.DeviceID][]string)
}