Implement facility-based logger, debugging via REST API

This implements a new debug/trace infrastructure based on a slightly
hacked-up logger. Instead of the traditional "if debug { ... }", I've
rewritten the logger so that Debugln and Debugf are no-ops unless
debugging has been enabled for a given "facility". A "facility" is just
a string, typically a package name.
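
As a minimal sketch of the per-package setup (the NewFacility call and
import path are taken from the diff below; the announce function is
purely illustrative):

    package beacon

    import "github.com/syncthing/syncthing/lib/logger"

    // One facility per package; the description is what the REST API lists.
    var l = logger.DefaultLogger.NewFacility("beacon", "Multicast and broadcast discovery")

    func announce(msg string) {
        // A near no-op unless debugging is enabled for the "beacon" facility.
        l.Debugln("sending announcement:", msg)
    }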

This will be slightly slower than before, but not by much, since the
disabled case is mostly a function call that returns immediately. For
the cases where it matters (for example, when the Debugln takes a
hex.Dump() of something and isn't inside a rarely taken "if err != nil"
branch) there is an l.ShouldDebug(facility) check that is fast enough to
be used like the old "if debug".

The point of all this is that we can now toggle debugging for the
various packages on and off at runtime. There's a new endpoint,
/rest/system/debug, that can be POSTed a set of facilities to enable or
disable debugging for, or GET from to get a list of facilities with
descriptions and their current debug status.
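
For illustration, a client could toggle facilities roughly like this.
This is only a sketch: it assumes a local instance on the default GUI
address (127.0.0.1:8384) and skips API key / CSRF handling; the
enable/disable query parameters are the ones the new handler reads.

    package main

    import (
        "io"
        "net/http"
        "os"
    )

    func main() {
        base := "http://127.0.0.1:8384"

        // Enable debugging for the db and http facilities, disable beacon;
        // facilities are passed as comma separated lists in the query string.
        if resp, err := http.Post(base+"/rest/system/debug?enable=db,http&disable=beacon", "text/plain", nil); err == nil {
            resp.Body.Close()
        }

        // List all facilities with descriptions plus the currently enabled set.
        if resp, err := http.Get(base + "/rest/system/debug"); err == nil {
            defer resp.Body.Close()
            io.Copy(os.Stdout, resp.Body)
        }
    }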

Similarly, /rest/system/log?since=... grabs the latest log entries, up
to 250 of them (a hardcoded constant in main.go), plus the initial few.
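
A rough sketch of a poller for that endpoint follows. The "since"
parameter is parsed as RFC 3339 and the response wraps the entries in a
"messages" array, as in the handler below; the per-entry JSON field
names ("when", "message") are assumptions.

    package logclient

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "net/url"
        "time"
    )

    type logLine struct {
        When    time.Time `json:"when"`    // assumed field name
        Message string    `json:"message"` // assumed field name
    }

    // pollLog fetches entries newer than since and returns the timestamp
    // of the last entry seen, to be used as the next since value.
    func pollLog(base string, since time.Time) (time.Time, error) {
        u := base + "/rest/system/log?since=" + url.QueryEscape(since.Format(time.RFC3339))
        resp, err := http.Get(u)
        if err != nil {
            return since, err
        }
        defer resp.Body.Close()

        var payload struct {
            Messages []logLine `json:"messages"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
            return since, err
        }
        for _, m := range payload.Messages {
            fmt.Printf("%s: %s\n", m.When.Format(time.RFC3339), m.Message)
            since = m.When
        }
        return since, nil
    }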

Not implemented in this commit (but planned) is a simple debug GUI
available on /debug that shows the current log in an easily pasteable
format and has checkboxes to enable the various debug facilities.

The debug instructions to a user then become "visit this URL, check
these boxes, reproduce your problem, copy and paste the log". The actual
log viewer on the hypothetical /debug URL can poll regularly for new log
entries and thus bypass the 250-line limit.

The existing STTRACE=foo variable is still obeyed and just sets the
start state of the system.
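
Concretely, each package gets an init() along these lines (continuing
the beacon sketch above, assuming "os" and "strings" are imported):

    func init() {
        // STTRACE only determines the starting state; the REST API can
        // change it at runtime.
        l.SetDebug("beacon", strings.Contains(os.Getenv("STTRACE"), "beacon") ||
            os.Getenv("STTRACE") == "all")
    }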
Jakob Borg 2015-10-03 17:25:21 +02:00
parent 2de364414f
commit 76af9ba53d
62 changed files with 796 additions and 1383 deletions

Godeps/Godeps.json generated

@@ -13,10 +13,6 @@
         "ImportPath": "github.com/calmh/du",
         "Rev": "3c0690cca16228b97741327b1b6781397afbdb24"
     },
-    {
-        "ImportPath": "github.com/calmh/logger",
-        "Rev": "c96f6a1a8c7b6bf2f4860c667867d90174799eb2"
-    },
     {
         "ImportPath": "github.com/calmh/luhn",
         "Rev": "0c8388ff95fa92d4094011e5a04fc99dea3d1632"


@ -1,19 +0,0 @@
Copyright (C) 2013 Jakob Borg
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,15 +0,0 @@
logger
======
A small wrapper around `log` to provide log levels.
Documentation
-------------
http://godoc.org/github.com/calmh/logger
License
-------
MIT


@ -1,187 +0,0 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
// Package logger implements a standardized logger with callback functionality
package logger
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"sync"
)
type LogLevel int
const (
LevelDebug LogLevel = iota
LevelVerbose
LevelInfo
LevelOK
LevelWarn
LevelFatal
NumLevels
)
// A MessageHandler is called with the log level and message text.
type MessageHandler func(l LogLevel, msg string)
type Logger struct {
logger *log.Logger
handlers [NumLevels][]MessageHandler
mut sync.Mutex
}
// The default logger logs to standard output with a time prefix.
var DefaultLogger = New()
func New() *Logger {
if os.Getenv("LOGGER_DISCARD") != "" {
// Hack to completely disable logging, for example when running benchmarks.
return &Logger{
logger: log.New(ioutil.Discard, "", 0),
}
}
return &Logger{
logger: log.New(os.Stdout, "", log.Ltime),
}
}
// AddHandler registers a new MessageHandler to receive messages with the
// specified log level or above.
func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
l.mut.Lock()
defer l.mut.Unlock()
l.handlers[level] = append(l.handlers[level], h)
}
// See log.SetFlags
func (l *Logger) SetFlags(flag int) {
l.logger.SetFlags(flag)
}
// See log.SetPrefix
func (l *Logger) SetPrefix(prefix string) {
l.logger.SetPrefix(prefix)
}
func (l *Logger) callHandlers(level LogLevel, s string) {
for _, h := range l.handlers[level] {
h(level, strings.TrimSpace(s))
}
}
// Debugln logs a line with a DEBUG prefix.
func (l *Logger) Debugln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "DEBUG: "+s)
l.callHandlers(LevelDebug, s)
}
// Debugf logs a formatted line with a DEBUG prefix.
func (l *Logger) Debugf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "DEBUG: "+s)
l.callHandlers(LevelDebug, s)
}
// Infoln logs a line with a VERBOSE prefix.
func (l *Logger) Verboseln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "VERBOSE: "+s)
l.callHandlers(LevelVerbose, s)
}
// Infof logs a formatted line with a VERBOSE prefix.
func (l *Logger) Verbosef(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "VERBOSE: "+s)
l.callHandlers(LevelVerbose, s)
}
// Infoln logs a line with an INFO prefix.
func (l *Logger) Infoln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "INFO: "+s)
l.callHandlers(LevelInfo, s)
}
// Infof logs a formatted line with an INFO prefix.
func (l *Logger) Infof(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "INFO: "+s)
l.callHandlers(LevelInfo, s)
}
// Okln logs a line with an OK prefix.
func (l *Logger) Okln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Okf logs a formatted line with an OK prefix.
func (l *Logger) Okf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "OK: "+s)
l.callHandlers(LevelOK, s)
}
// Warnln logs a formatted line with a WARNING prefix.
func (l *Logger) Warnln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "WARNING: "+s)
l.callHandlers(LevelWarn, s)
}
// Warnf logs a formatted line with a WARNING prefix.
func (l *Logger) Warnf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "WARNING: "+s)
l.callHandlers(LevelWarn, s)
}
// Fatalln logs a line with a FATAL prefix and exits the process with exit
// code 1.
func (l *Logger) Fatalln(vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintln(vals...)
l.logger.Output(2, "FATAL: "+s)
l.callHandlers(LevelFatal, s)
os.Exit(1)
}
// Fatalf logs a formatted line with a FATAL prefix and exits the process with
// exit code 1.
func (l *Logger) Fatalf(format string, vals ...interface{}) {
l.mut.Lock()
defer l.mut.Unlock()
s := fmt.Sprintf(format, vals...)
l.logger.Output(2, "FATAL: "+s)
l.callHandlers(LevelFatal, s)
os.Exit(1)
}


@ -1,58 +0,0 @@
// Copyright (C) 2014 Jakob Borg. All rights reserved. Use of this source code
// is governed by an MIT-style license that can be found in the LICENSE file.
package logger
import (
"strings"
"testing"
)
func TestAPI(t *testing.T) {
l := New()
l.SetFlags(0)
l.SetPrefix("testing")
debug := 0
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug))
info := 0
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info))
warn := 0
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
ok := 0
l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok))
l.Debugf("test %d", 0)
l.Debugln("test", 0)
l.Infof("test %d", 1)
l.Infoln("test", 1)
l.Warnf("test %d", 2)
l.Warnln("test", 2)
l.Okf("test %d", 3)
l.Okln("test", 3)
if debug != 2 {
t.Errorf("Debug handler called %d != 2 times", debug)
}
if info != 2 {
t.Errorf("Info handler called %d != 2 times", info)
}
if warn != 2 {
t.Errorf("Warn handler called %d != 2 times", warn)
}
if ok != 2 {
t.Errorf("Ok handler called %d != 2 times", ok)
}
}
func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) {
return func(l LogLevel, msg string) {
*counter++
if l != expectl {
t.Errorf("Incorrect message level %d != %d", l, expectl)
}
if !strings.HasSuffix(msg, expectmsg) {
t.Errorf("%q does not end with %q", msg, expectmsg)
}
}
}


@@ -9,10 +9,24 @@ package main
 import (
     "os"
     "strings"
+
+    "github.com/syncthing/syncthing/lib/logger"
 )

 var (
-    debugNet    = strings.Contains(os.Getenv("STTRACE"), "net") || os.Getenv("STTRACE") == "all"
-    debugHTTP   = strings.Contains(os.Getenv("STTRACE"), "http") || os.Getenv("STTRACE") == "all"
-    debugSuture = strings.Contains(os.Getenv("STTRACE"), "suture") || os.Getenv("STTRACE") == "all"
+    l     = logger.DefaultLogger.NewFacility("main", "Main package")
+    httpl = logger.DefaultLogger.NewFacility("http", "REST API")
 )
+
+func init() {
+    l.SetDebug("main", strings.Contains(os.Getenv("STTRACE"), "main") || os.Getenv("STTRACE") == "all")
+    l.SetDebug("http", strings.Contains(os.Getenv("STTRACE"), "http") || os.Getenv("STTRACE") == "all")
+}
+
+func shouldDebugMain() bool {
+    return l.ShouldDebug("main")
+}
+
+func shouldDebugHTTP() bool {
+    return l.ShouldDebug("http")
+}


@ -20,16 +20,17 @@ import (
"path/filepath" "path/filepath"
"reflect" "reflect"
"runtime" "runtime"
"sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/calmh/logger"
"github.com/syncthing/syncthing/lib/auto" "github.com/syncthing/syncthing/lib/auto"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/db" "github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/discover" "github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/logger"
"github.com/syncthing/syncthing/lib/model" "github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/osutil" "github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
@ -41,15 +42,8 @@ import (
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
) )
type guiError struct {
Time time.Time `json:"time"`
Error string `json:"error"`
}
var ( var (
configInSync = true configInSync = true
guiErrors = []guiError{}
guiErrorsMut = sync.NewMutex()
startTime = time.Now() startTime = time.Now()
) )
@ -65,9 +59,12 @@ type apiSvc struct {
fss *folderSummarySvc fss *folderSummarySvc
stop chan struct{} stop chan struct{}
systemConfigMut sync.Mutex systemConfigMut sync.Mutex
guiErrors *logger.Recorder
systemLog *logger.Recorder
} }
func newAPISvc(id protocol.DeviceID, cfg *config.Wrapper, assetDir string, m *model.Model, eventSub *events.BufferedSubscription, discoverer *discover.CachingMux, relaySvc *relay.Svc) (*apiSvc, error) { func newAPISvc(id protocol.DeviceID, cfg *config.Wrapper, assetDir string, m *model.Model, eventSub *events.BufferedSubscription, discoverer *discover.CachingMux, relaySvc *relay.Svc, errors, systemLog *logger.Recorder) (*apiSvc, error) {
svc := &apiSvc{ svc := &apiSvc{
id: id, id: id,
cfg: cfg, cfg: cfg,
@ -77,6 +74,8 @@ func newAPISvc(id protocol.DeviceID, cfg *config.Wrapper, assetDir string, m *mo
discoverer: discoverer, discoverer: discoverer,
relaySvc: relaySvc, relaySvc: relaySvc,
systemConfigMut: sync.NewMutex(), systemConfigMut: sync.NewMutex(),
guiErrors: errors,
systemLog: systemLog,
} }
var err error var err error
@ -138,8 +137,6 @@ func (s *apiSvc) getListener(cfg config.GUIConfiguration) (net.Listener, error)
func (s *apiSvc) Serve() { func (s *apiSvc) Serve() {
s.stop = make(chan struct{}) s.stop = make(chan struct{})
l.AddHandler(logger.LevelWarn, s.showGuiError)
// The GET handlers // The GET handlers
getRestMux := http.NewServeMux() getRestMux := http.NewServeMux()
getRestMux.HandleFunc("/rest/db/completion", s.getDBCompletion) // device folder getRestMux.HandleFunc("/rest/db/completion", s.getDBCompletion) // device folder
@ -164,6 +161,9 @@ func (s *apiSvc) Serve() {
getRestMux.HandleFunc("/rest/system/status", s.getSystemStatus) // - getRestMux.HandleFunc("/rest/system/status", s.getSystemStatus) // -
getRestMux.HandleFunc("/rest/system/upgrade", s.getSystemUpgrade) // - getRestMux.HandleFunc("/rest/system/upgrade", s.getSystemUpgrade) // -
getRestMux.HandleFunc("/rest/system/version", s.getSystemVersion) // - getRestMux.HandleFunc("/rest/system/version", s.getSystemVersion) // -
getRestMux.HandleFunc("/rest/system/debug", s.getSystemDebug) // -
getRestMux.HandleFunc("/rest/system/log", s.getSystemLog) // [since]
getRestMux.HandleFunc("/rest/system/log.txt", s.getSystemLogTxt) // [since]
// The POST handlers // The POST handlers
postRestMux := http.NewServeMux() postRestMux := http.NewServeMux()
@ -181,6 +181,7 @@ func (s *apiSvc) Serve() {
postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // - postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // -
postRestMux.HandleFunc("/rest/system/pause", s.postSystemPause) // device postRestMux.HandleFunc("/rest/system/pause", s.postSystemPause) // device
postRestMux.HandleFunc("/rest/system/resume", s.postSystemResume) // device postRestMux.HandleFunc("/rest/system/resume", s.postSystemResume) // device
postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]
// Debug endpoints, not for general use // Debug endpoints, not for general use
getRestMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion) getRestMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
@ -223,9 +224,7 @@ func (s *apiSvc) Serve() {
handler = redirectToHTTPSMiddleware(handler) handler = redirectToHTTPSMiddleware(handler)
} }
if debugHTTP {
handler = debugMiddleware(handler) handler = debugMiddleware(handler)
}
srv := http.Server{ srv := http.Server{
Handler: handler, Handler: handler,
@ -379,6 +378,36 @@ func (s *apiSvc) getSystemVersion(w http.ResponseWriter, r *http.Request) {
}) })
} }
func (s *apiSvc) getSystemDebug(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
names := l.Facilities()
enabled := l.FacilityDebugging()
sort.Strings(enabled)
json.NewEncoder(w).Encode(map[string]interface{}{
"facilities": names,
"enabled": enabled,
})
}
func (s *apiSvc) postSystemDebug(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
q := r.URL.Query()
for _, f := range strings.Split(q.Get("enable"), ",") {
if f == "" {
continue
}
l.SetDebug(f, true)
l.Infof("Enabled debug data for %q", f)
}
for _, f := range strings.Split(q.Get("disable"), ",") {
if f == "" {
continue
}
l.SetDebug(f, false)
l.Infof("Disabled debug data for %q", f)
}
}
func (s *apiSvc) getDBBrowse(w http.ResponseWriter, r *http.Request) { func (s *apiSvc) getDBBrowse(w http.ResponseWriter, r *http.Request) {
qs := r.URL.Query() qs := r.URL.Query()
folder := qs.Get("folder") folder := qs.Get("folder")
@ -684,30 +713,41 @@ func (s *apiSvc) getSystemStatus(w http.ResponseWriter, r *http.Request) {
func (s *apiSvc) getSystemError(w http.ResponseWriter, r *http.Request) { func (s *apiSvc) getSystemError(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("Content-Type", "application/json; charset=utf-8")
guiErrorsMut.Lock() json.NewEncoder(w).Encode(map[string][]logger.Line{
json.NewEncoder(w).Encode(map[string][]guiError{"errors": guiErrors}) "errors": s.guiErrors.Since(time.Time{}),
guiErrorsMut.Unlock() })
} }
func (s *apiSvc) postSystemError(w http.ResponseWriter, r *http.Request) { func (s *apiSvc) postSystemError(w http.ResponseWriter, r *http.Request) {
bs, _ := ioutil.ReadAll(r.Body) bs, _ := ioutil.ReadAll(r.Body)
r.Body.Close() r.Body.Close()
s.showGuiError(0, string(bs)) l.Warnln(string(bs))
} }
func (s *apiSvc) postSystemErrorClear(w http.ResponseWriter, r *http.Request) { func (s *apiSvc) postSystemErrorClear(w http.ResponseWriter, r *http.Request) {
guiErrorsMut.Lock() s.guiErrors.Clear()
guiErrors = []guiError{}
guiErrorsMut.Unlock()
} }
func (s *apiSvc) showGuiError(l logger.LogLevel, err string) { func (s *apiSvc) getSystemLog(w http.ResponseWriter, r *http.Request) {
guiErrorsMut.Lock() q := r.URL.Query()
guiErrors = append(guiErrors, guiError{time.Now(), err}) since, err := time.Parse(time.RFC3339, q.Get("since"))
if len(guiErrors) > 5 { l.Debugln(err)
guiErrors = guiErrors[len(guiErrors)-5:] w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string][]logger.Line{
"messages": s.systemLog.Since(since),
})
}
func (s *apiSvc) getSystemLogTxt(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
since, err := time.Parse(time.RFC3339, q.Get("since"))
l.Debugln(err)
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
for _, line := range s.systemLog.Since(since) {
fmt.Fprintf(w, "%s: %s\n", line.When.Format(time.RFC3339), line.Message)
} }
guiErrorsMut.Unlock()
} }
func (s *apiSvc) getSystemDiscovery(w http.ResponseWriter, r *http.Request) { func (s *apiSvc) getSystemDiscovery(w http.ResponseWriter, r *http.Request) {


@@ -42,9 +42,7 @@ func basicAuthAndSessionMiddleware(cookieName string, cfg config.GUIConfiguratio
         }
     }

-    if debugHTTP {
-        l.Debugln("Sessionless HTTP request with authentication; this is expensive.")
-    }
+    httpl.Debugln("Sessionless HTTP request with authentication; this is expensive.")

     error := func() {
         time.Sleep(time.Duration(rand.Intn(100)+100) * time.Millisecond)


@ -7,6 +7,7 @@
package main package main
import ( import (
"bytes"
"crypto/tls" "crypto/tls"
"flag" "flag"
"fmt" "fmt"
@ -21,16 +22,17 @@ import (
"regexp" "regexp"
"runtime" "runtime"
"runtime/pprof" "runtime/pprof"
"sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/calmh/logger"
"github.com/syncthing/syncthing/lib/config" "github.com/syncthing/syncthing/lib/config"
"github.com/syncthing/syncthing/lib/connections" "github.com/syncthing/syncthing/lib/connections"
"github.com/syncthing/syncthing/lib/db" "github.com/syncthing/syncthing/lib/db"
"github.com/syncthing/syncthing/lib/discover" "github.com/syncthing/syncthing/lib/discover"
"github.com/syncthing/syncthing/lib/events" "github.com/syncthing/syncthing/lib/events"
"github.com/syncthing/syncthing/lib/logger"
"github.com/syncthing/syncthing/lib/model" "github.com/syncthing/syncthing/lib/model"
"github.com/syncthing/syncthing/lib/osutil" "github.com/syncthing/syncthing/lib/osutil"
"github.com/syncthing/syncthing/lib/protocol" "github.com/syncthing/syncthing/lib/protocol"
@ -71,6 +73,9 @@ const (
tlsDefaultCommonName = "syncthing" tlsDefaultCommonName = "syncthing"
tlsRSABits = 3072 tlsRSABits = 3072
pingEventInterval = time.Minute pingEventInterval = time.Minute
maxSystemErrors = 5
initialSystemLog = 10
maxSystemLog = 250
) )
// The discovery results are sorted by their source priority. // The discovery results are sorted by their source priority.
@ -80,8 +85,6 @@ const (
globalDiscoveryPriority globalDiscoveryPriority
) )
var l = logger.DefaultLogger
func init() { func init() {
if Version != "unknown-dev" { if Version != "unknown-dev" {
// If not a generic dev build, version string should come from git describe // If not a generic dev build, version string should come from git describe
@ -148,25 +151,11 @@ Development Settings
The following environment variables modify syncthing's behavior in ways that The following environment variables modify syncthing's behavior in ways that
are mostly useful for developers. Use with care. are mostly useful for developers. Use with care.
STGUIASSETS Directory to load GUI assets from. Overrides compiled in assets. STGUIASSETS Directory to load GUI assets from. Overrides compiled in
assets.
STTRACE A comma separated string of facilities to trace. The valid STTRACE A comma separated string of facilities to trace. The valid
facility strings are: facility strings listed below.
- "beacon" (the beacon package)
- "discover" (the discover package)
- "events" (the events package)
- "files" (the files package)
- "http" (the main package; HTTP requests)
- "locks" (the sync package; trace long held locks)
- "net" (the main package; connections & network messages)
- "model" (the model package)
- "scanner" (the scanner package)
- "stats" (the stats package)
- "suture" (the suture package; service management)
- "upnp" (the upnp package)
- "xdr" (the xdr package)
- "all" (all of the above)
STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the STPROFILER Set to a listen address such as "127.0.0.1:9090" to start the
profiler with HTTP access. profiler with HTTP access.
@ -189,7 +178,15 @@ are mostly useful for developers. Use with care.
GOGC Percentage of heap growth at which to trigger GC. Default is GOGC Percentage of heap growth at which to trigger GC. Default is
100. Lower numbers keep peak memory usage down, at the price 100. Lower numbers keep peak memory usage down, at the price
of CPU usage (ie. performance).` of CPU usage (ie. performance).
Debugging Facilities
--------------------
The following are valid values for the STTRACE variable:
%s`
) )
// Command line and environment options // Command line and environment options
@ -245,7 +242,8 @@ func main() {
flag.BoolVar(&verbose, "verbose", false, "Print verbose log output") flag.BoolVar(&verbose, "verbose", false, "Print verbose log output")
flag.BoolVar(&paused, "paused", false, "Start with all devices paused") flag.BoolVar(&paused, "paused", false, "Start with all devices paused")
flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, baseDirs["config"])) longUsage := fmt.Sprintf(extraUsage, baseDirs["config"], debugFacilities())
flag.Usage = usageFor(flag.CommandLine, usage, longUsage)
flag.Parse() flag.Parse()
if noConsole { if noConsole {
@ -397,6 +395,28 @@ func main() {
} }
} }
func debugFacilities() string {
facilities := l.Facilities()
// Get a sorted list of names
var names []string
maxLen := 0
for name := range facilities {
names = append(names, name)
if len(name) > maxLen {
maxLen = len(name)
}
}
sort.Strings(names)
// Format the choices
b := new(bytes.Buffer)
for _, name := range names {
fmt.Fprintf(b, " %-*s - %s\n", maxLen, name, facilities[name])
}
return b.String()
}
func upgradeViaRest() error { func upgradeViaRest() error {
cfg, err := config.Load(locations[locConfigFile], protocol.LocalDeviceID) cfg, err := config.Load(locations[locConfigFile], protocol.LocalDeviceID)
if err != nil { if err != nil {
@ -439,9 +459,7 @@ func syncthingMain() {
// We want any logging it does to go through our log system. // We want any logging it does to go through our log system.
mainSvc := suture.New("main", suture.Spec{ mainSvc := suture.New("main", suture.Spec{
Log: func(line string) { Log: func(line string) {
if debugSuture {
l.Debugln(line) l.Debugln(line)
}
}, },
}) })
mainSvc.ServeBackground() mainSvc.ServeBackground()
@ -458,6 +476,9 @@ func syncthingMain() {
mainSvc.Add(newVerboseSvc()) mainSvc.Add(newVerboseSvc())
} }
errors := logger.NewRecorder(l, logger.LevelWarn, maxSystemErrors, 0)
systemLog := logger.NewRecorder(l, logger.LevelDebug, maxSystemLog, initialSystemLog)
// Event subscription for the API; must start early to catch the early events. // Event subscription for the API; must start early to catch the early events.
apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000) apiSub := events.NewBufferedSubscription(events.Default.Subscribe(events.AllEvents), 1000)
@ -733,7 +754,7 @@ func syncthingMain() {
// GUI // GUI
setupGUI(mainSvc, cfg, m, apiSub, cachedDiscovery, relaySvc) setupGUI(mainSvc, cfg, m, apiSub, cachedDiscovery, relaySvc, errors, systemLog)
// Start connection management // Start connection management
@ -883,7 +904,7 @@ func startAuditing(mainSvc *suture.Supervisor) {
l.Infoln("Audit log in", auditFile) l.Infoln("Audit log in", auditFile)
} }
func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub *events.BufferedSubscription, discoverer *discover.CachingMux, relaySvc *relay.Svc) { func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, apiSub *events.BufferedSubscription, discoverer *discover.CachingMux, relaySvc *relay.Svc, errors, systemLog *logger.Recorder) {
guiCfg := cfg.GUI() guiCfg := cfg.GUI()
if !guiCfg.Enabled { if !guiCfg.Enabled {
@ -918,7 +939,7 @@ func setupGUI(mainSvc *suture.Supervisor, cfg *config.Wrapper, m *model.Model, a
urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port))) urlShow := fmt.Sprintf("%s://%s/", proto, net.JoinHostPort(hostShow, strconv.Itoa(addr.Port)))
l.Infoln("Starting web GUI on", urlShow) l.Infoln("Starting web GUI on", urlShow)
api, err := newAPISvc(myID, cfg, guiAssets, m, apiSub, discoverer, relaySvc) api, err := newAPISvc(myID, cfg, guiAssets, m, apiSub, discoverer, relaySvc, errors, systemLog)
if err != nil { if err != nil {
l.Fatalln("Cannot start GUI:", err) l.Fatalln("Cannot start GUI:", err)
} }


@@ -98,9 +98,7 @@ func (s *upnpSvc) tryIGDs(igds []upnp.IGD, prevExtPort int) int {
         l.Infof("New UPnP port mapping: external port %d to local port %d.", extPort, s.localPort)
         events.Default.Log(events.ExternalPortMappingChanged, map[string]int{"port": extPort})
     }
-    if debugNet {
-        l.Debugf("Created/updated UPnP port mapping for external port %d on device %s.", extPort, igd.FriendlyIdentifier())
-    }
+    l.Debugf("Created/updated UPnP port mapping for external port %d on device %s.", extPort, igd.FriendlyIdentifier())
     return extPort
 }


@ -33,9 +33,7 @@ func NewBroadcast(port int) *Broadcast {
FailureBackoff: 60 * time.Second, FailureBackoff: 60 * time.Second,
// Only log restarts in debug mode. // Only log restarts in debug mode.
Log: func(line string) { Log: func(line string) {
if debug {
l.Debugln(line) l.Debugln(line)
}
}, },
}), }),
port: port, port: port,
@ -81,17 +79,13 @@ type broadcastWriter struct {
} }
func (w *broadcastWriter) Serve() { func (w *broadcastWriter) Serve() {
if debug {
l.Debugln(w, "starting") l.Debugln(w, "starting")
defer l.Debugln(w, "stopping") defer l.Debugln(w, "stopping")
}
var err error var err error
w.conn, err = net.ListenUDP("udp4", nil) w.conn, err = net.ListenUDP("udp4", nil)
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
return return
} }
@ -100,9 +94,7 @@ func (w *broadcastWriter) Serve() {
for bs := range w.inbox { for bs := range w.inbox {
addrs, err := net.InterfaceAddrs() addrs, err := net.InterfaceAddrs()
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
continue continue
} }
@ -120,9 +112,7 @@ func (w *broadcastWriter) Serve() {
dsts = append(dsts, net.IP{0xff, 0xff, 0xff, 0xff}) dsts = append(dsts, net.IP{0xff, 0xff, 0xff, 0xff})
} }
if debug {
l.Debugln("addresses:", dsts) l.Debugln("addresses:", dsts)
}
success := 0 success := 0
for _, ip := range dsts { for _, ip := range dsts {
@ -135,34 +125,25 @@ func (w *broadcastWriter) Serve() {
if err, ok := err.(net.Error); ok && err.Timeout() { if err, ok := err.(net.Error); ok && err.Timeout() {
// Write timeouts should not happen. We treat it as a fatal // Write timeouts should not happen. We treat it as a fatal
// error on the socket. // error on the socket.
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
return return
} }
if err, ok := err.(net.Error); ok && err.Temporary() { if err, ok := err.(net.Error); ok && err.Temporary() {
// A transient error. Lets hope for better luck in the future. // A transient error. Lets hope for better luck in the future.
if debug {
l.Debugln(err) l.Debugln(err)
}
continue continue
} }
if err != nil { if err != nil {
// Some other error that we don't expect. Bail and retry. // Some other error that we don't expect. Bail and retry.
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
return return
} }
if debug {
l.Debugf("sent %d bytes to %s", len(bs), dst) l.Debugf("sent %d bytes to %s", len(bs), dst)
}
success++ success++
} }
@ -188,17 +169,13 @@ type broadcastReader struct {
} }
func (r *broadcastReader) Serve() { func (r *broadcastReader) Serve() {
if debug {
l.Debugln(r, "starting") l.Debugln(r, "starting")
defer l.Debugln(r, "stopping") defer l.Debugln(r, "stopping")
}
var err error var err error
r.conn, err = net.ListenUDP("udp4", &net.UDPAddr{Port: r.port}) r.conn, err = net.ListenUDP("udp4", &net.UDPAddr{Port: r.port})
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
r.setError(err) r.setError(err)
return return
} }
@ -208,29 +185,23 @@ func (r *broadcastReader) Serve() {
for { for {
n, addr, err := r.conn.ReadFrom(bs) n, addr, err := r.conn.ReadFrom(bs)
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
r.setError(err) r.setError(err)
return return
} }
r.setError(nil) r.setError(nil)
if debug {
l.Debugf("recv %d bytes from %s", n, addr) l.Debugf("recv %d bytes from %s", n, addr)
}
c := make([]byte, n) c := make([]byte, n)
copy(c, bs) copy(c, bs)
select { select {
case r.outbox <- recv{c, addr}: case r.outbox <- recv{c, addr}:
default: default:
if debug {
l.Debugln("dropping message") l.Debugln("dropping message")
} }
} }
}
} }


@@ -10,10 +10,13 @@ import (
     "os"
     "strings"

-    "github.com/calmh/logger"
+    "github.com/syncthing/syncthing/lib/logger"
 )

 var (
-    debug = strings.Contains(os.Getenv("STTRACE"), "beacon") || os.Getenv("STTRACE") == "all"
-    l     = logger.DefaultLogger
+    l = logger.DefaultLogger.NewFacility("beacon", "Multicast and broadcast discovery")
 )
+
+func init() {
+    l.SetDebug("beacon", strings.Contains(os.Getenv("STTRACE"), "beacon") || os.Getenv("STTRACE") == "all")
+}


@ -35,9 +35,7 @@ func NewMulticast(addr string) *Multicast {
FailureBackoff: 60 * time.Second, FailureBackoff: 60 * time.Second,
// Only log restarts in debug mode. // Only log restarts in debug mode.
Log: func(line string) { Log: func(line string) {
if debug {
l.Debugln(line) l.Debugln(line)
}
}, },
}), }),
inbox: make(chan []byte), inbox: make(chan []byte),
@ -85,25 +83,19 @@ type multicastWriter struct {
} }
func (w *multicastWriter) Serve() { func (w *multicastWriter) Serve() {
if debug {
l.Debugln(w, "starting") l.Debugln(w, "starting")
defer l.Debugln(w, "stopping") defer l.Debugln(w, "stopping")
}
gaddr, err := net.ResolveUDPAddr("udp6", w.addr) gaddr, err := net.ResolveUDPAddr("udp6", w.addr)
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
return return
} }
conn, err := net.ListenPacket("udp6", ":0") conn, err := net.ListenPacket("udp6", ":0")
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
return return
} }
@ -117,9 +109,7 @@ func (w *multicastWriter) Serve() {
for bs := range w.inbox { for bs := range w.inbox {
intfs, err := net.Interfaces() intfs, err := net.Interfaces()
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
return return
} }
@ -132,16 +122,12 @@ func (w *multicastWriter) Serve() {
pconn.SetWriteDeadline(time.Time{}) pconn.SetWriteDeadline(time.Time{})
if err != nil { if err != nil {
if debug {
l.Debugln(err, "on write to", gaddr, intf.Name) l.Debugln(err, "on write to", gaddr, intf.Name)
}
w.setError(err) w.setError(err)
continue continue
} }
if debug {
l.Debugf("sent %d bytes to %v on %s", len(bs), gaddr, intf.Name) l.Debugf("sent %d bytes to %v on %s", len(bs), gaddr, intf.Name)
}
success++ success++
} }
@ -149,9 +135,7 @@ func (w *multicastWriter) Serve() {
if success > 0 { if success > 0 {
w.setError(nil) w.setError(nil)
} else { } else {
if debug {
l.Debugln(err) l.Debugln(err)
}
w.setError(err) w.setError(err)
} }
} }
@ -173,34 +157,26 @@ type multicastReader struct {
} }
func (r *multicastReader) Serve() { func (r *multicastReader) Serve() {
if debug {
l.Debugln(r, "starting") l.Debugln(r, "starting")
defer l.Debugln(r, "stopping") defer l.Debugln(r, "stopping")
}
gaddr, err := net.ResolveUDPAddr("udp6", r.addr) gaddr, err := net.ResolveUDPAddr("udp6", r.addr)
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
r.setError(err) r.setError(err)
return return
} }
conn, err := net.ListenPacket("udp6", r.addr) conn, err := net.ListenPacket("udp6", r.addr)
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
r.setError(err) r.setError(err)
return return
} }
intfs, err := net.Interfaces() intfs, err := net.Interfaces()
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
r.setError(err) r.setError(err)
return return
} }
@ -209,20 +185,16 @@ func (r *multicastReader) Serve() {
joined := 0 joined := 0
for _, intf := range intfs { for _, intf := range intfs {
err := pconn.JoinGroup(&intf, &net.UDPAddr{IP: gaddr.IP}) err := pconn.JoinGroup(&intf, &net.UDPAddr{IP: gaddr.IP})
if debug {
if err != nil { if err != nil {
l.Debugln("IPv6 join", intf.Name, "failed:", err) l.Debugln("IPv6 join", intf.Name, "failed:", err)
} else { } else {
l.Debugln("IPv6 join", intf.Name, "success") l.Debugln("IPv6 join", intf.Name, "success")
} }
}
joined++ joined++
} }
if joined == 0 { if joined == 0 {
if debug {
l.Debugln("no multicast interfaces available") l.Debugln("no multicast interfaces available")
}
r.setError(errors.New("no multicast interfaces available")) r.setError(errors.New("no multicast interfaces available"))
return return
} }
@ -231,26 +203,20 @@ func (r *multicastReader) Serve() {
for { for {
n, _, addr, err := pconn.ReadFrom(bs) n, _, addr, err := pconn.ReadFrom(bs)
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
r.setError(err) r.setError(err)
continue continue
} }
if debug {
l.Debugf("recv %d bytes from %s", n, addr) l.Debugf("recv %d bytes from %s", n, addr)
}
c := make([]byte, n) c := make([]byte, n)
copy(c, bs) copy(c, bs)
select { select {
case r.outbox <- recv{c, addr}: case r.outbox <- recv{c, addr}:
default: default:
if debug {
l.Debugln("dropping message") l.Debugln("dropping message")
} }
} }
}
} }
func (r *multicastReader) Stop() { func (r *multicastReader) Stop() {


@@ -10,10 +10,13 @@ import (
     "os"
     "strings"

-    "github.com/calmh/logger"
+    "github.com/syncthing/syncthing/lib/logger"
 )

 var (
-    debug = strings.Contains(os.Getenv("STTRACE"), "config") || os.Getenv("STTRACE") == "all"
-    l     = logger.DefaultLogger
+    l = logger.DefaultLogger.NewFacility("config", "Configuration loading and saving")
 )
+
+func init() {
+    l.SetDebug("config", strings.Contains(os.Getenv("STTRACE"), "config") || os.Getenv("STTRACE") == "all")
+}


@@ -142,13 +142,9 @@ func (w *Wrapper) replaceLocked(to Configuration) CommitResponse {
     from := w.cfg

     for _, sub := range w.subs {
-        if debug {
-            l.Debugln(sub, "verifying configuration")
-        }
+        l.Debugln(sub, "verifying configuration")
         if err := sub.VerifyConfiguration(from, to); err != nil {
-            if debug {
-                l.Debugln(sub, "rejected config:", err)
-            }
+            l.Debugln(sub, "rejected config:", err)
             return CommitResponse{
                 ValidationError: err,
             }
@@ -157,14 +153,10 @@ func (w *Wrapper) replaceLocked(to Configuration) CommitResponse {

     allOk := true
     for _, sub := range w.subs {
-        if debug {
-            l.Debugln(sub, "committing configuration")
-        }
+        l.Debugln(sub, "committing configuration")
         ok := sub.CommitConfiguration(from, to)
         if !ok {
-            if debug {
-                l.Debugln(sub, "requires restart")
-            }
+            l.Debugln(sub, "requires restart")
             allOk = false
         }
     }


@ -121,9 +121,7 @@ func NewConnectionSvc(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tl
continue continue
} }
if debug { l.Debugln("listening on", uri)
l.Debugln("listening on", uri.String())
}
svc.Add(serviceFunc(func() { svc.Add(serviceFunc(func() {
listener(uri, svc.tlsCfg, svc.conns) listener(uri, svc.tlsCfg, svc.conns)
@ -178,9 +176,7 @@ next:
ct, ok := s.connType[remoteID] ct, ok := s.connType[remoteID]
s.mut.RUnlock() s.mut.RUnlock()
if ok && !ct.IsDirect() && c.Type.IsDirect() { if ok && !ct.IsDirect() && c.Type.IsDirect() {
if debug {
l.Debugln("Switching connections", remoteID) l.Debugln("Switching connections", remoteID)
}
s.model.Close(remoteID, fmt.Errorf("switching connections")) s.model.Close(remoteID, fmt.Errorf("switching connections"))
} else if s.model.ConnectedTo(remoteID) { } else if s.model.ConnectedTo(remoteID) {
// We should not already be connected to the other party. TODO: This // We should not already be connected to the other party. TODO: This
@ -236,9 +232,7 @@ next:
protoConn := protocol.NewConnection(remoteID, rd, wr, s.model, name, deviceCfg.Compression) protoConn := protocol.NewConnection(remoteID, rd, wr, s.model, name, deviceCfg.Compression)
l.Infof("Established secure connection to %s at %s", remoteID, name) l.Infof("Established secure connection to %s at %s", remoteID, name)
if debug {
l.Debugf("cipher suite: %04X in lan: %t", c.Conn.ConnectionState().CipherSuite, !limit) l.Debugf("cipher suite: %04X in lan: %t", c.Conn.ConnectionState().CipherSuite, !limit)
}
s.model.AddConnection(model.Connection{ s.model.AddConnection(model.Connection{
c.Conn, c.Conn,
@ -311,18 +305,14 @@ func (s *connectionSvc) connect() {
dialer, ok := dialers[uri.Scheme] dialer, ok := dialers[uri.Scheme]
if !ok { if !ok {
l.Infoln("Unknown address schema", uri.String()) l.Infoln("Unknown address schema", uri)
continue continue
} }
if debug { l.Debugln("dial", deviceCfg.DeviceID, uri)
l.Debugln("dial", deviceCfg.DeviceID, uri.String())
}
conn, err := dialer(uri, s.tlsCfg) conn, err := dialer(uri, s.tlsCfg)
if err != nil { if err != nil {
if debug { l.Debugln("dial failed", deviceCfg.DeviceID, uri, err)
l.Debugln("dial failed", deviceCfg.DeviceID, uri.String(), err)
}
continue continue
} }
@ -347,11 +337,9 @@ func (s *connectionSvc) connect() {
reconIntv := time.Duration(s.cfg.Options().RelayReconnectIntervalM) * time.Minute reconIntv := time.Duration(s.cfg.Options().RelayReconnectIntervalM) * time.Minute
if last, ok := s.lastRelayCheck[deviceID]; ok && time.Since(last) < reconIntv { if last, ok := s.lastRelayCheck[deviceID]; ok && time.Since(last) < reconIntv {
if debug {
l.Debugln("Skipping connecting via relay to", deviceID, "last checked at", last) l.Debugln("Skipping connecting via relay to", deviceID, "last checked at", last)
}
continue nextDevice continue nextDevice
} else if debug { } else {
l.Debugln("Trying relay connections to", deviceID, relays) l.Debugln("Trying relay connections to", deviceID, relays)
} }
@ -366,21 +354,17 @@ func (s *connectionSvc) connect() {
inv, err := client.GetInvitationFromRelay(uri, deviceID, s.tlsCfg.Certificates) inv, err := client.GetInvitationFromRelay(uri, deviceID, s.tlsCfg.Certificates)
if err != nil { if err != nil {
if debug {
l.Debugf("Failed to get invitation for %s from %s: %v", deviceID, uri, err) l.Debugf("Failed to get invitation for %s from %s: %v", deviceID, uri, err)
}
continue continue
} else if debug { } else {
l.Debugln("Succesfully retrieved relay invitation", inv, "from", uri) l.Debugln("Succesfully retrieved relay invitation", inv, "from", uri)
} }
conn, err := client.JoinSession(inv) conn, err := client.JoinSession(inv)
if err != nil { if err != nil {
if debug {
l.Debugf("Failed to join relay session %s: %v", inv, err) l.Debugf("Failed to join relay session %s: %v", inv, err)
}
continue continue
} else if debug { } else {
l.Debugln("Sucessfully joined relay session", inv) l.Debugln("Sucessfully joined relay session", inv)
} }


@@ -33,17 +33,13 @@ func tcpDialer(uri *url.URL, tlsCfg *tls.Config) (*tls.Conn, error) {
     raddr, err := net.ResolveTCPAddr("tcp", uri.Host)
     if err != nil {
-        if debug {
-            l.Debugln(err)
-        }
+        l.Debugln(err)
         return nil, err
     }

     conn, err := net.DialTCP("tcp", nil, raddr)
     if err != nil {
-        if debug {
-            l.Debugln(err)
-        }
+        l.Debugln(err)
         return nil, err
     }

@@ -81,9 +77,7 @@ func tcpListener(uri *url.URL, tlsCfg *tls.Config, conns chan<- model.Intermedia
             continue
         }

-        if debug {
-            l.Debugln("connect from", conn.RemoteAddr())
-        }
+        l.Debugln("connect from", conn.RemoteAddr())

         err = osutil.SetTCPOptions(conn.(*net.TCPConn))
         if err != nil {


@@ -10,10 +10,13 @@ import (
     "os"
     "strings"

-    "github.com/calmh/logger"
+    "github.com/syncthing/syncthing/lib/logger"
 )

 var (
-    debug = strings.Contains(os.Getenv("STTRACE"), "connections") || os.Getenv("STTRACE") == "all"
-    l     = logger.DefaultLogger
+    l = logger.DefaultLogger.NewFacility("connections", "Connection handling")
 )
+
+func init() {
+    l.SetDebug("connections", strings.Contains(os.Getenv("STTRACE"), "connections") || os.Getenv("STTRACE") == "all")
+}


@@ -10,11 +10,13 @@ import (
     "os"
     "strings"

-    "github.com/calmh/logger"
+    "github.com/syncthing/syncthing/lib/logger"
 )

 var (
-    debug   = strings.Contains(os.Getenv("STTRACE"), "files") || os.Getenv("STTRACE") == "all"
-    debugDB = strings.Contains(os.Getenv("STTRACE"), "db") || os.Getenv("STTRACE") == "all"
-    l       = logger.DefaultLogger
+    l = logger.DefaultLogger.NewFacility("db", "The database layer")
 )
+
+func init() {
+    l.SetDebug("db", strings.Contains(os.Getenv("STTRACE"), "db") || os.Getenv("STTRACE") == "all")
+}


@ -178,20 +178,14 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files limit := deviceKey(folder, device, []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
batch := new(leveldb.Batch) batch := new(leveldb.Batch)
if debugDB {
l.Debugf("new batch %p", batch) l.Debugf("new batch %p", batch)
}
snap, err := db.GetSnapshot() snap, err := db.GetSnapshot()
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -220,15 +214,11 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
cmp := bytes.Compare(newName, oldName) cmp := bytes.Compare(newName, oldName)
if debugDB {
l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName) l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
}
switch { switch {
case moreFs && (!moreDb || cmp == -1): case moreFs && (!moreDb || cmp == -1):
if debugDB {
l.Debugln("generic replace; missing - insert") l.Debugln("generic replace; missing - insert")
}
// Database is missing this file. Insert it. // Database is missing this file. Insert it.
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer { if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv maxLocalVer = lv
@ -244,15 +234,11 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
// File exists on both sides - compare versions. We might get an // File exists on both sides - compare versions. We might get an
// update with the same version and different flags if a device has // update with the same version and different flags if a device has
// marked a file as invalid, so handle that too. // marked a file as invalid, so handle that too.
if debugDB {
l.Debugln("generic replace; exists - compare") l.Debugln("generic replace; exists - compare")
}
var ef FileInfoTruncated var ef FileInfoTruncated
ef.UnmarshalXDR(dbi.Value()) ef.UnmarshalXDR(dbi.Value())
if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Flags != ef.Flags { if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Flags != ef.Flags {
if debugDB {
l.Debugln("generic replace; differs - insert") l.Debugln("generic replace; differs - insert")
}
if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer { if lv := ldbInsert(batch, folder, device, fs[fsi]); lv > maxLocalVer {
maxLocalVer = lv maxLocalVer = lv
} }
@ -261,7 +247,7 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
} else { } else {
ldbUpdateGlobal(snap, batch, folder, device, fs[fsi]) ldbUpdateGlobal(snap, batch, folder, device, fs[fsi])
} }
} else if debugDB { } else {
l.Debugln("generic replace; equal - ignore") l.Debugln("generic replace; equal - ignore")
} }
@ -269,9 +255,7 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
moreDb = dbi.Next() moreDb = dbi.Next()
case moreDb && (!moreFs || cmp == 1): case moreDb && (!moreFs || cmp == 1):
if debugDB {
l.Debugln("generic replace; exists - remove") l.Debugln("generic replace; exists - remove")
}
if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer { if lv := deleteFn(snap, batch, folder, device, oldName, dbi); lv > maxLocalVer {
maxLocalVer = lv maxLocalVer = lv
} }
@ -281,9 +265,7 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
// Write out and reuse the batch every few records, to avoid the batch // Write out and reuse the batch every few records, to avoid the batch
// growing too large and thus allocating unnecessarily much memory. // growing too large and thus allocating unnecessarily much memory.
if batch.Len() > batchFlushSize { if batch.Len() > batchFlushSize {
if debugDB {
l.Debugf("db.Write %p", batch) l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil) err = db.Write(batch, nil)
if err != nil { if err != nil {
@ -294,9 +276,7 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
} }
} }
if debugDB {
l.Debugf("db.Write %p", batch) l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil) err = db.Write(batch, nil)
if err != nil { if err != nil {
panic(err) panic(err)
@ -309,13 +289,9 @@ func ldbReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) i
// TODO: Return the remaining maxLocalVer? // TODO: Return the remaining maxLocalVer?
return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) int64 { return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) int64 {
// Database has a file that we are missing. Remove it. // Database has a file that we are missing. Remove it.
if debugDB {
l.Debugf("delete; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name) l.Debugf("delete; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
}
ldbRemoveFromGlobal(db, batch, folder, device, name) ldbRemoveFromGlobal(db, batch, folder, device, name)
if debugDB {
l.Debugf("batch.Delete %p %x", batch, dbi.Key()) l.Debugf("batch.Delete %p %x", batch, dbi.Key())
}
batch.Delete(dbi.Key()) batch.Delete(dbi.Key())
return 0 return 0
}) })
@ -325,20 +301,14 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) in
runtime.GC() runtime.GC()
batch := new(leveldb.Batch) batch := new(leveldb.Batch)
if debugDB {
l.Debugf("new batch %p", batch) l.Debugf("new batch %p", batch)
}
snap, err := db.GetSnapshot() snap, err := db.GetSnapshot()
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -347,9 +317,7 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) in
for _, f := range fs { for _, f := range fs {
name := []byte(f.Name) name := []byte(f.Name)
fk = deviceKeyInto(fk[:cap(fk)], folder, device, name) fk = deviceKeyInto(fk[:cap(fk)], folder, device, name)
if debugDB {
l.Debugf("snap.Get %p %x", snap, fk) l.Debugf("snap.Get %p %x", snap, fk)
}
bs, err := snap.Get(fk, nil) bs, err := snap.Get(fk, nil)
if err == leveldb.ErrNotFound { if err == leveldb.ErrNotFound {
if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer { if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
@ -384,9 +352,7 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) in
// Write out and reuse the batch every few records, to avoid the batch // Write out and reuse the batch every few records, to avoid the batch
// growing too large and thus allocating unnecessarily much memory. // growing too large and thus allocating unnecessarily much memory.
if batch.Len() > batchFlushSize { if batch.Len() > batchFlushSize {
if debugDB {
l.Debugf("db.Write %p", batch) l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil) err = db.Write(batch, nil)
if err != nil { if err != nil {
@ -397,9 +363,7 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) in
} }
} }
if debugDB {
l.Debugf("db.Write %p", batch) l.Debugf("db.Write %p", batch)
}
err = db.Write(batch, nil) err = db.Write(batch, nil)
if err != nil { if err != nil {
panic(err) panic(err)
@ -409,9 +373,7 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) in
} }
func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) int64 { func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) int64 {
if debugDB {
l.Debugf("insert; folder=%q device=%v %v", folder, protocol.DeviceIDFromBytes(device), file) l.Debugf("insert; folder=%q device=%v %v", folder, protocol.DeviceIDFromBytes(device), file)
}
if file.LocalVersion == 0 { if file.LocalVersion == 0 {
file.LocalVersion = clock(0) file.LocalVersion = clock(0)
@ -419,9 +381,7 @@ func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) in
name := []byte(file.Name) name := []byte(file.Name)
nk := deviceKey(folder, device, name) nk := deviceKey(folder, device, name)
if debugDB {
l.Debugf("batch.Put %p %x", batch, nk) l.Debugf("batch.Put %p %x", batch, nk)
}
batch.Put(nk, file.MustMarshalXDR()) batch.Put(nk, file.MustMarshalXDR())
return file.LocalVersion return file.LocalVersion
@ -431,9 +391,7 @@ func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) in
// file. If the device is already present in the list, the version is updated. // file. If the device is already present in the list, the version is updated.
// If the file does not have an entry in the global list, it is created. // If the file does not have an entry in the global list, it is created.
func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device []byte, file protocol.FileInfo) bool { func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device []byte, file protocol.FileInfo) bool {
if debugDB {
l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file.Name, file.Version) l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file.Name, file.Version)
}
name := []byte(file.Name) name := []byte(file.Name)
gk := globalKey(folder, name) gk := globalKey(folder, name)
svl, err := db.Get(gk, nil) svl, err := db.Get(gk, nil)
@ -499,10 +457,8 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device []byte, file pr
fl.versions = append(fl.versions, nv) fl.versions = append(fl.versions, nv)
done: done:
if debugDB {
l.Debugf("batch.Put %p %x", batch, gk) l.Debugf("batch.Put %p %x", batch, gk)
l.Debugf("new global after update: %v", fl) l.Debugf("new global after update: %v", fl)
}
batch.Put(gk, fl.MustMarshalXDR()) batch.Put(gk, fl.MustMarshalXDR())
return true return true
@ -519,9 +475,7 @@ func insertVersion(vl []fileVersion, i int, v fileVersion) []fileVersion {
// given file. If the version list is empty after this, the file entry is // given file. If the version list is empty after this, the file entry is
// removed entirely. // removed entirely.
func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byte) { func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byte) {
if debugDB {
l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file) l.Debugf("remove from global; folder=%q device=%v file=%q", folder, protocol.DeviceIDFromBytes(device), file)
}
gk := globalKey(folder, file) gk := globalKey(folder, file)
svl, err := db.Get(gk, nil) svl, err := db.Get(gk, nil)
@ -545,15 +499,11 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, folder, device, file []byt
} }
if len(fl.versions) == 0 { if len(fl.versions) == 0 {
if debugDB {
l.Debugf("batch.Delete %p %x", batch, gk) l.Debugf("batch.Delete %p %x", batch, gk)
}
batch.Delete(gk) batch.Delete(gk)
} else { } else {
if debugDB {
l.Debugf("batch.Put %p %x", batch, gk) l.Debugf("batch.Put %p %x", batch, gk)
l.Debugf("new global after remove: %v", fl) l.Debugf("new global after remove: %v", fl)
}
batch.Put(gk, fl.MustMarshalXDR()) batch.Put(gk, fl.MustMarshalXDR())
} }
} }
@ -565,13 +515,9 @@ func ldbWithHave(db *leveldb.DB, folder, device []byte, truncate bool, fn Iterat
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -598,13 +544,9 @@ func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []b
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -659,19 +601,13 @@ func ldbGetGlobal(db *leveldb.DB, folder, file []byte, truncate bool) (FileIntf,
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
if debugDB {
l.Debugf("snap.Get %p %x", snap, k) l.Debugf("snap.Get %p %x", snap, k)
}
bs, err := snap.Get(k, nil) bs, err := snap.Get(k, nil)
if err == leveldb.ErrNotFound { if err == leveldb.ErrNotFound {
return nil, false return nil, false
@ -691,9 +627,7 @@ func ldbGetGlobal(db *leveldb.DB, folder, file []byte, truncate bool) (FileIntf,
} }
k = deviceKey(folder, vl.versions[0].device, file) k = deviceKey(folder, vl.versions[0].device, file)
if debugDB {
l.Debugf("snap.Get %p %x", snap, k) l.Debugf("snap.Get %p %x", snap, k)
}
bs, err = snap.Get(k, nil) bs, err = snap.Get(k, nil)
if err != nil { if err != nil {
panic(err) panic(err)
@ -713,13 +647,9 @@ func ldbWithGlobal(db *leveldb.DB, folder, prefix []byte, truncate bool, fn Iter
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -739,9 +669,7 @@ func ldbWithGlobal(db *leveldb.DB, folder, prefix []byte, truncate bool, fn Iter
} }
name := globalKeyName(dbi.Key()) name := globalKeyName(dbi.Key())
fk = deviceKeyInto(fk[:cap(fk)], folder, vl.versions[0].device, name) fk = deviceKeyInto(fk[:cap(fk)], folder, vl.versions[0].device, name)
if debugDB {
l.Debugf("snap.Get %p %x", snap, fk) l.Debugf("snap.Get %p %x", snap, fk)
}
bs, err := snap.Get(fk, nil) bs, err := snap.Get(fk, nil)
if err != nil { if err != nil {
l.Debugf("folder: %q (%x)", folder, folder) l.Debugf("folder: %q (%x)", folder, folder)
@ -802,13 +730,9 @@ func ldbWithNeed(db *leveldb.DB, folder, device []byte, truncate bool, fn Iterat
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -854,9 +778,7 @@ nextFile:
continue nextFile continue nextFile
} }
fk = deviceKeyInto(fk[:cap(fk)], folder, vl.versions[i].device, name) fk = deviceKeyInto(fk[:cap(fk)], folder, vl.versions[i].device, name)
if debugDB {
l.Debugf("snap.Get %p %x", snap, fk) l.Debugf("snap.Get %p %x", snap, fk)
}
bs, err := snap.Get(fk, nil) bs, err := snap.Get(fk, nil)
if err != nil { if err != nil {
var id protocol.DeviceID var id protocol.DeviceID
@ -886,9 +808,7 @@ nextFile:
continue nextFile continue nextFile
} }
if debugDB {
l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.versions[0].version) l.Debugf("need folder=%q device=%v name=%q need=%v have=%v haveV=%d globalV=%d", folder, protocol.DeviceIDFromBytes(device), name, need, have, haveVersion, vl.versions[0].version)
}
if cont := fn(gf); !cont { if cont := fn(gf); !cont {
return return
@ -908,13 +828,9 @@ func ldbListFolders(db *leveldb.DB) []string {
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -945,13 +861,9 @@ func ldbDropFolder(db *leveldb.DB, folder []byte) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -995,13 +907,9 @@ func ldbCheckGlobals(db *leveldb.DB, folder []byte) {
if err != nil { if err != nil {
panic(err) panic(err)
} }
if debugDB {
l.Debugf("created snapshot %p", snap) l.Debugf("created snapshot %p", snap)
}
defer func() { defer func() {
if debugDB {
l.Debugf("close snapshot %p", snap) l.Debugf("close snapshot %p", snap)
}
snap.Release() snap.Release()
}() }()
@ -1011,9 +919,7 @@ func ldbCheckGlobals(db *leveldb.DB, folder []byte) {
defer dbi.Release() defer dbi.Release()
batch := new(leveldb.Batch) batch := new(leveldb.Batch)
if debugDB {
l.Debugf("new batch %p", batch) l.Debugf("new batch %p", batch)
}
var fk []byte var fk []byte
for dbi.Next() { for dbi.Next() {
@ -1033,9 +939,7 @@ func ldbCheckGlobals(db *leveldb.DB, folder []byte) {
var newVL versionList var newVL versionList
for _, version := range vl.versions { for _, version := range vl.versions {
fk = deviceKeyInto(fk[:cap(fk)], folder, version.device, name) fk = deviceKeyInto(fk[:cap(fk)], folder, version.device, name)
if debugDB {
l.Debugf("snap.Get %p %x", snap, fk) l.Debugf("snap.Get %p %x", snap, fk)
}
_, err := snap.Get(fk, nil) _, err := snap.Get(fk, nil)
if err == leveldb.ErrNotFound { if err == leveldb.ErrNotFound {
continue continue
@ -1051,8 +955,6 @@ func ldbCheckGlobals(db *leveldb.DB, folder []byte) {
batch.Put(dbi.Key(), newVL.MustMarshalXDR()) batch.Put(dbi.Key(), newVL.MustMarshalXDR())
} }
} }
if debugDB { l.Debugf("db check completed for %q", folder)
l.Infoln("db check completed for %q", folder)
}
db.Write(batch, nil) db.Write(batch, nil)
} }
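
The change in this file is almost entirely mechanical: the if debugDB { ... } wrappers go away and the Debugf/Debugln calls stand on their own, since the facility logger added in lib/logger further down makes them cheap no-ops while debugging for the facility is off. A minimal sketch of the resulting call-site shape, with placeholder values; the facility name and description are assumed, as the actual registration for this package lives in its debug.go, which is not part of this hunk:

package main

import "github.com/syncthing/syncthing/lib/logger"

// Assumed registration; the real one is in this package's debug.go, not shown here.
var l = logger.DefaultLogger.NewFacility("db", "The database layer")

func main() {
	batch := new(struct{}) // stands in for the *leveldb.Batch at the real call sites
	gk := []byte{0x01, 0x02}

	// Old shape, removed throughout this file:
	//
	//     if debugDB {
	//         l.Debugf("batch.Put %p %x", batch, gk)
	//     }
	//
	// New shape: unconditional call, a no-op until the facility is enabled.
	l.Debugf("batch.Put %p %x", batch, gk)
}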

View File

@ -62,18 +62,14 @@ func NewFileSet(folder string, db *leveldb.DB) *FileSet {
} }
return true return true
}) })
if debug {
l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion) l.Debugf("loaded localVersion for %q: %#v", folder, s.localVersion)
}
clock(s.localVersion[protocol.LocalDeviceID]) clock(s.localVersion[protocol.LocalDeviceID])
return &s return &s
} }
func (s *FileSet) Replace(device protocol.DeviceID, fs []protocol.FileInfo) { func (s *FileSet) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs)) l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs) normalizeFilenames(fs)
s.mutex.Lock() s.mutex.Lock()
defer s.mutex.Unlock() defer s.mutex.Unlock()
@ -89,9 +85,7 @@ func (s *FileSet) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
} }
func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) { func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
if debug {
l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs)) l.Debugf("%s Update(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs) normalizeFilenames(fs)
s.mutex.Lock() s.mutex.Lock()
defer s.mutex.Unlock() defer s.mutex.Unlock()
@ -114,51 +108,37 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
} }
func (s *FileSet) WithNeed(device protocol.DeviceID, fn Iterator) { func (s *FileSet) WithNeed(device protocol.DeviceID, fn Iterator) {
if debug {
l.Debugf("%s WithNeed(%v)", s.folder, device) l.Debugf("%s WithNeed(%v)", s.folder, device)
}
ldbWithNeed(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn)) ldbWithNeed(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
} }
func (s *FileSet) WithNeedTruncated(device protocol.DeviceID, fn Iterator) { func (s *FileSet) WithNeedTruncated(device protocol.DeviceID, fn Iterator) {
if debug {
l.Debugf("%s WithNeedTruncated(%v)", s.folder, device) l.Debugf("%s WithNeedTruncated(%v)", s.folder, device)
}
ldbWithNeed(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn)) ldbWithNeed(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
} }
func (s *FileSet) WithHave(device protocol.DeviceID, fn Iterator) { func (s *FileSet) WithHave(device protocol.DeviceID, fn Iterator) {
if debug {
l.Debugf("%s WithHave(%v)", s.folder, device) l.Debugf("%s WithHave(%v)", s.folder, device)
}
ldbWithHave(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn)) ldbWithHave(s.db, []byte(s.folder), device[:], false, nativeFileIterator(fn))
} }
func (s *FileSet) WithHaveTruncated(device protocol.DeviceID, fn Iterator) { func (s *FileSet) WithHaveTruncated(device protocol.DeviceID, fn Iterator) {
if debug {
l.Debugf("%s WithHaveTruncated(%v)", s.folder, device) l.Debugf("%s WithHaveTruncated(%v)", s.folder, device)
}
ldbWithHave(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn)) ldbWithHave(s.db, []byte(s.folder), device[:], true, nativeFileIterator(fn))
} }
func (s *FileSet) WithGlobal(fn Iterator) { func (s *FileSet) WithGlobal(fn Iterator) {
if debug {
l.Debugf("%s WithGlobal()", s.folder) l.Debugf("%s WithGlobal()", s.folder)
}
ldbWithGlobal(s.db, []byte(s.folder), nil, false, nativeFileIterator(fn)) ldbWithGlobal(s.db, []byte(s.folder), nil, false, nativeFileIterator(fn))
} }
func (s *FileSet) WithGlobalTruncated(fn Iterator) { func (s *FileSet) WithGlobalTruncated(fn Iterator) {
if debug {
l.Debugf("%s WithGlobalTruncated()", s.folder) l.Debugf("%s WithGlobalTruncated()", s.folder)
}
ldbWithGlobal(s.db, []byte(s.folder), nil, true, nativeFileIterator(fn)) ldbWithGlobal(s.db, []byte(s.folder), nil, true, nativeFileIterator(fn))
} }
func (s *FileSet) WithPrefixedGlobalTruncated(prefix string, fn Iterator) { func (s *FileSet) WithPrefixedGlobalTruncated(prefix string, fn Iterator) {
if debug {
l.Debugf("%s WithPrefixedGlobalTruncated()", s.folder, prefix) l.Debugf("%s WithPrefixedGlobalTruncated()", s.folder, prefix)
}
ldbWithGlobal(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn)) ldbWithGlobal(s.db, []byte(s.folder), []byte(osutil.NormalizedFilename(prefix)), true, nativeFileIterator(fn))
} }

View File

@ -34,9 +34,7 @@ func NewVirtualMtimeRepo(ldb *leveldb.DB, folder string) *VirtualMtimeRepo {
} }
func (r *VirtualMtimeRepo) UpdateMtime(path string, diskMtime, actualMtime time.Time) { func (r *VirtualMtimeRepo) UpdateMtime(path string, diskMtime, actualMtime time.Time) {
if debug {
l.Debugf("virtual mtime: storing values for path:%s disk:%v actual:%v", path, diskMtime, actualMtime) l.Debugf("virtual mtime: storing values for path:%s disk:%v actual:%v", path, diskMtime, actualMtime)
}
diskBytes, _ := diskMtime.MarshalBinary() diskBytes, _ := diskMtime.MarshalBinary()
actualBytes, _ := actualMtime.MarshalBinary() actualBytes, _ := actualMtime.MarshalBinary()
@ -63,15 +61,11 @@ func (r *VirtualMtimeRepo) GetMtime(path string, diskMtime time.Time) time.Time
panic(fmt.Sprintf("Can't unmarshal stored mtime at path %s: %v", path, err)) panic(fmt.Sprintf("Can't unmarshal stored mtime at path %s: %v", path, err))
} }
if debug {
l.Debugf("virtual mtime: return %v instead of %v for path: %s", mtime, diskMtime, path) l.Debugf("virtual mtime: return %v instead of %v for path: %s", mtime, diskMtime, path)
}
return mtime return mtime
} }
if debug {
l.Debugf("virtual mtime: record exists, but mismatch inDisk: %v dbDisk: %v for path: %s", diskMtime, mtime, path) l.Debugf("virtual mtime: record exists, but mismatch inDisk: %v dbDisk: %v for path: %s", diskMtime, mtime, path)
}
return diskMtime return diskMtime
} }

View File

@ -75,10 +75,8 @@ func (m *CachingMux) Lookup(deviceID protocol.DeviceID) (direct []string, relays
if cacheEntry.found && time.Since(cacheEntry.when) < finder.cacheTime { if cacheEntry.found && time.Since(cacheEntry.when) < finder.cacheTime {
// It's a positive, valid entry. Use it. // It's a positive, valid entry. Use it.
if debug { l.Debugln("cached discovery entry for", deviceID, "at", finder)
l.Debugln("cached discovery entry for", deviceID, "at", finder.String())
l.Debugln(" cache:", cacheEntry) l.Debugln(" cache:", cacheEntry)
}
for _, addr := range cacheEntry.Direct { for _, addr := range cacheEntry.Direct {
pdirect = append(pdirect, prioritizedAddress{finder.priority, addr}) pdirect = append(pdirect, prioritizedAddress{finder.priority, addr})
} }
@ -89,9 +87,7 @@ func (m *CachingMux) Lookup(deviceID protocol.DeviceID) (direct []string, relays
if !cacheEntry.found && time.Since(cacheEntry.when) < finder.negCacheTime { if !cacheEntry.found && time.Since(cacheEntry.when) < finder.negCacheTime {
// It's a negative, valid entry. We should not make another // It's a negative, valid entry. We should not make another
// attempt right now. // attempt right now.
if debug { l.Debugln("negative cache entry for", deviceID, "at", finder)
l.Debugln("negative cache entry for", deviceID, "at", finder.String())
}
continue continue
} }
@ -100,11 +96,9 @@ func (m *CachingMux) Lookup(deviceID protocol.DeviceID) (direct []string, relays
// Perform the actual lookup and cache the result. // Perform the actual lookup and cache the result.
if td, tr, err := finder.Lookup(deviceID); err == nil { if td, tr, err := finder.Lookup(deviceID); err == nil {
if debug { l.Debugln("lookup for", deviceID, "at", finder)
l.Debugln("lookup for", deviceID, "at", finder.String())
l.Debugln(" direct:", td) l.Debugln(" direct:", td)
l.Debugln(" relays:", tr) l.Debugln(" relays:", tr)
}
for _, addr := range td { for _, addr := range td {
pdirect = append(pdirect, prioritizedAddress{finder.priority, addr}) pdirect = append(pdirect, prioritizedAddress{finder.priority, addr})
} }
@ -121,11 +115,9 @@ func (m *CachingMux) Lookup(deviceID protocol.DeviceID) (direct []string, relays
direct = uniqueSortedAddrs(pdirect) direct = uniqueSortedAddrs(pdirect)
relays = uniqueSortedRelays(relays) relays = uniqueSortedRelays(relays)
if debug {
l.Debugln("lookup results for", deviceID) l.Debugln("lookup results for", deviceID)
l.Debugln(" direct: ", direct) l.Debugln(" direct: ", direct)
l.Debugln(" relays: ", relays) l.Debugln(" relays: ", relays)
}
return direct, relays, nil return direct, relays, nil
} }

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "discover") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("discover", "Remote device discovery")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("discover", strings.Contains(os.Getenv("STTRACE"), "discover") || os.Getenv("STTRACE") == "all")
}

View File

@ -124,16 +124,12 @@ func (c *globalClient) Lookup(device protocol.DeviceID) (direct []string, relays
resp, err := c.queryClient.Get(qURL.String()) resp, err := c.queryClient.Get(qURL.String())
if err != nil { if err != nil {
if debug { l.Debugln("globalClient.Lookup", qURL, err)
l.Debugln("globalClient.Lookup", qURL.String(), err)
}
return nil, nil, err return nil, nil, err
} }
if resp.StatusCode != 200 { if resp.StatusCode != 200 {
resp.Body.Close() resp.Body.Close()
if debug { l.Debugln("globalClient.Lookup", qURL, resp.Status)
l.Debugln("globalClient.Lookup", qURL.String(), resp.Status)
}
return nil, nil, errors.New(resp.Status) return nil, nil, errors.New(resp.Status)
} }
@ -198,9 +194,7 @@ func (c *globalClient) sendAnnouncement(timer *time.Timer) {
if len(ann.Direct)+len(ann.Relays) == 0 { if len(ann.Direct)+len(ann.Relays) == 0 {
c.setError(errors.New("nothing to announce")) c.setError(errors.New("nothing to announce"))
if debug {
l.Debugln("Nothing to announce") l.Debugln("Nothing to announce")
}
timer.Reset(announceErrorRetryInterval) timer.Reset(announceErrorRetryInterval)
return return
} }
@ -208,37 +202,27 @@ func (c *globalClient) sendAnnouncement(timer *time.Timer) {
// The marshal doesn't fail, I promise. // The marshal doesn't fail, I promise.
postData, _ := json.Marshal(ann) postData, _ := json.Marshal(ann)
if debug {
l.Debugf("Announcement: %s", postData) l.Debugf("Announcement: %s", postData)
}
resp, err := c.announceClient.Post(c.server, "application/json", bytes.NewReader(postData)) resp, err := c.announceClient.Post(c.server, "application/json", bytes.NewReader(postData))
if err != nil { if err != nil {
if debug {
l.Debugln("announce POST:", err) l.Debugln("announce POST:", err)
}
c.setError(err) c.setError(err)
timer.Reset(announceErrorRetryInterval) timer.Reset(announceErrorRetryInterval)
return return
} }
if debug {
l.Debugln("announce POST:", resp.Status) l.Debugln("announce POST:", resp.Status)
}
resp.Body.Close() resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode > 299 { if resp.StatusCode < 200 || resp.StatusCode > 299 {
if debug {
l.Debugln("announce POST:", resp.Status) l.Debugln("announce POST:", resp.Status)
}
c.setError(errors.New(resp.Status)) c.setError(errors.New(resp.Status))
if h := resp.Header.Get("Retry-After"); h != "" { if h := resp.Header.Get("Retry-After"); h != "" {
// The server has a recommendation on when we should // The server has a recommendation on when we should
// retry. Follow it. // retry. Follow it.
if secs, err := strconv.Atoi(h); err == nil && secs > 0 { if secs, err := strconv.Atoi(h); err == nil && secs > 0 {
if debug {
l.Debugln("announce Retry-After:", secs, err) l.Debugln("announce Retry-After:", secs, err)
}
timer.Reset(time.Duration(secs) * time.Second) timer.Reset(time.Duration(secs) * time.Second)
return return
} }
@ -254,9 +238,7 @@ func (c *globalClient) sendAnnouncement(timer *time.Timer) {
// The server has a recommendation on when we should // The server has a recommendation on when we should
// reannounce. Follow it. // reannounce. Follow it.
if secs, err := strconv.Atoi(h); err == nil && secs > 0 { if secs, err := strconv.Atoi(h); err == nil && secs > 0 {
if debug {
l.Debugln("announce Reannounce-After:", secs, err) l.Debugln("announce Reannounce-After:", secs, err)
}
timer.Reset(time.Duration(secs) * time.Second) timer.Reset(time.Duration(secs) * time.Second)
return return
} }

View File

@ -166,15 +166,11 @@ func (c *localClient) recvAnnouncements(b beacon.Interface) {
var pkt Announce var pkt Announce
err := pkt.UnmarshalXDR(buf) err := pkt.UnmarshalXDR(buf)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
if debug {
l.Debugf("discover: Failed to unmarshal local announcement from %s:\n%s", addr, hex.Dump(buf)) l.Debugf("discover: Failed to unmarshal local announcement from %s:\n%s", addr, hex.Dump(buf))
}
continue continue
} }
if debug {
l.Debugf("discover: Received local announcement from %s for %s", addr, protocol.DeviceIDFromBytes(pkt.This.ID)) l.Debugf("discover: Received local announcement from %s for %s", addr, protocol.DeviceIDFromBytes(pkt.This.ID))
}
var newDevice bool var newDevice bool
if bytes.Compare(pkt.This.ID, c.myID[:]) != 0 { if bytes.Compare(pkt.This.ID, c.myID[:]) != 0 {

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "events") || os.Getenv("STTRACE") == "all" dl = logger.DefaultLogger.NewFacility("events", "Event generation and logging")
dl = logger.DefaultLogger
) )
func init() {
dl.SetDebug("events", strings.Contains(os.Getenv("STTRACE"), "events") || os.Getenv("STTRACE") == "all")
}

View File

@ -138,9 +138,7 @@ func NewLogger() *Logger {
func (l *Logger) Log(t EventType, data interface{}) { func (l *Logger) Log(t EventType, data interface{}) {
l.mutex.Lock() l.mutex.Lock()
if debug { dl.Debugln("log", l.nextID, t, data)
dl.Debugln("log", l.nextID, t.String(), data)
}
l.nextID++ l.nextID++
e := Event{ e := Event{
ID: l.nextID, ID: l.nextID,
@ -162,9 +160,7 @@ func (l *Logger) Log(t EventType, data interface{}) {
func (l *Logger) Subscribe(mask EventType) *Subscription { func (l *Logger) Subscribe(mask EventType) *Subscription {
l.mutex.Lock() l.mutex.Lock()
if debug {
dl.Debugln("subscribe", mask) dl.Debugln("subscribe", mask)
}
s := &Subscription{ s := &Subscription{
mask: mask, mask: mask,
events: make(chan Event, BufferSize), events: make(chan Event, BufferSize),
@ -177,9 +173,7 @@ func (l *Logger) Subscribe(mask EventType) *Subscription {
func (l *Logger) Unsubscribe(s *Subscription) { func (l *Logger) Unsubscribe(s *Subscription) {
l.mutex.Lock() l.mutex.Lock()
if debug {
dl.Debugln("unsubscribe") dl.Debugln("unsubscribe")
}
for i, ss := range l.subs { for i, ss := range l.subs {
if s == ss { if s == ss {
last := len(l.subs) - 1 last := len(l.subs) - 1
@ -197,9 +191,7 @@ func (l *Logger) Unsubscribe(s *Subscription) {
// out of the event channel is closed. Poll should not be called concurrently // out of the event channel is closed. Poll should not be called concurrently
// from multiple goroutines for a single subscription. // from multiple goroutines for a single subscription.
func (s *Subscription) Poll(timeout time.Duration) (Event, error) { func (s *Subscription) Poll(timeout time.Duration) (Event, error) {
if debug {
dl.Debugln("poll", timeout) dl.Debugln("poll", timeout)
}
if !s.timeout.Reset(timeout) { if !s.timeout.Reset(timeout) {
select { select {

View File

@ -11,8 +11,12 @@ import (
"os" "os"
"strings" "strings"
"sync" "sync"
"time"
) )
// This package uses stdlib sync as it may be used to debug syncthing/lib/sync
// and that would cause an implosion of the universe.
type LogLevel int type LogLevel int
const ( const (
@ -46,14 +50,16 @@ type Logger interface {
Fatalf(format string, vals ...interface{}) Fatalf(format string, vals ...interface{})
ShouldDebug(facility string) bool ShouldDebug(facility string) bool
SetDebug(facility string, enabled bool) SetDebug(facility string, enabled bool)
Facilities() (enabled, disabled []string) Facilities() map[string]string
NewFacility(facility string) Logger FacilityDebugging() []string
NewFacility(facility, description string) Logger
} }
type logger struct { type logger struct {
logger *log.Logger logger *log.Logger
handlers [NumLevels][]MessageHandler handlers [NumLevels][]MessageHandler
debug map[string]bool facilities map[string]string // facility name => description
debug map[string]bool // facility name => debugging enabled
mut sync.Mutex mut sync.Mutex
} }
@ -226,24 +232,42 @@ func (l *logger) SetDebug(facility string, enabled bool) {
l.mut.Unlock() l.mut.Unlock()
} }
// Facilities returns the currently known set of facilities, both those for // FacilityDebugging returns the set of facilities that have debugging
// which debug is enabled and those for which it is disabled. // enabled.
func (l *logger) Facilities() (enabled, disabled []string) { func (l *logger) FacilityDebugging() []string {
var enabled []string
l.mut.Lock() l.mut.Lock()
for facility, isEnabled := range l.debug { for facility, isEnabled := range l.debug {
if isEnabled { if isEnabled {
enabled = append(enabled, facility) enabled = append(enabled, facility)
} else {
disabled = append(disabled, facility)
} }
} }
l.mut.Unlock() l.mut.Unlock()
return return enabled
}
// Facilities returns the currently known set of facilities and their
// descriptions.
func (l *logger) Facilities() map[string]string {
l.mut.Lock()
res := make(map[string]string, len(l.facilities))
for facility, descr := range l.facilities {
res[facility] = descr
}
l.mut.Unlock()
return res
} }
// NewFacility returns a new logger bound to the named facility. // NewFacility returns a new logger bound to the named facility.
func (l *logger) NewFacility(facility string) Logger { func (l *logger) NewFacility(facility, description string) Logger {
l.mut.Lock() l.mut.Lock()
if l.facilities == nil {
l.facilities = make(map[string]string)
}
if description != "" {
l.facilities[facility] = description
}
if l.debug == nil { if l.debug == nil {
l.debug = make(map[string]bool) l.debug = make(map[string]bool)
} }
@ -279,3 +303,77 @@ func (l *facilityLogger) Debugf(format string, vals ...interface{}) {
} }
l.logger.Debugf(format, vals...) l.logger.Debugf(format, vals...)
} }
// A Recorder keeps a size limited record of log events.
type Recorder struct {
lines []Line
initial int
mut sync.Mutex
}
// A Line represents a single log entry.
type Line struct {
When time.Time
Message string
}
func NewRecorder(l Logger, level LogLevel, size, initial int) *Recorder {
r := &Recorder{
lines: make([]Line, 0, size),
initial: initial,
}
l.AddHandler(level, r.append)
return r
}
func (r *Recorder) Since(t time.Time) []Line {
r.mut.Lock()
defer r.mut.Unlock()
res := r.lines
for i := 0; i < len(res) && res[i].When.Before(t); i++ {
// nothing, just incrementing i
}
if len(res) == 0 {
return nil
}
// We must copy the result as r.lines can be mutated as soon as the lock
// is released.
cp := make([]Line, len(res))
copy(cp, res)
return cp
}
func (r *Recorder) Clear() {
r.mut.Lock()
r.lines = r.lines[:0]
r.mut.Unlock()
}
func (r *Recorder) append(l LogLevel, msg string) {
line := Line{
When: time.Now(),
Message: msg,
}
r.mut.Lock()
defer r.mut.Unlock()
if len(r.lines) == cap(r.lines) {
if r.initial > 0 {
// Shift all lines one step to the left, keeping the "initial" first intact.
copy(r.lines[r.initial+1:], r.lines[r.initial+2:])
} else {
copy(r.lines, r.lines[1:])
}
// Add the new one at the end
r.lines[len(r.lines)-1] = line
return
}
r.lines = append(r.lines, line)
if len(r.lines) == r.initial {
r.lines = append(r.lines, Line{time.Now(), "..."})
}
}
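
Pulled together from the interface and implementation above, the new lib/logger surface is used roughly like this. A minimal sketch; the facility name and description are illustrative, and the output format is whatever DefaultLogger is configured with:

package main

import (
	"fmt"
	"time"

	"github.com/syncthing/syncthing/lib/logger"
)

// Illustrative facility; real packages register theirs in a per-package debug.go.
var l = logger.DefaultLogger.NewFacility("example", "Example facility")

func main() {
	// Keep the last 100 debug-or-higher lines, with no pinned initial lines.
	rec := logger.NewRecorder(logger.DefaultLogger, logger.LevelDebug, 100, 0)

	l.Debugln("dropped: debugging for this facility is not enabled yet")

	// Toggle debugging for the facility at runtime.
	l.SetDebug("example", true)
	l.Debugln("visible now")

	// Introspection: descriptions per facility, and which ones are debugging.
	fmt.Println(logger.DefaultLogger.Facilities())
	fmt.Println(logger.DefaultLogger.FacilityDebugging())

	// Everything recorded since a given time, as Line{When, Message} values.
	for _, line := range rec.Since(time.Time{}) {
		fmt.Println(line.When, line.Message)
	}
}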

View File

@ -4,8 +4,10 @@
package logger package logger
import ( import (
"fmt"
"strings" "strings"
"testing" "testing"
"time"
) )
func TestAPI(t *testing.T) { func TestAPI(t *testing.T) {
@ -14,45 +16,42 @@ func TestAPI(t *testing.T) {
l.SetPrefix("testing") l.SetPrefix("testing")
debug := 0 debug := 0
l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, "test 0", &debug)) l.AddHandler(LevelDebug, checkFunc(t, LevelDebug, &debug))
info := 0 info := 0
l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, "test 1", &info)) l.AddHandler(LevelInfo, checkFunc(t, LevelInfo, &info))
warn := 0
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, "test 2", &warn))
ok := 0 ok := 0
l.AddHandler(LevelOK, checkFunc(t, LevelOK, "test 3", &ok)) l.AddHandler(LevelOK, checkFunc(t, LevelOK, &ok))
warn := 0
l.AddHandler(LevelWarn, checkFunc(t, LevelWarn, &warn))
l.Debugf("test %d", 0) l.Debugf("test %d", 0)
l.Debugln("test", 0) l.Debugln("test", 0)
l.Infof("test %d", 1) l.Infof("test %d", 1)
l.Infoln("test", 1) l.Infoln("test", 1)
l.Warnf("test %d", 2) l.Okf("test %d", 2)
l.Warnln("test", 2) l.Okln("test", 2)
l.Okf("test %d", 3) l.Warnf("test %d", 3)
l.Okln("test", 3) l.Warnln("test", 3)
if debug != 2 { if debug != 8 {
t.Errorf("Debug handler called %d != 2 times", debug) t.Errorf("Debug handler called %d != 8 times", debug)
} }
if info != 2 { if info != 6 {
t.Errorf("Info handler called %d != 2 times", info) t.Errorf("Info handler called %d != 6 times", info)
}
if ok != 4 {
t.Errorf("Ok handler called %d != 4 times", ok)
} }
if warn != 2 { if warn != 2 {
t.Errorf("Warn handler called %d != 2 times", warn) t.Errorf("Warn handler called %d != 2 times", warn)
} }
if ok != 2 {
t.Errorf("Ok handler called %d != 2 times", ok)
}
} }
func checkFunc(t *testing.T, expectl LogLevel, expectmsg string, counter *int) func(LogLevel, string) { func checkFunc(t *testing.T, expectl LogLevel, counter *int) func(LogLevel, string) {
return func(l LogLevel, msg string) { return func(l LogLevel, msg string) {
*counter++ *counter++
if l != expectl { if l < expectl {
t.Errorf("Incorrect message level %d != %d", l, expectl) t.Errorf("Incorrect message level %d < %d", l, expectl)
}
if !strings.HasSuffix(msg, expectmsg) {
t.Errorf("%q does not end with %q", msg, expectmsg)
} }
} }
} }
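
The reworked counts (8, 6, 4, 2) and the relaxed comparison in checkFunc encode that a handler registered at a given level now fires for messages at that level and above, rather than only for exact matches. An illustrative test in the same style, not part of the commit:

func TestHandlerAtLeastLevel(t *testing.T) {
	l := New()
	l.SetFlags(0)

	count := 0
	l.AddHandler(LevelInfo, func(LogLevel, string) { count++ })

	l.Debugln("debug") // below Info, handler not called
	l.Infoln("info")   // called
	l.Okln("ok")       // called
	l.Warnln("warn")   // called

	if count != 3 {
		t.Errorf("Info handler called %d != 3 times", count)
	}
}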
@ -69,12 +68,12 @@ func TestFacilityDebugging(t *testing.T) {
} }
}) })
f0 := l.NewFacility("f0", "foo#0")
f1 := l.NewFacility("f1", "foo#1")
l.SetDebug("f0", true) l.SetDebug("f0", true)
l.SetDebug("f1", false) l.SetDebug("f1", false)
f0 := l.NewFacility("f0")
f1 := l.NewFacility("f1")
f0.Debugln("Debug line from f0") f0.Debugln("Debug line from f0")
f1.Debugln("Debug line from f1") f1.Debugln("Debug line from f1")
@ -82,3 +81,60 @@ func TestFacilityDebugging(t *testing.T) {
t.Fatalf("Incorrent number of messages, %d != 1", msgs) t.Fatalf("Incorrent number of messages, %d != 1", msgs)
} }
} }
func TestRecorder(t *testing.T) {
l := New()
l.SetFlags(0)
// Keep the last five warnings or higher, no special initial handling.
r0 := NewRecorder(l, LevelWarn, 5, 0)
// Keep the last ten infos or higher, with the first three being permanent.
r1 := NewRecorder(l, LevelInfo, 10, 3)
// Log a bunch of messages.
for i := 0; i < 15; i++ {
l.Debugf("Debug#%d", i)
l.Infof("Info#%d", i)
l.Warnf("Warn#%d", i)
}
// r0 should contain the last five warnings
lines := r0.Since(time.Time{})
if len(lines) != 5 {
t.Fatalf("Incorrect length %d != 5", len(lines))
}
for i := 0; i < 5; i++ {
expected := fmt.Sprintf("Warn#%d", i+10)
if lines[i].Message != expected {
t.Error("Incorrect warning in r0:", lines[i].Message, "!=", expected)
}
}
// r1 should contain:
// - The first three messages
// - A "..." marker
// - The last six messages
// (totalling ten)
lines = r1.Since(time.Time{})
if len(lines) != 10 {
t.Fatalf("Incorrect length %d != 10", len(lines))
}
expected := []string{
"Info#0",
"Warn#0",
"Info#1",
"...",
"Info#12",
"Warn#12",
"Info#13",
"Warn#13",
"Info#14",
"Warn#14",
}
for i := 0; i < 10; i++ {
if lines[i].Message != expected[i] {
t.Error("Incorrect warning in r0:", lines[i].Message, "!=", expected[i])
}
}
}

View File

@ -10,10 +10,17 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "model") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("model", "The root hub")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("model", strings.Contains(os.Getenv("STTRACE"), "model") || os.Getenv("STTRACE") == "all")
}
func shouldDebug() bool {
return l.ShouldDebug("model")
}
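
shouldDebug() exists for the handful of call sites where building the debug output is itself costly, for example the extra CurrentFolderFile lookup in rwfolder's handleDir further down; those sites keep a cheap guard instead of paying that cost unconditionally. A minimal sketch of the guarded pattern; expensiveSnapshot is a made-up stand-in:

package main

import "github.com/syncthing/syncthing/lib/logger"

var l = logger.DefaultLogger.NewFacility("model", "The root hub")

func shouldDebug() bool {
	return l.ShouldDebug("model")
}

// expensiveSnapshot is hypothetical; it stands in for work such as a database
// lookup whose only purpose is to enrich a debug line.
func expensiveSnapshot() string {
	return "current file record"
}

func main() {
	if shouldDebug() {
		// Pay for the extra work only when the facility is actually on.
		l.Debugf("need dir\n\t%v\n\t%v", "new file record", expensiveSnapshot())
	}
}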

View File

@ -107,9 +107,7 @@ func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName,
m := &Model{ m := &Model{
Supervisor: suture.New("model", suture.Spec{ Supervisor: suture.New("model", suture.Spec{
Log: func(line string) { Log: func(line string) {
if debug {
l.Debugln(line) l.Debugln(line)
}
}, },
}), }),
cfg: cfg, cfg: cfg,
@ -335,9 +333,7 @@ func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
}) })
res := 100 * (1 - float64(need)/float64(tot)) res := 100 * (1 - float64(need)/float64(tot))
if debug {
l.Debugf("%v Completion(%s, %q): %f (%d / %d)", m, device, folder, res, need, tot) l.Debugf("%v Completion(%s, %q): %f (%d / %d)", m, device, folder, res, need, tot)
}
return res return res
} }
@ -412,9 +408,7 @@ func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
}) })
} }
bytes -= m.progressEmitter.BytesCompleted(folder) bytes -= m.progressEmitter.BytesCompleted(folder)
if debug {
l.Debugf("%v NeedSize(%q): %d %d", m, folder, nfiles, bytes) l.Debugf("%v NeedSize(%q): %d %d", m, folder, nfiles, bytes)
}
return return
} }
@ -493,9 +487,7 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
return return
} }
if debug {
l.Debugf("IDX(in): %s %q: %d files", deviceID, folder, len(fs)) l.Debugf("IDX(in): %s %q: %d files", deviceID, folder, len(fs))
}
if !m.folderSharedWith(folder, deviceID) { if !m.folderSharedWith(folder, deviceID) {
events.Default.Log(events.FolderRejected, map[string]string{ events.Default.Log(events.FolderRejected, map[string]string{
@ -541,9 +533,7 @@ func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []prot
return return
} }
if debug {
l.Debugf("%v IDXUP(in): %s / %q: %d files", m, deviceID, folder, len(fs)) l.Debugf("%v IDXUP(in): %s / %q: %d files", m, deviceID, folder, len(fs))
}
if !m.folderSharedWith(folder, deviceID) { if !m.folderSharedWith(folder, deviceID) {
l.Infof("Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID) l.Infof("Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
@ -766,16 +756,12 @@ func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset
} }
if lf.IsInvalid() || lf.IsDeleted() { if lf.IsInvalid() || lf.IsDeleted() {
if debug {
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, len(buf), lf) l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, len(buf), lf)
}
return protocol.ErrInvalid return protocol.ErrInvalid
} }
if offset > lf.Size() { if offset > lf.Size() {
if debug {
l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, len(buf)) l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, len(buf))
}
return protocol.ErrNoSuchFile return protocol.ErrNoSuchFile
} }
@ -806,7 +792,7 @@ func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset
m.rvmut.Unlock() m.rvmut.Unlock()
} }
if debug && deviceID != protocol.LocalDeviceID { if deviceID != protocol.LocalDeviceID {
l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf)) l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
} }
m.fmut.RLock() m.fmut.RLock()
@ -1033,9 +1019,8 @@ func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignore
name := conn.Name() name := conn.Name()
var err error var err error
if debug {
l.Debugf("sendIndexes for %s-%s/%q starting", deviceID, name, folder) l.Debugf("sendIndexes for %s-%s/%q starting", deviceID, name, folder)
} defer l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)
minLocalVer, err := sendIndexTo(true, 0, conn, folder, fs, ignores) minLocalVer, err := sendIndexTo(true, 0, conn, folder, fs, ignores)
@ -1060,9 +1045,6 @@ func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignore
time.Sleep(250 * time.Millisecond) time.Sleep(250 * time.Millisecond)
} }
if debug {
l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)
}
} }
func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (int64, error) { func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (int64, error) {
@ -1084,9 +1066,7 @@ func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, fold
} }
if ignores.Match(f.Name) || symlinkInvalid(folder, f) { if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
if debug {
l.Debugln("not sending update for ignored/unsupported symlink", f) l.Debugln("not sending update for ignored/unsupported symlink", f)
}
return true return true
} }
@ -1095,18 +1075,14 @@ func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, fold
if err = conn.Index(folder, batch, 0, nil); err != nil { if err = conn.Index(folder, batch, 0, nil); err != nil {
return false return false
} }
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize) l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize)
}
initial = false initial = false
} else { } else {
if err = conn.IndexUpdate(folder, batch, 0, nil); err != nil { if err = conn.IndexUpdate(folder, batch, 0, nil); err != nil {
return false return false
} }
if debug {
l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize) l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize)
} }
}
batch = make([]protocol.FileInfo, 0, indexBatchSize) batch = make([]protocol.FileInfo, 0, indexBatchSize)
currentBatchSize = 0 currentBatchSize = 0
@ -1119,12 +1095,12 @@ func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, fold
if initial && err == nil { if initial && err == nil {
err = conn.Index(folder, batch, 0, nil) err = conn.Index(folder, batch, 0, nil)
if debug && err == nil { if err == nil {
l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch)) l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch))
} }
} else if len(batch) > 0 && err == nil { } else if len(batch) > 0 && err == nil {
err = conn.IndexUpdate(folder, batch, 0, nil) err = conn.IndexUpdate(folder, batch, 0, nil)
if debug && err == nil { if err == nil {
l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch)) l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch))
} }
} }
@ -1160,9 +1136,7 @@ func (m *Model) requestGlobal(deviceID protocol.DeviceID, folder, name string, o
return nil, fmt.Errorf("requestGlobal: no such device: %s", deviceID) return nil, fmt.Errorf("requestGlobal: no such device: %s", deviceID)
} }
if debug {
l.Debugf("%v REQ(out): %s: %q / %q o=%d s=%d h=%x f=%x op=%s", m, deviceID, folder, name, offset, size, hash, flags, options) l.Debugf("%v REQ(out): %s: %q / %q o=%d s=%d h=%x f=%x op=%s", m, deviceID, folder, name, offset, size, hash, flags, options)
}
return nc.Request(folder, name, offset, size, hash, flags, options) return nc.Request(folder, name, offset, size, hash, flags, options)
} }
@ -1403,9 +1377,7 @@ nextSub:
if ignores.Match(f.Name) || symlinkInvalid(folder, f) { if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
// File has been ignored or an unsupported symlink. Set invalid bit. // File has been ignored or an unsupported symlink. Set invalid bit.
if debug {
l.Debugln("setting invalid bit on ignored", f) l.Debugln("setting invalid bit on ignored", f)
}
nf := protocol.FileInfo{ nf := protocol.FileInfo{
Name: f.Name, Name: f.Name,
Flags: f.Flags | protocol.FlagInvalid, Flags: f.Flags | protocol.FlagInvalid,
@ -1820,9 +1792,7 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
for folderID, cfg := range toFolders { for folderID, cfg := range toFolders {
if _, ok := fromFolders[folderID]; !ok { if _, ok := fromFolders[folderID]; !ok {
// A folder was added. // A folder was added.
if debug {
l.Debugln(m, "adding folder", folderID) l.Debugln(m, "adding folder", folderID)
}
m.AddFolder(cfg) m.AddFolder(cfg)
if cfg.ReadOnly { if cfg.ReadOnly {
m.StartFolderRO(folderID) m.StartFolderRO(folderID)
@ -1846,9 +1816,7 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
toCfg, ok := toFolders[folderID] toCfg, ok := toFolders[folderID]
if !ok { if !ok {
// A folder was removed. Requires restart. // A folder was removed. Requires restart.
if debug {
l.Debugln(m, "requires restart, removing folder", folderID) l.Debugln(m, "requires restart, removing folder", folderID)
}
return false return false
} }
@ -1860,9 +1828,7 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
for dev := range fromDevs { for dev := range fromDevs {
if _, ok := toDevs[dev]; !ok { if _, ok := toDevs[dev]; !ok {
// A device was removed. Requires restart. // A device was removed. Requires restart.
if debug {
l.Debugln(m, "requires restart, removing device", dev, "from folder", folderID) l.Debugln(m, "requires restart, removing device", dev, "from folder", folderID)
}
return false return false
} }
} }
@ -1895,9 +1861,7 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
fromCfg.Devices = nil fromCfg.Devices = nil
toCfg.Devices = nil toCfg.Devices = nil
if !reflect.DeepEqual(fromCfg, toCfg) { if !reflect.DeepEqual(fromCfg, toCfg) {
if debug {
l.Debugln(m, "requires restart, folder", folderID, "configuration differs") l.Debugln(m, "requires restart, folder", folderID, "configuration differs")
}
return false return false
} }
} }
@ -1906,18 +1870,14 @@ func (m *Model) CommitConfiguration(from, to config.Configuration) bool {
toDevs := mapDeviceCfgs(from.Devices) toDevs := mapDeviceCfgs(from.Devices)
for _, dev := range from.Devices { for _, dev := range from.Devices {
if _, ok := toDevs[dev.DeviceID]; !ok { if _, ok := toDevs[dev.DeviceID]; !ok {
if debug {
l.Debugln(m, "requires restart, device", dev.DeviceID, "was removed") l.Debugln(m, "requires restart, device", dev.DeviceID, "was removed")
}
return false return false
} }
} }
// All of the generic options require restart // All of the generic options require restart
if !reflect.DeepEqual(from.Options, to.Options) { if !reflect.DeepEqual(from.Options, to.Options) {
if debug {
l.Debugln(m, "requires restart, options differ") l.Debugln(m, "requires restart, options differ")
}
return false return false
} }
@ -1957,21 +1917,15 @@ func mapDeviceCfgs(devices []config.DeviceConfiguration) map[protocol.DeviceID]s
func filterIndex(folder string, fs []protocol.FileInfo, dropDeletes bool) []protocol.FileInfo { func filterIndex(folder string, fs []protocol.FileInfo, dropDeletes bool) []protocol.FileInfo {
for i := 0; i < len(fs); { for i := 0; i < len(fs); {
if fs[i].Flags&^protocol.FlagsAll != 0 { if fs[i].Flags&^protocol.FlagsAll != 0 {
if debug {
l.Debugln("dropping update for file with unknown bits set", fs[i]) l.Debugln("dropping update for file with unknown bits set", fs[i])
}
fs[i] = fs[len(fs)-1] fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1] fs = fs[:len(fs)-1]
} else if fs[i].IsDeleted() && dropDeletes { } else if fs[i].IsDeleted() && dropDeletes {
if debug {
l.Debugln("dropping update for undesired delete", fs[i]) l.Debugln("dropping update for undesired delete", fs[i])
}
fs[i] = fs[len(fs)-1] fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1] fs = fs[:len(fs)-1]
} else if symlinkInvalid(folder, fs[i]) { } else if symlinkInvalid(folder, fs[i]) {
if debug {
l.Debugln("dropping update for unsupported symlink", fs[i]) l.Debugln("dropping update for unsupported symlink", fs[i])
}
fs[i] = fs[len(fs)-1] fs[i] = fs[len(fs)-1]
fs = fs[:len(fs)-1] fs = fs[:len(fs)-1]
} else { } else {

View File

@ -51,15 +51,11 @@ func (t *ProgressEmitter) Serve() {
for { for {
select { select {
case <-t.stop: case <-t.stop:
if debug {
l.Debugln("progress emitter: stopping") l.Debugln("progress emitter: stopping")
}
return return
case <-t.timer.C: case <-t.timer.C:
t.mut.Lock() t.mut.Lock()
if debug {
l.Debugln("progress emitter: timer - looking after", len(t.registry)) l.Debugln("progress emitter: timer - looking after", len(t.registry))
}
output := make(map[string]map[string]*pullerProgress) output := make(map[string]map[string]*pullerProgress)
for _, puller := range t.registry { for _, puller := range t.registry {
if output[puller.folder] == nil { if output[puller.folder] == nil {
@ -70,10 +66,8 @@ func (t *ProgressEmitter) Serve() {
if !reflect.DeepEqual(t.last, output) { if !reflect.DeepEqual(t.last, output) {
events.Default.Log(events.DownloadProgress, output) events.Default.Log(events.DownloadProgress, output)
t.last = output t.last = output
if debug {
l.Debugf("progress emitter: emitting %#v", output) l.Debugf("progress emitter: emitting %#v", output)
} } else {
} else if debug {
l.Debugln("progress emitter: nothing new") l.Debugln("progress emitter: nothing new")
} }
if len(t.registry) != 0 { if len(t.registry) != 0 {
@ -95,9 +89,7 @@ func (t *ProgressEmitter) CommitConfiguration(from, to config.Configuration) boo
defer t.mut.Unlock() defer t.mut.Unlock()
t.interval = time.Duration(to.Options.ProgressUpdateIntervalS) * time.Second t.interval = time.Duration(to.Options.ProgressUpdateIntervalS) * time.Second
if debug {
l.Debugln("progress emitter: updated interval", t.interval) l.Debugln("progress emitter: updated interval", t.interval)
}
return true return true
} }
@ -112,9 +104,7 @@ func (t *ProgressEmitter) Stop() {
func (t *ProgressEmitter) Register(s *sharedPullerState) { func (t *ProgressEmitter) Register(s *sharedPullerState) {
t.mut.Lock() t.mut.Lock()
defer t.mut.Unlock() defer t.mut.Unlock()
if debug {
l.Debugln("progress emitter: registering", s.folder, s.file.Name) l.Debugln("progress emitter: registering", s.folder, s.file.Name)
}
if len(t.registry) == 0 { if len(t.registry) == 0 {
t.timer.Reset(t.interval) t.timer.Reset(t.interval)
} }
@ -125,9 +115,7 @@ func (t *ProgressEmitter) Register(s *sharedPullerState) {
func (t *ProgressEmitter) Deregister(s *sharedPullerState) { func (t *ProgressEmitter) Deregister(s *sharedPullerState) {
t.mut.Lock() t.mut.Lock()
defer t.mut.Unlock() defer t.mut.Unlock()
if debug {
l.Debugln("progress emitter: deregistering", s.folder, s.file.Name) l.Debugln("progress emitter: deregistering", s.folder, s.file.Name)
}
delete(t.registry, filepath.Join(s.folder, s.file.Name)) delete(t.registry, filepath.Join(s.folder, s.file.Name))
} }
@ -141,9 +129,7 @@ func (t *ProgressEmitter) BytesCompleted(folder string) (bytes int64) {
bytes += s.Progress().BytesDone bytes += s.Progress().BytesDone
} }
} }
if debug {
l.Debugf("progress emitter: bytes completed for %s: %d", folder, bytes) l.Debugf("progress emitter: bytes completed for %s: %d", folder, bytes)
}
return return
} }

View File

@ -48,10 +48,8 @@ func newROFolder(model *Model, folder string, interval time.Duration) *roFolder
} }
func (s *roFolder) Serve() { func (s *roFolder) Serve() {
if debug {
l.Debugln(s, "starting") l.Debugln(s, "starting")
defer l.Debugln(s, "exiting") defer l.Debugln(s, "exiting")
}
defer func() { defer func() {
s.timer.Stop() s.timer.Stop()
@ -79,9 +77,7 @@ func (s *roFolder) Serve() {
continue continue
} }
if debug {
l.Debugln(s, "rescan") l.Debugln(s, "rescan")
}
if err := s.model.internalScanFolderSubs(s.folder, nil); err != nil { if err := s.model.internalScanFolderSubs(s.folder, nil); err != nil {
// Potentially sets the error twice, once in the scanner just // Potentially sets the error twice, once in the scanner just
@ -111,9 +107,7 @@ func (s *roFolder) Serve() {
continue continue
} }
if debug {
l.Debugln(s, "forced rescan") l.Debugln(s, "forced rescan")
}
if err := s.model.internalScanFolderSubs(s.folder, req.subs); err != nil { if err := s.model.internalScanFolderSubs(s.folder, req.subs); err != nil {
// Potentially sets the error twice, once in the scanner just // Potentially sets the error twice, once in the scanner just

View File

@ -157,10 +157,8 @@ func (p *rwFolder) ignorePermissions(file protocol.FileInfo) bool {
// Serve will run scans and pulls. It will return when Stop()ed or on a // Serve will run scans and pulls. It will return when Stop()ed or on a
// critical error. // critical error.
func (p *rwFolder) Serve() { func (p *rwFolder) Serve() {
if debug {
l.Debugln(p, "starting") l.Debugln(p, "starting")
defer l.Debugln(p, "exiting") defer l.Debugln(p, "exiting")
}
defer func() { defer func() {
p.pullTimer.Stop() p.pullTimer.Stop()
@ -182,9 +180,7 @@ func (p *rwFolder) Serve() {
sleepNanos := (p.scanIntv.Nanoseconds()*3 + rand.Int63n(2*p.scanIntv.Nanoseconds())) / 4 sleepNanos := (p.scanIntv.Nanoseconds()*3 + rand.Int63n(2*p.scanIntv.Nanoseconds())) / 4
intv := time.Duration(sleepNanos) * time.Nanosecond intv := time.Duration(sleepNanos) * time.Nanosecond
if debug {
l.Debugln(p, "next rescan in", intv) l.Debugln(p, "next rescan in", intv)
}
p.scanTimer.Reset(intv) p.scanTimer.Reset(intv)
} }
@ -199,15 +195,11 @@ func (p *rwFolder) Serve() {
case <-p.remoteIndex: case <-p.remoteIndex:
prevVer = 0 prevVer = 0
p.pullTimer.Reset(shortPullIntv) p.pullTimer.Reset(shortPullIntv)
if debug {
l.Debugln(p, "remote index updated, rescheduling pull") l.Debugln(p, "remote index updated, rescheduling pull")
}
case <-p.pullTimer.C: case <-p.pullTimer.C:
if !initialScanCompleted { if !initialScanCompleted {
if debug {
l.Debugln(p, "skip (initial)") l.Debugln(p, "skip (initial)")
}
p.pullTimer.Reset(nextPullIntv) p.pullTimer.Reset(nextPullIntv)
continue continue
} }
@ -225,9 +217,7 @@ func (p *rwFolder) Serve() {
if newHash := curIgnores.Hash(); newHash != prevIgnoreHash { if newHash := curIgnores.Hash(); newHash != prevIgnoreHash {
// The ignore patterns have changed. We need to re-evaluate if // The ignore patterns have changed. We need to re-evaluate if
// there are files we need now that were ignored before. // there are files we need now that were ignored before.
if debug {
l.Debugln(p, "ignore patterns have changed, resetting prevVer") l.Debugln(p, "ignore patterns have changed, resetting prevVer")
}
prevVer = 0 prevVer = 0
prevIgnoreHash = newHash prevIgnoreHash = newHash
} }
@ -235,16 +225,12 @@ func (p *rwFolder) Serve() {
// RemoteLocalVersion() is a fast call, doesn't touch the database. // RemoteLocalVersion() is a fast call, doesn't touch the database.
curVer, ok := p.model.RemoteLocalVersion(p.folder) curVer, ok := p.model.RemoteLocalVersion(p.folder)
if !ok || curVer == prevVer { if !ok || curVer == prevVer {
if debug {
l.Debugln(p, "skip (curVer == prevVer)", prevVer, ok) l.Debugln(p, "skip (curVer == prevVer)", prevVer, ok)
}
p.pullTimer.Reset(nextPullIntv) p.pullTimer.Reset(nextPullIntv)
continue continue
} }
if debug {
l.Debugln(p, "pulling", prevVer, curVer) l.Debugln(p, "pulling", prevVer, curVer)
}
p.setState(FolderSyncing) p.setState(FolderSyncing)
p.clearErrors() p.clearErrors()
@ -254,9 +240,7 @@ func (p *rwFolder) Serve() {
tries++ tries++
changed := p.pullerIteration(curIgnores) changed := p.pullerIteration(curIgnores)
if debug {
l.Debugln(p, "changed", changed) l.Debugln(p, "changed", changed)
}
if changed == 0 { if changed == 0 {
// No files were changed by the puller, so we are in // No files were changed by the puller, so we are in
@ -276,9 +260,7 @@ func (p *rwFolder) Serve() {
curVer = lv curVer = lv
} }
prevVer = curVer prevVer = curVer
if debug {
l.Debugln(p, "next pull in", nextPullIntv) l.Debugln(p, "next pull in", nextPullIntv)
}
p.pullTimer.Reset(nextPullIntv) p.pullTimer.Reset(nextPullIntv)
break break
} }
@ -289,9 +271,7 @@ func (p *rwFolder) Serve() {
// errors preventing us. Flag this with a warning and // errors preventing us. Flag this with a warning and
// wait a bit longer before retrying. // wait a bit longer before retrying.
l.Infof("Folder %q isn't making progress. Pausing puller for %v.", p.folder, pauseIntv) l.Infof("Folder %q isn't making progress. Pausing puller for %v.", p.folder, pauseIntv)
if debug {
l.Debugln(p, "next pull in", pauseIntv) l.Debugln(p, "next pull in", pauseIntv)
}
if folderErrors := p.currentErrors(); len(folderErrors) > 0 { if folderErrors := p.currentErrors(); len(folderErrors) > 0 {
events.Default.Log(events.FolderErrors, map[string]interface{}{ events.Default.Log(events.FolderErrors, map[string]interface{}{
@ -316,9 +296,7 @@ func (p *rwFolder) Serve() {
continue continue
} }
if debug {
l.Debugln(p, "rescan") l.Debugln(p, "rescan")
}
if err := p.model.internalScanFolderSubs(p.folder, nil); err != nil { if err := p.model.internalScanFolderSubs(p.folder, nil); err != nil {
// Potentially sets the error twice, once in the scanner just // Potentially sets the error twice, once in the scanner just
@ -345,9 +323,7 @@ func (p *rwFolder) Serve() {
continue continue
} }
if debug {
l.Debugln(p, "forced rescan") l.Debugln(p, "forced rescan")
}
if err := p.model.internalScanFolderSubs(p.folder, req.subs); err != nil { if err := p.model.internalScanFolderSubs(p.folder, req.subs); err != nil {
// Potentially sets the error twice, once in the scanner just // Potentially sets the error twice, once in the scanner just
@ -409,9 +385,7 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
pullWg := sync.NewWaitGroup() pullWg := sync.NewWaitGroup()
doneWg := sync.NewWaitGroup() doneWg := sync.NewWaitGroup()
if debug {
l.Debugln(p, "c", p.copiers, "p", p.pullers) l.Debugln(p, "c", p.copiers, "p", p.pullers)
}
p.dbUpdates = make(chan dbUpdateJob) p.dbUpdates = make(chan dbUpdateJob)
updateWg.Add(1) updateWg.Add(1)
@ -474,9 +448,7 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
return true return true
} }
if debug {
l.Debugln(p, "handling", file.Name) l.Debugln(p, "handling", file.Name)
}
switch { switch {
case file.IsDeleted(): case file.IsDeleted():
@ -498,9 +470,7 @@ func (p *rwFolder) pullerIteration(ignores *ignore.Matcher) int {
} }
case file.IsDirectory() && !file.IsSymlink(): case file.IsDirectory() && !file.IsSymlink():
// A new or changed directory // A new or changed directory
if debug {
l.Debugln("Creating directory", file.Name) l.Debugln("Creating directory", file.Name)
}
p.handleDir(file) p.handleDir(file)
default: default:
// A new or changed file or symlink. This is the only case where we // A new or changed file or symlink. This is the only case where we
@ -591,17 +561,13 @@ nextFile:
doneWg.Wait() doneWg.Wait()
for _, file := range fileDeletions { for _, file := range fileDeletions {
if debug {
l.Debugln("Deleting file", file.Name) l.Debugln("Deleting file", file.Name)
}
p.deleteFile(file) p.deleteFile(file)
} }
for i := range dirDeletions { for i := range dirDeletions {
dir := dirDeletions[len(dirDeletions)-i-1] dir := dirDeletions[len(dirDeletions)-i-1]
if debug {
l.Debugln("Deleting dir", dir.Name) l.Debugln("Deleting dir", dir.Name)
}
p.deleteDir(dir) p.deleteDir(dir)
} }
@ -638,7 +604,7 @@ func (p *rwFolder) handleDir(file protocol.FileInfo) {
mode = 0777 mode = 0777
} }
if debug { if shouldDebug() {
curFile, _ := p.model.CurrentFolderFile(p.folder, file.Name) curFile, _ := p.model.CurrentFolderFile(p.folder, file.Name)
l.Debugf("need dir\n\t%v\n\t%v", file, curFile) l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
} }
@ -836,9 +802,7 @@ func (p *rwFolder) renameFile(source, target protocol.FileInfo) {
}) })
}() }()
if debug {
l.Debugln(p, "taking rename shortcut", source.Name, "->", target.Name) l.Debugln(p, "taking rename shortcut", source.Name, "->", target.Name)
}
from := filepath.Join(p.dir, source.Name) from := filepath.Join(p.dir, source.Name)
to := filepath.Join(p.dir, target.Name) to := filepath.Join(p.dir, target.Name)
@ -926,9 +890,7 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
// We are supposed to copy the entire file, and then fetch nothing. We // We are supposed to copy the entire file, and then fetch nothing. We
// are only updating metadata, so we don't actually *need* to make the // are only updating metadata, so we don't actually *need* to make the
// copy. // copy.
if debug {
l.Debugln(p, "taking shortcut on", file.Name) l.Debugln(p, "taking shortcut on", file.Name)
}
events.Default.Log(events.ItemStarted, map[string]string{ events.Default.Log(events.ItemStarted, map[string]string{
"folder": p.folder, "folder": p.folder,
@ -1033,9 +995,7 @@ func (p *rwFolder) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocks
mut: sync.NewMutex(), mut: sync.NewMutex(),
} }
if debug {
l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused) l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
}
cs := copyBlocksState{ cs := copyBlocksState{
sharedPullerState: &s, sharedPullerState: &s,
@ -1135,14 +1095,12 @@ func (p *rwFolder) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pull
hash, err := scanner.VerifyBuffer(buf, block) hash, err := scanner.VerifyBuffer(buf, block)
if err != nil { if err != nil {
if hash != nil { if hash != nil {
if debug {
l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash) l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
}
err = p.model.finder.Fix(folder, file, index, block.Hash, hash) err = p.model.finder.Fix(folder, file, index, block.Hash, hash)
if err != nil { if err != nil {
l.Warnln("finder fix:", err) l.Warnln("finder fix:", err)
} }
} else if debug { } else {
l.Debugln("Finder failed to verify buffer", err) l.Debugln("Finder failed to verify buffer", err)
} }
return false return false
@ -1217,9 +1175,7 @@ func (p *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPul
buf, lastError := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash, 0, nil) buf, lastError := p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash, 0, nil)
activity.done(selected) activity.done(selected)
if lastError != nil { if lastError != nil {
if debug {
l.Debugln("request:", p.folder, state.file.Name, state.block.Offset, state.block.Size, "returned error:", lastError) l.Debugln("request:", p.folder, state.file.Name, state.block.Offset, state.block.Size, "returned error:", lastError)
}
continue continue
} }
@ -1227,9 +1183,7 @@ func (p *rwFolder) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPul
// try pulling it from another device. // try pulling it from another device.
_, lastError = scanner.VerifyBuffer(buf, state.block) _, lastError = scanner.VerifyBuffer(buf, state.block)
if lastError != nil { if lastError != nil {
if debug {
l.Debugln("request:", p.folder, state.file.Name, state.block.Offset, state.block.Size, "hash mismatch") l.Debugln("request:", p.folder, state.file.Name, state.block.Offset, state.block.Size, "hash mismatch")
}
continue continue
} }
@ -1339,9 +1293,7 @@ func (p *rwFolder) performFinish(state *sharedPullerState) error {
func (p *rwFolder) finisherRoutine(in <-chan *sharedPullerState) { func (p *rwFolder) finisherRoutine(in <-chan *sharedPullerState) {
for state := range in { for state := range in {
if closed, err := state.finalClose(); closed { if closed, err := state.finalClose(); closed {
if debug {
l.Debugln(p, "closing", state.file.Name) l.Debugln(p, "closing", state.file.Name)
}
p.queue.Done(state.file.Name) p.queue.Done(state.file.Name)

View File

@ -193,9 +193,7 @@ func (s *sharedPullerState) failed() error {
func (s *sharedPullerState) copyDone() { func (s *sharedPullerState) copyDone() {
s.mut.Lock() s.mut.Lock()
s.copyNeeded-- s.copyNeeded--
if debug {
l.Debugln("sharedPullerState", s.folder, s.file.Name, "copyNeeded ->", s.copyNeeded) l.Debugln("sharedPullerState", s.folder, s.file.Name, "copyNeeded ->", s.copyNeeded)
}
s.mut.Unlock() s.mut.Unlock()
} }
@ -211,18 +209,14 @@ func (s *sharedPullerState) pullStarted() {
s.copyNeeded-- s.copyNeeded--
s.pullTotal++ s.pullTotal++
s.pullNeeded++ s.pullNeeded++
if debug {
l.Debugln("sharedPullerState", s.folder, s.file.Name, "pullNeeded start ->", s.pullNeeded) l.Debugln("sharedPullerState", s.folder, s.file.Name, "pullNeeded start ->", s.pullNeeded)
}
s.mut.Unlock() s.mut.Unlock()
} }
func (s *sharedPullerState) pullDone() { func (s *sharedPullerState) pullDone() {
s.mut.Lock() s.mut.Lock()
s.pullNeeded-- s.pullNeeded--
if debug {
l.Debugln("sharedPullerState", s.folder, s.file.Name, "pullNeeded done ->", s.pullNeeded) l.Debugln("sharedPullerState", s.folder, s.file.Name, "pullNeeded done ->", s.pullNeeded)
}
s.mut.Unlock() s.mut.Unlock()
} }
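The hunks above leave bare l.Debugln and l.Debugf calls where the call sites used to carry their own "if debug" guard; the gating has to live inside the logger, keyed by a facility name. Below is a minimal sketch of that shape, using only the calls that appear in this diff (NewFacility, SetDebug, ShouldDebug, Debugln, Debugf). It is an illustration, not the actual lib/logger implementation, and every type and field name in it is assumed.

```go
package main

import (
	"fmt"
	"log"
	"sync"
)

// Logger holds the per-facility debug flags; FacilityLogger binds one
// facility name to it so call sites can simply write l.Debugln(...).
type Logger struct {
	mut     sync.RWMutex
	enabled map[string]bool
}

type FacilityLogger struct {
	*Logger
	facility string
}

func New() *Logger {
	return &Logger{enabled: make(map[string]bool)}
}

// NewFacility registers a facility; the description would be kept for
// listing facilities, but is dropped in this sketch.
func (l *Logger) NewFacility(facility, description string) *FacilityLogger {
	l.SetDebug(facility, false)
	return &FacilityLogger{Logger: l, facility: facility}
}

func (l *Logger) SetDebug(facility string, enable bool) {
	l.mut.Lock()
	l.enabled[facility] = enable
	l.mut.Unlock()
}

func (l *Logger) ShouldDebug(facility string) bool {
	l.mut.RLock()
	res := l.enabled[facility]
	l.mut.RUnlock()
	return res
}

// Debugln and Debugf return almost immediately while the facility is
// disabled, which is what lets the "if debug { ... }" wrappers go away.
func (f *FacilityLogger) Debugln(vals ...interface{}) {
	if !f.ShouldDebug(f.facility) {
		return
	}
	log.Println(append([]interface{}{f.facility + ":"}, vals...)...)
}

func (f *FacilityLogger) Debugf(format string, vals ...interface{}) {
	if !f.ShouldDebug(f.facility) {
		return
	}
	log.Println(f.facility + ": " + fmt.Sprintf(format, vals...))
}

func main() {
	l := New().NewFacility("model", "Folder synchronization")
	l.Debugln("taking shortcut on", "foo.txt") // silent, facility off
	l.SetDebug("model", true)                  // flipped after startup
	l.Debugln("taking shortcut on", "foo.txt") // now printed
}
```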

View File

@ -6,10 +6,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("protocol", "The BEP protocol")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("protocol", strings.Contains(os.Getenv("STTRACE"), "protocol") || os.Getenv("STTRACE") == "all")
}
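With the facility registered this way, the STTRACE check in init() only seeds the starting value; the same flag can also be changed later through SetDebug and inspected with ShouldDebug. A hedged sketch of such a caller, reusing the import path from this file; the "example" facility and the EnableTracing helper are made up for illustration.

```go
package example

import "github.com/syncthing/syncthing/lib/logger"

// l mirrors the pattern above: one facility logger per package.
var l = logger.DefaultLogger.NewFacility("example", "Facility toggling sketch")

// EnableTracing flips the facility after startup; callers do not need to
// know or care whether STTRACE enabled it initially.
func EnableTracing(on bool) {
	l.SetDebug("example", on)
	if l.ShouldDebug("example") {
		l.Debugln("example facility debugging is now on")
	}
}
```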

View File

@ -369,9 +369,7 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
hdr = decodeHeader(binary.BigEndian.Uint32(c.rdbuf0[0:4])) hdr = decodeHeader(binary.BigEndian.Uint32(c.rdbuf0[0:4]))
msglen := int(binary.BigEndian.Uint32(c.rdbuf0[4:8])) msglen := int(binary.BigEndian.Uint32(c.rdbuf0[4:8]))
if debug {
l.Debugf("read header %v (msglen=%d)", hdr, msglen) l.Debugf("read header %v (msglen=%d)", hdr, msglen)
}
if msglen > MaxMessageLen { if msglen > MaxMessageLen {
err = fmt.Errorf("message length %d exceeds maximum %d", msglen, MaxMessageLen) err = fmt.Errorf("message length %d exceeds maximum %d", msglen, MaxMessageLen)
@ -393,9 +391,7 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
return return
} }
if debug {
l.Debugf("read %d bytes", len(c.rdbuf0)) l.Debugf("read %d bytes", len(c.rdbuf0))
}
msgBuf := c.rdbuf0 msgBuf := c.rdbuf0
if hdr.compression && msglen > 0 { if hdr.compression && msglen > 0 {
@ -405,18 +401,14 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
return return
} }
msgBuf = c.rdbuf1 msgBuf = c.rdbuf1
if debug {
l.Debugf("decompressed to %d bytes", len(msgBuf)) l.Debugf("decompressed to %d bytes", len(msgBuf))
} }
}
if debug {
if len(msgBuf) > 1024 { if len(msgBuf) > 1024 {
l.Debugf("message data:\n%s", hex.Dump(msgBuf[:1024])) l.Debugf("message data:\n%s", hex.Dump(msgBuf[:1024]))
} else { } else {
l.Debugf("message data:\n%s", hex.Dump(msgBuf)) l.Debugf("message data:\n%s", hex.Dump(msgBuf))
} }
}
// We check each returned error for the XDRError.IsEOF() method. // We check each returned error for the XDRError.IsEOF() method.
// IsEOF()==true here means that the message contained fewer fields than // IsEOF()==true here means that the message contained fewer fields than
@ -477,16 +469,12 @@ func (c *rawConnection) readMessage() (hdr header, msg encodable, err error) {
} }
func (c *rawConnection) handleIndex(im IndexMessage) { func (c *rawConnection) handleIndex(im IndexMessage) {
if debug {
l.Debugf("Index(%v, %v, %d file, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options) l.Debugf("Index(%v, %v, %d file, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
}
c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options) c.receiver.Index(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
} }
func (c *rawConnection) handleIndexUpdate(im IndexMessage) { func (c *rawConnection) handleIndexUpdate(im IndexMessage) {
if debug {
l.Debugf("queueing IndexUpdate(%v, %v, %d files, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options) l.Debugf("queueing IndexUpdate(%v, %v, %d files, flags %x, opts: %s)", c.id, im.Folder, len(im.Files), im.Flags, im.Options)
}
c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options) c.receiver.IndexUpdate(c.id, im.Folder, filterIndexMessageFiles(im.Files), im.Flags, im.Options)
} }
@ -636,9 +624,7 @@ func (c *rawConnection) writerLoop() {
binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(tempBuf))) binary.BigEndian.PutUint32(msgBuf[4:8], uint32(len(tempBuf)))
msgBuf = msgBuf[0 : len(tempBuf)+8] msgBuf = msgBuf[0 : len(tempBuf)+8]
if debug {
l.Debugf("write compressed message; %v (len=%d)", hm.hdr, len(tempBuf)) l.Debugf("write compressed message; %v (len=%d)", hm.hdr, len(tempBuf))
}
} else { } else {
// No point in compressing very short messages // No point in compressing very short messages
hm.hdr.compression = false hm.hdr.compression = false
@ -652,14 +638,10 @@ func (c *rawConnection) writerLoop() {
msgBuf = msgBuf[0 : len(uncBuf)+8] msgBuf = msgBuf[0 : len(uncBuf)+8]
copy(msgBuf[8:], uncBuf) copy(msgBuf[8:], uncBuf)
if debug {
l.Debugf("write uncompressed message; %v (len=%d)", hm.hdr, len(uncBuf)) l.Debugf("write uncompressed message; %v (len=%d)", hm.hdr, len(uncBuf))
} }
}
} else { } else {
if debug {
l.Debugf("write empty message; %v", hm.hdr) l.Debugf("write empty message; %v", hm.hdr)
}
binary.BigEndian.PutUint32(msgBuf[4:8], 0) binary.BigEndian.PutUint32(msgBuf[4:8], 0)
msgBuf = msgBuf[:8] msgBuf = msgBuf[:8]
} }
@ -669,10 +651,8 @@ func (c *rawConnection) writerLoop() {
if err == nil { if err == nil {
var n int var n int
n, err = c.cw.Write(msgBuf) n, err = c.cw.Write(msgBuf)
if debug {
l.Debugf("wrote %d bytes on the wire", n) l.Debugf("wrote %d bytes on the wire", n)
} }
}
if err != nil { if err != nil {
c.close(err) c.close(err)
return return
@ -725,15 +705,11 @@ func (c *rawConnection) pingSender() {
case <-ticker: case <-ticker:
d := time.Since(c.cw.Last()) d := time.Since(c.cw.Last())
if d < PingSendInterval/2 { if d < PingSendInterval/2 {
if debug {
l.Debugln(c.id, "ping skipped after wr", d) l.Debugln(c.id, "ping skipped after wr", d)
}
continue continue
} }
if debug {
l.Debugln(c.id, "ping -> after", d) l.Debugln(c.id, "ping -> after", d)
}
c.ping() c.ping()
case <-c.closed: case <-c.closed:
@ -753,15 +729,11 @@ func (c *rawConnection) pingReceiver() {
case <-ticker: case <-ticker:
d := time.Since(c.cr.Last()) d := time.Since(c.cr.Last())
if d > ReceiveTimeout { if d > ReceiveTimeout {
if debug {
l.Debugln(c.id, "ping timeout", d) l.Debugln(c.id, "ping timeout", d)
}
c.close(ErrTimeout) c.close(ErrTimeout)
} }
if debug {
l.Debugln(c.id, "last read within", d) l.Debugln(c.id, "last read within", d)
}
case <-c.closed: case <-c.closed:
return return
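One caveat in the readMessage hunk above: Go evaluates call arguments before the function runs, so hex.Dump(msgBuf) is built on every message even while the facility is off and Debugf discards the result. For arguments this expensive, an explicit ShouldDebug guard (the same call the sync and upnp packages use further down) keeps the disabled path cheap. A sketch against the surrounding readMessage code, assuming the facility is named "protocol" as in this package's debug.go:

```go
// Sketch only: guard debug output whose arguments are costly to build.
if l.ShouldDebug("protocol") {
	if len(msgBuf) > 1024 {
		l.Debugf("message data:\n%s", hex.Dump(msgBuf[:1024]))
	} else {
		l.Debugf("message data:\n%s", hex.Dump(msgBuf))
	}
}
```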

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "rc") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("rc", "Remote control package")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("rc", strings.Contains(os.Getenv("STTRACE"), "rc") || os.Getenv("STTRACE") == "all")
}

View File

@ -351,9 +351,7 @@ func (p *Process) Model(folder string) (Model, error) {
return Model{}, err return Model{}, err
} }
if debug {
l.Debugf("%+v", res) l.Debugf("%+v", res)
}
return res, nil return res, nil
} }
@ -506,9 +504,7 @@ func (p *Process) eventLoop() {
m[p.id.String()] = version m[p.id.String()] = version
p.localVersion[folder] = m p.localVersion[folder] = m
p.done[folder] = false p.done[folder] = false
if debug {
l.Debugf("LocalIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m) l.Debugf("LocalIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m)
}
p.eventMut.Unlock() p.eventMut.Unlock()
case "RemoteIndexUpdated": case "RemoteIndexUpdated":
@ -524,9 +520,7 @@ func (p *Process) eventLoop() {
m[device] = version m[device] = version
p.localVersion[folder] = m p.localVersion[folder] = m
p.done[folder] = false p.done[folder] = false
if debug {
l.Debugf("RemoteIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m) l.Debugf("RemoteIndexUpdated %v %v done=false\n\t%+v", p.id, folder, m)
}
p.eventMut.Unlock() p.eventMut.Unlock()
case "FolderSummary": case "FolderSummary":
@ -537,9 +531,7 @@ func (p *Process) eventLoop() {
done := need == 0 done := need == 0
p.eventMut.Lock() p.eventMut.Lock()
p.done[folder] = done p.done[folder] = done
if debug {
l.Debugf("Foldersummary %v %v\n\t%+v", p.id, folder, p.done) l.Debugf("Foldersummary %v %v\n\t%+v", p.id, folder, p.done)
}
p.eventMut.Unlock() p.eventMut.Unlock()
} }
} }

View File

@ -66,15 +66,11 @@ func (c *ProtocolClient) Serve() {
defer close(c.stopped) defer close(c.stopped)
if err := c.connect(); err != nil { if err := c.connect(); err != nil {
if debug {
l.Debugln("Relay connect:", err) l.Debugln("Relay connect:", err)
}
return return
} }
if debug {
l.Debugln(c, "connected", c.conn.RemoteAddr()) l.Debugln(c, "connected", c.conn.RemoteAddr())
}
if err := c.join(); err != nil { if err := c.join(); err != nil {
c.conn.Close() c.conn.Close()
@ -87,9 +83,7 @@ func (c *ProtocolClient) Serve() {
return return
} }
if debug {
l.Debugln(c, "joined", c.conn.RemoteAddr(), "via", c.conn.LocalAddr()) l.Debugln(c, "joined", c.conn.RemoteAddr(), "via", c.conn.LocalAddr())
}
defer c.cleanup() defer c.cleanup()
c.mut.Lock() c.mut.Lock()
@ -107,9 +101,7 @@ func (c *ProtocolClient) Serve() {
select { select {
case message := <-messages: case message := <-messages:
timeout.Reset(c.timeout) timeout.Reset(c.timeout)
if debug {
log.Printf("%s received message %T", c, message) log.Printf("%s received message %T", c, message)
}
switch msg := message.(type) { switch msg := message.(type) {
case protocol.Ping: case protocol.Ping:
@ -118,9 +110,7 @@ func (c *ProtocolClient) Serve() {
return return
} }
if debug {
l.Debugln(c, "sent pong") l.Debugln(c, "sent pong")
}
case protocol.SessionInvitation: case protocol.SessionInvitation:
ip := net.IP(msg.Address) ip := net.IP(msg.Address)
@ -135,9 +125,7 @@ func (c *ProtocolClient) Serve() {
} }
case <-c.stop: case <-c.stop:
if debug {
l.Debugln(c, "stopping") l.Debugln(c, "stopping")
}
return return
case err := <-errors: case err := <-errors:
@ -145,9 +133,7 @@ func (c *ProtocolClient) Serve() {
return return
case <-timeout.C: case <-timeout.C:
if debug {
l.Debugln(c, "timed out") l.Debugln(c, "timed out")
}
return return
} }
} }
@ -220,9 +206,7 @@ func (c *ProtocolClient) cleanup() {
c.Invitations = make(chan protocol.SessionInvitation) c.Invitations = make(chan protocol.SessionInvitation)
} }
if debug {
l.Debugln(c, "cleaning up") l.Debugln(c, "cleaning up")
}
c.mut.Lock() c.mut.Lock()
c.connected = false c.connected = false

View File

@ -6,10 +6,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "relay") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("relay", "")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("relay", strings.Contains(os.Getenv("STTRACE"), "relay") || os.Getenv("STTRACE") == "all")
}

View File

@ -49,9 +49,7 @@ func GetInvitationFromRelay(uri *url.URL, id syncthingprotocol.DeviceID, certs [
case protocol.Response: case protocol.Response:
return protocol.SessionInvitation{}, fmt.Errorf("Incorrect response code %d: %s", msg.Code, msg.Message) return protocol.SessionInvitation{}, fmt.Errorf("Incorrect response code %d: %s", msg.Code, msg.Message)
case protocol.SessionInvitation: case protocol.SessionInvitation:
if debug {
l.Debugln("Received invitation", msg, "via", conn.LocalAddr()) l.Debugln("Received invitation", msg, "via", conn.LocalAddr())
}
ip := net.IP(msg.Address) ip := net.IP(msg.Address)
if len(ip) == 0 || ip.IsUnspecified() { if len(ip) == 0 || ip.IsUnspecified() {
msg.Address = conn.RemoteAddr().(*net.TCPAddr).IP[:] msg.Address = conn.RemoteAddr().(*net.TCPAddr).IP[:]

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "relay") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("relay", "Relay connection handling")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("relay", strings.Contains(os.Getenv("STTRACE"), "relay") || os.Getenv("STTRACE") == "all")
}

View File

@ -47,9 +47,7 @@ func NewSvc(cfg *config.Wrapper, tlsCfg *tls.Config) *Svc {
svc := &Svc{ svc := &Svc{
Supervisor: suture.New("Svc", suture.Spec{ Supervisor: suture.New("Svc", suture.Spec{
Log: func(log string) { Log: func(log string) {
if debug {
l.Debugln(log) l.Debugln(log)
}
}, },
FailureBackoff: 5 * time.Minute, FailureBackoff: 5 * time.Minute,
FailureDecay: float64((10 * time.Minute) / time.Second), FailureDecay: float64((10 * time.Minute) / time.Second),
@ -102,9 +100,7 @@ func (s *Svc) CommitConfiguration(from, to config.Configuration) bool {
for _, addr := range to.Options.RelayServers { for _, addr := range to.Options.RelayServers {
uri, err := url.Parse(addr) uri, err := url.Parse(addr)
if err != nil { if err != nil {
if debug {
l.Debugln("Failed to parse relay address", addr, err) l.Debugln("Failed to parse relay address", addr, err)
}
continue continue
} }
existing[uri.String()] = uri existing[uri.String()] = uri
@ -120,15 +116,11 @@ func (s *Svc) CommitConfiguration(from, to config.Configuration) bool {
// Trim off the `dynamic+` prefix // Trim off the `dynamic+` prefix
uri.Scheme = uri.Scheme[8:] uri.Scheme = uri.Scheme[8:]
if debug {
l.Debugln("Looking up dynamic relays from", uri) l.Debugln("Looking up dynamic relays from", uri)
}
data, err := http.Get(uri.String()) data, err := http.Get(uri.String())
if err != nil { if err != nil {
if debug {
l.Debugln("Failed to lookup dynamic relays", err) l.Debugln("Failed to lookup dynamic relays", err)
}
continue continue
} }
@ -136,9 +128,7 @@ func (s *Svc) CommitConfiguration(from, to config.Configuration) bool {
err = json.NewDecoder(data.Body).Decode(&ann) err = json.NewDecoder(data.Body).Decode(&ann)
data.Body.Close() data.Body.Close()
if err != nil { if err != nil {
if debug {
l.Debugln("Failed to lookup dynamic relays", err) l.Debugln("Failed to lookup dynamic relays", err)
}
continue continue
} }
@ -146,26 +136,20 @@ func (s *Svc) CommitConfiguration(from, to config.Configuration) bool {
for _, relayAnn := range ann.Relays { for _, relayAnn := range ann.Relays {
ruri, err := url.Parse(relayAnn.URL) ruri, err := url.Parse(relayAnn.URL)
if err != nil { if err != nil {
if debug {
l.Debugln("Failed to parse dynamic relay address", relayAnn.URL, err) l.Debugln("Failed to parse dynamic relay address", relayAnn.URL, err)
}
continue continue
} }
if debug {
l.Debugln("Found", ruri, "via", uri) l.Debugln("Found", ruri, "via", uri)
}
dynRelayAddrs = append(dynRelayAddrs, ruri.String()) dynRelayAddrs = append(dynRelayAddrs, ruri.String())
} }
if len(dynRelayAddrs) > 0 { if len(dynRelayAddrs) > 0 {
dynRelayAddrs = relayAddressesSortedByLatency(dynRelayAddrs) dynRelayAddrs = relayAddressesSortedByLatency(dynRelayAddrs)
closestRelay := dynRelayAddrs[0] closestRelay := dynRelayAddrs[0]
if debug {
l.Debugln("Picking", closestRelay, "as closest dynamic relay from", uri) l.Debugln("Picking", closestRelay, "as closest dynamic relay from", uri)
}
ruri, _ := url.Parse(closestRelay) ruri, _ := url.Parse(closestRelay)
existing[closestRelay] = ruri existing[closestRelay] = ruri
} else if debug { } else {
l.Debugln("No dynamic relay found on", uri) l.Debugln("No dynamic relay found on", uri)
} }
} }
@ -175,9 +159,7 @@ func (s *Svc) CommitConfiguration(from, to config.Configuration) bool {
for key, uri := range existing { for key, uri := range existing {
_, ok := s.tokens[key] _, ok := s.tokens[key]
if !ok { if !ok {
if debug {
l.Debugln("Connecting to relay", uri) l.Debugln("Connecting to relay", uri)
}
c := client.NewProtocolClient(uri, s.tlsCfg.Certificates, s.invitations) c := client.NewProtocolClient(uri, s.tlsCfg.Certificates, s.invitations)
s.tokens[key] = s.Add(c) s.tokens[key] = s.Add(c)
s.clients[key] = c s.clients[key] = c
@ -190,11 +172,9 @@ func (s *Svc) CommitConfiguration(from, to config.Configuration) bool {
err := s.Remove(token) err := s.Remove(token)
delete(s.tokens, key) delete(s.tokens, key)
delete(s.clients, key) delete(s.clients, key)
if debug {
l.Debugln("Disconnecting from relay", key, err) l.Debugln("Disconnecting from relay", key, err)
} }
} }
}
s.mut.Unlock() s.mut.Unlock()
@ -262,14 +242,10 @@ func (r *invitationReceiver) Serve() {
for { for {
select { select {
case inv := <-r.invitations: case inv := <-r.invitations:
if debug {
l.Debugln("Received relay invitation", inv) l.Debugln("Received relay invitation", inv)
}
conn, err := client.JoinSession(inv) conn, err := client.JoinSession(inv)
if err != nil { if err != nil {
if debug {
l.Debugf("Failed to join relay session %s: %v", inv, err) l.Debugf("Failed to join relay session %s: %v", inv, err)
}
continue continue
} }

View File

@ -42,9 +42,7 @@ func newParallelHasher(dir string, blockSize, workers int, outbox, inbox chan pr
func HashFile(path string, blockSize int, sizeHint int64, counter *int64) ([]protocol.BlockInfo, error) { func HashFile(path string, blockSize int, sizeHint int64, counter *int64) ([]protocol.BlockInfo, error) {
fd, err := os.Open(path) fd, err := os.Open(path)
if err != nil { if err != nil {
if debug {
l.Debugln("open:", err) l.Debugln("open:", err)
}
return []protocol.BlockInfo{}, err return []protocol.BlockInfo{}, err
} }
defer fd.Close() defer fd.Close()
@ -52,9 +50,7 @@ func HashFile(path string, blockSize int, sizeHint int64, counter *int64) ([]pro
if sizeHint == 0 { if sizeHint == 0 {
fi, err := fd.Stat() fi, err := fd.Stat()
if err != nil { if err != nil {
if debug {
l.Debugln("stat:", err) l.Debugln("stat:", err)
}
return []protocol.BlockInfo{}, err return []protocol.BlockInfo{}, err
} }
sizeHint = fi.Size() sizeHint = fi.Size()
@ -71,9 +67,7 @@ func hashFiles(dir string, blockSize int, outbox, inbox chan protocol.FileInfo,
blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize, f.CachedSize, counter) blocks, err := HashFile(filepath.Join(dir, f.Name), blockSize, f.CachedSize, counter)
if err != nil { if err != nil {
if debug {
l.Debugln("hash error:", f.Name, err) l.Debugln("hash error:", f.Name, err)
}
continue continue
} }

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "scanner") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("scanner", "File change detection and hashing")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("scanner", strings.Contains(os.Getenv("STTRACE"), "scanner") || os.Getenv("STTRACE") == "all")
}

View File

@ -94,9 +94,7 @@ type IgnoreMatcher interface {
// Walk returns the list of files found in the local folder by scanning the // Walk returns the list of files found in the local folder by scanning the
// file system. Files are blockwise hashed. // file system. Files are blockwise hashed.
func (w *Walker) Walk() (chan protocol.FileInfo, error) { func (w *Walker) Walk() (chan protocol.FileInfo, error) {
if debug {
l.Debugln("Walk", w.Dir, w.Subs, w.BlockSize, w.Matcher) l.Debugln("Walk", w.Dir, w.Subs, w.BlockSize, w.Matcher)
}
err := checkDir(w.Dir) err := checkDir(w.Dir)
if err != nil { if err != nil {
@ -159,16 +157,12 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
for { for {
select { select {
case <-done: case <-done:
if debug {
l.Debugln("Walk progress done", w.Dir, w.Subs, w.BlockSize, w.Matcher) l.Debugln("Walk progress done", w.Dir, w.Subs, w.BlockSize, w.Matcher)
}
ticker.Stop() ticker.Stop()
return return
case <-ticker.C: case <-ticker.C:
current := atomic.LoadInt64(&progress) current := atomic.LoadInt64(&progress)
if debug {
l.Debugf("Walk %s %s current progress %d/%d (%d%%)", w.Dir, w.Subs, current, total, current*100/total) l.Debugf("Walk %s %s current progress %d/%d (%d%%)", w.Dir, w.Subs, current, total, current*100/total)
}
events.Default.Log(events.FolderScanProgress, map[string]interface{}{ events.Default.Log(events.FolderScanProgress, map[string]interface{}{
"folder": w.Folder, "folder": w.Folder,
"current": current, "current": current,
@ -179,9 +173,7 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
}() }()
for _, file := range filesToHash { for _, file := range filesToHash {
if debug {
l.Debugln("real to hash:", file.Name) l.Debugln("real to hash:", file.Name)
}
realToHashChan <- file realToHashChan <- file
} }
close(realToHashChan) close(realToHashChan)
@ -202,17 +194,13 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
} }
if err != nil { if err != nil {
if debug {
l.Debugln("error:", p, info, err) l.Debugln("error:", p, info, err)
}
return skip return skip
} }
rn, err := filepath.Rel(w.Dir, p) rn, err := filepath.Rel(w.Dir, p)
if err != nil { if err != nil {
if debug {
l.Debugln("rel error:", p, err) l.Debugln("rel error:", p, err)
}
return skip return skip
} }
@ -227,24 +215,18 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) { if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
// A temporary file // A temporary file
if debug {
l.Debugln("temporary:", rn) l.Debugln("temporary:", rn)
}
if info.Mode().IsRegular() && mtime.Add(w.TempLifetime).Before(now) { if info.Mode().IsRegular() && mtime.Add(w.TempLifetime).Before(now) {
os.Remove(p) os.Remove(p)
if debug {
l.Debugln("removing temporary:", rn, mtime) l.Debugln("removing temporary:", rn, mtime)
} }
}
return nil return nil
} }
if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stfolder" || if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stfolder" ||
strings.HasPrefix(rn, ".stversions") || (w.Matcher != nil && w.Matcher.Match(rn)) { strings.HasPrefix(rn, ".stversions") || (w.Matcher != nil && w.Matcher.Match(rn)) {
// An ignored file // An ignored file
if debug {
l.Debugln("ignored:", rn) l.Debugln("ignored:", rn)
}
return skip return skip
} }
@ -313,17 +295,13 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
target, targetType, err := symlinks.Read(p) target, targetType, err := symlinks.Read(p)
if err != nil { if err != nil {
if debug {
l.Debugln("readlink error:", p, err) l.Debugln("readlink error:", p, err)
}
return skip return skip
} }
blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0, nil) blocks, err := Blocks(strings.NewReader(target), w.BlockSize, 0, nil)
if err != nil { if err != nil {
if debug {
l.Debugln("hash link error:", p, err) l.Debugln("hash link error:", p, err)
}
return skip return skip
} }
@ -349,9 +327,7 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
Blocks: blocks, Blocks: blocks,
} }
if debug {
l.Debugln("symlink changedb:", p, f) l.Debugln("symlink changedb:", p, f)
}
dchan <- f dchan <- f
@ -386,9 +362,7 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
Flags: flags, Flags: flags,
Modified: mtime.Unix(), Modified: mtime.Unix(),
} }
if debug {
l.Debugln("dir:", p, f) l.Debugln("dir:", p, f)
}
dchan <- f dchan <- f
return nil return nil
} }
@ -416,10 +390,8 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
return nil return nil
} }
if debug {
l.Debugln("rescan:", cf, mtime.Unix(), info.Mode()&os.ModePerm) l.Debugln("rescan:", cf, mtime.Unix(), info.Mode()&os.ModePerm)
} }
}
var flags = curMode & uint32(maskModePerm) var flags = curMode & uint32(maskModePerm)
if w.IgnorePerms { if w.IgnorePerms {
@ -433,9 +405,7 @@ func (w *Walker) walkAndHashFiles(fchan, dchan chan protocol.FileInfo) filepath.
Modified: mtime.Unix(), Modified: mtime.Unix(),
CachedSize: info.Size(), CachedSize: info.Size(),
} }
if debug {
l.Debugln("to hash:", p, f) l.Debugln("to hash:", p, f)
}
fchan <- f fchan <- f
} }
@ -448,7 +418,7 @@ func checkDir(dir string) error {
return err return err
} else if !info.IsDir() { } else if !info.IsDir() {
return errors.New(dir + ": not a directory") return errors.New(dir + ": not a directory")
} else if debug { } else {
l.Debugln("checkDir", dir, info) l.Debugln("checkDir", dir, info)
} }
return nil return nil

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "stats") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("stats", "Persistent device and folder statistics")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("stats", strings.Contains(os.Getenv("STTRACE"), "stats") || os.Getenv("STTRACE") == "all")
}

View File

@ -37,16 +37,12 @@ func (s *DeviceStatisticsReference) GetLastSeen() time.Time {
// time.Time{} from s.ns // time.Time{} from s.ns
return time.Unix(0, 0) return time.Unix(0, 0)
} }
if debug {
l.Debugln("stats.DeviceStatisticsReference.GetLastSeen:", s.device, t) l.Debugln("stats.DeviceStatisticsReference.GetLastSeen:", s.device, t)
}
return t return t
} }
func (s *DeviceStatisticsReference) WasSeen() { func (s *DeviceStatisticsReference) WasSeen() {
if debug {
l.Debugln("stats.DeviceStatisticsReference.WasSeen:", s.device) l.Debugln("stats.DeviceStatisticsReference.WasSeen:", s.device)
}
s.ns.PutTime("lastSeen", time.Now()) s.ns.PutTime("lastSeen", time.Now())
} }

View File

@ -54,9 +54,7 @@ func (s *FolderStatisticsReference) GetLastFile() LastFile {
} }
func (s *FolderStatisticsReference) ReceivedFile(file string, deleted bool) { func (s *FolderStatisticsReference) ReceivedFile(file string, deleted bool) {
if debug {
l.Debugln("stats.FolderStatisticsReference.ReceivedFile:", s.folder, file) l.Debugln("stats.FolderStatisticsReference.ReceivedFile:", s.folder, file)
}
s.ns.PutTime("lastFileAt", time.Now()) s.ns.PutTime("lastFileAt", time.Now())
s.ns.PutString("lastFileName", file) s.ns.PutString("lastFileName", file)
s.ns.PutBool("lastFileDeleted", deleted) s.ns.PutBool("lastFileDeleted", deleted)

View File

@ -12,20 +12,28 @@ import (
"strings" "strings"
"time" "time"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "locks") || os.Getenv("STTRACE") == "all"
threshold = time.Duration(100 * time.Millisecond) threshold = time.Duration(100 * time.Millisecond)
l = logger.DefaultLogger l = logger.DefaultLogger.NewFacility("sync", "Mutexes")
// We make an exception in this package and have an actual "if debug { ...
// }" variable, as it may be rather performance critical and does
// nonstandard things (from a debug logging PoV).
debug = strings.Contains(os.Getenv("STTRACE"), "sync") || os.Getenv("STTRACE") == "all"
) )
func init() { func init() {
if n, err := strconv.Atoi(os.Getenv("STLOCKTHRESHOLD")); debug && err == nil { l.SetDebug("sync", strings.Contains(os.Getenv("STTRACE"), "sync") || os.Getenv("STTRACE") == "all")
if n, err := strconv.Atoi(os.Getenv("STLOCKTHRESHOLD")); err == nil {
threshold = time.Duration(n) * time.Millisecond threshold = time.Duration(n) * time.Millisecond
} }
if debug {
l.Debugf("Enabling lock logging at %v threshold", threshold) l.Debugf("Enabling lock logging at %v threshold", threshold)
} }
func shouldDebug() bool {
return l.ShouldDebug("sync")
} }
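The comment above spells out why this package is the exception: the interesting part of its debug output is a timing measurement, and taking that measurement costs something even when the result would be thrown away. A hedged sketch of the intended shape; mut and start are placeholders, and only debug, threshold and l come from this file.

```go
// Sketch only: the plain package-level bool gates the measurement itself,
// so nothing is timed or formatted on the fast path when tracing is off.
start := time.Now()
mut.Lock()
if debug {
	if d := time.Since(start); d >= threshold {
		l.Debugf("waited %v to acquire lock (threshold %v)", d, threshold)
	}
}
```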

View File

@ -12,7 +12,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
const ( const (
@ -23,6 +23,7 @@ const (
func TestTypes(t *testing.T) { func TestTypes(t *testing.T) {
debug = false debug = false
l.SetDebug("sync", false)
if _, ok := NewMutex().(*sync.Mutex); !ok { if _, ok := NewMutex().(*sync.Mutex); !ok {
t.Error("Wrong type") t.Error("Wrong type")
@ -37,6 +38,7 @@ func TestTypes(t *testing.T) {
} }
debug = true debug = true
l.SetDebug("sync", true)
if _, ok := NewMutex().(*loggedMutex); !ok { if _, ok := NewMutex().(*loggedMutex); !ok {
t.Error("Wrong type") t.Error("Wrong type")
@ -51,10 +53,12 @@ func TestTypes(t *testing.T) {
} }
debug = false debug = false
l.SetDebug("sync", false)
} }
func TestMutex(t *testing.T) { func TestMutex(t *testing.T) {
debug = true debug = true
l.SetDebug("sync", true)
threshold = logThreshold threshold = logThreshold
msgmut := sync.Mutex{} msgmut := sync.Mutex{}
@ -84,10 +88,12 @@ func TestMutex(t *testing.T) {
} }
debug = false debug = false
l.SetDebug("sync", false)
} }
func TestRWMutex(t *testing.T) { func TestRWMutex(t *testing.T) {
debug = true debug = true
l.SetDebug("sync", true)
threshold = logThreshold threshold = logThreshold
msgmut := sync.Mutex{} msgmut := sync.Mutex{}
@ -142,10 +148,12 @@ func TestRWMutex(t *testing.T) {
mut.RUnlock() mut.RUnlock()
debug = false debug = false
l.SetDebug("sync", false)
} }
func TestWaitGroup(t *testing.T) { func TestWaitGroup(t *testing.T) {
debug = true debug = true
l.SetDebug("sync", true)
threshold = logThreshold threshold = logThreshold
msgmut := sync.Mutex{} msgmut := sync.Mutex{}
@ -182,4 +190,5 @@ func TestWaitGroup(t *testing.T) {
} }
debug = false debug = false
l.SetDebug("sync", false)
} }
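Because this package now carries both the old debug bool and the facility flag, each test toggles the two of them in tandem. A hypothetical helper that keeps them in step (not part of the commit, shown only to make the duplication explicit):

```go
// setDebug is a hypothetical test helper; the tests above perform these
// same two assignments by hand around every case.
func setDebug(enable bool) {
	debug = enable
	l.SetDebug("sync", enable)
}
```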

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "upgrade") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("upgrade", "Binary upgrades")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("upgrade", strings.Contains(os.Getenv("STTRACE"), "upgrade") || os.Getenv("STTRACE") == "all")
}

View File

@ -96,12 +96,8 @@ func SelectLatestRelease(version string, rels []Release) (Release, error) {
assetName := path.Base(asset.Name) assetName := path.Base(asset.Name)
// Check for the architecture // Check for the architecture
expectedRelease := releaseName(rel.Tag) expectedRelease := releaseName(rel.Tag)
if debug {
l.Debugf("expected release asset %q", expectedRelease) l.Debugf("expected release asset %q", expectedRelease)
}
if debug {
l.Debugln("considering release", assetName) l.Debugln("considering release", assetName)
}
if strings.HasPrefix(assetName, expectedRelease) { if strings.HasPrefix(assetName, expectedRelease) {
return rel, nil return rel, nil
} }
@ -113,14 +109,10 @@ func SelectLatestRelease(version string, rels []Release) (Release, error) {
// Upgrade to the given release, saving the previous binary with a ".old" extension. // Upgrade to the given release, saving the previous binary with a ".old" extension.
func upgradeTo(binary string, rel Release) error { func upgradeTo(binary string, rel Release) error {
expectedRelease := releaseName(rel.Tag) expectedRelease := releaseName(rel.Tag)
if debug {
l.Debugf("expected release asset %q", expectedRelease) l.Debugf("expected release asset %q", expectedRelease)
}
for _, asset := range rel.Assets { for _, asset := range rel.Assets {
assetName := path.Base(asset.Name) assetName := path.Base(asset.Name)
if debug {
l.Debugln("considering release", assetName) l.Debugln("considering release", assetName)
}
if strings.HasPrefix(assetName, expectedRelease) { if strings.HasPrefix(assetName, expectedRelease) {
return upgradeToURL(binary, asset.URL) return upgradeToURL(binary, asset.URL)
@ -151,9 +143,7 @@ func upgradeToURL(binary string, url string) error {
} }
func readRelease(dir, url string) (string, error) { func readRelease(dir, url string) (string, error) {
if debug {
l.Debugf("loading %q", url) l.Debugf("loading %q", url)
}
req, err := http.NewRequest("GET", url, nil) req, err := http.NewRequest("GET", url, nil)
if err != nil { if err != nil {
@ -199,9 +189,7 @@ func readTarGz(dir string, r io.Reader) (string, error) {
shortName := path.Base(hdr.Name) shortName := path.Base(hdr.Name)
if debug {
l.Debugf("considering file %q", shortName) l.Debugf("considering file %q", shortName)
}
err = archiveFileVisitor(dir, &tempName, &sig, shortName, tr) err = archiveFileVisitor(dir, &tempName, &sig, shortName, tr)
if err != nil { if err != nil {
@ -238,9 +226,7 @@ func readZip(dir string, r io.Reader) (string, error) {
for _, file := range archive.File { for _, file := range archive.File {
shortName := path.Base(file.Name) shortName := path.Base(file.Name)
if debug {
l.Debugf("considering file %q", shortName) l.Debugf("considering file %q", shortName)
}
inFile, err := file.Open() inFile, err := file.Open()
if err != nil { if err != nil {
@ -271,18 +257,14 @@ func archiveFileVisitor(dir string, tempFile *string, signature *[]byte, filenam
var err error var err error
switch filename { switch filename {
case "syncthing", "syncthing.exe": case "syncthing", "syncthing.exe":
if debug {
l.Debugln("reading binary") l.Debugln("reading binary")
}
*tempFile, err = writeBinary(dir, filedata) *tempFile, err = writeBinary(dir, filedata)
if err != nil { if err != nil {
return err return err
} }
case "syncthing.sig", "syncthing.exe.sig": case "syncthing.sig", "syncthing.exe.sig":
if debug {
l.Debugln("reading signature") l.Debugln("reading signature")
}
*signature, err = ioutil.ReadAll(filedata) *signature, err = ioutil.ReadAll(filedata)
if err != nil { if err != nil {
return err return err
@ -300,9 +282,7 @@ func verifyUpgrade(tempName string, sig []byte) error {
return fmt.Errorf("no signature found") return fmt.Errorf("no signature found")
} }
if debug {
l.Debugf("checking signature\n%s", sig) l.Debugf("checking signature\n%s", sig)
}
fd, err := os.Open(tempName) fd, err := os.Open(tempName)
if err != nil { if err != nil {

View File

@ -10,10 +10,17 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "upnp") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("upnp", "UPnP discovery and port mapping")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("upnp", strings.Contains(os.Getenv("STTRACE"), "upnp") || os.Getenv("STTRACE") == "all")
}
func shouldDebug() bool {
return l.ShouldDebug("upnp")
}

View File

@ -129,7 +129,7 @@ nextResult:
for result := range resultChan { for result := range resultChan {
for _, existingResult := range results { for _, existingResult := range results {
if existingResult.uuid == result.uuid { if existingResult.uuid == result.uuid {
if debug { if shouldDebug() {
l.Debugf("Skipping duplicate result %s with services:", result.uuid) l.Debugf("Skipping duplicate result %s with services:", result.uuid)
for _, svc := range result.services { for _, svc := range result.services {
l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL) l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL)
@ -140,7 +140,7 @@ nextResult:
} }
results = append(results, result) results = append(results, result)
if debug { if shouldDebug() {
l.Debugf("UPnP discovery result %s with services:", result.uuid) l.Debugf("UPnP discovery result %s with services:", result.uuid)
for _, svc := range result.services { for _, svc := range result.services {
l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL) l.Debugf("* [%s] %s", svc.serviceID, svc.serviceURL)
@ -167,15 +167,11 @@ Mx: %d
search := []byte(strings.Replace(searchStr, "\n", "\r\n", -1)) search := []byte(strings.Replace(searchStr, "\n", "\r\n", -1))
if debug { l.Debugln("Starting discovery of device type", deviceType, "on", intf.Name)
l.Debugln("Starting discovery of device type " + deviceType + " on " + intf.Name)
}
socket, err := net.ListenMulticastUDP("udp4", intf, &net.UDPAddr{IP: ssdp.IP}) socket, err := net.ListenMulticastUDP("udp4", intf, &net.UDPAddr{IP: ssdp.IP})
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
return return
} }
defer socket.Close() // Make sure our socket gets closed defer socket.Close() // Make sure our socket gets closed
@ -186,9 +182,7 @@ Mx: %d
return return
} }
if debug { l.Debugln("Sending search request for device type", deviceType, "on", intf.Name)
l.Debugln("Sending search request for device type " + deviceType + " on " + intf.Name)
}
_, err = socket.WriteTo(search, ssdp) _, err = socket.WriteTo(search, ssdp)
if err != nil { if err != nil {
@ -196,9 +190,7 @@ Mx: %d
return return
} }
if debug { l.Debugln("Listening for UPnP response for device type", deviceType, "on", intf.Name)
l.Debugln("Listening for UPnP response for device type " + deviceType + " on " + intf.Name)
}
// Listen for responses until a timeout is reached // Listen for responses until a timeout is reached
for { for {
@ -217,15 +209,11 @@ Mx: %d
} }
results <- igd results <- igd
} }
if debug { l.Debugln("Discovery for device type", deviceType, "on", intf.Name, "finished.")
l.Debugln("Discovery for device type " + deviceType + " on " + intf.Name + " finished.")
}
} }
func parseResponse(deviceType string, resp []byte) (IGD, error) { func parseResponse(deviceType string, resp []byte) (IGD, error) {
if debug {
l.Debugln("Handling UPnP response:\n\n" + string(resp)) l.Debugln("Handling UPnP response:\n\n" + string(resp))
}
reader := bufio.NewReader(bytes.NewBuffer(resp)) reader := bufio.NewReader(bytes.NewBuffer(resp))
request := &http.Request{} request := &http.Request{}
@ -368,7 +356,7 @@ func getIGDServices(rootURL string, device upnpDevice, wanDeviceURN string, wanC
devices := getChildDevices(device, wanDeviceURN) devices := getChildDevices(device, wanDeviceURN)
if len(devices) < 1 { if len(devices) < 1 {
l.Infoln("[" + rootURL + "] Malformed InternetGatewayDevice description: no WANDevices specified.") l.Infoln(rootURL, "- malformed InternetGatewayDevice description: no WANDevices specified.")
return result return result
} }
@ -376,27 +364,23 @@ func getIGDServices(rootURL string, device upnpDevice, wanDeviceURN string, wanC
connections := getChildDevices(device, wanConnectionURN) connections := getChildDevices(device, wanConnectionURN)
if len(connections) < 1 { if len(connections) < 1 {
l.Infoln("[" + rootURL + "] Malformed " + wanDeviceURN + " description: no WANConnectionDevices specified.") l.Infoln(rootURL, "- malformed ", wanDeviceURN, "description: no WANConnectionDevices specified.")
} }
for _, connection := range connections { for _, connection := range connections {
for _, serviceURN := range serviceURNs { for _, serviceURN := range serviceURNs {
services := getChildServices(connection, serviceURN) services := getChildServices(connection, serviceURN)
if len(services) < 1 && debug { l.Debugln(rootURL, "- no services of type", serviceURN, " found on connection.")
l.Debugln("[" + rootURL + "] No services of type " + serviceURN + " found on connection.")
}
for _, service := range services { for _, service := range services {
if len(service.ControlURL) == 0 { if len(service.ControlURL) == 0 {
l.Infoln("[" + rootURL + "] Malformed " + service.ServiceType + " description: no control URL.") l.Infoln(rootURL+"- malformed", service.ServiceType, "description: no control URL.")
} else { } else {
u, _ := url.Parse(rootURL) u, _ := url.Parse(rootURL)
replaceRawPath(u, service.ControlURL) replaceRawPath(u, service.ControlURL)
if debug { l.Debugln(rootURL, "- found", service.ServiceType, "with URL", u)
l.Debugln("[" + rootURL + "] Found " + service.ServiceType + " with URL " + u.String())
}
service := IGDService{serviceID: service.ServiceID, serviceURL: u.String(), serviceURN: service.ServiceType} service := IGDService{serviceID: service.ServiceID, serviceURL: u.String(), serviceURN: service.ServiceType}
@ -456,24 +440,18 @@ func soapRequest(url, service, function, message string) ([]byte, error) {
req.Header.Set("Cache-Control", "no-cache") req.Header.Set("Cache-Control", "no-cache")
req.Header.Set("Pragma", "no-cache") req.Header.Set("Pragma", "no-cache")
if debug {
l.Debugln("SOAP Request URL: " + url) l.Debugln("SOAP Request URL: " + url)
l.Debugln("SOAP Action: " + req.Header.Get("SOAPAction")) l.Debugln("SOAP Action: " + req.Header.Get("SOAPAction"))
l.Debugln("SOAP Request:\n\n" + body) l.Debugln("SOAP Request:\n\n" + body)
}
r, err := http.DefaultClient.Do(req) r, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
if debug {
l.Debugln(err) l.Debugln(err)
}
return resp, err return resp, err
} }
resp, _ = ioutil.ReadAll(r.Body) resp, _ = ioutil.ReadAll(r.Body)
if debug { l.Debugf("SOAP Response: %s\n\n%s\n\n", r.Status, resp)
l.Debugf("SOAP Response: %v\n\n%v\n\n", r.StatusCode, string(resp))
}
r.Body.Close() r.Body.Close()

View File

@ -10,10 +10,13 @@ import (
"os" "os"
"strings" "strings"
"github.com/calmh/logger" "github.com/syncthing/syncthing/lib/logger"
) )
var ( var (
debug = strings.Contains(os.Getenv("STTRACE"), "versioner") || os.Getenv("STTRACE") == "all" l = logger.DefaultLogger.NewFacility("versioner", "File versioning")
l = logger.DefaultLogger
) )
func init() {
l.SetDebug("versioner", strings.Contains(os.Getenv("STTRACE"), "versioner") || os.Getenv("STTRACE") == "all")
}

View File

@ -34,9 +34,7 @@ func NewExternal(folderID, folderPath string, params map[string]string) Versione
folderPath: folderPath, folderPath: folderPath,
} }
if debug {
l.Debugf("instantiated %#v", s) l.Debugf("instantiated %#v", s)
}
return s return s
} }
@ -45,17 +43,13 @@ func NewExternal(folderID, folderPath string, params map[string]string) Versione
func (v External) Archive(filePath string) error { func (v External) Archive(filePath string) error {
_, err := osutil.Lstat(filePath) _, err := osutil.Lstat(filePath)
if os.IsNotExist(err) { if os.IsNotExist(err) {
if debug {
l.Debugln("not archiving nonexistent file", filePath) l.Debugln("not archiving nonexistent file", filePath)
}
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
} }
if debug {
l.Debugln("archiving", filePath) l.Debugln("archiving", filePath)
}
inFolderPath, err := filepath.Rel(v.folderPath, filePath) inFolderPath, err := filepath.Rel(v.folderPath, filePath)
if err != nil { if err != nil {

View File

@ -35,9 +35,7 @@ func NewSimple(folderID, folderPath string, params map[string]string) Versioner
folderPath: folderPath, folderPath: folderPath,
} }
if debug {
l.Debugf("instantiated %#v", s) l.Debugf("instantiated %#v", s)
}
return s return s
} }
@ -46,9 +44,7 @@ func NewSimple(folderID, folderPath string, params map[string]string) Versioner
func (v Simple) Archive(filePath string) error { func (v Simple) Archive(filePath string) error {
fileInfo, err := osutil.Lstat(filePath) fileInfo, err := osutil.Lstat(filePath)
if os.IsNotExist(err) { if os.IsNotExist(err) {
if debug {
l.Debugln("not archiving nonexistent file", filePath) l.Debugln("not archiving nonexistent file", filePath)
}
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
@ -58,9 +54,7 @@ func (v Simple) Archive(filePath string) error {
_, err = os.Stat(versionsDir) _, err = os.Stat(versionsDir)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
if debug {
l.Debugln("creating versions dir", versionsDir) l.Debugln("creating versions dir", versionsDir)
}
osutil.MkdirAll(versionsDir, 0755) osutil.MkdirAll(versionsDir, 0755)
osutil.HideFile(versionsDir) osutil.HideFile(versionsDir)
} else { } else {
@ -68,9 +62,7 @@ func (v Simple) Archive(filePath string) error {
} }
} }
if debug {
l.Debugln("archiving", filePath) l.Debugln("archiving", filePath)
}
file := filepath.Base(filePath) file := filepath.Base(filePath)
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath)) inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
@ -86,9 +78,7 @@ func (v Simple) Archive(filePath string) error {
ver := taggedFilename(file, fileInfo.ModTime().Format(TimeFormat)) ver := taggedFilename(file, fileInfo.ModTime().Format(TimeFormat))
dst := filepath.Join(dir, ver) dst := filepath.Join(dir, ver)
if debug {
l.Debugln("moving to", dst) l.Debugln("moving to", dst)
}
err = osutil.Rename(filePath, dst) err = osutil.Rename(filePath, dst)
if err != nil { if err != nil {
return err return err
@ -114,9 +104,7 @@ func (v Simple) Archive(filePath string) error {
if len(versions) > v.keep { if len(versions) > v.keep {
for _, toRemove := range versions[:len(versions)-v.keep] { for _, toRemove := range versions[:len(versions)-v.keep] {
if debug {
l.Debugln("cleaning out", toRemove) l.Debugln("cleaning out", toRemove)
}
err = os.Remove(toRemove) err = os.Remove(toRemove)
if err != nil { if err != nil {
l.Warnln("removing old version:", err) l.Warnln("removing old version:", err)

View File

@ -47,14 +47,10 @@ func NewStaggered(folderID, folderPath string, params map[string]string) Version
// Use custom path if set, otherwise .stversions in folderPath // Use custom path if set, otherwise .stversions in folderPath
var versionsDir string var versionsDir string
if params["versionsPath"] == "" { if params["versionsPath"] == "" {
if debug {
l.Debugln("using default dir .stversions") l.Debugln("using default dir .stversions")
}
versionsDir = filepath.Join(folderPath, ".stversions") versionsDir = filepath.Join(folderPath, ".stversions")
} else { } else {
if debug {
l.Debugln("using dir", params["versionsPath"]) l.Debugln("using dir", params["versionsPath"])
}
versionsDir = params["versionsPath"] versionsDir = params["versionsPath"]
} }
@ -71,9 +67,7 @@ func NewStaggered(folderID, folderPath string, params map[string]string) Version
mutex: sync.NewMutex(), mutex: sync.NewMutex(),
} }
if debug {
l.Debugf("instantiated %#v", s) l.Debugf("instantiated %#v", s)
}
go func() { go func() {
s.clean() s.clean()
@ -86,14 +80,10 @@ func NewStaggered(folderID, folderPath string, params map[string]string) Version
} }
func (v Staggered) clean() { func (v Staggered) clean() {
if debug {
l.Debugln("Versioner clean: Waiting for lock on", v.versionsPath) l.Debugln("Versioner clean: Waiting for lock on", v.versionsPath)
}
v.mutex.Lock() v.mutex.Lock()
defer v.mutex.Unlock() defer v.mutex.Unlock()
if debug {
l.Debugln("Versioner clean: Cleaning", v.versionsPath) l.Debugln("Versioner clean: Cleaning", v.versionsPath)
}
if _, err := os.Stat(v.versionsPath); os.IsNotExist(err) { if _, err := os.Stat(v.versionsPath); os.IsNotExist(err) {
// There is no need to clean a nonexistent dir. // There is no need to clean a nonexistent dir.
@ -144,30 +134,22 @@ func (v Staggered) clean() {
} }
if path == v.versionsPath { if path == v.versionsPath {
if debug {
l.Debugln("Cleaner: versions dir is empty, don't delete", path) l.Debugln("Cleaner: versions dir is empty, don't delete", path)
}
continue continue
} }
if debug {
l.Debugln("Cleaner: deleting empty directory", path) l.Debugln("Cleaner: deleting empty directory", path)
}
err = os.Remove(path) err = os.Remove(path)
if err != nil { if err != nil {
l.Warnln("Versioner: can't remove directory", path, err) l.Warnln("Versioner: can't remove directory", path, err)
} }
} }
if debug {
l.Debugln("Cleaner: Finished cleaning", v.versionsPath) l.Debugln("Cleaner: Finished cleaning", v.versionsPath)
}
} }
func (v Staggered) expire(versions []string) { func (v Staggered) expire(versions []string) {
if debug {
l.Debugln("Versioner: Expiring versions", versions) l.Debugln("Versioner: Expiring versions", versions)
}
var prevAge int64 var prevAge int64
firstFile := true firstFile := true
for _, file := range versions { for _, file := range versions {
@ -185,18 +167,14 @@ func (v Staggered) expire(versions []string) {
loc, _ := time.LoadLocation("Local") loc, _ := time.LoadLocation("Local")
versionTime, err := time.ParseInLocation(TimeFormat, filenameTag(file), loc) versionTime, err := time.ParseInLocation(TimeFormat, filenameTag(file), loc)
if err != nil { if err != nil {
if debug {
l.Debugf("Versioner: file name %q is invalid: %v", file, err) l.Debugf("Versioner: file name %q is invalid: %v", file, err)
}
continue continue
} }
age := int64(time.Since(versionTime).Seconds()) age := int64(time.Since(versionTime).Seconds())
// If the file is older than the max age of the last interval, remove it // If the file is older than the max age of the last interval, remove it
if lastIntv := v.interval[len(v.interval)-1]; lastIntv.end > 0 && age > lastIntv.end { if lastIntv := v.interval[len(v.interval)-1]; lastIntv.end > 0 && age > lastIntv.end {
if debug {
l.Debugln("Versioner: File over maximum age -> delete ", file) l.Debugln("Versioner: File over maximum age -> delete ", file)
}
err = os.Remove(file) err = os.Remove(file)
if err != nil { if err != nil {
l.Warnf("Versioner: can't remove %q: %v", file, err) l.Warnf("Versioner: can't remove %q: %v", file, err)
@ -220,9 +198,7 @@ func (v Staggered) expire(versions []string) {
} }
if prevAge-age < usedInterval.step { if prevAge-age < usedInterval.step {
if debug {
l.Debugln("too many files in step -> delete", file) l.Debugln("too many files in step -> delete", file)
}
err = os.Remove(file) err = os.Remove(file)
if err != nil { if err != nil {
l.Warnf("Versioner: can't remove %q: %v", file, err) l.Warnf("Versioner: can't remove %q: %v", file, err)
@ -237,17 +213,13 @@ func (v Staggered) expire(versions []string) {
// Archive moves the named file away to a version archive. If this function // Archive moves the named file away to a version archive. If this function
// returns nil, the named file does not exist any more (has been archived). // returns nil, the named file does not exist any more (has been archived).
func (v Staggered) Archive(filePath string) error { func (v Staggered) Archive(filePath string) error {
if debug {
l.Debugln("Waiting for lock on ", v.versionsPath) l.Debugln("Waiting for lock on ", v.versionsPath)
}
v.mutex.Lock() v.mutex.Lock()
defer v.mutex.Unlock() defer v.mutex.Unlock()
_, err := osutil.Lstat(filePath) _, err := osutil.Lstat(filePath)
if os.IsNotExist(err) { if os.IsNotExist(err) {
if debug {
l.Debugln("not archiving nonexistent file", filePath) l.Debugln("not archiving nonexistent file", filePath)
}
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
@ -255,9 +227,7 @@ func (v Staggered) Archive(filePath string) error {
if _, err := os.Stat(v.versionsPath); err != nil { if _, err := os.Stat(v.versionsPath); err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
if debug {
l.Debugln("creating versions dir", v.versionsPath) l.Debugln("creating versions dir", v.versionsPath)
}
osutil.MkdirAll(v.versionsPath, 0755) osutil.MkdirAll(v.versionsPath, 0755)
osutil.HideFile(v.versionsPath) osutil.HideFile(v.versionsPath)
} else { } else {
@ -265,9 +235,7 @@ func (v Staggered) Archive(filePath string) error {
} }
} }
if debug {
l.Debugln("archiving", filePath) l.Debugln("archiving", filePath)
}
file := filepath.Base(filePath) file := filepath.Base(filePath)
inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath)) inFolderPath, err := filepath.Rel(v.folderPath, filepath.Dir(filePath))
@ -283,9 +251,7 @@ func (v Staggered) Archive(filePath string) error {
ver := taggedFilename(file, time.Now().Format(TimeFormat)) ver := taggedFilename(file, time.Now().Format(TimeFormat))
dst := filepath.Join(dir, ver) dst := filepath.Join(dir, ver)
if debug {
l.Debugln("moving to", dst) l.Debugln("moving to", dst)
}
err = osutil.Rename(filePath, dst) err = osutil.Rename(filePath, dst)
if err != nil { if err != nil {
return err return err

View File

@ -37,9 +37,7 @@ func NewTrashcan(folderID, folderPath string, params map[string]string) Versione
stop: make(chan struct{}), stop: make(chan struct{}),
} }
if debug {
l.Debugf("instantiated %#v", s) l.Debugf("instantiated %#v", s)
}
return s return s
} }
@ -48,9 +46,7 @@ func NewTrashcan(folderID, folderPath string, params map[string]string) Versione
func (t *Trashcan) Archive(filePath string) error { func (t *Trashcan) Archive(filePath string) error {
_, err := osutil.Lstat(filePath) _, err := osutil.Lstat(filePath)
if os.IsNotExist(err) { if os.IsNotExist(err) {
if debug {
l.Debugln("not archiving nonexistent file", filePath) l.Debugln("not archiving nonexistent file", filePath)
}
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
@ -62,18 +58,14 @@ func (t *Trashcan) Archive(filePath string) error {
return err return err
} }
if debug {
l.Debugln("creating versions dir", versionsDir) l.Debugln("creating versions dir", versionsDir)
}
if err := osutil.MkdirAll(versionsDir, 0777); err != nil { if err := osutil.MkdirAll(versionsDir, 0777); err != nil {
return err return err
} }
osutil.HideFile(versionsDir) osutil.HideFile(versionsDir)
} }
if debug {
l.Debugln("archiving", filePath) l.Debugln("archiving", filePath)
}
relativePath, err := filepath.Rel(t.folderPath, filePath) relativePath, err := filepath.Rel(t.folderPath, filePath)
if err != nil { if err != nil {
@ -85,9 +77,7 @@ func (t *Trashcan) Archive(filePath string) error {
return err return err
} }
if debug {
l.Debugln("moving to", archivedPath) l.Debugln("moving to", archivedPath)
}
if err := osutil.Rename(filePath, archivedPath); err != nil { if err := osutil.Rename(filePath, archivedPath); err != nil {
return err return err
@ -102,10 +92,8 @@ func (t *Trashcan) Archive(filePath string) error {
} }
func (t *Trashcan) Serve() { func (t *Trashcan) Serve() {
if debug {
l.Debugln(t, "starting") l.Debugln(t, "starting")
defer l.Debugln(t, "stopping") defer l.Debugln(t, "stopping")
}
// Do the first cleanup one minute after startup. // Do the first cleanup one minute after startup.
timer := time.NewTimer(time.Minute) timer := time.NewTimer(time.Minute)