Refactor logging into separate package

Jakob Borg 2014-05-14 21:08:56 -03:00
parent 8903825e02
commit cba554d0fa
23 changed files with 383 additions and 272 deletions

View File

@@ -193,7 +193,7 @@ func readConfigXML(rd io.Reader, myID string) (Configuration, error) {
 }
 if seen, ok := seenRepos[repo.ID]; ok {
-warnf("Multiple repositories with ID %q; disabling", repo.ID)
+l.Warnf("Multiple repositories with ID %q; disabling", repo.ID)
 seen.Invalid = "duplicate repository ID"
 if seen.ID == repo.ID {
@@ -217,7 +217,7 @@ func readConfigXML(rd io.Reader, myID string) (Configuration, error) {
 if len(cfg.GUI.Password) > 0 && cfg.GUI.Password[0] != '$' {
 hash, err := bcrypt.GenerateFromPassword([]byte(cfg.GUI.Password), 0)
 if err != nil {
-warnln(err)
+l.Warnln(err)
 } else {
 cfg.GUI.Password = string(hash)
 }

View File

@@ -1,15 +1,13 @@
 package main
 import (
-"log"
 "os"
 "strings"
 )
 var (
-dlog = log.New(os.Stderr, "main: ", log.Lmicroseconds|log.Lshortfile)
-debugNet = strings.Contains(os.Getenv("STTRACE"), "net")
-debugIdx = strings.Contains(os.Getenv("STTRACE"), "idx")
-debugNeed = strings.Contains(os.Getenv("STTRACE"), "need")
-debugPull = strings.Contains(os.Getenv("STTRACE"), "pull")
+debugNet = strings.Contains(os.Getenv("STTRACE"), "net") || os.Getenv("STTRACE") == "all"
+debugIdx = strings.Contains(os.Getenv("STTRACE"), "idx") || os.Getenv("STTRACE") == "all"
+debugNeed = strings.Contains(os.Getenv("STTRACE"), "need") || os.Getenv("STTRACE") == "all"
+debugPull = strings.Contains(os.Getenv("STTRACE"), "pull") || os.Getenv("STTRACE") == "all"
 )
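Each package now computes its debug flags once at startup from STTRACE, and the new "all" value turns every facility on. A minimal, self-contained sketch of that pattern as it appears throughout this commit (the import path and Debugln API are the ones introduced here; the program itself is illustrative only):

    package main

    import (
        "os"
        "strings"

        "github.com/calmh/syncthing/logger"
    )

    // Per-package pattern from this commit: a shared logger plus a boolean
    // debug flag derived from STTRACE, checked before emitting debug output.
    var (
        l        = logger.DefaultLogger
        debugNet = strings.Contains(os.Getenv("STTRACE"), "net") || os.Getenv("STTRACE") == "all"
    )

    func main() {
        if debugNet {
            l.Debugln("net debugging enabled") // printed only under STTRACE=net or STTRACE=all
        }
    }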

View File

@@ -14,6 +14,7 @@ import (
 "time"
 "code.google.com/p/go.crypto/bcrypt"
+"github.com/calmh/syncthing/logger"
 "github.com/codegangsta/martini"
 )
@@ -34,8 +35,12 @@ const (
 unchangedPassword = "--password-unchanged--"
 )
+func init() {
+l.AddHandler(logger.LevelWarn, showGuiError)
+}
 func startGUI(cfg GUIConfiguration, m *Model) error {
-l, err := net.Listen("tcp", cfg.Address)
+listener, err := net.Listen("tcp", cfg.Address)
 if err != nil {
 return err
 }
@@ -69,7 +74,7 @@ func startGUI(cfg GUIConfiguration, m *Model) error {
 mr.Action(router.Handle)
 mr.Map(m)
-go http.Serve(l, mr)
+go http.Serve(listener, mr)
 return nil
 }
@@ -136,14 +141,14 @@ func restPostConfig(req *http.Request) {
 var prevPassHash = cfg.GUI.Password
 err := json.NewDecoder(req.Body).Decode(&cfg)
 if err != nil {
-warnln(err)
+l.Warnln(err)
 } else {
 if cfg.GUI.Password == "" {
 // Leave it empty
 } else if cfg.GUI.Password != unchangedPassword {
 hash, err := bcrypt.GenerateFromPassword([]byte(cfg.GUI.Password), 0)
 if err != nil {
-warnln(err)
+l.Warnln(err)
 } else {
 cfg.GUI.Password = string(hash)
 }
@@ -217,7 +222,7 @@ func restGetErrors(w http.ResponseWriter) {
 func restPostError(req *http.Request) {
 bs, _ := ioutil.ReadAll(req.Body)
 req.Body.Close()
-showGuiError(string(bs))
+showGuiError(0, string(bs))
 }
 func restClearErrors() {
@@ -226,7 +231,7 @@ func restClearErrors() {
 guiErrorsMut.Unlock()
 }
-func showGuiError(err string) {
+func showGuiError(l logger.LogLevel, err string) {
 guiErrorsMut.Lock()
 guiErrors = append(guiErrors, guiError{time.Now(), err})
 if len(guiErrors) > 5 {
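Because warnings now pass through the shared logger, gui.go no longer needs a showGuiError call at every warning site; it registers showGuiError as a handler for the warning level in init(), as shown above. A brief sketch of that wiring using only the AddHandler/Warnln API from this commit (the handler here is an illustrative stand-in for showGuiError):

    package main

    import (
        "fmt"

        "github.com/calmh/syncthing/logger"
    )

    var l = logger.DefaultLogger

    // collectWarning stands in for gui.go's showGuiError; it receives every
    // message logged at the warning level on this logger.
    func collectWarning(level logger.LogLevel, msg string) {
        fmt.Print("captured for the GUI: " + msg)
    }

    func main() {
        l.AddHandler(logger.LevelWarn, collectWarning)
        l.Warnln("recoverable problem") // written to stderr and passed to collectWarning
    }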

View File

@@ -75,7 +75,7 @@ func trackCPUUsage() {
 for _ = range time.NewTicker(time.Second).C {
 err := solarisPrusage(pid, &rusage)
 if err != nil {
-warnln(err)
+l.Warnln(err)
 continue
 }
 curTime := time.Now().UnixNano()

View File

@@ -1,64 +0,0 @@
-package main
-import (
-"fmt"
-"log"
-"os"
-)
-var logger *log.Logger
-func init() {
-log.SetOutput(os.Stderr)
-logger = log.New(os.Stderr, "", log.Flags())
-}
-func infoln(vals ...interface{}) {
-s := fmt.Sprintln(vals...)
-logger.Output(2, "INFO: "+s)
-}
-func infof(format string, vals ...interface{}) {
-s := fmt.Sprintf(format, vals...)
-logger.Output(2, "INFO: "+s)
-}
-func okln(vals ...interface{}) {
-s := fmt.Sprintln(vals...)
-logger.Output(2, "OK: "+s)
-}
-func okf(format string, vals ...interface{}) {
-s := fmt.Sprintf(format, vals...)
-logger.Output(2, "OK: "+s)
-}
-func warnln(vals ...interface{}) {
-s := fmt.Sprintln(vals...)
-showGuiError(s)
-logger.Output(2, "WARNING: "+s)
-}
-func warnf(format string, vals ...interface{}) {
-s := fmt.Sprintf(format, vals...)
-showGuiError(s)
-logger.Output(2, "WARNING: "+s)
-}
-func fatalln(vals ...interface{}) {
-s := fmt.Sprintln(vals...)
-logger.Output(2, "FATAL: "+s)
-os.Exit(3)
-}
-func fatalf(format string, vals ...interface{}) {
-s := fmt.Sprintf(format, vals...)
-logger.Output(2, "FATAL: "+s)
-os.Exit(3)
-}
-func fatalErr(err error) {
-if err != nil {
-fatalf(err.Error())
-}
-}

View File

@@ -21,6 +21,7 @@ import (
 "time"
 "github.com/calmh/syncthing/discover"
+"github.com/calmh/syncthing/logger"
 "github.com/calmh/syncthing/protocol"
 "github.com/calmh/syncthing/upnp"
 "github.com/juju/ratelimit"
@@ -37,12 +38,18 @@ var (
 LongVersion string
 )
+var l = logger.DefaultLogger
 func init() {
 stamp, _ := strconv.Atoi(BuildStamp)
 BuildDate = time.Unix(int64(stamp), 0)
 date := BuildDate.UTC().Format("2006-01-02 15:04:05 MST")
 LongVersion = fmt.Sprintf("syncthing %s (%s %s-%s) %s@%s %s", Version, runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildUser, BuildHost, date)
+if os.Getenv("STTRACE") != "" {
+l.SetFlags(log.Ltime | log.Ldate | log.Lmicroseconds | log.Lshortfile)
+}
 }
 var (
@@ -76,6 +83,7 @@ const (
 - "pull" (file pull activity)
 - "scanner" (the file change scanner)
 - "upnp" (the upnp port mapper)
+- "all" (all of the above)
 STCPUPROFILE Write CPU profile to the specified file.`
 )
@@ -104,7 +112,7 @@ func main() {
 if doUpgrade {
 err := upgrade()
 if err != nil {
-fatalln(err)
+l.Fatalln(err)
 }
 return
 }
@@ -130,7 +138,7 @@ func main() {
 if _, err := os.Stat(oldDefault); err == nil {
 os.MkdirAll(filepath.Dir(confDir), 0700)
 if err := os.Rename(oldDefault, confDir); err == nil {
-infoln("Moved config dir", oldDefault, "to", confDir)
+l.Infoln("Moved config dir", oldDefault, "to", confDir)
 }
 }
 }
@@ -142,15 +150,14 @@ func main() {
 if err != nil {
 newCertificate(confDir)
 cert, err = loadCert(confDir)
-fatalErr(err)
+l.FatalErr(err)
 }
 myID = certID(cert.Certificate[0])
-log.SetPrefix("[" + myID[0:5] + "] ")
-logger.SetPrefix("[" + myID[0:5] + "] ")
+l.SetPrefix(fmt.Sprintf("[%s] ", myID[:5]))
-infoln(LongVersion)
-infoln("My ID:", myID)
+l.Infoln(LongVersion)
+l.Infoln("My ID:", myID)
 // Prepare to be able to save configuration
@@ -165,11 +172,11 @@ func main() {
 // Read config.xml
 cfg, err = readConfigXML(cf, myID)
 if err != nil {
-fatalln(err)
+l.Fatalln(err)
 }
 cf.Close()
 } else {
-infoln("No config file; starting with empty defaults")
+l.Infoln("No config file; starting with empty defaults")
 name, _ := os.Hostname()
 defaultRepo := filepath.Join(getHomeDir(), "Sync")
 ensureDir(defaultRepo, 0755)
@@ -191,15 +198,15 @@ func main() {
 }
 port, err := getFreePort("127.0.0.1", 8080)
-fatalErr(err)
+l.FatalErr(err)
 cfg.GUI.Address = fmt.Sprintf("127.0.0.1:%d", port)
 port, err = getFreePort("", 22000)
-fatalErr(err)
+l.FatalErr(err)
 cfg.Options.ListenAddress = []string{fmt.Sprintf(":%d", port)}
 saveConfig()
-infof("Edit %s to taste or use the GUI\n", cfgFile)
+l.Infof("Edit %s to taste or use the GUI\n", cfgFile)
 }
 if reset {
@@ -209,10 +216,10 @@ func main() {
 if profiler := os.Getenv("STPROFILER"); len(profiler) > 0 {
 go func() {
-dlog.Println("Starting profiler on", profiler)
+l.Debugln("Starting profiler on", profiler)
 err := http.ListenAndServe(profiler, nil)
 if err != nil {
-dlog.Fatal(err)
+l.Fatalln(err)
 }
 }()
 }
@@ -251,7 +258,7 @@ func main() {
 if cfg.GUI.Enabled && cfg.GUI.Address != "" {
 addr, err := net.ResolveTCPAddr("tcp", cfg.GUI.Address)
 if err != nil {
-fatalf("Cannot start GUI on %q: %v", cfg.GUI.Address, err)
+l.Fatalf("Cannot start GUI on %q: %v", cfg.GUI.Address, err)
 } else {
 var hostOpen, hostShow string
 switch {
@@ -266,10 +273,10 @@ func main() {
 hostShow = hostOpen
 }
-infof("Starting web GUI on http://%s:%d/", hostShow, addr.Port)
+l.Infof("Starting web GUI on http://%s:%d/", hostShow, addr.Port)
 err := startGUI(cfg.GUI, m)
 if err != nil {
-fatalln("Cannot start GUI:", err)
+l.Fatalln("Cannot start GUI:", err)
 }
 if cfg.Options.StartBrowser && len(os.Getenv("STRESTART")) == 0 {
 openURL(fmt.Sprintf("http://%s:%d", hostOpen, addr.Port))
@@ -280,7 +287,7 @@ func main() {
 // Walk the repository and update the local model before establishing any
 // connections to other nodes.
-infoln("Populating repository index")
+l.Infoln("Populating repository index")
 m.LoadIndexes(confDir)
 for _, repo := range cfg.Repositories {
@@ -296,8 +303,8 @@ func main() {
 if files, _, _ := m.LocalSize(repo.ID); files > 0 {
 if fi, err := os.Stat(dir); err != nil || !fi.IsDir() {
-warnf("Configured repository %q has index but directory %q is missing; not starting.", repo.ID, repo.Directory)
-fatalf("Ensure that directory is present or remove repository from configuration.")
+l.Warnf("Configured repository %q has index but directory %q is missing; not starting.", repo.ID, repo.Directory)
+l.Fatalf("Ensure that directory is present or remove repository from configuration.")
 }
 }
@@ -330,10 +337,10 @@ func main() {
 // Routine to pull blocks from other nodes to synchronize the local
 // repository. Does not run when we are in read only (publish only) mode.
 if repo.ReadOnly {
-okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
+l.Okf("Ready to synchronize %s (read only; no external updates accepted)", repo.ID)
 m.StartRepoRO(repo.ID)
 } else {
-okf("Ready to synchronize %s (read-write)", repo.ID)
+l.Okf("Ready to synchronize %s (read-write)", repo.ID)
 m.StartRepoRW(repo.ID, cfg.Options.ParallelRequests)
 }
 }
@@ -355,7 +362,7 @@ func setupUPnP() int {
 if len(cfg.Options.ListenAddress) == 1 {
 _, portStr, err := net.SplitHostPort(cfg.Options.ListenAddress[0])
 if err != nil {
-warnln(err)
+l.Warnln(err)
 } else {
 // Set up incoming port forwarding, if necessary and possible
 port, _ := strconv.Atoi(portStr)
@@ -366,19 +373,19 @@ func setupUPnP() int {
 err := igd.AddPortMapping(upnp.TCP, r, port, "syncthing", 0)
 if err == nil {
 externalPort = r
-infoln("Created UPnP port mapping - external port", externalPort)
+l.Infoln("Created UPnP port mapping - external port", externalPort)
 break
 }
 }
 if externalPort == 0 {
-warnln("Failed to create UPnP port mapping")
+l.Warnln("Failed to create UPnP port mapping")
 }
 } else {
-infof("No UPnP IGD device found, no port mapping created (%v)", err)
+l.Infof("No UPnP IGD device found, no port mapping created (%v)", err)
 }
 }
 } else {
-warnln("Multiple listening addresses; not attempting UPnP port mapping")
+l.Warnln("Multiple listening addresses; not attempting UPnP port mapping")
 }
 return externalPort
 }
@@ -387,7 +394,7 @@ func resetRepositories() {
 suffix := fmt.Sprintf(".syncthing-reset-%d", time.Now().UnixNano())
 for _, repo := range cfg.Repositories {
 if _, err := os.Stat(repo.Directory); err == nil {
-infof("Reset: Moving %s -> %s", repo.Directory, repo.Directory+suffix)
+l.Infof("Reset: Moving %s -> %s", repo.Directory, repo.Directory+suffix)
 os.Rename(repo.Directory, repo.Directory+suffix)
 }
 }
@@ -396,17 +403,17 @@ func resetRepositories() {
 idxs, err := filepath.Glob(pat)
 if err == nil {
 for _, idx := range idxs {
-infof("Reset: Removing %s", idx)
+l.Infof("Reset: Removing %s", idx)
 os.Remove(idx)
 }
 }
 }
 func restart() {
-infoln("Restarting")
+l.Infoln("Restarting")
 if os.Getenv("SMF_FMRI") != "" || os.Getenv("STNORESTART") != "" {
 // Solaris SMF
-infoln("Service manager detected; exit instead of restart")
+l.Infoln("Service manager detected; exit instead of restart")
 stop <- true
 return
 }
@@ -417,7 +424,7 @@ func restart() {
 }
 pgm, err := exec.LookPath(os.Args[0])
 if err != nil {
-warnln(err)
+l.Warnln(err)
 return
 }
 proc, err := os.StartProcess(pgm, os.Args, &os.ProcAttr{
@@ -425,7 +432,7 @@ func restart() {
 Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
 })
 if err != nil {
-fatalln(err)
+l.Fatalln(err)
 }
 proc.Release()
 stop <- true
@@ -441,26 +448,26 @@ func saveConfigLoop(cfgFile string) {
 for _ = range saveConfigCh {
 fd, err := os.Create(cfgFile + ".tmp")
 if err != nil {
-warnln(err)
+l.Warnln(err)
 continue
 }
 err = writeConfigXML(fd, cfg)
 if err != nil {
-warnln(err)
+l.Warnln(err)
 fd.Close()
 continue
 }
 err = fd.Close()
 if err != nil {
-warnln(err)
+l.Warnln(err)
 continue
 }
 err = Rename(cfgFile+".tmp", cfgFile)
 if err != nil {
-warnln(err)
+l.Warnln(err)
 }
 }
 }
@@ -477,26 +484,26 @@ func listenConnect(myID string, m *Model, tlsCfg *tls.Config) {
 addr := addr
 go func() {
 if debugNet {
-dlog.Println("listening on", addr)
+l.Debugln("listening on", addr)
 }
-l, err := tls.Listen("tcp", addr, tlsCfg)
-fatalErr(err)
+listener, err := tls.Listen("tcp", addr, tlsCfg)
+l.FatalErr(err)
 for {
-conn, err := l.Accept()
+conn, err := listener.Accept()
 if err != nil {
-warnln(err)
+l.Warnln(err)
 continue
 }
 if debugNet {
-dlog.Println("connect from", conn.RemoteAddr())
+l.Debugln("connect from", conn.RemoteAddr())
 }
 tc := conn.(*tls.Conn)
 err = tc.Handshake()
 if err != nil {
-warnln(err)
+l.Warnln(err)
 tc.Close()
 continue
 }
@@ -543,12 +550,12 @@ func listenConnect(myID string, m *Model, tlsCfg *tls.Config) {
 addr = net.JoinHostPort(host, "22000")
 }
 if debugNet {
-dlog.Println("dial", nodeCfg.NodeID, addr)
+l.Debugln("dial", nodeCfg.NodeID, addr)
 }
 conn, err := tls.Dial("tcp", addr, tlsCfg)
 if err != nil {
 if debugNet {
-dlog.Println(err)
+l.Debugln(err)
 }
 continue
 }
@@ -565,21 +572,21 @@ func listenConnect(myID string, m *Model, tlsCfg *tls.Config) {
 next:
 for conn := range conns {
 certs := conn.ConnectionState().PeerCertificates
-if l := len(certs); l != 1 {
-warnf("Got peer certificate list of length %d != 1; protocol error", l)
+if cl := len(certs); cl != 1 {
+l.Warnf("Got peer certificate list of length %d != 1; protocol error", cl)
 conn.Close()
 continue
 }
 remoteID := certID(certs[0].Raw)
 if remoteID == myID {
-warnf("Connected to myself (%s) - should not happen", remoteID)
+l.Warnf("Connected to myself (%s) - should not happen", remoteID)
 conn.Close()
 continue
 }
 if m.ConnectedTo(remoteID) {
-warnf("Connected to already connected node (%s)", remoteID)
+l.Warnf("Connected to already connected node (%s)", remoteID)
 conn.Close()
 continue
 }
@@ -602,17 +609,17 @@ next:
 func discovery(extPort int) *discover.Discoverer {
 disc, err := discover.NewDiscoverer(myID, cfg.Options.ListenAddress)
 if err != nil {
-warnf("No discovery possible (%v)", err)
+l.Warnf("No discovery possible (%v)", err)
 return nil
 }
 if cfg.Options.LocalAnnEnabled {
-infoln("Sending local discovery announcements")
+l.Infoln("Sending local discovery announcements")
 disc.StartLocal()
 }
 if cfg.Options.GlobalAnnEnabled {
-infoln("Sending global discovery announcements")
+l.Infoln("Sending global discovery announcements")
 disc.StartGlobal(cfg.Options.GlobalAnnServer, uint16(extPort))
 }
@@ -623,10 +630,10 @@ func ensureDir(dir string, mode int) {
 fi, err := os.Stat(dir)
 if os.IsNotExist(err) {
 err := os.MkdirAll(dir, 0700)
-fatalErr(err)
+l.FatalErr(err)
 } else if mode >= 0 && err == nil && int(fi.Mode()&0777) != mode {
 err := os.Chmod(dir, os.FileMode(mode))
-fatalErr(err)
+l.FatalErr(err)
 }
 }
@@ -669,7 +676,7 @@ func getHomeDir() string {
 }
 if home == "" {
-fatalln("No home directory found - set $HOME (or the platform equivalent).")
+l.Fatalln("No home directory found - set $HOME (or the platform equivalent).")
 }
 return home

View File

@@ -214,7 +214,7 @@ func (m *Model) NeedFilesRepo(repo string) []scanner.File {
 // Implements the protocol.Model interface.
 func (m *Model) Index(nodeID string, repo string, fs []protocol.FileInfo) {
 if debugNet {
-dlog.Printf("IDX(in): %s / %q: %d files", nodeID, repo, len(fs))
+l.Debugf("IDX(in): %s / %q: %d files", nodeID, repo, len(fs))
 }
 var files = make([]scanner.File, len(fs))
@@ -228,7 +228,7 @@ func (m *Model) Index(nodeID string, repo string, fs []protocol.FileInfo) {
 if r, ok := m.repoFiles[repo]; ok {
 r.Replace(id, files)
 } else {
-warnf("Index from %s for nonexistant repo %q; dropping", nodeID, repo)
+l.Warnf("Index from %s for nonexistant repo %q; dropping", nodeID, repo)
 }
 m.rmut.RUnlock()
 }
@@ -237,7 +237,7 @@ func (m *Model) Index(nodeID string, repo string, fs []protocol.FileInfo) {
 // Implements the protocol.Model interface.
 func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo) {
 if debugNet {
-dlog.Printf("IDXUP(in): %s / %q: %d files", nodeID, repo, len(fs))
+l.Debugf("IDXUP(in): %s / %q: %d files", nodeID, repo, len(fs))
 }
 var files = make([]scanner.File, len(fs))
@@ -251,7 +251,7 @@ func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo)
 if r, ok := m.repoFiles[repo]; ok {
 r.Update(id, files)
 } else {
-warnf("Index update from %s for nonexistant repo %q; dropping", nodeID, repo)
+l.Warnf("Index update from %s for nonexistant repo %q; dropping", nodeID, repo)
 }
 m.rmut.RUnlock()
 }
@@ -259,12 +259,12 @@ func (m *Model) IndexUpdate(nodeID string, repo string, fs []protocol.FileInfo)
 func (m *Model) ClusterConfig(nodeID string, config protocol.ClusterConfigMessage) {
 compErr := compareClusterConfig(m.clusterConfig(nodeID), config)
 if debugNet {
-dlog.Printf("ClusterConfig: %s: %#v", nodeID, config)
-dlog.Printf(" ... compare: %s: %v", nodeID, compErr)
+l.Debugf("ClusterConfig: %s: %#v", nodeID, config)
+l.Debugf(" ... compare: %s: %v", nodeID, compErr)
 }
 if compErr != nil {
-warnf("%s: %v", nodeID, compErr)
+l.Warnf("%s: %v", nodeID, compErr)
 m.Close(nodeID, compErr)
 }
@@ -281,13 +281,13 @@ func (m *Model) ClusterConfig(nodeID string, config protocol.ClusterConfigMessag
 // Implements the protocol.Model interface.
 func (m *Model) Close(node string, err error) {
 if debugNet {
-dlog.Printf("%s: %v", node, err)
+l.Debugf("%s: %v", node, err)
 }
 if err != io.EOF {
-warnf("Connection to %s closed: %v", node, err)
+l.Warnf("Connection to %s closed: %v", node, err)
 } else if _, ok := err.(ClusterConfigMismatch); ok {
-warnf("Connection to %s closed: %v", node, err)
+l.Warnf("Connection to %s closed: %v", node, err)
 }
 cid := m.cm.Get(node)
@@ -318,7 +318,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
 m.rmut.RUnlock()
 if !ok {
-warnf("Request from %s for file %s in nonexistent repo %q", nodeID, name, repo)
+l.Warnf("Request from %s for file %s in nonexistent repo %q", nodeID, name, repo)
 return nil, ErrNoSuchFile
 }
@@ -329,13 +329,13 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
 if offset > lf.Size {
 if debugNet {
-dlog.Printf("REQ(in; nonexistent): %s: %q o=%d s=%d", nodeID, name, offset, size)
+l.Debugf("REQ(in; nonexistent): %s: %q o=%d s=%d", nodeID, name, offset, size)
 }
 return nil, ErrNoSuchFile
 }
 if debugNet && nodeID != "<local>" {
-dlog.Printf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
+l.Debugf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
 }
 m.rmut.RLock()
 fn := filepath.Join(m.repoDirs[repo], name)
@@ -436,7 +436,7 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
 go func() {
 for repo, idx := range idxToSend {
 if debugNet {
-dlog.Printf("IDX(out/initial): %s: %q: %d files", nodeID, repo, len(idx))
+l.Debugf("IDX(out/initial): %s: %q: %d files", nodeID, repo, len(idx))
 }
 protoConn.Index(repo, idx)
 }
@@ -456,7 +456,7 @@ func (m *Model) protocolIndex(repo string) []protocol.FileInfo {
 if mf.Flags&protocol.FlagDeleted != 0 {
 flagComment = " (deleted)"
 }
-dlog.Printf("IDX(out): %q/%q m=%d f=%o%s v=%d (%d blocks)", repo, mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
+l.Debugf("IDX(out): %q/%q m=%d f=%o%s v=%d (%d blocks)", repo, mf.Name, mf.Modified, mf.Flags, flagComment, mf.Version, len(mf.Blocks))
 }
 index = append(index, mf)
 }
@@ -480,7 +480,7 @@ func (m *Model) requestGlobal(nodeID, repo, name string, offset int64, size int,
 }
 if debugNet {
-dlog.Printf("REQ(out): %s: %q / %q o=%d s=%d h=%x", nodeID, repo, name, offset, size, hash)
+l.Debugf("REQ(out): %s: %q / %q o=%d s=%d h=%x", nodeID, repo, name, offset, size, hash)
 }
 return nc.Request(repo, name, offset, size)
@@ -509,7 +509,7 @@ func (m *Model) broadcastIndexLoop() {
 if conn, ok := m.protoConn[nodeID]; ok {
 indexWg.Add(1)
 if debugNet {
-dlog.Printf("IDX(out/loop): %s: %d files", nodeID, len(idx))
+l.Debugf("IDX(out/loop): %s: %d files", nodeID, len(idx))
 }
 go func() {
 conn.Index(repo, idx)

View File

@@ -91,13 +91,13 @@ func newPuller(repo, dir string, model *Model, slots int) *puller {
 p.requestSlots <- true
 }
 if debugPull {
-dlog.Printf("starting puller; repo %q dir %q slots %d", repo, dir, slots)
+l.Debugf("starting puller; repo %q dir %q slots %d", repo, dir, slots)
 }
 go p.run()
 } else {
 // Read only
 if debugPull {
-dlog.Printf("starting puller; repo %q dir %q (read only)", repo, dir)
+l.Debugf("starting puller; repo %q dir %q (read only)", repo, dir)
 }
 go p.runRO()
 }
@@ -111,7 +111,7 @@ func (p *puller) run() {
 <-p.requestSlots
 b := p.bq.get()
 if debugPull {
-dlog.Printf("filler: queueing %q / %q offset %d copy %d", p.repo, b.file.Name, b.block.Offset, len(b.copy))
+l.Debugf("filler: queueing %q / %q offset %d copy %d", p.repo, b.file.Name, b.block.Offset, len(b.copy))
 }
 p.blocks <- b
 }
@@ -146,10 +146,10 @@ func (p *puller) run() {
 break pull
 }
 if debugPull {
-dlog.Printf("%q: idle but have %d open files", p.repo, len(p.openFiles))
+l.Debugf("%q: idle but have %d open files", p.repo, len(p.openFiles))
 i := 5
 for _, f := range p.openFiles {
-dlog.Printf(" %v", f)
+l.Debugf(" %v", f)
 i--
 if i == 0 {
 break
@@ -171,7 +171,7 @@ func (p *puller) run() {
 select {
 case <-walkTicker:
 if debugPull {
-dlog.Printf("%q: time for rescan", p.repo)
+l.Debugf("%q: time for rescan", p.repo)
 }
 err := p.model.ScanRepo(p.repo)
 if err != nil {
@@ -192,7 +192,7 @@ func (p *puller) runRO() {
 for _ = range walkTicker {
 if debugPull {
-dlog.Printf("%q: time for rescan", p.repo)
+l.Debugf("%q: time for rescan", p.repo)
 }
 err := p.model.ScanRepo(p.repo)
 if err != nil {
@@ -226,7 +226,7 @@ func (p *puller) fixupDirectories() {
 if cur.Flags&protocol.FlagDeleted != 0 {
 if debugPull {
-dlog.Printf("queue delete dir: %v", cur)
+l.Debugf("queue delete dir: %v", cur)
 }
 // We queue the directories to delete since we walk the
@@ -240,7 +240,7 @@ func (p *puller) fixupDirectories() {
 if cur.Flags&uint32(os.ModePerm) != uint32(info.Mode()&os.ModePerm) {
 os.Chmod(path, os.FileMode(cur.Flags)&os.ModePerm)
 if debugPull {
-dlog.Printf("restored dir flags: %o -> %v", info.Mode()&os.ModePerm, cur)
+l.Debugf("restored dir flags: %o -> %v", info.Mode()&os.ModePerm, cur)
 }
 }
@@ -248,7 +248,7 @@ func (p *puller) fixupDirectories() {
 t := time.Unix(cur.Modified, 0)
 os.Chtimes(path, t, t)
 if debugPull {
-dlog.Printf("restored dir modtime: %d -> %v", info.ModTime().Unix(), cur)
+l.Debugf("restored dir modtime: %d -> %v", info.ModTime().Unix(), cur)
 }
 }
@@ -258,11 +258,11 @@ func (p *puller) fixupDirectories() {
 // Delete any queued directories
 for i := len(deleteDirs) - 1; i >= 0; i-- {
 if debugPull {
-dlog.Println("delete dir:", deleteDirs[i])
+l.Debugln("delete dir:", deleteDirs[i])
 }
 err := os.Remove(deleteDirs[i])
 if err != nil {
-warnln(err)
+l.Warnln(err)
 }
 }
 }
@@ -284,7 +284,7 @@ func (p *puller) handleRequestResult(res requestResult) {
 p.openFiles[f.Name] = of
 if debugPull {
-dlog.Printf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repo, f.Name, res.offset, of.outstanding, of.done)
+l.Debugf("pull: wrote %q / %q offset %d outstanding %d done %v", p.repo, f.Name, res.offset, of.outstanding, of.done)
 }
 if of.done && of.outstanding == 0 {
@@ -314,7 +314,7 @@ func (p *puller) handleBlock(b bqBlock) bool {
 if !ok {
 if debugPull {
-dlog.Printf("pull: %q: opening file %q", p.repo, f.Name)
+l.Debugf("pull: %q: opening file %q", p.repo, f.Name)
 }
 of.availability = uint64(p.model.repoFiles[p.repo].Availability(f.Name))
@@ -327,13 +327,13 @@ func (p *puller) handleBlock(b bqBlock) bool {
 err = os.MkdirAll(dirName, 0777)
 }
 if err != nil {
-dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
 }
 of.file, of.err = os.Create(of.temp)
 if of.err != nil {
 if debugPull {
-dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
 }
 if !b.last {
 p.openFiles[f.Name] = of
@@ -346,10 +346,10 @@ func (p *puller) handleBlock(b bqBlock) bool {
 if of.err != nil {
 // We have already failed this file.
 if debugPull {
-dlog.Printf("pull: error: %q / %q has already failed: %v", p.repo, f.Name, of.err)
+l.Debugf("pull: error: %q / %q has already failed: %v", p.repo, f.Name, of.err)
 }
 if b.last {
-dlog.Printf("pull: removing failed file %q / %q", p.repo, f.Name)
+l.Debugf("pull: removing failed file %q / %q", p.repo, f.Name)
 delete(p.openFiles, f.Name)
 }
@@ -378,14 +378,14 @@ func (p *puller) handleCopyBlock(b bqBlock) {
 of := p.openFiles[f.Name]
 if debugPull {
-dlog.Printf("pull: copying %d blocks for %q / %q", len(b.copy), p.repo, f.Name)
+l.Debugf("pull: copying %d blocks for %q / %q", len(b.copy), p.repo, f.Name)
 }
 var exfd *os.File
 exfd, of.err = os.Open(of.filepath)
 if of.err != nil {
 if debugPull {
-dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
 }
 of.file.Close()
 of.file = nil
@@ -404,7 +404,7 @@ func (p *puller) handleCopyBlock(b bqBlock) {
 buffers.Put(bs)
 if of.err != nil {
 if debugPull {
-dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
+l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, of.err)
 }
 exfd.Close()
 of.file.Close()
@@ -447,7 +447,7 @@ func (p *puller) handleRequestBlock(b bqBlock) bool {
 go func(node string, b bqBlock) {
 if debugPull {
-dlog.Printf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repo, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
+l.Debugf("pull: requesting %q / %q offset %d size %d from %q outstanding %d", p.repo, f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
 }
 bs, err := p.model.requestGlobal(node, p.repo, f.Name, b.block.Offset, int(b.block.Size), nil)
@@ -476,13 +476,13 @@ func (p *puller) handleEmptyBlock(b bqBlock) {
 if f.Flags&protocol.FlagDeleted != 0 {
 if debugPull {
-dlog.Printf("pull: delete %q", f.Name)
+l.Debugf("pull: delete %q", f.Name)
 }
 os.Remove(of.temp)
 os.Remove(of.filepath)
 } else {
 if debugPull {
-dlog.Printf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repo, f.Name)
+l.Debugf("pull: no blocks to fetch and nothing to copy for %q / %q", p.repo, f.Name)
 }
 t := time.Unix(f.Modified, 0)
 os.Chtimes(of.temp, t, t)
@@ -500,7 +500,7 @@ func (p *puller) queueNeededBlocks() {
 lf := p.model.CurrentRepoFile(p.repo, f.Name)
 have, need := scanner.BlockDiff(lf.Blocks, f.Blocks)
 if debugNeed {
-dlog.Printf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need)
+l.Debugf("need:\n local: %v\n global: %v\n haveBlocks: %v\n needBlocks: %v", lf, f, have, need)
 }
 queued++
 p.bq.put(bqAdd{
@@ -510,13 +510,13 @@ func (p *puller) queueNeededBlocks() {
 })
 }
 if debugPull && queued > 0 {
-dlog.Printf("%q: queued %d blocks", p.repo, queued)
+l.Debugf("%q: queued %d blocks", p.repo, queued)
 }
 }
 func (p *puller) closeFile(f scanner.File) {
 if debugPull {
-dlog.Printf("pull: closing %q / %q", p.repo, f.Name)
+l.Debugf("pull: closing %q / %q", p.repo, f.Name)
 }
 of := p.openFiles[f.Name]
@@ -528,7 +528,7 @@ func (p *puller) closeFile(f scanner.File) {
 fd, err := os.Open(of.temp)
 if err != nil {
 if debugPull {
-dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
 }
 return
 }
@@ -537,14 +537,14 @@ func (p *puller) closeFile(f scanner.File) {
 if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
 if debugPull {
-dlog.Printf("pull: %q / %q: nblocks %d != %d", p.repo, f.Name, l0, l1)
+l.Debugf("pull: %q / %q: nblocks %d != %d", p.repo, f.Name, l0, l1)
 }
 return
 }
 for i := range hb {
 if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
-dlog.Printf("pull: %q / %q: block %d hash mismatch", p.repo, f.Name, i)
+l.Debugf("pull: %q / %q: block %d hash mismatch", p.repo, f.Name, i)
 return
 }
 }
@@ -554,11 +554,11 @@ func (p *puller) closeFile(f scanner.File) {
 os.Chmod(of.temp, os.FileMode(f.Flags&0777))
 defTempNamer.Show(of.temp)
 if debugPull {
-dlog.Printf("pull: rename %q / %q: %q", p.repo, f.Name, of.filepath)
+l.Debugf("pull: rename %q / %q: %q", p.repo, f.Name, of.filepath)
 }
 if err := Rename(of.temp, of.filepath); err == nil {
 p.model.updateLocal(p.repo, f)
 } else {
-dlog.Printf("pull: error: %q / %q: %v", p.repo, f.Name, err)
+l.Debugf("pull: error: %q / %q: %v", p.repo, f.Name, err)
 }
 }

Binary file not shown.

View File

@@ -41,10 +41,10 @@ func certSeed(bs []byte) int64 {
 }
 func newCertificate(dir string) {
-infoln("Generating RSA certificate and key...")
+l.Infoln("Generating RSA certificate and key...")
 priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
-fatalErr(err)
+l.FatalErr(err)
 notBefore := time.Now()
 notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
@@ -63,17 +63,17 @@ func newCertificate(dir string) {
 }
 derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
-fatalErr(err)
+l.FatalErr(err)
 certOut, err := os.Create(filepath.Join(dir, "cert.pem"))
-fatalErr(err)
+l.FatalErr(err)
 pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
 certOut.Close()
-okln("Created RSA certificate file")
+l.Okln("Created RSA certificate file")
 keyOut, err := os.OpenFile(filepath.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
-fatalErr(err)
+l.FatalErr(err)
 pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
 keyOut.Close()
-okln("Created RSA key file")
+l.Okln("Created RSA key file")
 }

View File

@@ -51,12 +51,12 @@ func upgrade() error {
 rel := rels[0]
 if rel.Tag > Version {
-infof("Attempting upgrade to %s...", rel.Tag)
+l.Infof("Attempting upgrade to %s...", rel.Tag)
 } else if rel.Tag == Version {
-okf("Already running the latest version, %s. Not upgrading.", Version)
+l.Okf("Already running the latest version, %s. Not upgrading.", Version)
 return nil
 } else {
-okf("Current version %s is newer than latest release %s. Not upgrading.", Version, rel.Tag)
+l.Okf("Current version %s is newer than latest release %s. Not upgrading.", Version, rel.Tag)
 return nil
 }
@@ -64,7 +64,7 @@ func upgrade() error {
 for _, asset := range rel.Assets {
 if strings.HasPrefix(asset.Name, expectedRelease) {
 if strings.HasSuffix(asset.Name, ".tar.gz") {
-infof("Downloading %s...", asset.Name)
+l.Infof("Downloading %s...", asset.Name)
 fname, err := readTarGZ(asset.URL, filepath.Dir(path))
 if err != nil {
 return err
@@ -80,8 +80,8 @@ func upgrade() error {
 return err
 }
-okf("Upgraded %q to %s.", path, rel.Tag)
-okf("Previous version saved in %q.", old)
+l.Okf("Upgraded %q to %s.", path, rel.Tag)
+l.Okf("Previous version saved in %q.", old)
 return nil
 }

View File

@@ -14,7 +14,7 @@ func Rename(from, to string) error {
 if runtime.GOOS == "windows" {
 err := os.Remove(to)
 if err != nil && !os.IsNotExist(err) {
-warnln(err)
+l.Warnln(err)
 }
 }
 return os.Rename(from, to)

View File

@@ -1,12 +1,13 @@
 package discover
 import (
-"log"
 "os"
 "strings"
+"github.com/calmh/syncthing/logger"
 )
 var (
-dlog = log.New(os.Stderr, "discover: ", log.Lmicroseconds|log.Lshortfile)
-debug = strings.Contains(os.Getenv("STTRACE"), "discover")
+debug = strings.Contains(os.Getenv("STTRACE"), "discover") || os.Getenv("STTRACE") == "all"
+l = logger.DefaultLogger
 )

View File

@@ -5,7 +5,6 @@ import (
 "errors"
 "fmt"
 "io"
-"log"
 "net"
 "sync"
 "time"
@@ -119,10 +118,10 @@ func (d *Discoverer) announcementPkt() []byte {
 for _, astr := range d.listenAddrs {
 addr, err := net.ResolveTCPAddr("tcp", astr)
 if err != nil {
-log.Printf("discover/announcement: %v: not announcing %s", err, astr)
+l.Warnln("%v: not announcing %s", err, astr)
 continue
 } else if debug {
-dlog.Printf("announcing %s: %#v", astr, addr)
+l.Debugf("discover: announcing %s: %#v", astr, addr)
 }
 if len(addr.IP) == 0 || addr.IP.IsUnspecified() {
 addrs = append(addrs, Address{Port: uint16(addr.Port)})
@@ -172,13 +171,13 @@ func (d *Discoverer) sendLocalAnnouncements() {
 func (d *Discoverer) sendExternalAnnouncements() {
 remote, err := net.ResolveUDPAddr("udp", d.extServer)
 if err != nil {
-log.Printf("discover/external: %v; no external announcements", err)
+l.Warnf("Global discovery: %v; no external announcements", err)
 return
 }
 conn, err := net.ListenUDP("udp", nil)
 if err != nil {
-log.Printf("discover/external: %v; no external announcements", err)
+l.Warnf("Global discovery: %v; no external announcements", err)
 return
 }
@@ -198,12 +197,14 @@ func (d *Discoverer) sendExternalAnnouncements() {
 var ok bool
 if debug {
-dlog.Printf("send announcement -> %v\n%s", remote, hex.Dump(buf))
+l.Debugf("discover: send announcement -> %v\n%s", remote, hex.Dump(buf))
 }
 _, err = conn.WriteTo(buf, remote)
 if err != nil {
-log.Println("discover/write: warning:", err)
+if debug {
+l.Debugln("discover: warning:", err)
+}
 errCounter++
 ok = false
 } else {
@@ -214,7 +215,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
 time.Sleep(1 * time.Second)
 res := d.externalLookup(d.myID)
 if debug {
-dlog.Println("external lookup check:", res)
+l.Debugln("discover: external lookup check:", res)
 }
 ok = len(res) > 0
@@ -230,7 +231,7 @@ func (d *Discoverer) sendExternalAnnouncements() {
 time.Sleep(60 * time.Second)
 }
 }
-log.Printf("discover/write: %v: stopping due to too many errors: %v", remote, err)
+l.Warnf("Global discovery: %v: stopping due to too many errors: %v", remote, err)
 }
 func (d *Discoverer) recvAnnouncements() {
@@ -238,7 +239,7 @@ func (d *Discoverer) recvAnnouncements() {
 buf, addr := d.beacon.Recv()
 if debug {
-dlog.Printf("read announcement:\n%s", hex.Dump(buf))
+l.Debugf("discover: read announcement:\n%s", hex.Dump(buf))
 }
 var pkt AnnounceV2
@@ -248,7 +249,7 @@ func (d *Discoverer) recvAnnouncements() {
 }
 if debug {
-dlog.Printf("parsed announcement: %#v", pkt)
+l.Debugf("discover: parsed announcement: %#v", pkt)
 }
 var newNode bool
@@ -287,11 +288,11 @@ func (d *Discoverer) registerNode(addr net.Addr, node Node) bool {
 }
 if len(addrs) == 0 {
 if debug {
-dlog.Println("no valid address for", node.ID)
+l.Debugln("discover: no valid address for", node.ID)
 }
 }
 if debug {
-dlog.Printf("register: %s -> %#v", node.ID, addrs)
+l.Debugf("discover: register: %s -> %#v", node.ID, addrs)
 }
 d.registryLock.Lock()
 _, seen := d.registry[node.ID]
@@ -303,27 +304,35 @@ func (d *Discoverer) externalLookup(node string) []string {
 func (d *Discoverer) externalLookup(node string) []string {
 extIP, err := net.ResolveUDPAddr("udp", d.extServer)
 if err != nil {
-log.Printf("discover/external: %v; no external lookup", err)
+if debug {
+l.Debugf("discover: %v; no external lookup", err)
+}
 return nil
 }
 conn, err := net.DialUDP("udp", nil, extIP)
 if err != nil {
-log.Printf("discover/external: %v; no external lookup", err)
+if debug {
+l.Debugf("discover: %v; no external lookup", err)
+}
 return nil
 }
 defer conn.Close()
 err = conn.SetDeadline(time.Now().Add(5 * time.Second))
 if err != nil {
-log.Printf("discover/external: %v; no external lookup", err)
+if debug {
+l.Debugf("discover: %v; no external lookup", err)
+}
 return nil
 }
 buf := QueryV2{QueryMagicV2, node}.MarshalXDR()
 _, err = conn.Write(buf)
 if err != nil {
-log.Printf("discover/external: %v; no external lookup", err)
+if debug {
+l.Debugf("discover: %v; no external lookup", err)
+}
 return nil
 }
 buffers.Put(buf)
@@ -337,23 +346,27 @@ func (d *Discoverer) externalLookup(node string) []string {
 // Expected if the server doesn't know about requested node ID
 return nil
 }
-log.Printf("discover/external/read: %v; no external lookup", err)
+if debug {
+l.Debugf("discover: %v; no external lookup", err)
+}
 return nil
 }
 if debug {
-dlog.Printf("read external:\n%s", hex.Dump(buf[:n]))
+l.Debugf("discover: read external:\n%s", hex.Dump(buf[:n]))
 }
 var pkt AnnounceV2
 err = pkt.UnmarshalXDR(buf[:n])
 if err != nil && err != io.EOF {
-log.Println("discover/external/decode:", err)
+if debug {
+l.Debugln("discover:", err)
+}
 return nil
 }
 if debug {
-dlog.Printf("parsed external: %#v", pkt)
+l.Debugf("discover: parsed external: %#v", pkt)
 }
 var addrs []string

View File

@@ -1,12 +1,13 @@
 package files
 import (
-"log"
 "os"
 "strings"
+"github.com/calmh/syncthing/logger"
 )
 var (
-dlog = log.New(os.Stderr, "files: ", log.Lmicroseconds|log.Lshortfile)
-debug = strings.Contains(os.Getenv("STTRACE"), "files")
+debug = strings.Contains(os.Getenv("STTRACE"), "files") || os.Getenv("STTRACE") == "all"
+l = logger.DefaultLogger
 )

View File

@@ -38,7 +38,7 @@ func NewSet() *Set {
 func (m *Set) Replace(id uint, fs []scanner.File) {
 if debug {
-dlog.Printf("Replace(%d, [%d])", id, len(fs))
+l.Debugf("Replace(%d, [%d])", id, len(fs))
 }
 if id > 63 {
 panic("Connection ID must be in the range 0 - 63 inclusive")
@@ -54,7 +54,7 @@ func (m *Set) Replace(id uint, fs []scanner.File) {
 func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) {
 if debug {
-dlog.Printf("ReplaceWithDelete(%d, [%d])", id, len(fs))
+l.Debugf("ReplaceWithDelete(%d, [%d])", id, len(fs))
 }
 if id > 63 {
 panic("Connection ID must be in the range 0 - 63 inclusive")
@@ -84,7 +84,7 @@ func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) {
 }
 fs = append(fs, cf)
 if debug {
-dlog.Println("deleted:", ck.Name)
+l.Debugln("deleted:", ck.Name)
 }
 }
 }
@@ -96,7 +96,7 @@ func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) {
 func (m *Set) Update(id uint, fs []scanner.File) {
 if debug {
-dlog.Printf("Update(%d, [%d])", id, len(fs))
+l.Debugf("Update(%d, [%d])", id, len(fs))
 }
 m.Lock()
 m.update(id, fs)
@@ -106,7 +106,7 @@ func (m *Set) Update(id uint, fs []scanner.File) {
 func (m *Set) Need(id uint) []scanner.File {
 if debug {
-dlog.Printf("Need(%d)", id)
+l.Debugf("Need(%d)", id)
 }
 m.Lock()
 var fs = make([]scanner.File, 0, len(m.globalKey)/2) // Just a guess, but avoids too many reallocations
@@ -130,7 +130,7 @@ func (m *Set) Need(id uint) []scanner.File {
 func (m *Set) Have(id uint) []scanner.File {
 if debug {
-dlog.Printf("Have(%d)", id)
+l.Debugf("Have(%d)", id)
 }
 var fs = make([]scanner.File, 0, len(m.remoteKey[id]))
 m.Lock()
@@ -143,7 +143,7 @@ func (m *Set) Have(id uint) []scanner.File {
 func (m *Set) Global() []scanner.File {
 if debug {
-dlog.Printf("Global()")
+l.Debugf("Global()")
 }
 m.Lock()
 var fs = make([]scanner.File, 0, len(m.globalKey))
@@ -160,7 +160,7 @@ func (m *Set) Get(id uint, file string) scanner.File {
 m.Lock()
 defer m.Unlock()
 if debug {
-dlog.Printf("Get(%d, %q)", id, file)
+l.Debugf("Get(%d, %q)", id, file)
 }
 return m.files[m.remoteKey[id][file]].File
 }
@@ -169,7 +169,7 @@ func (m *Set) GetGlobal(file string) scanner.File {
 m.Lock()
 defer m.Unlock()
 if debug {
-dlog.Printf("GetGlobal(%q)", file)
+l.Debugf("GetGlobal(%q)", file)
 }
 return m.files[m.globalKey[file]].File
 }
@@ -179,7 +179,7 @@ func (m *Set) Availability(name string) bitset {
 defer m.Unlock()
 av := m.globalAvailability[name]
 if debug {
-dlog.Printf("Availability(%q) = %0x", name, av)
+l.Debugf("Availability(%q) = %0x", name, av)
 }
 return av
 }
@@ -188,7 +188,7 @@ func (m *Set) Changes(id uint) uint64 {
 m.Lock()
 defer m.Unlock()
 if debug {
-dlog.Printf("Changes(%d)", id)
+l.Debugf("Changes(%d)", id)
 }
 return m.changes[id]
 }

logger/logger.go (new file, 142 lines)
View File

@@ -0,0 +1,142 @@
+package logger
+import (
+"fmt"
+"log"
+"os"
+"sync"
+)
+type LogLevel int
+const (
+LevelDebug LogLevel = iota
+LevelInfo
+LevelOK
+LevelWarn
+LevelFatal
+NumLevels
+)
+type MessageHandler func(l LogLevel, msg string)
+type Logger struct {
+logger *log.Logger
+handlers [NumLevels][]MessageHandler
+mut sync.Mutex
+}
+var DefaultLogger = New()
+func New() *Logger {
+return &Logger{
+logger: log.New(os.Stderr, "", log.Ltime),
+}
+}
+func (l *Logger) AddHandler(level LogLevel, h MessageHandler) {
+l.mut.Lock()
+defer l.mut.Unlock()
+l.handlers[level] = append(l.handlers[level], h)
+}
+func (l *Logger) SetFlags(flag int) {
+l.logger.SetFlags(flag)
+}
+func (l *Logger) SetPrefix(prefix string) {
+l.logger.SetPrefix(prefix)
+}
+func (l *Logger) callHandlers(level LogLevel, s string) {
+for _, h := range l.handlers[level] {
+h(level, s)
+}
+}
+func (l *Logger) Debugln(vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintln(vals...)
+l.logger.Output(2, "DEBUG: "+s)
+l.callHandlers(LevelDebug, s)
+}
+func (l *Logger) Debugf(format string, vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintf(format, vals...)
+l.logger.Output(2, "DEBUG: "+s)
+l.callHandlers(LevelDebug, s)
+}
+func (l *Logger) Infoln(vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintln(vals...)
+l.logger.Output(2, "INFO: "+s)
+l.callHandlers(LevelInfo, s)
+}
+func (l *Logger) Infof(format string, vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintf(format, vals...)
+l.logger.Output(2, "INFO: "+s)
+l.callHandlers(LevelInfo, s)
+}
+func (l *Logger) Okln(vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintln(vals...)
+l.logger.Output(2, "OK: "+s)
+l.callHandlers(LevelOK, s)
+}
+func (l *Logger) Okf(format string, vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintf(format, vals...)
+l.logger.Output(2, "OK: "+s)
+l.callHandlers(LevelOK, s)
+}
+func (l *Logger) Warnln(vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintln(vals...)
+l.logger.Output(2, "WARNING: "+s)
+l.callHandlers(LevelWarn, s)
+}
+func (l *Logger) Warnf(format string, vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintf(format, vals...)
+l.logger.Output(2, "WARNING: "+s)
+l.callHandlers(LevelWarn, s)
+}
+func (l *Logger) Fatalln(vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintln(vals...)
+l.logger.Output(2, "FATAL: "+s)
+l.callHandlers(LevelFatal, s)
+os.Exit(3)
+}
+func (l *Logger) Fatalf(format string, vals ...interface{}) {
+l.mut.Lock()
+defer l.mut.Unlock()
+s := fmt.Sprintf(format, vals...)
+l.logger.Output(2, "FATAL: "+s)
+l.callHandlers(LevelFatal, s)
+os.Exit(3)
+}
+func (l *Logger) FatalErr(err error) {
+if err != nil {
+l.Fatalf(err.Error())
+}
+}
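The new package wraps a single *log.Logger, prefixes each message with its level, and fans it out to any handlers registered for that level. A short usage sketch, assuming only what the file above defines (DefaultLogger is what the rest of this commit uses; a private instance from New() behaves the same way):

    package main

    import (
        "log"

        "github.com/calmh/syncthing/logger"
    )

    func main() {
        l := logger.New()
        l.SetPrefix("[example] ")
        l.SetFlags(log.Ltime | log.Lshortfile)

        l.Infoln("informational message")       // logged as "INFO: ..."
        l.Okf("operation %q completed", "sync") // logged as "OK: ..."
        l.Warnln("recoverable problem")         // logged as "WARNING: ..." and sent to LevelWarn handlers
        // l.Fatalln("unrecoverable problem")   // would log "FATAL: ..." and call os.Exit(3)
    }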

View File

@@ -52,17 +52,17 @@ func (b *Beacon) reader() {
 for {
 n, addr, err := b.conn.ReadFrom(bs)
 if err != nil {
-dlog.Println(err)
+l.Warnln("Beacon read:", err)
 return
 }
 if debug {
-dlog.Printf("recv %d bytes from %s", n, addr)
+l.Debugf("recv %d bytes from %s", n, addr)
 }
 select {
 case b.outbox <- recv{bs[:n], addr}:
 default:
 if debug {
-dlog.Println("Dropping message")
+l.Debugln("dropping message")
 }
 }
 }
@@ -73,7 +73,7 @@ func (b *Beacon) writer() {
 addrs, err := net.InterfaceAddrs()
 if err != nil {
-dlog.Println(err)
+l.Warnln("Beacon: interface addresses:", err)
 continue
 }
@@ -90,16 +90,22 @@ func (b *Beacon) writer() {
 dsts = append(dsts, net.IP{0xff, 0xff, 0xff, 0xff})
 }
+if debug {
+l.Debugln("addresses:", dsts)
+}
 for _, ip := range dsts {
 dst := &net.UDPAddr{IP: ip, Port: b.port}
 _, err := b.conn.WriteTo(bs, dst)
 if err != nil {
-dlog.Println(err)
+if debug {
+l.Debugln(err)
+}
 return
 }
 if debug {
-dlog.Printf("sent %d bytes to %s", len(bs), dst)
+l.Debugf("sent %d bytes to %s", len(bs), dst)
 }
 }
 }

View File

@@ -1,12 +1,13 @@
package mc package mc
import ( import (
"log"
"os" "os"
"strings" "strings"
"github.com/calmh/syncthing/logger"
) )
var ( var (
dlog = log.New(os.Stderr, "mc: ", log.Lmicroseconds|log.Lshortfile) debug = strings.Contains(os.Getenv("STTRACE"), "mc") || os.Getenv("STTRACE") == "all"
debug = strings.Contains(os.Getenv("STTRACE"), "mc") l = logger.DefaultLogger
) )

View File

@@ -1,12 +1,13 @@
package scanner package scanner
import ( import (
"log"
"os" "os"
"strings" "strings"
"github.com/calmh/syncthing/logger"
) )
var ( var (
dlog = log.New(os.Stderr, "scanner: ", log.Lmicroseconds|log.Lshortfile) debug = strings.Contains(os.Getenv("STTRACE"), "scanner") || os.Getenv("STTRACE") == "all"
debug = strings.Contains(os.Getenv("STTRACE"), "scanner") l = logger.DefaultLogger
) )

View File

@@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"errors" "errors"
"io/ioutil" "io/ioutil"
"log"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@@ -56,7 +55,7 @@ func (w *Walker) Walk() (files []File, ignore map[string][]string, err error) {
w.lazyInit() w.lazyInit()
if debug { if debug {
dlog.Println("Walk", w.Dir, w.BlockSize, w.IgnoreFile) l.Debugln("Walk", w.Dir, w.BlockSize, w.IgnoreFile)
} }
err = checkDir(w.Dir) err = checkDir(w.Dir)
@@ -75,7 +74,7 @@ func (w *Walker) Walk() (files []File, ignore map[string][]string, err error) {
if debug { if debug {
t1 := time.Now() t1 := time.Now()
d := t1.Sub(t0).Seconds() d := t1.Sub(t0).Seconds()
dlog.Printf("Walk in %.02f ms, %.0f files/s", d*1000, float64(len(files))/d) l.Debugf("Walk in %.02f ms, %.0f files/s", d*1000, float64(len(files))/d)
} }
err = checkDir(w.Dir) err = checkDir(w.Dir)
@@ -125,7 +124,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
return func(p string, info os.FileInfo, err error) error { return func(p string, info os.FileInfo, err error) error {
if err != nil { if err != nil {
if debug { if debug {
dlog.Println("error:", p, info, err) l.Debugln("error:", p, info, err)
} }
return nil return nil
} }
@@ -133,7 +132,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
rn, err := filepath.Rel(w.Dir, p) rn, err := filepath.Rel(w.Dir, p)
if err != nil { if err != nil {
if debug { if debug {
dlog.Println("rel error:", p, err) l.Debugln("rel error:", p, err)
} }
return nil return nil
} }
@@ -145,7 +144,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) { if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
// A temporary file // A temporary file
if debug { if debug {
dlog.Println("temporary:", rn) l.Debugln("temporary:", rn)
} }
return nil return nil
} }
@@ -153,7 +152,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
if _, sn := filepath.Split(rn); sn == w.IgnoreFile { if _, sn := filepath.Split(rn); sn == w.IgnoreFile {
// An ignore-file; these are ignored themselves // An ignore-file; these are ignored themselves
if debug { if debug {
dlog.Println("ignorefile:", rn) l.Debugln("ignorefile:", rn)
} }
return nil return nil
} }
@@ -161,7 +160,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
if w.ignoreFile(ign, rn) { if w.ignoreFile(ign, rn) {
// An ignored file // An ignored file
if debug { if debug {
dlog.Println("ignored:", rn) l.Debugln("ignored:", rn)
} }
if info.IsDir() { if info.IsDir() {
return filepath.SkipDir return filepath.SkipDir
@@ -174,7 +173,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
cf := w.CurrentFiler.CurrentFile(rn) cf := w.CurrentFiler.CurrentFile(rn)
if cf.Modified == info.ModTime().Unix() && cf.Flags == uint32(info.Mode()&os.ModePerm|protocol.FlagDirectory) { if cf.Modified == info.ModTime().Unix() && cf.Flags == uint32(info.Mode()&os.ModePerm|protocol.FlagDirectory) {
if debug { if debug {
dlog.Println("unchanged:", cf) l.Debugln("unchanged:", cf)
} }
*res = append(*res, cf) *res = append(*res, cf)
} else { } else {
@@ -185,7 +184,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
Modified: info.ModTime().Unix(), Modified: info.ModTime().Unix(),
} }
if debug { if debug {
dlog.Println("dir:", cf, f) l.Debugln("dir:", cf, f)
} }
*res = append(*res, f) *res = append(*res, f)
} }
@@ -198,7 +197,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
cf := w.CurrentFiler.CurrentFile(rn) cf := w.CurrentFiler.CurrentFile(rn)
if cf.Flags&protocol.FlagDeleted == 0 && cf.Modified == info.ModTime().Unix() { if cf.Flags&protocol.FlagDeleted == 0 && cf.Modified == info.ModTime().Unix() {
if debug { if debug {
dlog.Println("unchanged:", cf) l.Debugln("unchanged:", cf)
} }
*res = append(*res, cf) *res = append(*res, cf)
return nil return nil
@@ -207,17 +206,17 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
if w.Suppressor != nil && w.Suppressor.Suppress(rn, info) { if w.Suppressor != nil && w.Suppressor.Suppress(rn, info) {
if !w.suppressed[rn] { if !w.suppressed[rn] {
w.suppressed[rn] = true w.suppressed[rn] = true
log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", p) l.Infof("Changes to %q are being temporarily suppressed because it changes too frequently.", p)
cf.Suppressed = true cf.Suppressed = true
cf.Version++ cf.Version++
} }
if debug { if debug {
dlog.Println("suppressed:", cf) l.Debugln("suppressed:", cf)
} }
*res = append(*res, cf) *res = append(*res, cf)
return nil return nil
} else if w.suppressed[rn] { } else if w.suppressed[rn] {
log.Printf("INFO: Changes to %q are no longer suppressed.", p) l.Infof("Changes to %q are no longer suppressed.", p)
delete(w.suppressed, rn) delete(w.suppressed, rn)
} }
} }
@@ -225,7 +224,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
fd, err := os.Open(p) fd, err := os.Open(p)
if err != nil { if err != nil {
if debug { if debug {
dlog.Println("open:", p, err) l.Debugln("open:", p, err)
} }
return nil return nil
} }
@@ -235,13 +234,13 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
blocks, err := Blocks(fd, w.BlockSize) blocks, err := Blocks(fd, w.BlockSize)
if err != nil { if err != nil {
if debug { if debug {
dlog.Println("hash error:", rn, err) l.Debugln("hash error:", rn, err)
} }
return nil return nil
} }
if debug { if debug {
t1 := time.Now() t1 := time.Now()
dlog.Println("hashed:", rn, ";", len(blocks), "blocks;", info.Size(), "bytes;", int(float64(info.Size())/1024/t1.Sub(t0).Seconds()), "KB/s") l.Debugln("hashed:", rn, ";", len(blocks), "blocks;", info.Size(), "bytes;", int(float64(info.Size())/1024/t1.Sub(t0).Seconds()), "KB/s")
} }
f := File{ f := File{
Name: rn, Name: rn,

View File

@@ -1,12 +1,13 @@
package upnp package upnp
import ( import (
"log"
"os" "os"
"strings" "strings"
"github.com/calmh/syncthing/logger"
) )
var ( var (
dlog = log.New(os.Stderr, "upnp: ", log.Lmicroseconds|log.Lshortfile) debug = strings.Contains(os.Getenv("STTRACE"), "upnp") || os.Getenv("STTRACE") == "all"
debug = strings.Contains(os.Getenv("STTRACE"), "upnp") l = logger.DefaultLogger
) )

View File

@@ -81,7 +81,7 @@ Mx: 3
} }
if debug { if debug {
dlog.Println(string(resp[:n])) l.Debugln(string(resp[:n]))
} }
reader := bufio.NewReader(bytes.NewBuffer(resp[:n])) reader := bufio.NewReader(bytes.NewBuffer(resp[:n]))
@@ -225,8 +225,8 @@ func soapRequest(url, function, message string) error {
req.Header.Set("Pragma", "no-cache") req.Header.Set("Pragma", "no-cache")
if debug { if debug {
dlog.Println(req.Header.Get("SOAPAction")) l.Debugln(req.Header.Get("SOAPAction"))
dlog.Println(body) l.Debugln(body)
} }
r, err := http.DefaultClient.Do(req) r, err := http.DefaultClient.Do(req)
@@ -236,7 +236,7 @@ func soapRequest(url, function, message string) error {
if debug { if debug {
resp, _ := ioutil.ReadAll(r.Body) resp, _ := ioutil.ReadAll(r.Body)
dlog.Println(string(resp)) l.Debugln(string(resp))
} }
r.Body.Close() r.Body.Close()