Mirror of https://github.com/octoleo/syncthing.git (synced 2024-11-09 14:50:56 +00:00)
all: Fix some linter errors (#5499)
I'm working through linter complaints; these are some fixes. Broad categories:

1) Ignore errors where we can ignore errors: add the "_ = ..." construct. You can argue that this is annoying noise, but apart from silencing the linter it *does* serve the purpose of highlighting that an error is being ignored. I think this is OK, because the linter highlighted some error cases I wasn't aware of (starting CPU profiles, for example).

2) Untyped constants where we thought we had set the type.

3) A real bug where we ineffectually assigned to a shadowed err.

4) Some dead code removed.

There'll be more of these, because not all packages are fixed, but the diff was already large enough.
commit 2111386ee4
parent 583172dc8d
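As background for readers of the diff, here is a minimal, self-contained Go sketch of the three non-trivial categories above. It is not taken from the Syncthing sources; the helper names (writeGreeting, loadValue, errDemo) are invented for illustration, and Protocol only mirrors the typed-constant pattern fixed in the const blocks below.

package main

import (
	"errors"
	"fmt"
	"os"
)

// Protocol mirrors the kind of typed string constant fixed below (category 2).
type Protocol string

const (
	TCP Protocol = "TCP"
	// Without the explicit type this would be an untyped string constant,
	// not a Protocol, even though it sits in the same const block.
	UDP Protocol = "UDP"
)

var errDemo = errors.New("demo failure")

// writeGreeting stands in for calls like w.Write or enc.Encode whose errors
// are deliberately ignored (category 1).
func writeGreeting() (int, error) {
	return fmt.Fprintln(os.Stdout, "hello")
}

func loadValue() (string, error) {
	return "", errDemo
}

func main() {
	// Category 1: "_ = ..." makes the ignored error explicit, which is what
	// satisfies the linter.
	_, _ = writeGreeting()

	// Category 3: ":=" inside the if declares a new err that shadows the
	// outer one, so the assignment is ineffectual outside the block.
	var err error
	if v, err := loadValue(); err == nil { // shadows the outer err
		fmt.Println(v)
	}
	fmt.Println("outer err after shadowed assignment:", err) // still <nil>

	// The fix, as in the renameFile hunk below: declare first, assign with "=".
	var v string
	if v, err = loadValue(); err == nil {
		fmt.Println(v)
	}
	fmt.Println("outer err after plain assignment:", err) // now the demo error

	fmt.Println("typed constant:", UDP)
}

With the shadowed form the outer err stays nil and the failure is silently dropped, which is why the commit message calls that case a real bug rather than mere linter noise.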
@@ -44,7 +44,7 @@ func (s *auditService) Serve() {
     for {
         select {
         case ev := <-sub.C():
-            enc.Encode(ev)
+            _ = enc.Encode(ev)
         case <-s.stop:
             return
         }
@@ -13,6 +13,6 @@ import "time"

 func cpuUsage() time.Duration {
     var rusage syscall.Rusage
-    syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
+    _ = syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
     return time.Duration(rusage.Utime.Nano() + rusage.Stime.Nano())
 }
@@ -27,7 +27,7 @@ import (
     "strings"
     "time"

-    "github.com/rcrowley/go-metrics"
+    metrics "github.com/rcrowley/go-metrics"
     "github.com/syncthing/syncthing/lib/config"
     "github.com/syncthing/syncthing/lib/connections"
     "github.com/syncthing/syncthing/lib/db"
@@ -966,7 +966,7 @@ func (s *apiService) postSystemShutdown(w http.ResponseWriter, r *http.Request)
 }

 func (s *apiService) flushResponse(resp string, w http.ResponseWriter) {
-    w.Write([]byte(resp + "\n"))
+    _, _ = w.Write([]byte(resp + "\n"))
     f := w.(http.Flusher)
     f.Flush()
 }
@@ -1121,15 +1121,17 @@ func (s *apiService) getSupportBundle(w http.ResponseWriter, r *http.Request) {
     var heapBuffer, cpuBuffer bytes.Buffer
     filename := fmt.Sprintf("syncthing-heap-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, Version, time.Now().Format("150405")) // hhmmss
     runtime.GC()
-    pprof.WriteHeapProfile(&heapBuffer)
-    files = append(files, fileEntry{name: filename, data: heapBuffer.Bytes()})
+    if err := pprof.WriteHeapProfile(&heapBuffer); err == nil {
+        files = append(files, fileEntry{name: filename, data: heapBuffer.Bytes()})
+    }

     const duration = 4 * time.Second
     filename = fmt.Sprintf("syncthing-cpu-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, Version, time.Now().Format("150405")) // hhmmss
-    pprof.StartCPUProfile(&cpuBuffer)
-    time.Sleep(duration)
-    pprof.StopCPUProfile()
-    files = append(files, fileEntry{name: filename, data: cpuBuffer.Bytes()})
+    if err := pprof.StartCPUProfile(&cpuBuffer); err == nil {
+        time.Sleep(duration)
+        pprof.StopCPUProfile()
+        files = append(files, fileEntry{name: filename, data: cpuBuffer.Bytes()})
+    }

     // Add buffer files to buffer zip
     var zipFilesBuffer bytes.Buffer
@@ -1151,7 +1153,7 @@ func (s *apiService) getSupportBundle(w http.ResponseWriter, r *http.Request) {
     // Serve the buffer zip to client for download
     w.Header().Set("Content-Type", "application/zip")
     w.Header().Set("Content-Disposition", "attachment; filename="+zipFileName)
-    io.Copy(w, &zipFilesBuffer)
+    _, _ = io.Copy(w, &zipFilesBuffer)
 }

 func (s *apiService) getSystemHTTPMetrics(w http.ResponseWriter, r *http.Request) {
@@ -1171,7 +1173,7 @@ func (s *apiService) getSystemHTTPMetrics(w http.ResponseWriter, r *http.Request
         }
     })
     bs, _ := json.MarshalIndent(stats, "", " ")
-    w.Write(bs)
+    _, _ = w.Write(bs)
 }

 func (s *apiService) getSystemDiscovery(w http.ResponseWriter, r *http.Request) {
@@ -1463,7 +1465,7 @@ func (s *apiService) getQR(w http.ResponseWriter, r *http.Request) {
     }

     w.Header().Set("Content-Type", "image/png")
-    w.Write(code.PNG())
+    _, _ = w.Write(code.PNG())
 }

 func (s *apiService) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
@@ -1561,7 +1563,7 @@ func (s *apiService) getSystemBrowse(w http.ResponseWriter, r *http.Request) {

     // Default value or in case of error unmarshalling ends up being basic fs.
     var fsType fs.FilesystemType
-    fsType.UnmarshalText([]byte(qs.Get("filesystem")))
+    _ = fsType.UnmarshalText([]byte(qs.Get("filesystem")))

     sendJSON(w, browseFiles(current, fsType))
 }
@@ -1645,9 +1647,10 @@ func (s *apiService) getCPUProf(w http.ResponseWriter, r *http.Request) {
     w.Header().Set("Content-Type", "application/octet-stream")
     w.Header().Set("Content-Disposition", "attachment; filename="+filename)

-    pprof.StartCPUProfile(w)
-    time.Sleep(duration)
-    pprof.StopCPUProfile()
+    if err := pprof.StartCPUProfile(w); err == nil {
+        time.Sleep(duration)
+        pprof.StopCPUProfile()
+    }
 }

 func (s *apiService) getHeapProf(w http.ResponseWriter, r *http.Request) {
@@ -1657,7 +1660,7 @@ func (s *apiService) getHeapProf(w http.ResponseWriter, r *http.Request) {
     w.Header().Set("Content-Disposition", "attachment; filename="+filename)

     runtime.GC()
-    pprof.WriteHeapProfile(w)
+    _ = pprof.WriteHeapProfile(w)
 }

 func toJsonFileInfoSlice(fs []db.FileInfoTruncated) []jsonDBFileInfo {
@@ -20,7 +20,7 @@ import (
     "github.com/syncthing/syncthing/lib/rand"
     "github.com/syncthing/syncthing/lib/sync"
     "golang.org/x/crypto/bcrypt"
-    "gopkg.in/ldap.v2"
+    ldap "gopkg.in/ldap.v2"
 )

 var (
@@ -80,11 +80,10 @@ func basicAuthAndSessionMiddleware(cookieName string, guiCfg config.GUIConfigura
             return
         }

-        authOk := false
         username := string(fields[0])
         password := string(fields[1])

-        authOk = auth(username, password, guiCfg, ldapCfg)
+        authOk := auth(username, password, guiCfg, ldapCfg)
         if !authOk {
             usernameIso := string(iso88591ToUTF8([]byte(username)))
             passwordIso := string(iso88591ToUTF8([]byte(password)))
@@ -160,7 +160,7 @@ func (s *staticsServer) serveAsset(w http.ResponseWriter, r *http.Request) {
     }
     w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs)))

-    w.Write(bs)
+    _, _ = w.Write(bs)
 }

 func (s *staticsServer) serveThemes(w http.ResponseWriter, r *http.Request) {
@@ -114,13 +114,13 @@ func TestAssetsDir(t *testing.T) {
     // The asset map contains compressed assets, so create a couple of gzip compressed assets here.
     buf := new(bytes.Buffer)
     gw := gzip.NewWriter(buf)
-    gw.Write([]byte("default"))
+    _, _ = gw.Write([]byte("default"))
     gw.Close()
     def := buf.Bytes()

     buf = new(bytes.Buffer)
     gw = gzip.NewWriter(buf)
-    gw.Write([]byte("foo"))
+    _, _ = gw.Write([]byte("foo"))
     gw.Close()
     foo := buf.Bytes()

@@ -127,7 +127,6 @@ func setBuildMetadata() {
 var (
     myID protocol.DeviceID
-    stop = make(chan int)
     lans []*net.IPNet
 )

 const (
@@ -436,7 +435,9 @@ func main() {
     }

     if options.resetDatabase {
-        resetDB()
+        if err := resetDB(); err != nil {
+            l.Fatalln("Resetting database:", err)
+        }
         return
     }

@@ -450,7 +451,9 @@ func main() {
 func openGUI() {
     cfg, _ := loadOrDefaultConfig()
     if cfg.GUI().Enabled {
-        openURL(cfg.GUI().URL())
+        if err := openURL(cfg.GUI().URL()); err != nil {
+            l.Fatalln("Open URL:", err)
+        }
     } else {
         l.Warnln("Browser: GUI is currently disabled")
     }
@@ -631,7 +634,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
     // Attempt to increase the limit on number of open files to the maximum
     // allowed, in case we have many peers. We don't really care enough to
     // report the error if there is one.
-    osutil.MaximizeOpenFileLimit()
+    _, _ = osutil.MaximizeOpenFileLimit()

     // Ensure that we have a certificate and key.
     cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile])
@@ -754,7 +757,7 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
     // Add and start folders
     for _, folderCfg := range cfg.Folders() {
         if folderCfg.Paused {
-            folderCfg.CreateRoot()
+            _ = folderCfg.CreateRoot()
             continue
         }
         m.AddFolder(folderCfg)
@@ -823,9 +826,11 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
     if runtimeOptions.cpuProfile {
         f, err := os.Create(fmt.Sprintf("cpu-%d.pprof", os.Getpid()))
         if err != nil {
-            log.Fatal(err)
+            l.Fatalln("Creating profile:", err)
         }
-        pprof.StartCPUProfile(f)
+        if err := pprof.StartCPUProfile(f); err != nil {
+            l.Fatalln("Starting profile:", err)
+        }
     }

     myDev, _ := cfg.Device(myID)
@@ -842,8 +847,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
         l.Infoln("Anonymous usage reporting is always enabled for candidate releases.")
         if opts.URAccepted != usageReportVersion {
             opts.URAccepted = usageReportVersion
-            cfg.SetOptions(opts)
-            cfg.Save()
+            _, _ = cfg.SetOptions(opts)
+            _ = cfg.Save()
             // Unique ID will be set and config saved below if necessary.
         }
     }
@@ -851,8 +856,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
     // If we are going to do usage reporting, ensure we have a valid unique ID.
     if opts := cfg.Options(); opts.URAccepted > 0 && opts.URUniqueID == "" {
         opts.URUniqueID = rand.String(8)
-        cfg.SetOptions(opts)
-        cfg.Save()
+        _, _ = cfg.SetOptions(opts)
+        _ = cfg.Save()
     }

     usageReportingSvc := newUsageReportingService(cfg, m, connectionsService)
@@ -872,8 +877,8 @@ func syncthingMain(runtimeOptions RuntimeOptions) {
             opts.AutoUpgradeIntervalH = 12
             // Set the option into the config as well, as the auto upgrade
             // loop expects to read a valid interval from there.
-            cfg.SetOptions(opts)
-            cfg.Save()
+            _, _ = cfg.SetOptions(opts)
+            _ = cfg.Save()
         }
         // We don't tweak the user's choice of upgrading to pre-releases or
         // not, as otherwise they cannot step off the candidate channel.
@@ -954,7 +959,7 @@ func loadConfigAtStartup() *config.Wrapper {
     cfg, err := config.Load(cfgFile, myID)
     if os.IsNotExist(err) {
         cfg = defaultConfig(cfgFile)
-        cfg.Save()
+        _ = cfg.Save()
         l.Infof("Default config saved. Edit %s to taste or use the GUI\n", cfg.ConfigPath())
     } else if err == io.EOF {
         l.Fatalln("Failed to load config: unexpected end of file. Truncated or empty configuration?")
@@ -1058,7 +1063,7 @@ func setupGUI(mainService *suture.Supervisor, cfg *config.Wrapper, m *model.Mode
         // Can potentially block if the utility we are invoking doesn't
         // fork, and just execs, hence keep it in its own routine.
         <-api.startedOnce
-        go openURL(guiCfg.URL())
+        go func() { _ = openURL(guiCfg.URL()) }()
     }
 }

@@ -30,7 +30,7 @@ func (c *mockedConfig) LDAP() config.LDAPConfiguration {

 func (c *mockedConfig) RawCopy() config.Configuration {
     cfg := config.Configuration{}
-    util.SetDefaults(&cfg.Options)
+    _ = util.SetDefaults(&cfg.Options)
     return cfg
 }

@@ -127,13 +127,13 @@ func monitorMain(runtimeOptions RuntimeOptions) {
         select {
         case s := <-stopSign:
             l.Infof("Signal %d received; exiting", s)
-            cmd.Process.Signal(sigTerm)
+            _ = cmd.Process.Signal(sigTerm)
             <-exit
             return

         case s := <-restartSign:
             l.Infof("Signal %d received; restarting", s)
-            cmd.Process.Signal(sigHup)
+            _ = cmd.Process.Signal(sigHup)
             err = <-exit

         case err = <-exit:
@@ -179,7 +179,7 @@ func copyStderr(stderr io.Reader, dst io.Writer) {
         }

         if panicFd == nil {
-            dst.Write([]byte(line))
+            _, _ = dst.Write([]byte(line))

             if strings.Contains(line, "SIGILL") {
                 l.Warnln(`
@@ -226,20 +226,20 @@ func copyStderr(stderr io.Reader, dst io.Writer) {

                 stdoutMut.Lock()
                 for _, line := range stdoutFirstLines {
-                    panicFd.WriteString(line)
+                    _, _ = panicFd.WriteString(line)
                 }
-                panicFd.WriteString("...\n")
+                _, _ = panicFd.WriteString("...\n")
                 for _, line := range stdoutLastLines {
-                    panicFd.WriteString(line)
+                    _, _ = panicFd.WriteString(line)
                 }
                 stdoutMut.Unlock()
             }

-            panicFd.WriteString("Panic at " + time.Now().Format(time.RFC3339) + "\n")
+            _, _ = panicFd.WriteString("Panic at " + time.Now().Format(time.RFC3339) + "\n")
         }

         if panicFd != nil {
-            panicFd.WriteString(line)
+            _, _ = panicFd.WriteString(line)
         }
     }
 }
@@ -263,7 +263,7 @@ func copyStdout(stdout io.Reader, dst io.Writer) {
         }
         stdoutMut.Unlock()

-        dst.Write([]byte(line))
+        _, _ = dst.Write([]byte(line))
     }
 }

@@ -17,7 +17,7 @@ import (
 func TestAutoClosedFile(t *testing.T) {
     os.RemoveAll("_autoclose")
     defer os.RemoveAll("_autoclose")
-    os.Mkdir("_autoclose", 0755)
+    _ = os.Mkdir("_autoclose", 0755)
     file := filepath.FromSlash("_autoclose/tmp")
     data := []byte("hello, world\n")

@@ -38,7 +38,10 @@ func savePerfStats(file string) {

     t0 := time.Now()
     for t := range time.NewTicker(250 * time.Millisecond).C {
-        syscall.Getrusage(syscall.RUSAGE_SELF, &rusage)
+        if err := syscall.Getrusage(syscall.RUSAGE_SELF, &rusage); err != nil {
+            continue
+        }
+
         curTime := time.Now().UnixNano()
         timeDiff := curTime - prevTime
         curUsage := rusage.Utime.Nano() + rusage.Stime.Nano()
@@ -19,11 +19,11 @@ func optionTable(w io.Writer, rows [][]string) {
     for _, row := range rows {
         for i, cell := range row {
             if i > 0 {
-                tw.Write([]byte("\t"))
+                _, _ = tw.Write([]byte("\t"))
             }
-            tw.Write([]byte(cell))
+            _, _ = tw.Write([]byte(cell))
         }
-        tw.Write([]byte("\n"))
+        _, _ = tw.Write([]byte("\n"))
     }
     tw.Flush()
 }
@@ -348,7 +348,9 @@ func newUsageReportingService(cfg *config.Wrapper, model *model.Model, connectio
 func (s *usageReportingService) sendUsageReport() error {
     d := reportData(s.cfg, s.model, s.connectionsService, s.cfg.Options().URAccepted, false)
     var b bytes.Buffer
-    json.NewEncoder(&b).Encode(d)
+    if err := json.NewEncoder(&b).Encode(d); err != nil {
+        return err
+    }

     client := &http.Client{
         Transport: &http.Transport{
@@ -417,7 +419,7 @@ func (s *usageReportingService) Stop() {
     s.stopMut.RUnlock()
 }

-func (usageReportingService) String() string {
+func (*usageReportingService) String() string {
     return "usageReportingService"
 }

@@ -425,7 +427,7 @@ func (usageReportingService) String() string {
 func cpuBench(iterations int, duration time.Duration, useWeakHash bool) float64 {
     dataSize := 16 * protocol.MinBlockSize
     bs := make([]byte, dataSize)
-    rand.Reader.Read(bs)
+    _, _ = rand.Reader.Read(bs)

     var perf float64
     for i := 0; i < iterations; i++ {
@@ -60,14 +60,13 @@ type copyBlocksState struct {
 const retainBits = fs.ModeSetgid | fs.ModeSetuid | fs.ModeSticky

 var (
-    activity               = newDeviceActivity()
-    errNoDevice            = errors.New("peers who had this file went away, or the file has changed while syncing. will retry later")
-    errSymlinksUnsupported = errors.New("symlinks not supported")
-    errDirHasToBeScanned   = errors.New("directory contains unexpected files, scheduling scan")
-    errDirHasIgnored       = errors.New("directory contains ignored files (see ignore documentation for (?d) prefix)")
-    errDirNotEmpty         = errors.New("directory is not empty; files within are probably ignored on connected devices only")
-    errNotAvailable        = errors.New("no connected device has the required version of this file")
-    errModified            = errors.New("file modified but not rescanned; will try again later")
+    activity             = newDeviceActivity()
+    errNoDevice          = errors.New("peers who had this file went away, or the file has changed while syncing. will retry later")
+    errDirHasToBeScanned = errors.New("directory contains unexpected files, scheduling scan")
+    errDirHasIgnored     = errors.New("directory contains ignored files (see ignore documentation for (?d) prefix)")
+    errDirNotEmpty       = errors.New("directory is not empty; files within are probably ignored on connected devices only")
+    errNotAvailable      = errors.New("no connected device has the required version of this file")
+    errModified          = errors.New("file modified but not rescanned; will try again later")
 )

 const (
@@ -882,7 +881,8 @@ func (f *sendReceiveFolder) renameFile(cur, source, target protocol.FileInfo, ig
         scanChan <- target.Name
         err = errModified
     default:
-        if fi, err := scanner.CreateFileInfo(stat, target.Name, f.fs); err == nil {
+        var fi protocol.FileInfo
+        if fi, err = scanner.CreateFileInfo(stat, target.Name, f.fs); err == nil {
             if !fi.IsEquivalentOptional(curTarget, f.IgnorePerms, true, protocol.LocalAllFlags) {
                 // Target changed
                 scanChan <- target.Name
@@ -1016,7 +1016,7 @@ func (f *sendReceiveFolder) handleFile(file protocol.FileInfo, copyChan chan<- c
             // Otherwise, discard the file ourselves in order for the
             // sharedpuller not to panic when it fails to exclusively create a
             // file which already exists
-            osutil.InWritableDir(f.fs.Remove, f.fs, tempName)
+            _ = osutil.InWritableDir(f.fs.Remove, f.fs, tempName)
         }
     } else {
         // Copy the blocks, as we don't want to shuffle them on the FileInfo
@@ -1142,7 +1142,7 @@ func (f *sendReceiveFolder) shortcutFile(file, curFile protocol.FileInfo, dbUpda
         }
     }

-    f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails
+    _ = f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails

     // This may have been a conflict. We should merge the version vectors so
     // that our clock doesn't move backwards.
@@ -1536,7 +1536,7 @@ func (f *sendReceiveFolder) performFinish(ignores *ignore.Matcher, file, curFile
     }

     // Set the correct timestamp on the new file
-    f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails
+    _ = f.fs.Chtimes(file.Name, file.ModTime(), file.ModTime()) // never fails

     // Record the updated file in the index
     dbUpdateChan <- dbUpdateJob{file, dbUpdateHandleFile}
@@ -1706,7 +1706,7 @@ func (f *sendReceiveFolder) pullScannerRoutine(scanChan <-chan string) {
             l.Debugln(f, "scheduling scan after pulling for", path)
             scanList = append(scanList, path)
         }
-        f.Scan(scanList)
+        _ = f.Scan(scanList)
     }
 }

@@ -1858,7 +1858,7 @@ func (f *sendReceiveFolder) deleteDir(dir string, ignores *ignore.Matcher, scanC
     }

     for _, del := range toBeDeleted {
-        f.fs.RemoveAll(del)
+        _ = f.fs.RemoveAll(del)
     }

     err := osutil.InWritableDir(f.fs.Remove, f.fs, dir)
@@ -258,9 +258,9 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
     ffs := fs.MtimeFS()

     // These are our metadata files, and they should always be hidden.
-    ffs.Hide(config.DefaultMarkerName)
-    ffs.Hide(".stversions")
-    ffs.Hide(".stignore")
+    _ = ffs.Hide(config.DefaultMarkerName)
+    _ = ffs.Hide(".stversions")
+    _ = ffs.Hide(".stignore")

     p := folderFactory(m, cfg, ver, ffs)

@@ -338,7 +338,7 @@ func (m *Model) RemoveFolder(cfg config.FolderConfiguration) {
     m.fmut.Lock()
     m.pmut.Lock()
     // Delete syncthing specific files
-    cfg.Filesystem().RemoveAll(config.DefaultMarkerName)
+    _ = cfg.Filesystem().RemoveAll(config.DefaultMarkerName)

     m.tearDownFolderLocked(cfg, fmt.Errorf("removing folder %v", cfg.Description()))
     // Remove it from the database
@@ -362,7 +362,7 @@ func (m *Model) tearDownFolderLocked(cfg config.FolderConfiguration, err error)
     m.pmut.Unlock()
     m.fmut.Unlock()
     for _, id := range tokens {
-        m.RemoveAndWait(id, 0)
+        _ = m.RemoveAndWait(id, 0)
     }
     m.fmut.Lock()
     m.pmut.Lock()
@@ -1189,7 +1189,7 @@ func (m *Model) handleIntroductions(introducerCfg config.DeviceConfiguration, cm
         }

         if changed {
-            m.cfg.SetFolder(fcfg)
+            _, _ = m.cfg.SetFolder(fcfg)
         }
     }

@@ -1246,7 +1246,7 @@ func (m *Model) handleDeintroductions(introducerCfg config.DeviceConfiguration,
         cfg := m.cfg.RawCopy()
         cfg.Folders = folders
         cfg.Devices = devices
-        m.cfg.Replace(cfg)
+        _, _ = m.cfg.Replace(cfg)
     }

     return changed
@@ -1325,7 +1325,7 @@ func (m *Model) introduceDevice(device protocol.Device, introducerCfg config.Dev
         newDeviceCfg.SkipIntroductionRemovals = device.SkipIntroductionRemovals
     }

-    m.cfg.SetDevice(newDeviceCfg)
+    _, _ = m.cfg.SetDevice(newDeviceCfg)
 }

 // Closed is called when a connection has been closed
@@ -1776,8 +1776,8 @@ func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloR

     if (device.Name == "" || m.cfg.Options().OverwriteRemoteDevNames) && hello.DeviceName != "" {
         device.Name = hello.DeviceName
-        m.cfg.SetDevice(device)
-        m.cfg.Save()
+        _, _ = m.cfg.SetDevice(device)
+        _ = m.cfg.Save()
     }

     m.deviceWasSeen(deviceID)
@@ -1864,7 +1864,7 @@ func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignore
         // local index may update for other folders than the one we are
         // sending for.
         if fs.Sequence(protocol.LocalDeviceID) <= prevSequence {
-            sub.Poll(time.Minute)
+            _, _ = sub.Poll(time.Minute)
             continue
         }

@@ -2484,7 +2484,7 @@ func (m *Model) RestoreFolderVersions(folder string, versions map[string]time.Ti
             }
         }

-        filesystem.MkdirAll(filepath.Dir(target), 0755)
+        _ = filesystem.MkdirAll(filepath.Dir(target), 0755)
         if err == nil {
             err = osutil.Copy(filesystem, source, target)
         }
@@ -2734,17 +2734,6 @@ func getChunk(data []string, skip, get int) ([]string, int, int) {
     return data[skip : skip+get], 0, 0
 }

-func stringSliceWithout(ss []string, s string) []string {
-    for i := range ss {
-        if ss[i] == s {
-            copy(ss[i:], ss[i+1:])
-            ss = ss[:len(ss)-1]
-            return ss
-        }
-    }
-    return ss
-}
-
 func readOffsetIntoBuf(fs fs.Filesystem, file string, offset int64, buf []byte) error {
     fd, err := fs.Open(file)
     if err != nil {
@@ -15,7 +15,7 @@ type Protocol string

 const (
     TCP Protocol = "TCP"
-    UDP          = "UDP"
+    UDP Protocol = "UDP"
 )

 type Device interface {
@@ -92,7 +92,7 @@ func (s *Service) process() int {
             toRenew = append(toRenew, mapping)
         } else {
             toUpdate = append(toUpdate, mapping)
-            mappingRenewIn := mapping.expires.Sub(time.Now())
+            mappingRenewIn := time.Until(mapping.expires)
             if mappingRenewIn < renewIn {
                 renewIn = mappingRenewIn
             }
@@ -328,6 +328,6 @@ findIP:

 func hash(input string) int64 {
     h := fnv.New64a()
-    h.Write([]byte(input))
+    _, _ = h.Write([]byte(input))
     return int64(h.Sum64())
 }
@@ -77,12 +77,7 @@ func readHello(c io.Reader) (HelloResult, error) {
         if err := hello.Unmarshal(buf); err != nil {
             return HelloResult{}, err
         }
-        res := HelloResult{
-            DeviceName:    hello.DeviceName,
-            ClientName:    hello.ClientName,
-            ClientVersion: hello.ClientVersion,
-        }
-        return res, nil
+        return HelloResult(hello), nil

     case 0x00010001, 0x00010000, Version13HelloMagic:
         // This is the first word of an older cluster config message or an
@@ -83,10 +83,10 @@ type Relation int

 const (
     MajorOlder Relation = -2 // Older by a major version (x in x.y.z or 0.x.y).
-    Older               = -1 // Older by a minor version (y or z in x.y.z, or y in 0.x.y)
-    Equal               = 0  // Versions are semantically equal
-    Newer               = 1  // Newer by a minor version (y or z in x.y.z, or y in 0.x.y)
-    MajorNewer          = 2  // Newer by a major version (x in x.y.z or 0.x.y).
+    Older      Relation = -1 // Older by a minor version (y or z in x.y.z, or y in 0.x.y)
+    Equal      Relation = 0  // Versions are semantically equal
+    Newer      Relation = 1  // Newer by a minor version (y or z in x.y.z, or y in 0.x.y)
+    MajorNewer Relation = 2  // Newer by a major version (x in x.y.z or 0.x.y).
 )

 // CompareVersions returns a relation describing how a compares to b.