// Copyright (C) 2014 The Syncthing Authors.
//
// This program is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option)
// any later version.
//
// This program is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// You should have received a copy of the GNU General Public License along
// with this program. If not, see <http://www.gnu.org/licenses/>.

package model

import (
	"errors"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/syncthing/protocol"

	"github.com/syncthing/syncthing/internal/config"
	"github.com/syncthing/syncthing/internal/db"
	"github.com/syncthing/syncthing/internal/events"
	"github.com/syncthing/syncthing/internal/ignore"
	"github.com/syncthing/syncthing/internal/osutil"
	"github.com/syncthing/syncthing/internal/scanner"
	"github.com/syncthing/syncthing/internal/symlinks"
	"github.com/syncthing/syncthing/internal/versioner"
)

// TODO: Stop on errors

const (
	pauseIntv     = 60 * time.Second
	nextPullIntv  = 10 * time.Second
	checkPullIntv = 1 * time.Second
)
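
// These three intervals drive Serve's pull loop: every checkPullIntv we
// check whether the remote index has advanced, after a successful pull we
// back off to nextPullIntv, and after ten consecutive unproductive
// iterations we pause for pauseIntv before retrying.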

// A pullBlockState is passed to the puller routine for each block that needs
// to be fetched.
type pullBlockState struct {
	*sharedPullerState
	block protocol.BlockInfo
}

// A copyBlocksState is passed to the copy routine when the file has blocks
// to be copied.
type copyBlocksState struct {
	*sharedPullerState
	blocks []protocol.BlockInfo
}

var (
	activity    = newDeviceActivity()
	errNoDevice = errors.New("no available source device")
)

type Puller struct {
	folder          string
	dir             string
	scanIntv        time.Duration
	model           *Model
	stop            chan struct{}
	versioner       versioner.Versioner
	ignorePerms     bool
	lenientMtimes   bool
	progressEmitter *ProgressEmitter
	copiers         int
	pullers         int
	queue           *jobQueue
}
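
// A Puller is assumed to have its fields populated by the model before Serve
// is called; Serve itself only creates the stop channel, and pullerIteration
// pushes to queue unconditionally, so queue must already be a usable
// *jobQueue. The construction site is not part of this file.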

// Serve will run scans and pulls. It will return when Stop()ed or on a
// critical error.
func (p *Puller) Serve() {
	if debug {
		l.Debugln(p, "starting")
		defer l.Debugln(p, "exiting")
	}

	p.stop = make(chan struct{})

	pullTimer := time.NewTimer(checkPullIntv)
	scanTimer := time.NewTimer(time.Millisecond) // The first scan should be done immediately.

	defer func() {
		pullTimer.Stop()
		scanTimer.Stop()
		// TODO: Should there be an actual FolderStopped state?
		p.model.setState(p.folder, FolderIdle)
	}()

	var prevVer int64
	var prevIgnoreHash string

	// We don't start pulling files until a scan has been completed.
	initialScanCompleted := false

loop:
	for {
		select {
		case <-p.stop:
			return

		// TODO: We could easily add a channel here for notifications from
		// Index(), so that we immediately start a pull when new index
		// information is available. Before that though, I'd like to build a
		// repeatable benchmark of how long it takes to sync a change from
		// device A to device B, so we have something to work against.
		case <-pullTimer.C:
			if !initialScanCompleted {
				// How did we even get here?
				if debug {
					l.Debugln(p, "skip (initial)")
				}
				pullTimer.Reset(nextPullIntv)
				continue
			}

			p.model.fmut.RLock()
			curIgnores := p.model.folderIgnores[p.folder]
			p.model.fmut.RUnlock()

			if newHash := curIgnores.Hash(); newHash != prevIgnoreHash {
				// The ignore patterns have changed. We need to re-evaluate if
				// there are files we need now that were ignored before.
				if debug {
					l.Debugln(p, "ignore patterns have changed, resetting prevVer")
				}
				prevVer = 0
				prevIgnoreHash = newHash
			}

			// RemoteLocalVersion() is a fast call, doesn't touch the database.
			curVer := p.model.RemoteLocalVersion(p.folder)
			if curVer == prevVer {
				if debug {
					l.Debugln(p, "skip (curVer == prevVer)", prevVer)
				}
				pullTimer.Reset(checkPullIntv)
				continue
			}

			if debug {
				l.Debugln(p, "pulling", prevVer, curVer)
			}
			p.model.setState(p.folder, FolderSyncing)
			tries := 0
			for {
				tries++

				changed := p.pullerIteration(curIgnores)
				if debug {
					l.Debugln(p, "changed", changed)
				}

				if changed == 0 {
					// No files were changed by the puller, so we are in
					// sync. Remember the local version number and
					// schedule a resync a little bit into the future.

					if lv := p.model.RemoteLocalVersion(p.folder); lv < curVer {
						// There's a corner case where the device we needed
						// files from disconnected during the puller
						// iteration. The files will have been removed from
						// the index, so we've concluded that we don't need
						// them, but at the same time we have the local
						// version that includes those files in curVer. So we
						// catch the case that localVersion might have
						// decreased here.
						l.Debugln(p, "adjusting curVer", lv)
						curVer = lv
					}
					prevVer = curVer
					if debug {
						l.Debugln(p, "next pull in", nextPullIntv)
					}
					pullTimer.Reset(nextPullIntv)
					break
				}

				if tries > 10 {
					// We've tried a bunch of times to get in sync, but
					// we're not making it. Probably there are write
					// errors preventing us. Flag this with a warning and
					// wait a bit longer before retrying.
					l.Warnf("Folder %q isn't making progress - check logs for possible root cause. Pausing puller for %v.", p.folder, pauseIntv)
					if debug {
						l.Debugln(p, "next pull in", pauseIntv)
					}
					pullTimer.Reset(pauseIntv)
					break
				}
			}
			p.model.setState(p.folder, FolderIdle)

		// The reason for running the scanner from within the puller is that
		// this is the easiest way to make sure we are not doing both at the
		// same time.
		case <-scanTimer.C:
			if debug {
				l.Debugln(p, "rescan")
			}
			p.model.setState(p.folder, FolderScanning)
			if err := p.model.ScanFolder(p.folder); err != nil {
				p.model.cfg.InvalidateFolder(p.folder, err.Error())
				break loop
			}
			p.model.setState(p.folder, FolderIdle)
			if p.scanIntv > 0 {
				// Sleep a random time between 3/4 and 5/4 of the configured interval.
				sleepNanos := (p.scanIntv.Nanoseconds()*3 + rand.Int63n(2*p.scanIntv.Nanoseconds())) / 4
				intv := time.Duration(sleepNanos) * time.Nanosecond
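
				// For example, a configured interval of 60s gives
				// (60*3 + rand[0,120)) / 4 seconds, i.e. a next rescan
				// uniformly distributed in [45s, 75s).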

				if debug {
					l.Debugln(p, "next rescan in", intv)
				}
				scanTimer.Reset(intv)
			}
			if !initialScanCompleted {
				l.Infoln("Completed initial scan (rw) of folder", p.folder)
				initialScanCompleted = true
			}
		}
	}
}

// Stop signals the Serve loop to exit. It does not wait for the loop to
// finish.
func (p *Puller) Stop() {
	close(p.stop)
}

// String identifies the puller by folder name and pointer for debug logging.
func (p *Puller) String() string {
	return fmt.Sprintf("puller/%s@%p", p.folder, p)
}
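
// pullerIteration wires up a small pipeline: handleFile feeds copyChan, the
// copier goroutines satisfy blocks from data already on disk where possible
// and hand the rest to pullChan, the puller goroutines fetch those blocks
// from the network, and both sides deliver finished sharedPullerStates to
// finisherChan for the single finisher goroutine.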

// pullerIteration runs a single puller iteration for the given folder and
// returns the number of items that should have been synced (even those that
// might have failed). One puller iteration handles all files currently
// flagged as needed in the folder.
func (p *Puller) pullerIteration(ignores *ignore.Matcher) int {
	pullChan := make(chan pullBlockState)
	copyChan := make(chan copyBlocksState)
	finisherChan := make(chan *sharedPullerState)

	var copyWg sync.WaitGroup
	var pullWg sync.WaitGroup
	var doneWg sync.WaitGroup

	if debug {
		l.Debugln(p, "c", p.copiers, "p", p.pullers)
	}

	for i := 0; i < p.copiers; i++ {
		copyWg.Add(1)
		go func() {
			// copierRoutine finishes when copyChan is closed
			p.copierRoutine(copyChan, pullChan, finisherChan)
			copyWg.Done()
		}()
	}

	for i := 0; i < p.pullers; i++ {
		pullWg.Add(1)
		go func() {
			// pullerRoutine finishes when pullChan is closed
			p.pullerRoutine(pullChan, finisherChan)
			pullWg.Done()
		}()
	}

	doneWg.Add(1)
	// finisherRoutine finishes when finisherChan is closed
	go func() {
		p.finisherRoutine(finisherChan)
		doneWg.Done()
	}()

	p.model.fmut.RLock()
	folderFiles := p.model.folderFiles[p.folder]
	p.model.fmut.RUnlock()

	// !!!
	// WithNeed takes a database snapshot (by necessity). By the time we've
	// handled a bunch of files it might have become out of date and we might
	// be attempting to sync with an old version of a file...
	// !!!

	changed := 0

	fileDeletions := map[string]protocol.FileInfo{}
	dirDeletions := []protocol.FileInfo{}
	buckets := map[string][]protocol.FileInfo{}
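
	// fileDeletions and buckets together drive rename detection: a deleted
	// file whose block list matches a newly needed file is moved into place
	// by renameFile below instead of being deleted and re-downloaded.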

	folderFiles.WithNeed(protocol.LocalDeviceID, func(intf db.FileIntf) bool {

		// Needed items are delivered sorted lexicographically. This isn't
		// really optimal from a performance point of view - it would be
		// better if files were handled in random order, to spread the load
		// over the cluster. But it means that we can be sure that we fully
		// handle directories before the files that go inside them, which is
		// nice.

		file := intf.(protocol.FileInfo)

		if ignores.Match(file.Name) {
			// This is an ignored file. Skip it, continue iteration.
			return true
		}

		if debug {
			l.Debugln(p, "handling", file.Name)
		}

		switch {
		case file.IsDeleted():
			// A deleted file, directory or symlink
			if file.IsDirectory() {
				dirDeletions = append(dirDeletions, file)
			} else {
				fileDeletions[file.Name] = file
				df, ok := p.model.CurrentFolderFile(p.folder, file.Name)
				// The local file can already be deleted, but with a lower
				// version number, hence the deletion coming in again as part
				// of WithNeed; furthermore, the file can simply be of the
				// wrong type if we haven't yet managed to pull it.
				if ok && !df.IsDeleted() && !df.IsSymlink() && !df.IsDirectory() {
					// Put files into buckets per first hash
					key := string(df.Blocks[0].Hash)
					buckets[key] = append(buckets[key], df)
				}
			}
		case file.IsDirectory() && !file.IsSymlink():
			// A new or changed directory
			if debug {
				l.Debugln("Creating directory", file.Name)
			}
			p.handleDir(file)
		default:
			// A new or changed file or symlink. This is the only case where
			// we do stuff concurrently in the background.
			p.queue.Push(file.Name)
		}

		changed++
		return true
	})

nextFile:
	for {
		fileName, ok := p.queue.Pop()
		if !ok {
			break
		}

		f, ok := p.model.CurrentGlobalFile(p.folder, fileName)
		if !ok {
			// File is no longer in the index. Mark it as done and drop it.
			p.queue.Done(fileName)
			continue
		}

		// The local file can already be deleted, but with a lower version
		// number, hence the deletion coming in again as part of WithNeed;
		// furthermore, the file can simply be of the wrong type if the
		// global index changed while we were processing this iteration.
		if !f.IsDeleted() && !f.IsSymlink() && !f.IsDirectory() {
			key := string(f.Blocks[0].Hash)
			for i, candidate := range buckets[key] {
				if scanner.BlocksEqual(candidate.Blocks, f.Blocks) {
					// Remove the candidate from the bucket
					lidx := len(buckets[key]) - 1
					buckets[key][i] = buckets[key][lidx]
					buckets[key] = buckets[key][:lidx]

					// candidate is our current state of the file, whereas
					// the desired state with the delete bit set is in the
					// deletion map.
					desired := fileDeletions[candidate.Name]
					// Remove the pending deletion (as we perform it by renaming)
					delete(fileDeletions, candidate.Name)

					p.renameFile(desired, f)

					p.queue.Done(fileName)
					continue nextFile
				}
			}
		}

		// Not a rename or a symlink, deal with it.
		p.handleFile(f, copyChan, finisherChan)
	}

	// Signal copy and puller routines that we are done with the in data for
	// this iteration. Wait for them to finish.
	close(copyChan)
	copyWg.Wait()
	close(pullChan)
	pullWg.Wait()

	// Signal the finisher chan that there will be no more input.
	close(finisherChan)

	// Wait for the finisherChan to finish.
	doneWg.Wait()

	for _, file := range fileDeletions {
		if debug {
			l.Debugln("Deleting file", file.Name)
		}
		p.deleteFile(file)
	}

	for i := range dirDeletions {
		dir := dirDeletions[len(dirDeletions)-i-1]
		if debug {
			l.Debugln("Deleting dir", dir.Name)
		}
		p.deleteDir(dir)
	}

	return changed
}

// handleDir creates or updates the given directory
func (p *Puller) handleDir(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]interface{}{
		"folder":  p.folder,
		"item":    file.Name,
		"details": db.ToTruncated(file),
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  err,
		})
	}()

	realName := filepath.Join(p.dir, file.Name)
	mode := os.FileMode(file.Flags & 0777)
	if p.ignorePerms {
		mode = 0755
	}

	if debug {
		curFile, _ := p.model.CurrentFolderFile(p.folder, file.Name)
		l.Debugf("need dir\n\t%v\n\t%v", file, curFile)
	}

	info, err := os.Lstat(realName)
	switch {
	// There is already something under that name, but it's a file/link.
	// Most likely a file/link is getting replaced with a directory.
	// Remove the file/link and fall through to directory creation.
	case err == nil && (!info.IsDir() || info.Mode()&os.ModeSymlink != 0):
		err = osutil.InWritableDir(os.Remove, realName)
		if err != nil {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
			return
		}
		fallthrough
	// The directory doesn't exist, so we create it with the right
	// mode bits from the start.
	case err != nil && os.IsNotExist(err):
		// We declare a function that acts on only the path name, so
		// we can pass it to InWritableDir. We use a regular Mkdir and
		// not MkdirAll because the parent should already exist.
		mkdir := func(path string) error {
			return os.Mkdir(path, mode)
		}

		if err = osutil.InWritableDir(mkdir, realName); err == nil {
			p.model.updateLocal(p.folder, file)
		} else {
			l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		}
		return
	// Weird error when stat()'ing the dir. Probably won't work to do
	// anything else with it if we can't even stat() it.
	case err != nil:
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
		return
	}

	// The directory already exists, so we just correct the mode bits. (We
	// don't handle modification times on directories, because that sucks...)
	// It's OK to change mode bits on stuff within non-writable directories.

	if p.ignorePerms {
		p.model.updateLocal(p.folder, file)
	} else if err := os.Chmod(realName, mode); err == nil {
		p.model.updateLocal(p.folder, file)
	} else {
		l.Infof("Puller (folder %q, dir %q): %v", p.folder, file.Name, err)
	}
}

// deleteDir attempts to delete the given directory
func (p *Puller) deleteDir(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]interface{}{
		"folder":  p.folder,
		"item":    file.Name,
		"details": db.ToTruncated(file),
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  err,
		})
	}()

	realName := filepath.Join(p.dir, file.Name)
	// Delete any temporary files lying around in the directory
	dir, _ := os.Open(realName)
	if dir != nil {
		files, _ := dir.Readdirnames(-1)
		// Close the handle as soon as we have the names, so it doesn't leak.
		dir.Close()
		for _, file := range files {
			if defTempNamer.IsTemporary(file) {
				osutil.InWritableDir(os.Remove, filepath.Join(realName, file))
			}
		}
	}
	err = osutil.InWritableDir(os.Remove, realName)
	if err == nil || os.IsNotExist(err) {
		p.model.updateLocal(p.folder, file)
	} else {
		l.Infof("Puller (folder %q, dir %q): delete: %v", p.folder, file.Name, err)
	}
}

// deleteFile attempts to delete the given file
func (p *Puller) deleteFile(file protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]interface{}{
		"folder":  p.folder,
		"item":    file.Name,
		"details": db.ToTruncated(file),
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  err,
		})
	}()

	realName := filepath.Join(p.dir, file.Name)

	if p.versioner != nil {
		err = osutil.InWritableDir(p.versioner.Archive, realName)
	} else {
		err = osutil.InWritableDir(os.Remove, realName)
	}

	if err != nil && !os.IsNotExist(err) {
		l.Infof("Puller (folder %q, file %q): delete: %v", p.folder, file.Name, err)
	} else {
		p.model.updateLocal(p.folder, file)
	}
}

// renameFile attempts to rename an existing file to a destination
// and set the right attributes on it.
func (p *Puller) renameFile(source, target protocol.FileInfo) {
	var err error
	events.Default.Log(events.ItemStarted, map[string]interface{}{
		"folder":  p.folder,
		"item":    source.Name,
		"details": db.ToTruncated(source),
	})
	events.Default.Log(events.ItemStarted, map[string]interface{}{
		"folder":  p.folder,
		"item":    target.Name,
		"details": db.ToTruncated(source),
	})
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   source.Name,
			"error":  err,
		})
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   target.Name,
			"error":  err,
		})
	}()

	if debug {
		l.Debugln(p, "taking rename shortcut", source.Name, "->", target.Name)
	}

	from := filepath.Join(p.dir, source.Name)
	to := filepath.Join(p.dir, target.Name)

	if p.versioner != nil {
		err = osutil.Copy(from, to)
		if err == nil {
			err = osutil.InWritableDir(p.versioner.Archive, from)
		}
	} else {
		err = osutil.TryRename(from, to)
	}

	if err != nil {
		l.Infof("Puller (folder %q, file %q): rename from %q: %v", p.folder, target.Name, source.Name, err)
		return
	}

	// Fix-up the metadata, and update the local index of the target file
	err = p.shortcutFile(target)
	if err != nil {
		l.Infof("Puller (folder %q, file %q): rename from %q metadata: %v", p.folder, target.Name, source.Name, err)
		return
	}

	// Source file already has the delete bit set.
	// Because we got rid of the file (by renaming it), we just need to update
	// the index, and we're done with it.
	p.model.updateLocal(p.folder, source)
}

// handleFile queues the copies and pulls as necessary for a single new or
// changed file.
func (p *Puller) handleFile(file protocol.FileInfo, copyChan chan<- copyBlocksState, finisherChan chan<- *sharedPullerState) {
	events.Default.Log(events.ItemStarted, map[string]interface{}{
		"folder":  p.folder,
		"item":    file.Name,
		"details": db.ToTruncated(file),
	})

	curFile, ok := p.model.CurrentFolderFile(p.folder, file.Name)

	if ok && len(curFile.Blocks) == len(file.Blocks) && scanner.BlocksEqual(curFile.Blocks, file.Blocks) {
		// We are supposed to copy the entire file, and then fetch nothing. We
		// are only updating metadata, so we don't actually *need* to make the
		// copy.
		if debug {
			l.Debugln(p, "taking shortcut on", file.Name)
		}
		p.queue.Done(file.Name)
		var err error
		if file.IsSymlink() {
			err = p.shortcutSymlink(file)
		} else {
			err = p.shortcutFile(file)
		}
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   file.Name,
			"error":  err,
		})
		return
	}

	scanner.PopulateOffsets(file.Blocks)

	// Figure out the absolute filenames we need once and for all
	tempName := filepath.Join(p.dir, defTempNamer.TempName(file.Name))
	realName := filepath.Join(p.dir, file.Name)

	reused := 0
	var blocks []protocol.BlockInfo

	// Check for an old temporary file which might have some blocks we could
	// reuse.
	tempBlocks, err := scanner.HashFile(tempName, protocol.BlockSize)
	if err == nil {
		// Check for any reusable blocks in the temp file
		tempCopyBlocks, _ := scanner.BlockDiff(tempBlocks, file.Blocks)

		// block.String() returns a string unique to the block
		existingBlocks := make(map[string]bool, len(tempCopyBlocks))
		for _, block := range tempCopyBlocks {
			existingBlocks[block.String()] = true
		}

		// Since the blocks are already there, we don't need to get them.
		for _, block := range file.Blocks {
			_, ok := existingBlocks[block.String()]
			if !ok {
				blocks = append(blocks, block)
			}
		}

		// The sharedpullerstate will know which flags to use when opening
		// the temp file, depending on whether we are reusing any blocks or
		// not.
		reused = len(file.Blocks) - len(blocks)
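
		// For example, a 10-block file with 4 of its blocks already valid in
		// the old temp file gives len(blocks) == 6 and reused == 4.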

		if reused == 0 {
			// Otherwise, discard the file ourselves in order for the
			// sharedpuller not to panic when it fails to exclusively create
			// a file which already exists
			os.Remove(tempName)
		}
	} else {
		blocks = file.Blocks
	}

	s := sharedPullerState{
		file:       file,
		folder:     p.folder,
		tempName:   tempName,
		realName:   realName,
		copyTotal:  len(blocks),
		copyNeeded: len(blocks),
		reused:     reused,
	}

	if debug {
		l.Debugf("%v need file %s; copy %d, reused %v", p, file.Name, len(blocks), reused)
	}

	cs := copyBlocksState{
		sharedPullerState: &s,
		blocks:            blocks,
	}
	copyChan <- cs
}

// shortcutFile sets file mode and modification time, when that's the only
// thing that has changed.
func (p *Puller) shortcutFile(file protocol.FileInfo) (err error) {
	realName := filepath.Join(p.dir, file.Name)
	if !p.ignorePerms {
		err = os.Chmod(realName, os.FileMode(file.Flags&0777))
		if err != nil {
			l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
			return
		}
	}

	t := time.Unix(file.Modified, 0)
	err = os.Chtimes(realName, t, t)
	if err != nil {
		if p.lenientMtimes {
			// We accept the failure with a warning here and allow the sync to
			// continue. We'll sync the new mtime back to the other devices later.
			// If they have the same problem & setting, we might never get in
			// sync.
			l.Infof("Puller (folder %q, file %q): shortcut: %v (continuing anyway as requested)", p.folder, file.Name, err)
			// Clear the error only after logging it, so the caller sees
			// success but the log still shows what went wrong.
			err = nil
		} else {
			l.Infof("Puller (folder %q, file %q): shortcut: %v", p.folder, file.Name, err)
			return
		}
	}

	p.model.updateLocal(p.folder, file)
	return
}

// shortcutSymlink changes the symlink's type if necessary.
func (p *Puller) shortcutSymlink(file protocol.FileInfo) (err error) {
	err = symlinks.ChangeType(filepath.Join(p.dir, file.Name), file.Flags)
	if err == nil {
		p.model.updateLocal(p.folder, file)
	} else {
		l.Infof("Puller (folder %q, file %q): symlink shortcut: %v", p.folder, file.Name, err)
	}
	return
}
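
// The copier below consults the model's block finder, which maps a block
// hash to (folder, file, index) locations already on disk across the shared
// folders; only blocks that cannot be found and verified locally are
// forwarded to the network pullers via pullChan.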

// copierRoutine reads copierStates until the in channel closes and performs
// the relevant copies when possible, passing blocks it cannot satisfy to the
// puller routine.
func (p *Puller) copierRoutine(in <-chan copyBlocksState, pullChan chan<- pullBlockState, out chan<- *sharedPullerState) {
	buf := make([]byte, protocol.BlockSize)

	for state := range in {
		if p.progressEmitter != nil {
			p.progressEmitter.Register(state.sharedPullerState)
		}

		dstFd, err := state.tempFile()
		if err != nil {
			// Nothing more to do for this failed file (the error was logged
			// when it happened)
			out <- state.sharedPullerState
			continue
		}

		folderRoots := make(map[string]string)
		p.model.fmut.RLock()
		for folder, cfg := range p.model.folderCfgs {
			folderRoots[folder] = cfg.Path
		}
		p.model.fmut.RUnlock()

		for _, block := range state.blocks {
			buf = buf[:int(block.Size)]
			found := p.model.finder.Iterate(block.Hash, func(folder, file string, index int32) bool {
				fd, err := os.Open(filepath.Join(folderRoots[folder], file))
				if err != nil {
					return false
				}

				_, err = fd.ReadAt(buf, protocol.BlockSize*int64(index))
				fd.Close()
				if err != nil {
					return false
				}

				hash, err := scanner.VerifyBuffer(buf, block)
				if err != nil {
					if hash != nil {
						if debug {
							l.Debugf("Finder block mismatch in %s:%s:%d expected %q got %q", folder, file, index, block.Hash, hash)
						}
						err = p.model.finder.Fix(folder, file, index, block.Hash, hash)
						if err != nil {
							l.Warnln("finder fix:", err)
						}
					} else if debug {
						l.Debugln("Finder failed to verify buffer", err)
					}
					return false
				}

				_, err = dstFd.WriteAt(buf, block.Offset)
				if err != nil {
					state.fail("dst write", err)
				}
				if file == state.file.Name {
					state.copiedFromOrigin()
				}
				return true
			})

			if state.failed() != nil {
				break
			}

			if !found {
				state.pullStarted()
				ps := pullBlockState{
					sharedPullerState: state.sharedPullerState,
					block:             block,
				}
				pullChan <- ps
			} else {
				state.copyDone()
			}
		}
		out <- state.sharedPullerState
	}
}

// pullerRoutine reads pullBlockStates until the in channel closes, fetching
// each block from the network and verifying it against the expected hash.
func (p *Puller) pullerRoutine(in <-chan pullBlockState, out chan<- *sharedPullerState) {
	for state := range in {
		if state.failed() != nil {
			continue
		}

		// Get an fd to the temporary file. Technically we don't need it until
		// after fetching the block, but if we run into an error here there is
		// no point in issuing the request to the network.
		fd, err := state.tempFile()
		if err != nil {
			continue
		}

		var lastError error
		potentialDevices := p.model.availability(p.folder, state.file.Name)
		for {
			// Select the least busy device to pull the block from. If we found no
			// feasible device at all, fail the block (and in the long run, the
			// file).
			selected := activity.leastBusy(potentialDevices)
			if selected == (protocol.DeviceID{}) {
				if lastError != nil {
					state.fail("pull", lastError)
				} else {
					state.fail("pull", errNoDevice)
				}
				break
			}

			potentialDevices = removeDevice(potentialDevices, selected)
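
			// Each candidate is removed from potentialDevices as soon as it
			// is selected, so every device is tried at most once per block
			// and the loop always terminates.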

			// Fetch the block, while marking the selected device as in use so that
			// leastBusy can select another device when someone else asks.
			activity.using(selected)
			// Plain assignment (not :=) so that lastError is visible to the
			// device-exhaustion check at the top of the loop.
			var buf []byte
			buf, lastError = p.model.requestGlobal(selected, p.folder, state.file.Name, state.block.Offset, int(state.block.Size), state.block.Hash)
			activity.done(selected)
			if lastError != nil {
				continue
			}

			// Verify that the received block matches the desired hash, if not
			// try pulling it from another device.
			_, lastError = scanner.VerifyBuffer(buf, state.block)
			if lastError != nil {
				continue
			}

			// Save the block data we got from the cluster
			_, err = fd.WriteAt(buf, state.block.Offset)
			if err != nil {
				state.fail("save", err)
			} else {
				state.pullDone()
			}
			break
		}
		out <- state.sharedPullerState
	}
}

// performFinish sets the final attributes on a completely pulled temporary
// file, moves it into place and records it in the index.
func (p *Puller) performFinish(state *sharedPullerState) {
	var err error
	defer func() {
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"folder": p.folder,
			"item":   state.file.Name,
			"error":  err,
		})
	}()
	// Set the correct permission bits on the new file
	if !p.ignorePerms {
		err = os.Chmod(state.tempName, os.FileMode(state.file.Flags&0777))
		if err != nil {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// Set the correct timestamp on the new file
	t := time.Unix(state.file.Modified, 0)
	err = os.Chtimes(state.tempName, t, t)
	if err != nil {
		if p.lenientMtimes {
			// We accept the failure with a warning here and allow the sync to
			// continue. We'll sync the new mtime back to the other devices later.
			// If they have the same problem & setting, we might never get in
			// sync.
			l.Infof("Puller (folder %q, file %q): final: %v (continuing anyway as requested)", p.folder, state.file.Name, err)
		} else {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// If we should use versioning, let the versioner archive the old
	// file before we replace it. Archiving a non-existent file is not
	// an error.
	if p.versioner != nil {
		err = p.versioner.Archive(state.realName)
		if err != nil {
			l.Warnln("puller: final:", err)
			return
		}
	}

	// If the target path is a symlink or a directory, we cannot copy
	// over it, hence remove it before proceeding.
	stat, err := os.Lstat(state.realName)
	if err == nil && (stat.IsDir() || stat.Mode()&os.ModeSymlink != 0) {
		osutil.InWritableDir(os.Remove, state.realName)
	}
	// Replace the original content with the new one
	err = osutil.Rename(state.tempName, state.realName)
	if err != nil {
		l.Warnln("puller: final:", err)
		return
	}

	// If it's a symlink, the target of the symlink is inside the file.
	if state.file.IsSymlink() {
		content, err := ioutil.ReadFile(state.realName)
		if err != nil {
			l.Warnln("puller: final: reading symlink:", err)
			return
		}

		// Remove the file, and replace it with a symlink.
		err = osutil.InWritableDir(func(path string) error {
			os.Remove(path)
			return symlinks.Create(path, string(content), state.file.Flags)
		}, state.realName)
		if err != nil {
			l.Warnln("puller: final: creating symlink:", err)
			return
		}
	}

	// Record the updated file in the index
	p.model.updateLocal(p.folder, state.file)
}

// finisherRoutine reads finished sharedPullerStates until the in channel
// closes, finalising the files and updating the queue and progress emitter.
func (p *Puller) finisherRoutine(in <-chan *sharedPullerState) {
	for state := range in {
		if closed, err := state.finalClose(); closed {
			if debug {
				l.Debugln(p, "closing", state.file.Name)
			}
			if err != nil {
				l.Warnln("puller: final:", err)
				continue
			}

			p.queue.Done(state.file.Name)
			if state.failed() == nil {
				p.performFinish(state)
			} else {
				events.Default.Log(events.ItemFinished, map[string]interface{}{
					"folder": p.folder,
					"item":   state.file.Name,
					"error":  state.failed(),
				})
			}
			p.model.receivedFile(p.folder, state.file.Name)
			if p.progressEmitter != nil {
				p.progressEmitter.Deregister(state)
			}
		}
	}
}
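
// A sketch of the intended use of the two methods below (the caller lives
// elsewhere, e.g. the model acting on an API request): look up the folder's
// Puller and call BringToFront(name) to prioritise a file, while Jobs()
// returns the queued and in-progress file names such a caller could display.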

// BringToFront moves the given filename to the front of the job queue.
func (p *Puller) BringToFront(filename string) {
	p.queue.BringToFront(filename)
}

// Jobs returns the lists of in-progress and queued jobs from the job queue.
func (p *Puller) Jobs() ([]string, []string) {
	return p.queue.Jobs()
}

// invalidateFolder marks the folder with the given ID as invalid in the
// configuration, using the error text as the reason.
func invalidateFolder(cfg *config.Configuration, folderID string, err error) {
	for i := range cfg.Folders {
		folder := &cfg.Folders[i]
		if folder.ID == folderID {
			folder.Invalid = err.Error()
			return
		}
	}
}

// removeDevice returns the device list with the given device removed,
// swapping in the last element, so the order is not preserved.
func removeDevice(devices []protocol.DeviceID, device protocol.DeviceID) []protocol.DeviceID {
	for i := range devices {
		if devices[i] == device {
			devices[i] = devices[len(devices)-1]
			return devices[:len(devices)-1]
		}
	}
	return devices
}