Don't get stuck in idle while syncing from a disconnecting node

Jakob Borg 2014-09-28 07:56:05 +02:00
parent 6f750582dd
commit b8ed135183
2 changed files with 27 additions and 1 deletion

@@ -65,6 +65,10 @@ func (s *Set) Replace(node protocol.NodeID, fs []protocol.FileInfo) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.localVersion[node] = ldbReplace(s.db, []byte(s.repo), node[:], fs)
	if len(fs) == 0 {
		// Reset the local version if all files were removed.
		s.localVersion[node] = 0
	}
}

func (s *Set) ReplaceWithDelete(node protocol.NodeID, fs []protocol.FileInfo) {

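To make the effect of the reset concrete, here is a minimal sketch in the same spirit; fakeSet, replace, and the version numbers are hypothetical stand-ins for the real Set and ldbReplace, assuming localVersion tracks the latest version received per node.

package main

import "fmt"

// fakeSet is a hypothetical, stripped-down stand-in for the real Set:
// it only tracks the latest local version per node.
type fakeSet struct {
	localVersion map[string]uint64
}

// replace mimics the fixed behavior above: record the new version, but
// reset it to zero when the node's index is now empty, so the aggregate
// version can be seen to decrease.
func (s *fakeSet) replace(node string, files []string, newVersion uint64) {
	s.localVersion[node] = newVersion
	if len(files) == 0 {
		// Reset the local version if all files were removed.
		s.localVersion[node] = 0
	}
}

func main() {
	s := &fakeSet{localVersion: make(map[string]uint64)}

	s.replace("node-a", []string{"foo", "bar"}, 42)
	fmt.Println(s.localVersion["node-a"]) // 42

	// The node disconnects and its index is dropped.
	s.replace("node-a", nil, 42)
	fmt.Println(s.localVersion["node-a"]) // 0, not stuck at 42
}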

@@ -119,6 +119,19 @@ loop:
			// No files were changed by the puller, so we are in
			// sync. Remember the local version number and
			// schedule a resync a little bit into the future.
			if lv := p.model.RemoteLocalVersion(p.repo); lv < curVer {
				// There's a corner case where the node we needed
				// files from disconnected during the puller
				// iteration. The files will have been removed from
				// the index, so we've concluded that we don't need
				// them, but at the same time we have the local
				// version that includes those files in curVer. So we
				// catch the case that localVersion might have
				// decreased here.
				l.Debugln(p, "adjusting curVer", lv)
				curVer = lv
			}

			prevVer = curVer
			pullTimer.Reset(nextPullIntv)
			break
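The adjustment itself is a simple compare-and-follow on an aggregate version counter. Here is a sketch of the idea, with remoteLocalVersion as a hypothetical stand-in for p.model.RemoteLocalVersion; the aggregation is shown as a sum over per-node versions, which is an assumption for illustration.

package main

import "fmt"

// remoteLocalVersion is a hypothetical stand-in for
// p.model.RemoteLocalVersion(p.repo). It aggregates per-node versions as
// a sum; the exact aggregation is an assumption here, the point is only
// that a node resetting to zero makes the aggregate go backwards.
func remoteLocalVersion(versions map[string]uint64) uint64 {
	var total uint64
	for _, v := range versions {
		total += v
	}
	return total
}

func main() {
	versions := map[string]uint64{"node-a": 40, "node-b": 42}
	curVer := remoteLocalVersion(versions) // 82

	// node-b disconnects mid-iteration; the Replace fix above resets its
	// tracked version to zero when its index entries are removed.
	versions["node-b"] = 0

	// The new check in the pull loop: follow the aggregate down instead
	// of idling until it climbs past a value that no longer exists.
	if lv := remoteLocalVersion(versions); lv < curVer {
		fmt.Println("adjusting curVer", lv)
		curVer = lv
	}
	fmt.Println(curVer) // 40
}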
@@ -216,6 +229,14 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
	changed := 0

	files.WithNeed(protocol.LocalNodeID, func(intf protocol.FileIntf) bool {
		// Needed items are delivered sorted lexicographically. This isn't
		// really optimal from a performance point of view - it would be
		// better if files were handled in random order, to spread the load
		// over the cluster. But it means that we can be sure that we fully
		// handle directories before the files that go inside them, which is
		// nice.
		file := intf.(protocol.FileInfo)

		events.Default.Log(events.ItemStarted, map[string]string{
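The ordering guarantee in that comment follows from paths being prefix-ordered: a directory's path is a strict prefix of every path inside it, and a strict prefix always sorts first. A quick, self-contained illustration:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Slash-separated paths, deliberately shuffled.
	needed := []string{
		"photos/2014/img1.jpg",
		"photos",
		"docs/readme.txt",
		"docs",
		"photos/2014",
	}
	sort.Strings(needed)
	fmt.Println(needed)
	// Output: [docs docs/readme.txt photos photos/2014 photos/2014/img1.jpg]
	// Every directory sorts ahead of its own contents, so processing in
	// this order creates parents before children.
}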
@ -238,7 +259,8 @@ func (p *Puller) pullerIteration(ncopiers, npullers, nfinishers int) int {
// A deleted file // A deleted file
p.deleteFile(file) p.deleteFile(file)
default: default:
// A new or changed file // A new or changed file. This is the only case where we do stuff
// in the background; the other three are done synchronously.
p.handleFile(file, copyChan, pullChan) p.handleFile(file, copyChan, pullChan)
} }
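Schematically, the dispatch looks like the sketch below; item, the channel, and the worker are hypothetical reductions of protocol.FileInfo and the copier/puller routines, meant only to show that deletes complete inline while new or changed files are handed to background workers.

package main

import (
	"fmt"
	"sync"
)

// item is a hypothetical stand-in for protocol.FileInfo, reduced to the
// two properties the dispatch cares about.
type item struct {
	name    string
	deleted bool
}

func main() {
	work := make(chan item) // plays the role of copyChan/pullChan
	var wg sync.WaitGroup

	// A background worker, standing in for the copier/puller routines.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for it := range work {
			fmt.Println("pulled in background:", it.name)
		}
	}()

	for _, it := range []item{{"gone.txt", true}, {"new.txt", false}} {
		switch {
		case it.deleted:
			// A deleted file: handled synchronously, right here.
			fmt.Println("deleted synchronously:", it.name)
		default:
			// A new or changed file: the only case handed off to the
			// background workers.
			work <- it
		}
	}
	close(work)
	wg.Wait()
}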