lib/model: Use semaphore to limit concurrent folder writes (fixes #6541) (#6573)

Jakob Borg 2020-04-27 00:13:18 +02:00 committed by GitHub
parent 037934ec74
commit 6c73617974
3 changed files with 17 additions and 5 deletions
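The fix addresses #6541, where many copier/puller goroutines issuing destination writes at once could overload slow storage. The diff below adds a per-folder byteSemaphore, sized by a new MaxConcurrentWrites option (default 2), and routes every destination WriteAt through it. As a rough sketch of the technique only — not Syncthing's actual byteSemaphore — a context-aware concurrency limiter can be built from a buffered channel; all names here are hypothetical:

package main

import (
	"context"
	"fmt"
	"sync"
)

// writeLimiter is a counting semaphore built on a buffered channel; each
// slot in the channel represents one permitted concurrent write.
type writeLimiter struct{ slots chan struct{} }

func newWriteLimiter(max int) *writeLimiter {
	return &writeLimiter{slots: make(chan struct{}, max)}
}

// acquire blocks until a slot is free, or returns the context error early
// when ctx is cancelled (e.g. the folder is paused or stopped).
func (l *writeLimiter) acquire(ctx context.Context) error {
	select {
	case l.slots <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// release frees a slot taken by acquire.
func (l *writeLimiter) release() { <-l.slots }

func main() {
	lim := newWriteLimiter(2) // at most two writes in flight, like the default
	ctx := context.Background()

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			if err := lim.acquire(ctx); err != nil {
				return // give up instead of blocking forever
			}
			defer lim.release()
			fmt.Println("writing block", n) // stand-in for fd.WriteAt(...)
		}(i)
	}
	wg.Wait()
}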

@@ -57,6 +57,7 @@ type FolderConfiguration struct {
 	MarkerName              string `xml:"markerName" json:"markerName"`
 	CopyOwnershipFromParent bool   `xml:"copyOwnershipFromParent" json:"copyOwnershipFromParent"`
 	RawModTimeWindowS       int    `xml:"modTimeWindowS" json:"modTimeWindowS"`
+	MaxConcurrentWrites     int    `xml:"maxConcurrentWrites" json:"maxConcurrentWrites" default:"2"`

 	cachedFilesystem    fs.Filesystem
 	cachedModTimeWindow time.Duration
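The only configuration change is this new per-folder field. The xml and json struct tags determine how the knob appears in config.xml and in the REST API, while the default:"2" tag is presumably picked up by Syncthing's own config defaulting rather than by encoding/xml. A minimal, self-contained sketch of how such a tag round-trips through encoding/xml (the trimmed struct and element layout below are assumptions for illustration only):

package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed stand-in for the folder configuration, illustration only.
type folderCfg struct {
	MaxConcurrentWrites int `xml:"maxConcurrentWrites" json:"maxConcurrentWrites"`
}

func main() {
	// A child element like this inside a <folder> block would set the limit
	// that later sizes the folder's write semaphore.
	raw := []byte(`<folder><maxConcurrentWrites>4</maxConcurrentWrites></folder>`)

	var cfg folderCfg
	if err := xml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.MaxConcurrentWrites) // 4
}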

@@ -9,6 +9,7 @@ package model
 import (
 	"bytes"
 	"fmt"
+	"io"
 	"path/filepath"
 	"runtime"
 	"sort"
@@ -103,6 +104,7 @@ type sendReceiveFolder struct {
 	versioner versioner.Versioner
 	queue     *jobQueue
+	writeLimiter *byteSemaphore

 	pullErrors    map[string]string // errors for most recent/current iteration
 	oldPullErrors map[string]string // errors from previous iterations for log filtering only
@ -115,6 +117,7 @@ func newSendReceiveFolder(model *model, fset *db.FileSet, ignores *ignore.Matche
fs: fs, fs: fs,
versioner: ver, versioner: ver,
queue: newJobQueue(), queue: newJobQueue(),
writeLimiter: newByteSemaphore(cfg.MaxConcurrentWrites),
pullErrorsMut: sync.NewMutex(), pullErrorsMut: sync.NewMutex(),
} }
f.folder.puller = f f.folder.puller = f
@@ -1261,10 +1264,9 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
 					return true
 				}

-				_, err = dstFd.WriteAt(buf, block.Offset)
+				_, err = f.limitedWriteAt(dstFd, buf, block.Offset)
 				if err != nil {
 					state.fail(errors.Wrap(err, "dst write"))
 				}
 				if offset == block.Offset {
 					state.copiedFromOrigin()
@@ -1297,7 +1299,7 @@ func (f *sendReceiveFolder) copierRoutine(in <-chan copyBlocksState, pullChan ch
 				return false
 			}

-			_, err = dstFd.WriteAt(buf, block.Offset)
+			_, err = f.limitedWriteAt(dstFd, buf, block.Offset)
 			if err != nil {
 				state.fail(errors.Wrap(err, "dst write"))
 			}
@ -1446,7 +1448,7 @@ func (f *sendReceiveFolder) pullBlock(state pullBlockState, out chan<- *sharedPu
} }
// Save the block data we got from the cluster // Save the block data we got from the cluster
_, err = fd.WriteAt(buf, state.block.Offset) _, err = f.limitedWriteAt(fd, buf, state.block.Offset)
if err != nil { if err != nil {
state.fail(errors.Wrap(err, "save")) state.fail(errors.Wrap(err, "save"))
} else { } else {
@@ -1936,6 +1938,14 @@ func (f *sendReceiveFolder) inWritableDir(fn func(string) error, path string) er
 	return inWritableDir(fn, f.fs, path, f.IgnorePerms)
 }

+func (f *sendReceiveFolder) limitedWriteAt(fd io.WriterAt, data []byte, offset int64) (int, error) {
+	if err := f.writeLimiter.takeWithContext(f.ctx, 1); err != nil {
+		return 0, err
+	}
+	defer f.writeLimiter.give(1)
+	return fd.WriteAt(data, offset)
+}
+
 // A []FileError is sent as part of an event and will be JSON serialized.
 type FileError struct {
 	Path string `json:"path"`
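Two details of limitedWriteAt are worth noting. It takes a single unit per call regardless of buffer size, so although it reuses the byteSemaphore type it bounds the number of concurrent writes rather than bytes in flight, matching the option's name. And because the slot is acquired with takeWithContext(f.ctx, 1), a write queued behind the limit presumably unblocks with an error when the folder's context is cancelled, while the deferred give(1) releases the slot on every return path.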

@@ -107,6 +107,7 @@ func setupSendReceiveFolder(files ...protocol.FileInfo) (*model, *sendReceiveFol
 		},
 		queue:         newJobQueue(),
+		writeLimiter:  newByteSemaphore(2),
 		pullErrors:    make(map[string]string),
 		pullErrorsMut: sync.NewMutex(),
 	}