The test is quite odd and specific, but it does reproduce the issue that caused #9677, so I propose adding it as a simple regression test for the basic scenario. The new fakefs option might also come in handy for other scenarios where you want to quickly test behaviour on a filesystem without nanosecond precision, without actually needing access to one.
commit 29f7510f5a (parent a7f9ed4a80)
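To illustrate the new knob, here is a minimal sketch (not part of the commit) of how a test in this package might opt into second-precision mtimes; the test name and the MkdirAll call are made up, while NewFilesystem, FilesystemTypeFake and the timeprecisionsecond parameter come from the diff below:

    package fs

    import "testing"

    // Hypothetical test sketching the new fakefs URI option from this commit:
    // "?timeprecisionsecond=true" makes the fake filesystem truncate mtimes to
    // whole seconds, like a real filesystem without nanosecond precision.
    func TestSecondPrecisionSketch(t *testing.T) {
        ffs := NewFilesystem(FilesystemTypeFake, t.Name()+"?timeprecisionsecond=true")
        if err := ffs.MkdirAll("dir", 0o755); err != nil {
            t.Fatal(err)
        }
    }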
@@ -54,18 +54,20 @@ const randomBlockShift = 14 // 128k
 //   - latency=d to set the amount of time each "disk" operation takes, where d is time.ParseDuration format
 //   - content=true to save actual file contents instead of generating pseudorandomly; n.b. memory usage
 //   - nostfolder=true skip the creation of .stfolder
+//   - timeprecisionsecond=true Modification times are stored with only second precision
 //
 //   - Two fakeFS:s pointing at the same root path see the same files.
 type fakeFS struct {
-    counters    fakeFSCounters
-    uri         string
-    mut         sync.Mutex
-    root        *fakeEntry
-    insens      bool
-    withContent bool
-    latency     time.Duration
-    userCache   *userCache
-    groupCache  *groupCache
+    counters            fakeFSCounters
+    uri                 string
+    mut                 sync.Mutex
+    root                *fakeEntry
+    insens              bool
+    withContent         bool
+    timePrecisionSecond bool
+    latency             time.Duration
+    userCache           *userCache
+    groupCache          *groupCache
 }

 type fakeFSCounters struct {
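Purely as an illustration of how the URI options documented above combine (a fragment as it might appear inside a test in this package; root name and values are made up):

    // Hypothetical: combine several of the documented fakefs options in one URI.
    uri := "myroot?latency=10ms&nostfolder=true&timeprecisionsecond=true"
    ffs := NewFilesystem(FilesystemTypeFake, uri)
    _ = ffs // use like any other Filesystem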
@@ -126,6 +128,7 @@ func newFakeFilesystem(rootURI string, _ ...Option) *fakeFS {
     fs.insens = params.Get("insens") == "true"
     fs.withContent = params.Get("content") == "true"
     nostfolder := params.Get("nostfolder") == "true"
+    fs.timePrecisionSecond = params.Get("timeprecisionsecond") == "true"

     if sizeavg == 0 {
         sizeavg = 1 << 20
@@ -259,6 +262,9 @@ func (fs *fakeFS) Chtimes(name string, _ time.Time, mtime time.Time) error {
     if entry == nil {
         return os.ErrNotExist
     }
+    if fs.timePrecisionSecond {
+        mtime = mtime.Truncate(time.Second)
+    }
     entry.mtime = mtime
     return nil
 }
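For reference, the effect of that truncation can be shown with the standard library alone (an independent sketch, not code from the commit); it is also why the regression test below picks a timestamp with a non-zero nanosecond part:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Truncate(time.Second) drops the sub-second part of a timestamp, which is
        // what the fakefs now does to mtimes when timeprecisionsecond=true is set.
        t1 := time.Unix(1723491493, 123456789) // nanosecond component set
        t2 := t1.Truncate(time.Second)
        fmt.Println(t1.Equal(t2)) // false: second precision loses the nanoseconds
    }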
@@ -7,8 +7,10 @@
 package fs

 import (
+    "fmt"
     "path/filepath"
     "testing"
+    "time"

     "github.com/syncthing/syncthing/lib/build"
 )
@@ -171,3 +173,64 @@ func TestIsParent(t *testing.T) {
         }
     }
 }
+
+// Reproduces issue 9677:
+// The combination of caching the entire case FS and moving the case FS to be
+// the outermost layer of the FS led to the mtime FS disappearing. This is
+// because in many places we intentionally create the filesystem without access
+// to the DB and thus without the mtime FS layer. With the case FS layer
+// outside, all the inner layers are also cached - notably without an mtime FS
+// layer. Later, when we do try to create an FS with DB/mtime FS, we still get
+// the cached FS without mtime FS.
+func TestRepro9677MissingMtimeFS(t *testing.T) {
+    mtimeDB := make(mapStore)
+    name := "Testfile"
+    nameLower := UnicodeLowercaseNormalized(name)
+    testTime := time.Unix(1723491493, 123456789)
+
+    // Create a file with an mtime FS entry
+    firstFS := NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{}, NewMtimeOption(mtimeDB))
+
+    // Create a file, set its mtime and check that we get the expected mtime when stat-ing.
+    file, err := firstFS.Create(name)
+    if err != nil {
+        t.Fatal(err)
+    }
+    file.Close()
+    err = firstFS.Chtimes(name, testTime, testTime)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    checkMtime := func(fs Filesystem) {
+        t.Helper()
+        info, err := fs.Lstat(name)
+        if err != nil {
+            t.Fatal(err)
+        }
+        if !info.ModTime().Equal(testTime) {
+            t.Errorf("Expected mtime %v for %v, got %v", testTime, name, info.ModTime())
+        }
+        info, err = fs.Lstat(nameLower)
+        if !IsErrCaseConflict(err) {
+            t.Errorf("Expected case-conflict error, got %v", err)
+        }
+    }
+
+    checkMtime(firstFS)
+
+    // Now syncthing gets upgraded (or even just restarted), which resets the
+    // case FS registry as it lives in memory.
+    globalCaseFilesystemRegistry = caseFilesystemRegistry{fss: make(map[fskey]*caseFilesystem)}
+
+    // This time we first create some filesystem without a database and thus no
+    // mtime-FS, which is used in various places outside of the folder code. We
+    // aren't actually going to do anything; this just adds an entry to the
+    // caseFS cache. And that's the crucial bit: In the broken case this test is
+    // reproducing, it will add the FS without mtime-FS, so all future FSes will
+    // be without mtime, even if requested:
+    NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{})
+
+    newFS := NewFilesystem(FilesystemTypeFake, fmt.Sprintf("%v?insens=true&timeprecisionsecond=true", t.Name()), &OptionDetectCaseConflicts{}, NewMtimeOption(mtimeDB))
+    checkMtime(newFS)
+}
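To make the failure mode described in the test comments concrete, here is a generic, self-contained sketch (not Syncthing's actual code) of a cache keyed only by root path, which hands back whatever layer stack happened to be built first:

    package main

    import "fmt"

    // layeredFS stands in for a wrapped filesystem; the layer names are made up.
    type layeredFS struct{ layers []string }

    var cache = map[string]*layeredFS{}

    // newFS builds a filesystem with the requested layers, but returns a cached
    // instance if one already exists for the root - ignoring the layers asked for.
    func newFS(root string, layers ...string) *layeredFS {
        if cached, ok := cache[root]; ok {
            return cached // bug: the later, different layer request is ignored
        }
        lfs := &layeredFS{layers: layers}
        cache[root] = lfs
        return lfs
    }

    func main() {
        newFS("folder1", "case")                       // first built without "mtime"
        withMtime := newFS("folder1", "case", "mtime") // later requested with "mtime"...
        fmt.Println(withMtime.layers)                  // [case] - the mtime layer is silently gone
    }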