780fb3bac1
This changes the cache to cache fewer things while retaining the required efficiency for our walk use case, so it uses less memory. Specifically, instead of keeping result and child caches for each path level, we keep only a single cached child. In practice our operations are depth-first, or almost depth-first, so we retain the same hit ratio with a smaller cache.

I improved the benchmark so that it counts the Lstat and DirNames operations performed; these do not change significantly. The amount of allocated memory is reduced by 20% and the walk itself is slightly faster.

This also removes the clear based on the number of cached names (as that is no longer a thing) and the timer-based clear (which was unused). This means we'll retain the last cache state forever until it's cleared by a write operation, but we did that before too, and that state is now a lot smaller.

The overhead compared to not using a casefs, for our typical "double walk" (walk the tree, then stat everything again), is 2x the DirNames calls we would otherwise make, and no overhead on the stats (unchanged from the old implementation).

```
name                          old time/op         new time/op         delta
WalkCaseFakeFS100k/rawfs-8    306ms ± 1%          305ms ± 2%          ~        (p=0.182 n=9+10)
WalkCaseFakeFS100k/casefs-8   579ms ± 5%          557ms ± 1%          -3.77%   (p=0.000 n=10+10)

name                          old B/entry         new B/entry         delta
WalkCaseFakeFS100k/rawfs-8    590 ± 0%            590 ± 0%            ~        (all equal)
WalkCaseFakeFS100k/casefs-8   1.09k ± 0%          0.87k ± 0%          -19.98%  (p=0.000 n=10+10)

name                          old DirNames/entry  new DirNames/entry  delta
WalkCaseFakeFS100k/rawfs-8    0.51 ± 0%           0.51 ± 0%           ~        (all equal)
WalkCaseFakeFS100k/casefs-8   1.02 ± 0%           1.02 ± 0%           ~        (all equal)

name                          old DirNames/op     new DirNames/op     delta
WalkCaseFakeFS100k/rawfs-8    51.2k ± 0%          51.2k ± 0%          ~        (all equal)
WalkCaseFakeFS100k/casefs-8   102k ± 0%           102k ± 0%           ~        (all equal)

name                          old Lstat/entry     new Lstat/entry     delta
WalkCaseFakeFS100k/rawfs-8    3.02 ± 0%           3.02 ± 0%           ~        (all equal)
WalkCaseFakeFS100k/casefs-8   3.02 ± 0%           3.02 ± 0%           ~        (all equal)

name                          old Lstat/op        new Lstat/op        delta
WalkCaseFakeFS100k/rawfs-8    302k ± 0%           302k ± 0%           ~        (all equal)
WalkCaseFakeFS100k/casefs-8   302k ± 0%           302k ± 0%           ~        (all equal)

name                          old allocs/entry    new allocs/entry    delta
WalkCaseFakeFS100k/rawfs-8    15.7 ± 0%           15.7 ± 0%           ~        (all equal)
WalkCaseFakeFS100k/casefs-8   27.5 ± 0%           26.1 ± 0%           -5.09%   (p=0.000 n=10+10)

name                          old ns/entry        new ns/entry        delta
WalkCaseFakeFS100k/rawfs-8    2.02k ± 1%          2.02k ± 2%          ~        (p=0.163 n=9+10)
WalkCaseFakeFS100k/casefs-8   3.83k ± 5%          3.68k ± 1%          -3.77%   (p=0.000 n=10+10)

name                          old alloc/op        new alloc/op        delta
WalkCaseFakeFS100k/rawfs-8    89.2MB ± 0%         89.2MB ± 0%         ~        (p=0.364 n=9+10)
WalkCaseFakeFS100k/casefs-8   164MB ± 0%          131MB ± 0%          -19.97%  (p=0.000 n=10+10)

name                          old allocs/op       new allocs/op       delta
WalkCaseFakeFS100k/rawfs-8    2.38M ± 0%          2.38M ± 0%          ~        (all equal)
WalkCaseFakeFS100k/casefs-8   4.16M ± 0%          3.95M ± 0%          -5.05%   (p=0.000 n=10+10)
```
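For illustration, here is a minimal sketch of what "a single cached child per level" means. This is not the Syncthing implementation: `caseNode`, `realCase` and `dirNames` below are hypothetical stand-ins for the real `realCaser` and the filesystem's DirNames call. Each resolved directory caches its children's real-case names plus at most one resolved child node, which a depth-first walk keeps reusing; a full per-level cache would keep one node per visited child, which is the memory this change avoids.

```go
// Hypothetical sketch only; not the actual caseFilesystem/realCaser code.
package main

import (
	"fmt"
	"strings"
)

// caseNode caches the real-case names of one directory's children plus at
// most one resolved child node. Depth-first walks revisit the same child
// over and over, so a single child slot keeps roughly the same hit ratio as
// a full per-level cache while storing far less.
type caseNode struct {
	name  string            // real-case name of this directory
	names map[string]string // lower-cased child name -> real-case child name
	child *caseNode         // the one cached child; replaced on a miss
}

// realCase resolves the on-disk case of path component by component,
// reusing the cached child whenever the walk stays within the same subtree.
// dirNames stands in for the filesystem's directory listing call.
func realCase(root *caseNode, path string, dirNames func(dir string) []string) (string, error) {
	node := root
	var resolved []string
	for _, comp := range strings.Split(path, "/") {
		if node.names == nil {
			// First visit to this directory: list it once and index by lower case.
			node.names = make(map[string]string)
			for _, n := range dirNames(strings.Join(resolved, "/")) {
				node.names[strings.ToLower(n)] = n
			}
		}
		onDisk, ok := node.names[strings.ToLower(comp)]
		if !ok {
			return "", fmt.Errorf("%s: no such entry", comp)
		}
		resolved = append(resolved, onDisk)
		// Keep only one child cached: reuse it if it matches, otherwise replace it.
		if node.child == nil || node.child.name != onDisk {
			node.child = &caseNode{name: onDisk}
		}
		node = node.child
	}
	return strings.Join(resolved, "/"), nil
}

func main() {
	// Fake directory listings standing in for DirNames on a real filesystem.
	tree := map[string][]string{
		"":        {"Foo"},
		"Foo":     {"bar"},
		"Foo/bar": {"BAZ"},
	}
	dirNames := func(dir string) []string { return tree[dir] }

	root := &caseNode{}
	got, err := realCase(root, "foo/BAR/baz", dirNames)
	fmt.Println(got, err) // Foo/bar/BAZ <nil>
}
```

Under this scheme a write operation would simply drop the cached subtree, as described above; nothing expires on a timer or on a name-count limit, but the retained state is just one chain of nodes along the most recently walked path.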
// Copyright (C) 2020 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package fs

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"testing"
	"time"
)

func TestRealCase(t *testing.T) {
	// Verify realCase lookups on various underlying filesystems.

	t.Run("fake-sensitive", func(t *testing.T) {
		testRealCase(t, newFakeFilesystem(t.Name()))
	})
	t.Run("fake-insensitive", func(t *testing.T) {
		testRealCase(t, newFakeFilesystem(t.Name()+"?insens=true"))
	})
	t.Run("actual", func(t *testing.T) {
		fsys, tmpDir := setup(t)
		defer os.RemoveAll(tmpDir)
		testRealCase(t, fsys)
	})
}

func testRealCase(t *testing.T, fsys Filesystem) {
	testFs := NewCaseFilesystem(fsys).(*caseFilesystem)
	comps := []string{"Foo", "bar", "BAZ", "bAs"}
	path := filepath.Join(comps...)
	testFs.MkdirAll(filepath.Join(comps[:len(comps)-1]...), 0777)
	fd, err := testFs.Create(path)
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	for i, tc := range []struct {
		in  string
		len int
	}{
		{path, 4},
		{strings.ToLower(path), 4},
		{strings.ToUpper(path), 4},
		{"foo", 1},
		{"FOO", 1},
		{"foO", 1},
		{filepath.Join("Foo", "bar"), 2},
		{filepath.Join("Foo", "bAr"), 2},
		{filepath.Join("FoO", "bar"), 2},
		{filepath.Join("foo", "bar", "BAZ"), 3},
		{filepath.Join("Foo", "bar", "bAz"), 3},
		{filepath.Join("foo", "bar", "BAZ"), 3}, // Repeat on purpose
	} {
		out, err := testFs.realCase(tc.in)
		if err != nil {
			t.Error(err)
		} else if exp := filepath.Join(comps[:tc.len]...); out != exp {
			t.Errorf("tc %v: Expected %v, got %v", i, exp, out)
		}
	}
}

func TestRealCaseSensitive(t *testing.T) {
	// Verify that realCase returns the best on-disk case for case sensitive
	// systems. Test is skipped if the underlying fs is insensitive.

	t.Run("fake-sensitive", func(t *testing.T) {
		testRealCaseSensitive(t, newFakeFilesystem(t.Name()))
	})
	t.Run("actual", func(t *testing.T) {
		fsys, tmpDir := setup(t)
		defer os.RemoveAll(tmpDir)
		testRealCaseSensitive(t, fsys)
	})
}

func testRealCaseSensitive(t *testing.T, fsys Filesystem) {
	testFs := NewCaseFilesystem(fsys).(*caseFilesystem)

	names := make([]string, 2)
	names[0] = "foo"
	names[1] = strings.ToUpper(names[0])
	for _, n := range names {
		if err := testFs.MkdirAll(n, 0777); err != nil {
			if IsErrCaseConflict(err) {
				t.Skip("Filesystem is case-insensitive")
			}
			t.Fatal(err)
		}
	}

	for _, n := range names {
		if rn, err := testFs.realCase(n); err != nil {
			t.Error(err)
		} else if rn != n {
			t.Errorf("Got %v, expected %v", rn, n)
		}
	}
}

func TestCaseFSStat(t *testing.T) {
	// Verify that a Stat() lookup behaves in a case sensitive manner
	// regardless of the underlying fs.

	t.Run("fake-sensitive", func(t *testing.T) {
		testCaseFSStat(t, newFakeFilesystem(t.Name()))
	})
	t.Run("fake-insensitive", func(t *testing.T) {
		testCaseFSStat(t, newFakeFilesystem(t.Name()+"?insens=true"))
	})
	t.Run("actual", func(t *testing.T) {
		fsys, tmpDir := setup(t)
		defer os.RemoveAll(tmpDir)
		testCaseFSStat(t, fsys)
	})
}

func testCaseFSStat(t *testing.T, fsys Filesystem) {
	fd, err := fsys.Create("foo")
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	// Check if the underlying fs is sensitive or not
	sensitive := true
	if _, err = fsys.Stat("FOO"); err == nil {
		sensitive = false
	}

	testFs := NewCaseFilesystem(fsys)
	_, err = testFs.Stat("FOO")
	if sensitive {
		if IsNotExist(err) {
			t.Log("pass: case sensitive underlying fs")
		} else {
			t.Error("expected NotExist, not", err, "for sensitive fs")
		}
	} else if IsErrCaseConflict(err) {
		t.Log("pass: case insensitive underlying fs")
	} else {
		t.Error("expected ErrCaseConflict, not", err, "for insensitive fs")
	}
}

func BenchmarkWalkCaseFakeFS100k(b *testing.B) {
	const entries = 100_000
	fsys, paths, err := fakefsForBenchmark(entries, 0)
	if err != nil {
		b.Fatal(err)
	}
	b.Run("rawfs", func(b *testing.B) {
		fakefs := unwrapFilesystem(fsys).(*fakefs)
		fakefs.resetCounters()
		benchmarkWalkFakeFS(b, fsys, paths)
		fakefs.reportMetricsPerOp(b)
		fakefs.reportMetricsPer(b, entries, "entry")
		b.ReportAllocs()
	})
	b.Run("casefs", func(b *testing.B) {
		// Construct the casefs manually or it will get cached and the benchmark is invalid.
		casefs := &caseFilesystem{
			Filesystem: fsys,
			realCaser:  newDefaultRealCaser(fsys),
		}
		fakefs := unwrapFilesystem(fsys).(*fakefs)
		fakefs.resetCounters()
		benchmarkWalkFakeFS(b, casefs, paths)
		fakefs.reportMetricsPerOp(b)
		fakefs.reportMetricsPer(b, entries, "entry")
		b.ReportAllocs()
	})
}

func benchmarkWalkFakeFS(b *testing.B, fsys Filesystem, paths []string) {
	// Simulate a scanner pass over the filesystem. First walk it to
	// discover all names, then stat each name individually to check if it's
	// been deleted or not (pretending that they all existed in the
	// database).

	var ms0 runtime.MemStats
	runtime.ReadMemStats(&ms0)
	t0 := time.Now()

	for i := 0; i < b.N; i++ {
		if err := doubleWalkFS(fsys, paths); err != nil {
			b.Fatal(err)
		}
	}

	t1 := time.Now()
	var ms1 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// We add metrics per path entry
	b.ReportMetric(float64(t1.Sub(t0))/float64(b.N)/float64(len(paths)), "ns/entry")
	b.ReportMetric(float64(ms1.Mallocs-ms0.Mallocs)/float64(b.N)/float64(len(paths)), "allocs/entry")
	b.ReportMetric(float64(ms1.TotalAlloc-ms0.TotalAlloc)/float64(b.N)/float64(len(paths)), "B/entry")
}

func TestStressCaseFS(t *testing.T) {
	// Exercise a bunch of parallel operations for stressing out race
	// conditions in the realnamer cache etc.

	const limit = 10 * time.Second
	if testing.Short() {
		t.Skip("long test")
	}

	fsys, paths, err := fakefsForBenchmark(10_000, 0)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < runtime.NumCPU()/2+1; i++ {
		t.Run(fmt.Sprintf("walker-%d", i), func(t *testing.T) {
			// Walk the filesystem and stat everything
			t.Parallel()
			t0 := time.Now()
			for time.Since(t0) < limit {
				if err := doubleWalkFS(fsys, paths); err != nil {
					t.Fatal(err)
				}
			}
		})
		t.Run(fmt.Sprintf("toucher-%d", i), func(t *testing.T) {
			// Touch all the things
			t.Parallel()
			t0 := time.Now()
			for time.Since(t0) < limit {
				for _, p := range paths {
					now := time.Now()
					if err := fsys.Chtimes(p, now, now); err != nil {
						t.Fatal(err)
					}
				}
			}
		})
	}
}

// doubleWalkFS walks the filesystem from the root, then Lstats every given
// path, mimicking the scanner's walk-then-check pass.
func doubleWalkFS(fsys Filesystem, paths []string) error {
	if err := fsys.Walk("/", func(path string, info FileInfo, err error) error {
		return err
	}); err != nil {
		return err
	}

	for _, p := range paths {
		if _, err := fsys.Lstat(p); err != nil {
			return err
		}
	}
	return nil
}

// fakefsForBenchmark returns a case-insensitive fake filesystem with at
// least nfiles entries and the given per-operation latency, together with
// the sorted list of paths it contains.
func fakefsForBenchmark(nfiles int, latency time.Duration) (Filesystem, []string, error) {
	fsys := NewFilesystem(FilesystemTypeFake, fmt.Sprintf("fakefsForBenchmark?files=%d&insens=true&latency=%s", nfiles, latency))

	var paths []string
	if err := fsys.Walk("/", func(path string, info FileInfo, err error) error {
		paths = append(paths, path)
		return err
	}); err != nil {
		return nil, nil, err
	}
	if len(paths) < nfiles {
		return nil, nil, errors.New("didn't find enough stuff")
	}

	sort.Strings(paths)

	return fsys, paths, nil
}