// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package scanner

import (
	"bytes"
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	rdebug "runtime/debug"
	"sort"
	"sync"
	"testing"

	"github.com/d4l3k/messagediff"
	"golang.org/x/text/unicode/norm"

	"github.com/syncthing/syncthing/lib/build"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/rand"
)
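
// testfile describes one expected walk result: a name, a length in bytes, and
// the SHA-256 hash of the content (left empty for directories).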
type testfile struct {
	name   string
	length int64
	hash   string
}

type testfileList []testfile

// testdata is the expected outcome of walking the tree that newTestFs builds,
// once the ignore patterns in its .stignore have been applied; the hashes are
// the SHA-256 of each file's contents. TestWalk compares against it verbatim.
var testdata = testfileList{
	{"afile", 4, "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"},
	{"dir1", 128, ""},
	{filepath.Join("dir1", "dfile"), 5, "49ae93732fcf8d63fe1cce759664982dbd5b23161f007dba8561862adc96d063"},
	{"dir2", 128, ""},
	{filepath.Join("dir2", "cfile"), 4, "bf07a7fbb825fc0aae7bf4a1177b2b31fcf8a3feeaf7092761e18c859ee52a9c"},
	{"excludes", 37, "df90b52f0c55dba7a7a940affe482571563b1ac57bd5be4d8a0291e7de928e06"},
	{"further-excludes", 5, "7eb0a548094fa6295f7fd9200d69973e5f5ec5c04f2a86d998080ac43ecf89f1"},
}

func init() {
	// This test runs the risk of entering infinite recursion if it fails.
	// Limit the stack size to 10 megs to crash early in that case instead of
	// potentially taking down the box...
	rdebug.SetMaxStack(10 * 1 << 20)
}

func newTestFs(opts ...fs.Option) fs.Filesystem {
	// This mirrors some test data we used to have in a physical `testdata`
	// directory here.
	tfs := fs.NewFilesystem(fs.FilesystemTypeFake, rand.String(16)+"?content=true&nostfolder=true", opts...)
	tfs.Mkdir("dir1", 0o755)
	tfs.Mkdir("dir2", 0o755)
	tfs.Mkdir("dir3", 0o755)
	tfs.MkdirAll("dir2/dir21/dir22/dir23", 0o755)
	tfs.MkdirAll("dir2/dir21/dir22/efile", 0o755)
	tfs.MkdirAll("dir2/dir21/dira", 0o755)
	tfs.MkdirAll("dir2/dir21/efile/ign", 0o755)
	fs.WriteFile(tfs, "dir1/cfile", []byte("baz\n"), 0o644)
	fs.WriteFile(tfs, "dir1/dfile", []byte("quux\n"), 0o644)
	fs.WriteFile(tfs, "dir2/cfile", []byte("baz\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dfile", []byte("quux\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/dir22/dir23/efile", []byte("\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/dir22/efile/efile", []byte("\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/dir22/efile/ign/efile", []byte("\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/dira/efile", []byte("\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/dira/ffile", []byte("\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/efile/ign/efile", []byte("\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/cfile", []byte("foo\n"), 0o644)
	fs.WriteFile(tfs, "dir2/dir21/dfile", []byte("quux\n"), 0o644)
	fs.WriteFile(tfs, "dir3/cfile", []byte("foo\n"), 0o644)
	fs.WriteFile(tfs, "dir3/dfile", []byte("quux\n"), 0o644)
	fs.WriteFile(tfs, "afile", []byte("foo\n"), 0o644)
	fs.WriteFile(tfs, "bfile", []byte("bar\n"), 0o644)
	fs.WriteFile(tfs, ".stignore", []byte("#include excludes\n\nbfile\ndir1/cfile\n/dir2/dir21\n"), 0o644)
	fs.WriteFile(tfs, "excludes", []byte("dir2/dfile\n#include further-excludes\n"), 0o644)
	fs.WriteFile(tfs, "further-excludes", []byte("dir3\n"), 0o644)
	return tfs
}
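
// For orientation, newTestFs builds roughly this tree before any ignore
// patterns are applied (sketch only; the calls above are authoritative):
//
//	afile  bfile  .stignore  excludes  further-excludes
//	dir1/{cfile, dfile}
//	dir2/{cfile, dfile}
//	dir2/dir21/{cfile, dfile, dir22/..., dira/..., efile/ign/...}
//	dir3/{cfile, dfile}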

func TestWalkSub(t *testing.T) {
	testFs := newTestFs()
	ignores := ignore.New(testFs)
	err := ignores.Load(".stignore")
	if err != nil {
		t.Fatal(err)
	}

	cfg, cancel := testConfig()
	defer cancel()
	cfg.Subs = []string{"dir2"}
	cfg.Matcher = ignores
	fchan := Walk(context.TODO(), cfg)
	var files []protocol.FileInfo
	for f := range fchan {
		if f.Err != nil {
			t.Errorf("Error while scanning %v: %v", f.Path, f.Err)
		}
		files = append(files, f.File)
	}

	// The directory contains two files, where one is ignored from a higher
	// level. We should see only the directory and one of the files.

	if len(files) != 2 {
		t.Fatalf("Incorrect length %d != 2", len(files))
	}
	if files[0].Name != "dir2" {
		t.Errorf("Incorrect file %v != dir2", files[0])
	}
	if files[1].Name != filepath.Join("dir2", "cfile") {
		t.Errorf("Incorrect file %v != dir2/cfile", files[1])
	}
}

func TestWalk(t *testing.T) {
	testFs := newTestFs()
	ignores := ignore.New(testFs)
	err := ignores.Load(".stignore")
	if err != nil {
		t.Fatal(err)
	}
	t.Log(ignores)

	cfg, cancel := testConfig()
	defer cancel()
	cfg.Matcher = ignores
	fchan := Walk(context.TODO(), cfg)

	var tmp []protocol.FileInfo
	for f := range fchan {
		if f.Err != nil {
			t.Errorf("Error while scanning %v: %v", f.Path, f.Err)
		}
		tmp = append(tmp, f.File)
	}
	sort.Sort(fileList(tmp))
	files := fileList(tmp).testfiles()

	if diff, equal := messagediff.PrettyDiff(testdata, files); !equal {
		t.Errorf("Walk returned unexpected data. Diff:\n%s", diff)
		t.Error(testdata[4], files[4])
	}
}

func TestVerify(t *testing.T) {
	blocksize := 16
	// data should be an even multiple of blocksize long
	data := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut e")
	buf := bytes.NewBuffer(data)
	progress := newByteCounter()
	defer progress.Close()

	blocks, err := Blocks(context.TODO(), buf, blocksize, -1, progress, false)
	if err != nil {
		t.Fatal(err)
	}
	if exp := len(data) / blocksize; len(blocks) != exp {
		t.Fatalf("Incorrect number of blocks %d != %d", len(blocks), exp)
	}

	if int64(len(data)) != progress.Total() {
		t.Fatalf("Incorrect counter value %d != %d", len(data), progress.Total())
	}

	buf = bytes.NewBuffer(data)
	err = verify(buf, blocksize, blocks)
	t.Log(err)
	if err != nil {
		t.Fatal("Unexpected verify failure", err)
	}

	buf = bytes.NewBuffer(append(data, '\n'))
	err = verify(buf, blocksize, blocks)
	t.Log(err)
	if err == nil {
		t.Fatal("Unexpected verify success")
	}

	buf = bytes.NewBuffer(data[:len(data)-1])
	err = verify(buf, blocksize, blocks)
	t.Log(err)
	if err == nil {
		t.Fatal("Unexpected verify success")
	}

	data[42] = 42
	buf = bytes.NewBuffer(data)
	err = verify(buf, blocksize, blocks)
	t.Log(err)
	if err == nil {
		t.Fatal("Unexpected verify success")
	}
}

func TestNormalization(t *testing.T) {
	if build.IsDarwin {
		t.Skip("Normalization test not possible on darwin")
		return
	}

	testFs := newTestFs()

	tests := []string{
		"0-A",            // ASCII A -- accepted
		"1-\xC3\x84",     // NFC 'Ä' -- conflicts with the entry below, accepted
		"1-\x41\xCC\x88", // NFD 'Ä' -- conflicts with the entry above, ignored
		"2-\xC3\x85",     // NFC 'Å' -- accepted
		"3-\x41\xCC\x83", // NFD 'Ã' -- converted to NFC
		"4-\xE2\x98\x95", // U+2615 HOT BEVERAGE (☕) -- accepted
		"5-\xCD\xE2",     // EUC-CN "wài" (外) -- ignored (not UTF8)
	}
	numInvalid := 2

	numValid := len(tests) - numInvalid

	for _, s1 := range tests {
		// Create a directory for each of the interesting strings above
		if err := testFs.MkdirAll(filepath.Join("normalization", s1), 0o755); err != nil {
			t.Fatal(err)
		}

		for _, s2 := range tests {
			// Within each dir, create a file with each of the interesting
			// file names. Ensure that the file doesn't exist when it's
			// created. This detects and fails if there's file name
			// normalization stuff at the filesystem level.
			if fd, err := testFs.OpenFile(filepath.Join("normalization", s1, s2), os.O_CREATE|os.O_EXCL, 0o644); err != nil {
				t.Fatal(err)
			} else {
				fd.Write([]byte("test"))
				fd.Close()
			}
		}
	}

	// We can normalize a directory name, but we can't descend into it in the
	// same pass due to how filepath.Walk works. So we run the scan twice to
	// make sure it all gets done. In production, things will be correct
	// eventually...

	walkDir(testFs, "normalization", nil, nil, 0)
	tmp := walkDir(testFs, "normalization", nil, nil, 0)

	files := fileList(tmp).testfiles()

	// We should have one file per combination, plus the directories
	// themselves, plus the "testdata/normalization" directory
	expectedNum := numValid*numValid + numValid + 1
	if len(files) != expectedNum {
		t.Errorf("Expected %d files, got %d, numvalid %d", expectedNum, len(files), numValid)
	}

	// The file names should all be in NFC form.

	for _, f := range files {
		t.Logf("%q (% x) %v", f.name, f.name, norm.NFC.IsNormalString(f.name))
		if !norm.NFC.IsNormalString(f.name) {
			t.Errorf("File name %q is not NFC normalized", f.name)
		}
	}
}
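
// For reference, the two encodings of 'Ä' used above: the NFD form
// "\x41\xCC\x88" is 'A' followed by U+0308 COMBINING DIAERESIS, while the NFC
// form "\xC3\x84" is the precomposed U+00C4. norm.NFC.String converts the
// former into the latter, which is the form the scanner is expected to emit.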

func TestNormalizationDarwinCaseFS(t *testing.T) {
	// This tests that normalization works on Darwin, through a CaseFS.

	if !build.IsDarwin {
		t.Skip("Normalization test not possible on non-Darwin")
		return
	}

	testFs := newTestFs(new(fs.OptionDetectCaseConflicts))

	testFs.RemoveAll("normalization")
	defer testFs.RemoveAll("normalization")
	testFs.MkdirAll("normalization", 0o755)

	const (
		inNFC = "\xC3\x84"
		inNFD = "\x41\xCC\x88"
	)

	// Create dir in NFC
	if err := testFs.Mkdir(filepath.Join("normalization", "dir-"+inNFC), 0o755); err != nil {
		t.Fatal(err)
	}

	// Create file in NFC
	fd, err := testFs.Create(filepath.Join("normalization", "dir-"+inNFC, "file-"+inNFC))
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	// Walk, which should normalize and return
	walkDir(testFs, "normalization", nil, nil, 0)
	tmp := walkDir(testFs, "normalization", nil, nil, 0)
	if len(tmp) != 3 {
		t.Error("Expected one file and two dirs scanned")
	}

	// Verify we see the normalized entries in the result
	foundFile := false
	foundDir := false
	for _, f := range tmp {
		if f.Name == filepath.Join("normalization", "dir-"+inNFD) {
			foundDir = true
			continue
		}
		if f.Name == filepath.Join("normalization", "dir-"+inNFD, "file-"+inNFD) {
			foundFile = true
			continue
		}
	}
	if !foundFile || !foundDir {
		t.Error("Didn't find expected normalization form")
	}
}

// TestIssue1507 is a regression test: the walk callback must not panic when
// it is invoked with a nil FileInfo and a non-nil error.
func TestIssue1507(_ *testing.T) {
	w := &walker{}
	w.Matcher = ignore.New(w.Filesystem)
	h := make(chan protocol.FileInfo, 100)
	f := make(chan ScanResult, 100)
	fn := w.walkAndHashFiles(context.TODO(), h, f)

	fn("", nil, protocol.ErrClosed)
}

func TestWalkSymlinkUnix(t *testing.T) {
	if build.IsWindows {
		t.Skip("skipping unsupported symlink test")
		return
	}

	// Create a folder with a symlink in it
	os.RemoveAll("_symlinks")
	os.Mkdir("_symlinks", 0o755)
	defer os.RemoveAll("_symlinks")
	os.Symlink("../testdata", "_symlinks/link")

	fs := fs.NewFilesystem(fs.FilesystemTypeBasic, "_symlinks")
	for _, path := range []string{".", "link"} {
		// Scan it
		files := walkDir(fs, path, nil, nil, 0)

		// Verify that we got one symlink with the correct attributes
		if len(files) != 1 {
			t.Errorf("expected 1 symlink, not %d", len(files))
		}
		if len(files[0].Blocks) != 0 {
			t.Errorf("expected zero blocks for symlink, not %d", len(files[0].Blocks))
		}
		if files[0].SymlinkTarget != "../testdata" {
			t.Errorf("expected symlink to have target destination, not %q", files[0].SymlinkTarget)
		}
	}
}

func TestBlocksizeHysteresis(t *testing.T) {
	// Verify that we select the right block size in the presence of old
	// file information.

	if testing.Short() {
		t.Skip("long and hard test")
	}

	sf := fs.NewWalkFilesystem(&singleFileFS{
		name:     "testfile.dat",
		filesize: 500 << 20, // 500 MiB
	})

	current := make(fakeCurrentFiler)

	runTest := func(expectedBlockSize int) {
		files := walkDir(sf, ".", current, nil, 0)
		if len(files) != 1 {
			t.Fatalf("expected one file, not %d", len(files))
		}
		if s := files[0].BlockSize(); s != expectedBlockSize {
			t.Fatalf("incorrect block size %d != expected %d", s, expectedBlockSize)
		}
	}

	// Scan with no previous knowledge. We should get a 512 KiB block size.
	runTest(512 << 10)

	// Scan on the assumption that previous size was 256 KiB. Retain 256 KiB
	// block size.
	current["testfile.dat"] = protocol.FileInfo{
		Name:         "testfile.dat",
		Size:         500 << 20,
		RawBlockSize: 256 << 10,
	}
	runTest(256 << 10)

	// Scan on the assumption that previous size was 1 MiB. Retain 1 MiB
	// block size.
	current["testfile.dat"] = protocol.FileInfo{
		Name:         "testfile.dat",
		Size:         500 << 20,
		RawBlockSize: 1 << 20,
	}
	runTest(1 << 20)

	// Scan on the assumption that previous size was 128 KiB. Move to 512
	// KiB because the difference is large.
	current["testfile.dat"] = protocol.FileInfo{
		Name:         "testfile.dat",
		Size:         500 << 20,
		RawBlockSize: 128 << 10,
	}
	runTest(512 << 10)

	// Scan on the assumption that previous size was 2 MiB. Move to 512
	// KiB because the difference is large.
	current["testfile.dat"] = protocol.FileInfo{
		Name:         "testfile.dat",
		Size:         500 << 20,
		RawBlockSize: 2 << 20,
	}
	runTest(512 << 10)
}
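
// Taken together, the cases above pin down the hysteresis policy: the natural
// block size for a 500 MiB file is 512 KiB, a previous block size within a
// factor of two of that (256 KiB, 1 MiB) is retained, and anything further
// away (128 KiB, 2 MiB) snaps back to the natural size.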

func TestWalkReceiveOnly(t *testing.T) {
	sf := fs.NewWalkFilesystem(&singleFileFS{
		name:     "testfile.dat",
		filesize: 1024,
	})

	current := make(fakeCurrentFiler)

	// Initial scan, no files in the CurrentFiler. Should pick up the file and
	// set the ReceiveOnly flag on it, because that's the flag we give the
	// walker to set.

	files := walkDir(sf, ".", current, nil, protocol.FlagLocalReceiveOnly)
	if len(files) != 1 {
		t.Fatal("Should have scanned one file")
	}

	if files[0].LocalFlags != protocol.FlagLocalReceiveOnly {
		t.Fatal("Should have set the ReceiveOnly flag")
	}

	// Update the CurrentFiler and scan again. It should not return
	// anything, because the file has not changed. This verifies that the
	// ReceiveOnly flag is properly ignored and doesn't trigger a rescan
	// every time.

	cur := files[0]
	current[cur.Name] = cur

	files = walkDir(sf, ".", current, nil, protocol.FlagLocalReceiveOnly)
	if len(files) != 0 {
		t.Fatal("Should not have scanned anything")
	}

	// Now pretend the file was previously ignored instead. We should pick up
	// the difference in flags and set just the ReceiveOnly flag.

	cur.LocalFlags = protocol.FlagLocalIgnored
	current[cur.Name] = cur

	files = walkDir(sf, ".", current, nil, protocol.FlagLocalReceiveOnly)
	if len(files) != 1 {
		t.Fatal("Should have scanned one file")
	}

	if files[0].LocalFlags != protocol.FlagLocalReceiveOnly {
		t.Fatal("Should have set the ReceiveOnly flag")
	}
}

func TestScanOwnershipPOSIX(t *testing.T) {
	// This test works on all operating systems because the FakeFS is always POSIXy.

	fakeFS := fs.NewFilesystem(fs.FilesystemTypeFake, "TestScanOwnership")
	current := make(fakeCurrentFiler)

	fakeFS.Create("root-owned")
	fakeFS.Create("user-owned")
	fakeFS.Lchown("user-owned", "1234", "5678")
	fakeFS.Mkdir("user-owned-dir", 0o755)
	fakeFS.Lchown("user-owned-dir", "2345", "6789")

	expected := []struct {
		name     string
		uid, gid int
	}{
		{"root-owned", 0, 0},
		{"user-owned", 1234, 5678},
		{"user-owned-dir", 2345, 6789},
	}

	files := walkDir(fakeFS, ".", current, nil, 0)
	if len(files) != len(expected) {
		t.Fatalf("expected %d items, not %d", len(expected), len(files))
	}
	for i := range expected {
		if files[i].Name != expected[i].name {
			t.Errorf("expected %s, got %s", expected[i].name, files[i].Name)
			continue
		}

		if files[i].Platform.Unix == nil {
			t.Error("failed to load POSIX data on", files[i].Name)
			continue
		}
		if files[i].Platform.Unix.UID != expected[i].uid {
			t.Errorf("expected %d, got %d", expected[i].uid, files[i].Platform.Unix.UID)
		}
		if files[i].Platform.Unix.GID != expected[i].gid {
			t.Errorf("expected %d, got %d", expected[i].gid, files[i].Platform.Unix.GID)
		}
	}
}

func TestScanOwnershipWindows(t *testing.T) {
	if !build.IsWindows {
		t.Skip("This test only works on Windows")
	}

	testFS := fs.NewFilesystem(fs.FilesystemTypeBasic, t.TempDir())
	current := make(fakeCurrentFiler)

	fd, err := testFS.Create("user-owned")
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	files := walkDir(testFS, ".", current, nil, 0)
	if len(files) != 1 {
		t.Fatalf("expected %d items, not %d", 1, len(files))
	}
	t.Log(files[0])

	// The file should have an owner name set.
	if files[0].Platform.Windows == nil {
		t.Fatal("failed to load Windows data")
	}
	if files[0].Platform.Windows.OwnerName == "" {
		t.Errorf("expected owner name to be set")
	}
}

// walkDir scans dir on the given filesystem with the supplied current-file
// lookup, ignore matcher, and local flags, drops any scan errors, and returns
// the resulting files sorted by name.
func walkDir(fs fs.Filesystem, dir string, cfiler CurrentFiler, matcher *ignore.Matcher, localFlags uint32) []protocol.FileInfo {
	cfg, cancel := testConfig()
	defer cancel()
	cfg.Filesystem = fs
	cfg.Subs = []string{dir}
	cfg.AutoNormalize = true
	cfg.CurrentFiler = cfiler
	cfg.Matcher = matcher
	cfg.LocalFlags = localFlags
	cfg.ScanOwnership = true
	fchan := Walk(context.TODO(), cfg)

	var tmp []protocol.FileInfo
	for f := range fchan {
		if f.Err == nil {
			tmp = append(tmp, f.File)
		}
	}
	sort.Sort(fileList(tmp))

	return tmp
}

type fileList []protocol.FileInfo

func (l fileList) Len() int {
	return len(l)
}

func (l fileList) Less(a, b int) bool {
	return l[a].Name < l[b].Name
}

func (l fileList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}

func (l fileList) testfiles() testfileList {
	testfiles := make(testfileList, len(l))
	for i, f := range l {
		if len(f.Blocks) > 1 {
			panic("simple test case stuff only supports a single block per file")
		}
		testfiles[i] = testfile{name: f.Name, length: f.FileSize()}
		if len(f.Blocks) == 1 {
			testfiles[i].hash = fmt.Sprintf("%x", f.Blocks[0].Hash)
		}
	}
	return testfiles
}

func (l testfileList) String() string {
	var b bytes.Buffer
	b.WriteString("{\n")
	for _, f := range l {
		fmt.Fprintf(&b, "  %s (%d bytes): %s\n", f.name, f.length, f.hash)
	}
	b.WriteString("}")
	return b.String()
}

var initOnce sync.Once

const (
	testdataSize = 17<<20 + 1
	testdataName = "_random.data"
	testFsPath   = "some_random_dir_path"
)

func BenchmarkHashFile(b *testing.B) {
	testFs := newDataFs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if _, err := HashFile(context.TODO(), "", testFs, testdataName, protocol.MinBlockSize, nil, true); err != nil {
			b.Fatal(err)
		}
	}

	b.SetBytes(testdataSize)
	b.ReportAllocs()
}

func newDataFs() fs.Filesystem {
	tfs := fs.NewFilesystem(fs.FilesystemTypeFake, rand.String(16)+"?content=true")
	fd, err := tfs.Create(testdataName)
	if err != nil {
		panic(err)
	}

	lr := io.LimitReader(rand.Reader, testdataSize)
	if _, err := io.Copy(fd, lr); err != nil {
		panic(err)
	}

	if err := fd.Close(); err != nil {
		panic(err)
	}

	return tfs
}
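
// Note on the fake filesystem: the "?content=true" URI option makes it retain
// written bytes so they can be read back and hashed later; without it, file
// contents are presumably only simulated (an assumption based on how the
// option is used here and in newTestFs).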

func TestStopWalk(t *testing.T) {
	// Create tree that is 100 levels deep, with each level containing 100
	// files (each 1 MB) and 100 directories (in turn containing 100 files
	// and 100 directories, etc). That is, in total > 100^100 files and as
	// many directories. It'll take a while to scan, giving us time to
	// cancel it and make sure the scan stops.

	// Use an errorFs as the backing fs for the rest of the interface.
	// The way we get it is a bit hacky, though.
	errorFs := fs.NewFilesystem(fs.FilesystemType(-1), ".")
	fs := fs.NewWalkFilesystem(&infiniteFS{errorFs, 100, 100, 1e6})

	const numHashers = 4
	ctx, cancel := context.WithCancel(context.Background())
	cfg, cfgCancel := testConfig()
	defer cfgCancel()
	cfg.Filesystem = fs
	cfg.Hashers = numHashers
	cfg.ProgressTickIntervalS = -1 // Don't attempt to build the full list of files before starting to scan...
	fchan := Walk(ctx, cfg)

	// Receive a few entries to make sure the walker is up and running,
	// scanning both files and dirs. Do some quick sanity tests on the
	// returned file entries to make sure we are not just reading crap from
	// a closed channel or something.
	dirs := 0
	files := 0
	for {
		res := <-fchan
		if res.Err != nil {
			t.Errorf("Error while scanning %v: %v", res.Path, res.Err)
		}
		f := res.File
		t.Log("Scanned", f)
		if f.IsDirectory() {
			if f.Name == "" || f.Permissions == 0 {
				t.Error("Bad directory entry", f)
			}
			dirs++
		} else {
			if f.Name == "" || len(f.Blocks) == 0 || f.Permissions == 0 {
				t.Error("Bad file entry", f)
			}
			files++
		}
		if dirs > 5 && files > 5 {
			break
		}
	}

	// Cancel the walker.
	cancel()

	// Empty out any waiting entries and wait for the channel to close.
	// Count them, they should be zero or very few - essentially, each
	// hasher has the choice of returning a fully handled entry or
	// cancelling, but they should not start on another item.
	extra := 0
	for range fchan {
		extra++
	}
	t.Log("Extra entries:", extra)
	if extra > numHashers {
		t.Error("unexpected extra entries received after cancel")
	}
}

func TestIssue4799(t *testing.T) {
	fs := fs.NewFilesystem(fs.FilesystemTypeFake, rand.String(16))

	fd, err := fs.Create("foo")
	if err != nil {
		t.Fatal(err)
	}
	fd.Close()

	files := walkDir(fs, "/foo", nil, nil, 0)
	if len(files) != 1 || files[0].Name != "foo" {
		t.Error(`Received unexpected file infos when walking "/foo"`, files)
	}
}

func TestRecurseInclude(t *testing.T) {
	stignore := `
	!/dir1/cfile
	!efile
	!ffile
	*
	`
	testFs := newTestFs()
	ignores := ignore.New(testFs, ignore.WithCache(true))
	if err := ignores.Parse(bytes.NewBufferString(stignore), ".stignore"); err != nil {
		t.Fatal(err)
	}

	files := walkDir(testFs, ".", nil, ignores, 0)

	expected := []string{
		filepath.Join("dir1"),
		filepath.Join("dir1", "cfile"),
		filepath.Join("dir2"),
		filepath.Join("dir2", "dir21"),
		filepath.Join("dir2", "dir21", "dir22"),
		filepath.Join("dir2", "dir21", "dir22", "dir23"),
		filepath.Join("dir2", "dir21", "dir22", "dir23", "efile"),
		filepath.Join("dir2", "dir21", "dir22", "efile"),
		filepath.Join("dir2", "dir21", "dir22", "efile", "efile"),
		filepath.Join("dir2", "dir21", "dira"),
		filepath.Join("dir2", "dir21", "dira", "efile"),
		filepath.Join("dir2", "dir21", "dira", "ffile"),
		filepath.Join("dir2", "dir21", "efile"),
		filepath.Join("dir2", "dir21", "efile", "ign"),
		filepath.Join("dir2", "dir21", "efile", "ign", "efile"),
	}
	if len(files) != len(expected) {
		var filesString []string
		for _, file := range files {
			filesString = append(filesString, file.Name)
		}
		t.Fatalf("Got %d files %v, expected %d files at %v", len(files), filesString, len(expected), expected)
	}
	for i := range files {
		if files[i].Name != expected[i] {
			t.Errorf("Got %v, expected file at %v", files[i], expected[i])
		}
	}
}

func TestIssue4841(t *testing.T) {
	fs := fs.NewFilesystem(fs.FilesystemTypeFake, rand.String(16))

	fd, err := fs.Create("foo")
	if err != nil {
		panic(err)
	}
	fd.Close()

	cfg, cancel := testConfig()
	defer cancel()
	cfg.Filesystem = fs
	cfg.AutoNormalize = true
	cfg.CurrentFiler = fakeCurrentFiler{"foo": {
		Name:       "foo",
		Type:       protocol.FileInfoTypeFile,
		LocalFlags: protocol.FlagLocalIgnored,
		Version:    protocol.Vector{}.Update(1),
	}}
	cfg.ShortID = protocol.LocalDeviceID.Short()
	fchan := Walk(context.TODO(), cfg)

	var files []protocol.FileInfo
	for f := range fchan {
		if f.Err != nil {
			t.Errorf("Error while scanning %v: %v", f.Path, f.Err)
		}
		files = append(files, f.File)
	}
	sort.Sort(fileList(files))

	if len(files) != 1 {
		t.Fatalf("Expected 1 file, got %d: %v", len(files), files)
	}
	if expected := (protocol.Vector{}.Update(protocol.LocalDeviceID.Short())); !files[0].Version.Equal(expected) {
		t.Fatalf("Expected Version == %v, got %v", expected, files[0].Version)
	}
}

// TestNotExistingError reproduces https://github.com/syncthing/syncthing/issues/5385
func TestNotExistingError(t *testing.T) {
	sub := "notExisting"
	testFs := newTestFs()
	if _, err := testFs.Lstat(sub); !fs.IsNotExist(err) {
		t.Fatalf("Lstat returned error %v, while nothing should exist there.", err)
	}

	cfg, cancel := testConfig()
	defer cancel()
	cfg.Subs = []string{sub}
	fchan := Walk(context.TODO(), cfg)
	for f := range fchan {
		t.Fatalf("Expected no result from scan, got %v", f)
	}
}

func TestSkipIgnoredDirs(t *testing.T) {
	fss := fs.NewFilesystem(fs.FilesystemTypeFake, "")

	name := "foo/ignored"
	err := fss.MkdirAll(name, 0o777)
	if err != nil {
		t.Fatal(err)
	}

	stat, err := fss.Lstat(name)
	if err != nil {
		t.Fatal(err)
	}

	w := &walker{}

	pats := ignore.New(fss, ignore.WithCache(true))

	stignore := `
	/foo/ign*
	!/f*
	*
	`
	if err := pats.Parse(bytes.NewBufferString(stignore), ".stignore"); err != nil {
		t.Fatal(err)
	}
	if m := pats.Match("whatever"); !m.CanSkipDir() {
		t.Error("CanSkipDir should be true", m)
	}

	w.Matcher = pats

	fn := w.walkAndHashFiles(context.Background(), nil, nil)

	if err := fn(name, stat, nil); err != fs.SkipDir {
		t.Errorf("Expected %v, got %v", fs.SkipDir, err)
	}
}

// https://github.com/syncthing/syncthing/issues/6487
func TestIncludedSubdir(t *testing.T) {
	fss := fs.NewFilesystem(fs.FilesystemTypeFake, "")

	name := filepath.Clean("foo/bar/included")
	err := fss.MkdirAll(name, 0o777)
	if err != nil {
		t.Fatal(err)
	}

	pats := ignore.New(fss, ignore.WithCache(true))

	stignore := `
	!/foo/bar
	*
	`
	if err := pats.Parse(bytes.NewBufferString(stignore), ".stignore"); err != nil {
		t.Fatal(err)
	}

	fchan := Walk(context.TODO(), Config{
		CurrentFiler: make(fakeCurrentFiler),
		Filesystem:   fss,
		Matcher:      pats,
	})

	found := false
	for f := range fchan {
		if f.Err != nil {
			t.Fatalf("Error while scanning %v: %v", f.Path, f.Err)
		}
		if f.File.IsIgnored() {
			t.Error("File is ignored:", f.File.Name)
		}
		if f.File.Name == name {
			found = true
		}
	}

	if !found {
		t.Errorf("File not present in scan results")
	}
}

// verify returns nil or an error describing the mismatch between the block
// list and actual reader contents.
func verify(r io.Reader, blocksize int, blocks []protocol.BlockInfo) error {
	hf := sha256.New()
	// A 32k buffer is used for copying into the hash function.
	buf := make([]byte, 32<<10)

	for i, block := range blocks {
		lr := &io.LimitedReader{R: r, N: int64(blocksize)}
		_, err := io.CopyBuffer(hf, lr, buf)
		if err != nil {
			return err
		}

		hash := hf.Sum(nil)
		hf.Reset()

		if !bytes.Equal(hash, block.Hash) {
			return fmt.Errorf("hash mismatch %x != %x for block %d", hash, block.Hash, i)
		}
	}

	// We should have reached the end now
	bs := make([]byte, 1)
	n, err := r.Read(bs)
	if n != 0 || err != io.EOF {
		return errors.New("file continues past end of blocks")
	}

	return nil
}
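
// A minimal round trip through Blocks and verify, as exercised by TestVerify
// above (sketch; error handling elided):
//
//	blocks, _ := Blocks(context.TODO(), bytes.NewReader(data), blockSize, -1, nil, false)
//	err := verify(bytes.NewReader(data), blockSize, blocks) // nil when data matches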

type fakeCurrentFiler map[string]protocol.FileInfo

func (fcf fakeCurrentFiler) CurrentFile(name string) (protocol.FileInfo, bool) {
	f, ok := fcf[name]
	return f, ok
}

func testConfig() (Config, context.CancelFunc) {
	evLogger := events.NewLogger()
	ctx, cancel := context.WithCancel(context.Background())
	go evLogger.Serve(ctx)
	return Config{
		Filesystem:  newTestFs(),
		Hashers:     2,
		EventLogger: evLogger,
	}, cancel
}

func BenchmarkWalk(b *testing.B) {
	testFs := fs.NewFilesystem(fs.FilesystemTypeFake, rand.String(32))

	for i := 0; i < 100; i++ {
		if err := testFs.Mkdir(fmt.Sprintf("dir%d", i), 0o755); err != nil {
			b.Fatal(err)
		}
		for j := 0; j < 100; j++ {
			if fd, err := testFs.Create(fmt.Sprintf("dir%d/file%d", i, j)); err != nil {
				b.Fatal(err)
			} else {
				fd.Close()
			}
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		walkDir(testFs, "/", nil, nil, 0)
	}
}