Rework .stignore functionality (fixes #561)

- Only one .stignore is supported, at the repo root
- Negative patterns (!) are supported
- Ignore patterns affect sent and received indexes, not only scanning
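To illustrate the supported pattern forms (this sample is illustrative only and not part of the commit; the file and directory names are made up), a repo-root .stignore handled by the parser added below might read:

    #include extra-patterns
    !important.log
    *.log
    /build
    **/cache

A leading ! re-includes whatever the pattern matches (the first matching pattern wins, so negations go before the broader pattern), a leading / anchors the pattern to the repo root, **/ matches at any depth, and #include pulls in more patterns from a file next to the .stignore.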
Parent: 8e4f7bbd3e
Commit: 92c44c8abe
File diff suppressed because one or more lines are too long
@@ -255,7 +255,7 @@ func restGetModel(m *model.Model, w http.ResponseWriter, r *http.Request) {
 func restPostOverride(m *model.Model, w http.ResponseWriter, r *http.Request) {
 	var qs = r.URL.Query()
 	var repo = qs.Get("repo")
-	m.Override(repo)
+	go m.Override(repo)
 }
 
 func restGetNeed(m *model.Model, w http.ResponseWriter, r *http.Request) {
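The only change here is that Override now runs on its own goroutine, presumably so the REST call returns without waiting for the override to finish. A generic sketch of that pattern (hypothetical handler and doWork, not code from this commit):

package main

import "net/http"

// doWork stands in for a potentially slow operation such as Model.Override.
func doWork(repo string) {}

// handlePost kicks the work off in the background and replies immediately.
func handlePost(w http.ResponseWriter, r *http.Request) {
	go doWork(r.URL.Query().Get("repo"))
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/override", handlePost)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}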
@@ -186,18 +186,28 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
 			if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
 				maxLocalVer = lv
 			}
-			ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
+			if fs[fsi].IsInvalid() {
+				ldbRemoveFromGlobal(snap, batch, repo, node, newName)
+			} else {
+				ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
+			}
 			fsi++
 
 		case moreFs && moreDb && cmp == 0:
-			// File exists on both sides - compare versions.
+			// File exists on both sides - compare versions. We might get an
+			// update with the same version and different flags if a node has
+			// marked a file as invalid, so handle that too.
 			var ef protocol.FileInfoTruncated
 			ef.UnmarshalXDR(dbi.Value())
-			if fs[fsi].Version > ef.Version {
+			if fs[fsi].Version > ef.Version || fs[fsi].Version != ef.Version {
 				if lv := ldbInsert(batch, repo, node, newName, fs[fsi]); lv > maxLocalVer {
 					maxLocalVer = lv
 				}
-				ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
+				if fs[fsi].IsInvalid() {
+					ldbRemoveFromGlobal(snap, batch, repo, node, newName)
+				} else {
+					ldbUpdateGlobal(snap, batch, repo, node, newName, fs[fsi].Version)
+				}
 			}
 			// Iterate both sides.
 			fsi++
@@ -280,7 +290,11 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
 			if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
 				maxLocalVer = lv
 			}
-			ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
+			if f.IsInvalid() {
+				ldbRemoveFromGlobal(snap, batch, repo, node, name)
+			} else {
+				ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
+			}
 			continue
 		}
 
@@ -289,11 +303,17 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
 		if err != nil {
 			panic(err)
 		}
-		if ef.Version != f.Version {
+		// Flags might change without the version being bumped when we set the
+		// invalid flag on an existing file.
+		if ef.Version != f.Version || ef.Flags != f.Flags {
 			if lv := ldbInsert(batch, repo, node, name, f); lv > maxLocalVer {
 				maxLocalVer = lv
 			}
-			ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
+			if f.IsInvalid() {
+				ldbRemoveFromGlobal(snap, batch, repo, node, name)
+			} else {
+				ldbUpdateGlobal(snap, batch, repo, node, name, f.Version)
+			}
 		}
 	}
 
@@ -385,7 +405,9 @@ func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
 	gk := globalKey(repo, file)
 	svl, err := db.Get(gk, nil)
 	if err != nil {
-		panic(err)
+		// We might be called to "remove" a global version that doesn't exist
+		// if the first update for the file is already marked invalid.
+		return
 	}
 
 	var fl versionList
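The pattern across all three functions above is the same: a record flagged invalid is still written to the per-node index (ldbInsert), but it is removed from, rather than added to, the global version list. A deliberately simplified model of that bookkeeping (a real per-file versionList is collapsed to one map entry here; this is not the actual files package code):

package main

import "fmt"

// applyUpdate mirrors the ldbUpdateGlobal / ldbRemoveFromGlobal split above:
// invalid announcements drop the file from the global view, valid ones set it.
func applyUpdate(global map[string]uint64, name string, version uint64, invalid bool) {
	if invalid {
		delete(global, name)
		return
	}
	global[name] = version
}

func main() {
	global := map[string]uint64{}
	applyUpdate(global, "a", 1000, false)
	applyUpdate(global, "a", 1000, true) // same version, now flagged invalid
	_, ok := global["a"]
	fmt.Println(ok) // false: nothing advertises a valid copy of "a" any more
}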
@@ -86,7 +86,7 @@ func (l fileList) String() string {
 	var b bytes.Buffer
 	b.WriteString("[]protocol.FileList{\n")
 	for _, f := range l {
-		fmt.Fprintf(&b, " %q: #%d, %d bytes, %d blocks\n", f.Name, f.Version, f.Size(), len(f.Blocks))
+		fmt.Fprintf(&b, " %q: #%d, %d bytes, %d blocks, flags=%o\n", f.Name, f.Version, f.Size(), len(f.Blocks), f.Flags)
 	}
 	b.WriteString("}")
 	return b.String()
@@ -280,6 +280,86 @@ func TestNeedWithInvalid(t *testing.T) {
 	}
 }
 
+func TestUpdateToInvalid(t *testing.T) {
+	lamport.Default = lamport.Clock{}
+
+	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s := files.NewSet("test", db)
+
+	localHave := fileList{
+		protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
+		protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
+		protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
+		protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
+	}
+
+	s.ReplaceWithDelete(protocol.LocalNodeID, localHave)
+
+	have := fileList(haveList(s, protocol.LocalNodeID))
+	sort.Sort(have)
+
+	if fmt.Sprint(have) != fmt.Sprint(localHave) {
+		t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
+	}
+
+	localHave[1] = protocol.FileInfo{Name: "b", Version: 1001, Flags: protocol.FlagInvalid}
+	s.Update(protocol.LocalNodeID, localHave[1:2])
+
+	have = fileList(haveList(s, protocol.LocalNodeID))
+	sort.Sort(have)
+
+	if fmt.Sprint(have) != fmt.Sprint(localHave) {
+		t.Errorf("Have incorrect after invalidation;\n A: %v !=\n E: %v", have, localHave)
+	}
+}
+
+func TestInvalidAvailability(t *testing.T) {
+	lamport.Default = lamport.Clock{}
+
+	db, err := leveldb.Open(storage.NewMemStorage(), nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s := files.NewSet("test", db)
+
+	remote0Have := fileList{
+		protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
+		protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
+		protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(7)},
+		protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
+	}
+	remote1Have := fileList{
+		protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
+		protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(7)},
+		protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
+		protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
+	}
+
+	s.Replace(remoteNode0, remote0Have)
+	s.Replace(remoteNode1, remote1Have)
+
+	if av := s.Availability("both"); len(av) != 2 {
+		t.Error("Incorrect availability for 'both':", av)
+	}
+
+	if av := s.Availability("r0only"); len(av) != 1 || av[0] != remoteNode0 {
+		t.Error("Incorrect availability for 'r0only':", av)
+	}
+
+	if av := s.Availability("r1only"); len(av) != 1 || av[0] != remoteNode1 {
+		t.Error("Incorrect availability for 'r1only':", av)
+	}
+
+	if av := s.Availability("none"); len(av) != 0 {
+		t.Error("Incorrect availability for 'none':", av)
+	}
+}
+
 func TestLocalDeleted(t *testing.T) {
 	db, err := leveldb.Open(storage.NewMemStorage(), nil)
 	if err != nil {
@@ -971,9 +971,9 @@ function debounce(func, wait) {
         } else {
             timeout = null;
             if (again) {
+                again = false;
                 result = func.apply(context, args);
                 context = args = null;
-                again = false;
             }
         }
     };
ignore/ignore.go | 146 (new file)
@@ -0,0 +1,146 @@
+// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
+// All rights reserved. Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package ignore
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/syncthing/syncthing/fnmatch"
+)
+
+type Pattern struct {
+	match   *regexp.Regexp
+	include bool
+}
+
+type Patterns []Pattern
+
+func Load(file string) (Patterns, error) {
+	seen := make(map[string]bool)
+	return loadIgnoreFile(file, seen)
+}
+
+func Parse(r io.Reader, file string) (Patterns, error) {
+	seen := map[string]bool{
+		file: true,
+	}
+	return parseIgnoreFile(r, file, seen)
+}
+
+func (l Patterns) Match(file string) bool {
+	for _, pattern := range l {
+		if pattern.match.MatchString(file) {
+			return pattern.include
+		}
+	}
+	return false
+}
+
+func loadIgnoreFile(file string, seen map[string]bool) (Patterns, error) {
+	if seen[file] {
+		return nil, fmt.Errorf("Multiple include of ignore file %q", file)
+	}
+	seen[file] = true
+
+	fd, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer fd.Close()
+
+	return parseIgnoreFile(fd, file, seen)
+}
+
+func parseIgnoreFile(fd io.Reader, currentFile string, seen map[string]bool) (Patterns, error) {
+	var exps Patterns
+
+	addPattern := func(line string) error {
+		include := true
+		if strings.HasPrefix(line, "!") {
+			line = line[1:]
+			include = false
+		}
+
+		if strings.HasPrefix(line, "/") {
+			// Pattern is rooted in the current dir only
+			exp, err := fnmatch.Convert(line[1:], fnmatch.FNM_PATHNAME)
+			if err != nil {
+				return fmt.Errorf("Invalid pattern %q in ignore file", line)
+			}
+			exps = append(exps, Pattern{exp, include})
+		} else if strings.HasPrefix(line, "**/") {
+			// Add the pattern as is, and without **/ so it matches in current dir
+			exp, err := fnmatch.Convert(line, fnmatch.FNM_PATHNAME)
+			if err != nil {
+				return fmt.Errorf("Invalid pattern %q in ignore file", line)
+			}
+			exps = append(exps, Pattern{exp, include})
+
+			exp, err = fnmatch.Convert(line[3:], fnmatch.FNM_PATHNAME)
+			if err != nil {
+				return fmt.Errorf("Invalid pattern %q in ignore file", line)
+			}
+			exps = append(exps, Pattern{exp, include})
+		} else if strings.HasPrefix(line, "#include ") {
+			includeFile := filepath.Join(filepath.Dir(currentFile), line[len("#include "):])
+			includes, err := loadIgnoreFile(includeFile, seen)
+			if err != nil {
+				return err
+			} else {
+				exps = append(exps, includes...)
+			}
+		} else {
+			// Path name or pattern, add it so it matches files both in
+			// current directory and subdirs.
+			exp, err := fnmatch.Convert(line, fnmatch.FNM_PATHNAME)
+			if err != nil {
+				return fmt.Errorf("Invalid pattern %q in ignore file", line)
+			}
+			exps = append(exps, Pattern{exp, include})
+
+			exp, err = fnmatch.Convert("**/"+line, fnmatch.FNM_PATHNAME)
+			if err != nil {
+				return fmt.Errorf("Invalid pattern %q in ignore file", line)
+			}
+			exps = append(exps, Pattern{exp, include})
+		}
+		return nil
+	}
+
+	scanner := bufio.NewScanner(fd)
+	var err error
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		switch {
+		case line == "":
+			continue
+		case strings.HasPrefix(line, "#"):
+			err = addPattern(line)
+		case strings.HasSuffix(line, "/**"):
+			err = addPattern(line)
+		case strings.HasSuffix(line, "/"):
+			err = addPattern(line)
+			if err == nil {
+				err = addPattern(line + "**")
+			}
+		default:
+			err = addPattern(line)
+			if err == nil {
+				err = addPattern(line + "/**")
+			}
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return exps, nil
+}
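For orientation, the new package is used roughly like this (a minimal sketch; the repo path is invented and error handling is reduced to what the callers in this commit actually do):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/syncthing/syncthing/ignore"
)

func main() {
	// Load parses <repo>/.stignore, following #include directives relative to it.
	// The callers added in this commit ignore the error and proceed with empty
	// patterns when the file does not exist.
	patterns, _ := ignore.Load(filepath.Join("/path/to/repo", ".stignore"))

	// Match reports the include flag of the first matching pattern: true means
	// the path is ignored; false means either a '!' negation matched or nothing did.
	fmt.Println(patterns.Match("build/output.bin"))
}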
ignore/ignore_test.go | 104 (new file)
@@ -0,0 +1,104 @@
+package ignore_test
+
+import (
+	"bytes"
+	"path/filepath"
+	"testing"
+
+	"github.com/syncthing/syncthing/ignore"
+)
+
+func TestIgnore(t *testing.T) {
+	pats, err := ignore.Load("testdata/.stignore")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var tests = []struct {
+		f string
+		r bool
+	}{
+		{"afile", false},
+		{"bfile", true},
+		{"cfile", false},
+		{"dfile", false},
+		{"efile", true},
+		{"ffile", true},
+
+		{"dir1", false},
+		{filepath.Join("dir1", "cfile"), true},
+		{filepath.Join("dir1", "dfile"), false},
+		{filepath.Join("dir1", "efile"), true},
+		{filepath.Join("dir1", "ffile"), false},
+
+		{"dir2", false},
+		{filepath.Join("dir2", "cfile"), false},
+		{filepath.Join("dir2", "dfile"), true},
+		{filepath.Join("dir2", "efile"), true},
+		{filepath.Join("dir2", "ffile"), false},
+
+		{filepath.Join("dir3"), true},
+		{filepath.Join("dir3", "afile"), true},
+	}
+
+	for i, tc := range tests {
+		if r := pats.Match(tc.f); r != tc.r {
+			t.Errorf("Incorrect ignoreFile() #%d (%s); E: %v, A: %v", i, tc.f, tc.r, r)
+		}
+	}
+}
+
+func TestExcludes(t *testing.T) {
+	stignore := `
+!iex2
+!ign1/ex
+ign1
+i*2
+!ign2
+`
+	pats, err := ignore.Parse(bytes.NewBufferString(stignore), ".stignore")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var tests = []struct {
+		f string
+		r bool
+	}{
+		{"ign1", true},
+		{"ign2", true},
+		{"ibla2", true},
+		{"iex2", false},
+		{"ign1/ign", true},
+		{"ign1/ex", false},
+		{"ign1/iex2", false},
+		{"iex2/ign", false},
+		{"foo/bar/ign1", true},
+		{"foo/bar/ign2", true},
+		{"foo/bar/iex2", false},
+	}
+
+	for _, tc := range tests {
+		if r := pats.Match(tc.f); r != tc.r {
+			t.Errorf("Incorrect match for %s: %v != %v", tc.f, r, tc.r)
+		}
+	}
+}
+
+func TestBadPatterns(t *testing.T) {
+	var badPatterns = []string{
+		"[",
+		"/[",
+		"**/[",
+		"#include nonexistent",
+		"#include .stignore",
+		"!#include makesnosense",
+	}
+
+	for _, pat := range badPatterns {
+		parsed, err := ignore.Parse(bytes.NewBufferString(pat), ".stignore")
+		if err == nil {
+			t.Errorf("No error for pattern %q: %v", pat, parsed)
+		}
+	}
+}
ignore/testdata/.stignore | 6 (vendored, new file)
@@ -0,0 +1,6 @@
+#include excludes
+
+bfile
+dir1/cfile
+**/efile
+/ffile

ignore/testdata/dir3/cfile | 1 (vendored, new file)
@@ -0,0 +1 @@
+baz

ignore/testdata/dir3/dfile | 1 (vendored, new file)
@@ -0,0 +1 @@
+quux

ignore/testdata/excludes | 2 (vendored, new file)
@@ -0,0 +1,2 @@
+dir2/dfile
+#include further-excludes

ignore/testdata/further-excludes | 1 (vendored, new file)
@@ -0,0 +1 @@
+dir3
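These fixtures exercise the include chain end to end: .stignore pulls in excludes, which pulls in further-excludes, which ignores dir3. The expectations in TestIgnore above boil down to checks like the following (an illustrative sketch meant to be run from the repository root, not an additional test):

package main

import (
	"fmt"

	"github.com/syncthing/syncthing/ignore"
)

func main() {
	pats, err := ignore.Load("ignore/testdata/.stignore")
	if err != nil {
		panic(err)
	}
	fmt.Println(pats.Match("bfile"))      // true: direct pattern in .stignore
	fmt.Println(pats.Match("dir2/dfile")) // true: via #include excludes
	fmt.Println(pats.Match("dir3"))       // true: via the nested #include further-excludes
	fmt.Println(pats.Match("afile"))      // false: nothing matches
}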
@@ -2,7 +2,7 @@
 set -euo pipefail
 IFS=$'\n\t'
 
-go test -tags integration -v
+#go test -tags integration -v
 ./test-http.sh
 ./test-merge.sh
 ./test-delupd.sh
model/model.go | 106
@@ -19,6 +19,7 @@ import (
 	"github.com/syncthing/syncthing/config"
 	"github.com/syncthing/syncthing/events"
 	"github.com/syncthing/syncthing/files"
+	"github.com/syncthing/syncthing/ignore"
 	"github.com/syncthing/syncthing/lamport"
 	"github.com/syncthing/syncthing/protocol"
 	"github.com/syncthing/syncthing/scanner"
@@ -72,6 +73,7 @@ type Model struct {
 	repoNodes    map[string][]protocol.NodeID                       // repo -> nodeIDs
 	nodeRepos    map[protocol.NodeID][]string                       // nodeID -> repos
 	nodeStatRefs map[protocol.NodeID]*stats.NodeStatisticsReference // nodeID -> statsRef
+	repoIgnores  map[string]ignore.Patterns                         // repo -> list of ignore patterns
 	rmut         sync.RWMutex                                       // protects the above
 
 	repoState        map[string]repoState // repo -> state
@@ -108,6 +110,7 @@ func NewModel(indexDir string, cfg *config.Configuration, nodeName, clientName,
 		repoNodes:        make(map[string][]protocol.NodeID),
 		nodeRepos:        make(map[protocol.NodeID][]string),
 		nodeStatRefs:     make(map[protocol.NodeID]*stats.NodeStatisticsReference),
+		repoIgnores:      make(map[string]ignore.Patterns),
 		repoState:        make(map[string]repoState),
 		repoStateChanged: make(map[string]time.Time),
 		protoConn:        make(map[protocol.NodeID]protocol.Connection),
@@ -289,6 +292,9 @@ func (m *Model) LocalSize(repo string) (files, deleted int, bytes int64) {
 	defer m.rmut.RUnlock()
 	if rf, ok := m.repoFiles[repo]; ok {
 		rf.WithHaveTruncated(protocol.LocalNodeID, func(f protocol.FileIntf) bool {
+			if f.IsInvalid() {
+				return true
+			}
 			fs, de, by := sizeOfFile(f)
 			files += fs
 			deleted += de
@@ -348,24 +354,32 @@ func (m *Model) Index(nodeID protocol.NodeID, repo string, fs []protocol.FileInf
 		return
 	}
 
-	for i := range fs {
-		lamport.Default.Tick(fs[i].Version)
-	}
 
 	m.rmut.RLock()
-	r, ok := m.repoFiles[repo]
+	files, ok := m.repoFiles[repo]
+	ignores, _ := m.repoIgnores[repo]
 	m.rmut.RUnlock()
-	if ok {
-		r.Replace(nodeID, fs)
-	} else {
+	if !ok {
 		l.Fatalf("Index for nonexistant repo %q", repo)
 	}
 
+	for i := 0; i < len(fs); {
+		lamport.Default.Tick(fs[i].Version)
+		if ignores.Match(fs[i].Name) {
+			fs[i] = fs[len(fs)-1]
+			fs = fs[:len(fs)-1]
+		} else {
+			i++
+		}
+	}
+
+	files.Replace(nodeID, fs)
+
 	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
 		"node":    nodeID.String(),
 		"repo":    repo,
 		"items":   len(fs),
-		"version": r.LocalVersion(nodeID),
+		"version": files.LocalVersion(nodeID),
 	})
 }
 
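Incoming index entries that match the repo's ignore patterns are now filtered out in place, swapping each rejected entry with the last element and shrinking the slice; order is not preserved, but no second slice is allocated. The same idiom in isolation (the generic helper and sample data are illustrative, not code from the commit):

package main

import "fmt"

// dropMatching removes, in place and without preserving order, every element
// for which drop returns true - the swap-with-last trick used in Index above.
func dropMatching(names []string, drop func(string) bool) []string {
	for i := 0; i < len(names); {
		if drop(names[i]) {
			names[i] = names[len(names)-1]
			names = names[:len(names)-1]
		} else {
			i++
		}
	}
	return names
}

func main() {
	names := []string{"a", "ignored.tmp", "b"}
	names = dropMatching(names, func(n string) bool { return n == "ignored.tmp" })
	fmt.Println(names) // [a b] (order may differ in general)
}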
@@ -381,24 +395,32 @@ func (m *Model) IndexUpdate(nodeID protocol.NodeID, repo string, fs []protocol.F
 		return
 	}
 
-	for i := range fs {
-		lamport.Default.Tick(fs[i].Version)
-	}
 
 	m.rmut.RLock()
-	r, ok := m.repoFiles[repo]
+	files, ok := m.repoFiles[repo]
+	ignores, _ := m.repoIgnores[repo]
 	m.rmut.RUnlock()
-	if ok {
-		r.Update(nodeID, fs)
-	} else {
+	if !ok {
 		l.Fatalf("IndexUpdate for nonexistant repo %q", repo)
 	}
 
+	for i := 0; i < len(fs); {
+		lamport.Default.Tick(fs[i].Version)
+		if ignores.Match(fs[i].Name) {
+			fs[i] = fs[len(fs)-1]
+			fs = fs[:len(fs)-1]
+		} else {
+			i++
+		}
+	}
+
+	files.Update(nodeID, fs)
+
 	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
 		"node":    nodeID.String(),
 		"repo":    repo,
 		"items":   len(fs),
-		"version": r.LocalVersion(nodeID),
+		"version": files.LocalVersion(nodeID),
 	})
 }
 
@@ -572,7 +594,7 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
 	m.rmut.RLock()
 	for _, repo := range m.nodeRepos[nodeID] {
 		fs := m.repoFiles[repo]
-		go sendIndexes(protoConn, repo, fs)
+		go sendIndexes(protoConn, repo, fs, m.repoIgnores[repo])
 	}
 	if statRef, ok := m.nodeStatRefs[nodeID]; ok {
 		statRef.WasSeen()
@@ -583,7 +605,7 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection)
 	m.pmut.Unlock()
 }
 
-func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
+func sendIndexes(conn protocol.Connection, repo string, fs *files.Set, ignores ignore.Patterns) {
 	nodeID := conn.ID()
 	name := conn.Name()
 	var err error
@@ -598,7 +620,7 @@ func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
 		}
 	}()
 
-	minLocalVer, err := sendIndexTo(true, 0, conn, repo, fs)
+	minLocalVer, err := sendIndexTo(true, 0, conn, repo, fs, ignores)
 
 	for err == nil {
 		time.Sleep(5 * time.Second)
@@ -606,11 +628,11 @@ func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
 			continue
 		}
 
-		minLocalVer, err = sendIndexTo(false, minLocalVer, conn, repo, fs)
+		minLocalVer, err = sendIndexTo(false, minLocalVer, conn, repo, fs, ignores)
 	}
 }
 
-func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, repo string, fs *files.Set) (uint64, error) {
+func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, repo string, fs *files.Set, ignores ignore.Patterns) (uint64, error) {
 	nodeID := conn.ID()
 	name := conn.Name()
 	batch := make([]protocol.FileInfo, 0, indexBatchSize)
@@ -628,6 +650,10 @@ func sendIndexTo(initial bool, minLocalVer uint64, conn protocol.Connection, rep
 			maxLocalVer = f.LocalVersion
 		}
 
+		if ignores.Match(f.Name) {
+			return true
+		}
+
 		if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
 			if initial {
 				if err = conn.Index(repo, batch); err != nil {
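On the sending side the same patterns are consulted inside the iteration callback: returning true without appending simply skips the file and keeps iterating, so ignored entries never make it into an outgoing index batch. A toy version of that callback contract (the iterator and file type are stand-ins, not the real files.Set API):

package main

import "fmt"

type file struct{ name string }

// withHave calls fn for each file and stops early if fn returns false,
// mirroring how the callback's return value is treated by the real iterators.
func withHave(files []file, fn func(file) bool) {
	for _, f := range files {
		if !fn(f) {
			return
		}
	}
}

func main() {
	ignored := map[string]bool{"b": true}
	var sent []string
	withHave([]file{{"a"}, {"b"}, {"c"}}, func(f file) bool {
		if ignored[f.name] {
			return true // skip this one, keep going
		}
		sent = append(sent, f.name)
		return true
	})
	fmt.Println(sent) // [a c]
}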
@@ -781,10 +807,13 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
 	fs, ok := m.repoFiles[repo]
 	dir := m.repoCfgs[repo].Directory
 
+	ignores, _ := ignore.Load(filepath.Join(dir, ".stignore"))
+	m.repoIgnores[repo] = ignores
+
 	w := &scanner.Walker{
 		Dir:          dir,
 		Sub:          sub,
-		IgnoreFile:   ".stignore",
+		Ignores:      ignores,
 		BlockSize:    scanner.StandardBlockSize,
 		TempNamer:    defTempNamer,
 		CurrentFiler: cFiler{m, repo},
@@ -827,15 +856,40 @@ func (m *Model) ScanRepoSub(repo, sub string) error {
 	fs.WithHaveTruncated(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
 		f := fi.(protocol.FileInfoTruncated)
 		if !strings.HasPrefix(f.Name, sub) {
+			// Return true so that we keep iterating, until we get to the part
+			// of the tree we are interested in. Then return false so we stop
+			// iterating when we've passed the end of the subtree.
 			return !seenPrefix
 		}
 
 		seenPrefix = true
 		if !protocol.IsDeleted(f.Flags) {
+			if f.IsInvalid() {
+				return true
+			}
+
 			if len(batch) == batchSize {
 				fs.Update(protocol.LocalNodeID, batch)
 				batch = batch[:0]
 			}
-			if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
+			if ignores.Match(f.Name) {
+				// File has been ignored. Set invalid bit.
+				nf := protocol.FileInfo{
+					Name:     f.Name,
+					Flags:    f.Flags | protocol.FlagInvalid,
+					Modified: f.Modified,
+					Version:  f.Version, // The file is still the same, so don't bump version
+				}
+				events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
+					"repo":     repo,
+					"name":     f.Name,
+					"modified": time.Unix(f.Modified, 0),
+					"flags":    fmt.Sprintf("0%o", f.Flags),
+					"size":     f.Size(),
+				})
+				batch = append(batch, nf)
+			} else if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
 				// File has been deleted
 				nf := protocol.FileInfo{
 					Name: f.Name,
@@ -928,6 +982,7 @@ func (m *Model) Override(repo string) {
 	fs := m.repoFiles[repo]
 	m.rmut.RUnlock()
 
+	m.setState(repo, RepoScanning)
 	batch := make([]protocol.FileInfo, 0, indexBatchSize)
 	fs.WithNeed(protocol.LocalNodeID, func(fi protocol.FileIntf) bool {
 		need := fi.(protocol.FileInfo)
@@ -953,6 +1008,7 @@ func (m *Model) Override(repo string) {
 	if len(batch) > 0 {
 		fs.Update(protocol.LocalNodeID, batch)
 	}
+	m.setState(repo, RepoIdle)
 }
 
 // Version returns the change version for the given repository. This is
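When the scanner finds that an already-indexed file is now ignored, it does not drop it; it re-announces the same version with the invalid flag set, so peers stop using this node as a source without seeing a phantom modification. Sketch of that transformation (the flag value and struct here are stand-ins, not the real protocol package):

package main

import "fmt"

const flagInvalid uint32 = 1 << 7 // illustrative only; the real protocol.FlagInvalid may differ

type fileInfo struct {
	Name     string
	Flags    uint32
	Version  uint64
	Modified int64
}

// markIgnored mirrors the scan logic above: keep name, modification time and
// version untouched, only add the invalid bit.
func markIgnored(f fileInfo) fileInfo {
	f.Flags |= flagInvalid
	return f
}

func main() {
	f := fileInfo{Name: "secret.txt", Version: 1002, Modified: 1400000000}
	fmt.Printf("%+v\n", markIgnored(f)) // same Version, Flags now include the invalid bit
}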
scanner/testdata/.stignore | 1 (vendored)
@@ -1,5 +1,4 @@
 #include excludes
-#include nonexistent-file
 
 bfile
 dir1/cfile

scanner/testdata/excludes | 2 (vendored)
@@ -1,4 +1,2 @@
 dir2/dfile
-#include excludes
 #include further-excludes
-#include loop-excludes

scanner/testdata/loop-excludes | 1 (vendored, deleted)
@@ -1 +0,0 @@
-#include excludes
scanner/walk.go | 140
@@ -5,19 +5,14 @@
 package scanner
 
 import (
-	"bufio"
 	"errors"
-	"io"
 	"os"
-	"path"
 	"path/filepath"
-	"regexp"
 	"runtime"
-	"strings"
 
 	"code.google.com/p/go.text/unicode/norm"
 
-	"github.com/syncthing/syncthing/fnmatch"
+	"github.com/syncthing/syncthing/ignore"
 	"github.com/syncthing/syncthing/lamport"
 	"github.com/syncthing/syncthing/protocol"
 )
@@ -29,8 +24,8 @@ type Walker struct {
 	Sub string
 	// BlockSize controls the size of the block used when hashing.
 	BlockSize int
-	// If IgnoreFile is not empty, it is the name used for the file that holds ignore patterns.
-	IgnoreFile string
+	// List of patterns to ignore
+	Ignores ignore.Patterns
 	// If TempNamer is not nil, it is used to ignore tempory files when walking.
 	TempNamer TempNamer
 	// If CurrentFiler is not nil, it is queried for the current file before rescanning.
@@ -57,7 +52,7 @@ type CurrentFiler interface {
 // file system. Files are blockwise hashed.
 func (w *Walker) Walk() (chan protocol.FileInfo, error) {
 	if debug {
-		l.Debugln("Walk", w.Dir, w.Sub, w.BlockSize, w.IgnoreFile)
+		l.Debugln("Walk", w.Dir, w.Sub, w.BlockSize, w.Ignores)
 	}
 
 	err := checkDir(w.Dir)
@@ -69,11 +64,8 @@ func (w *Walker) Walk() (chan protocol.FileInfo, error) {
 	hashedFiles := make(chan protocol.FileInfo)
 	newParallelHasher(w.Dir, w.BlockSize, runtime.NumCPU(), hashedFiles, files)
 
-	var ignores []*regexp.Regexp
 	go func() {
-		filepath.Walk(w.Dir, w.loadIgnoreFiles(w.Dir, &ignores))
-
-		hashFiles := w.walkAndHashFiles(files, ignores)
+		hashFiles := w.walkAndHashFiles(files)
 		filepath.Walk(filepath.Join(w.Dir, w.Sub), hashFiles)
 		close(files)
 	}()
@@ -86,113 +78,7 @@ func (w *Walker) CleanTempFiles() {
 	filepath.Walk(w.Dir, w.cleanTempFile)
 }
 
-func (w *Walker) loadIgnoreFiles(dir string, ignores *[]*regexp.Regexp) filepath.WalkFunc {
-	return func(p string, info os.FileInfo, err error) error {
-		if err != nil {
-			return nil
-		}
-
-		rn, err := filepath.Rel(dir, p)
-		if err != nil {
-			return nil
-		}
-
-		if pn, sn := filepath.Split(rn); sn == w.IgnoreFile {
-			pn := filepath.Clean(pn)
-			filesSeen := make(map[string]map[string]bool)
-			dirIgnores := loadIgnoreFile(p, pn, filesSeen)
-			*ignores = append(*ignores, dirIgnores...)
-		}
-
-		return nil
-	}
-}
-
-func loadIgnoreFile(ignFile, base string, filesSeen map[string]map[string]bool) []*regexp.Regexp {
-	fd, err := os.Open(ignFile)
-	if err != nil {
-		return nil
-	}
-	defer fd.Close()
-	return parseIgnoreFile(fd, base, ignFile, filesSeen)
-}
-
-func parseIgnoreFile(fd io.Reader, base, currentFile string, filesSeen map[string]map[string]bool) []*regexp.Regexp {
-	var exps []*regexp.Regexp
-	scanner := bufio.NewScanner(fd)
-	for scanner.Scan() {
-		line := strings.TrimSpace(scanner.Text())
-		if line == "" {
-			continue
-		}
-
-		if strings.HasPrefix(line, "/") {
-			// Pattern is rooted in the current dir only
-			exp, err := fnmatch.Convert(path.Join(base, line[1:]), fnmatch.FNM_PATHNAME)
-			if err != nil {
-				l.Warnf("Invalid pattern %q in ignore file", line)
-				continue
-			}
-			exps = append(exps, exp)
-		} else if strings.HasPrefix(line, "**/") {
-			// Add the pattern as is, and without **/ so it matches in current dir
-			exp, err := fnmatch.Convert(line, fnmatch.FNM_PATHNAME)
-			if err != nil {
-				l.Warnf("Invalid pattern %q in ignore file", line)
-				continue
-			}
-			exps = append(exps, exp)
-
-			exp, err = fnmatch.Convert(path.Join(base, line[3:]), fnmatch.FNM_PATHNAME)
-			if err != nil {
-				l.Warnf("Invalid pattern %q in ignore file", line)
-				continue
-			}
-			exps = append(exps, exp)
-		} else if strings.HasPrefix(line, "#include ") {
-			includeFile := filepath.Join(filepath.Dir(currentFile), strings.Replace(line, "#include ", "", 1))
-			if _, err := os.Stat(includeFile); os.IsNotExist(err) {
-				l.Infoln("Could not open ignore include file", includeFile)
-			} else {
-				seen := false
-				if seenByCurrent, ok := filesSeen[currentFile]; ok {
-					_, seen = seenByCurrent[includeFile]
-				}
-
-				if seen {
-					l.Warnf("Recursion detected while including %s from %s", includeFile, currentFile)
-				} else {
-					if filesSeen[currentFile] == nil {
-						filesSeen[currentFile] = make(map[string]bool)
-					}
-					filesSeen[currentFile][includeFile] = true
-					includes := loadIgnoreFile(includeFile, base, filesSeen)
-					exps = append(exps, includes...)
-				}
-			}
-		} else {
-			// Path name or pattern, add it so it matches files both in
-			// current directory and subdirs.
-			exp, err := fnmatch.Convert(path.Join(base, line), fnmatch.FNM_PATHNAME)
-			if err != nil {
-				l.Warnf("Invalid pattern %q in ignore file", line)
-				continue
-			}
-			exps = append(exps, exp)
-
-			exp, err = fnmatch.Convert(path.Join(base, "**", line), fnmatch.FNM_PATHNAME)
-			if err != nil {
-				l.Warnf("Invalid pattern %q in ignore file", line)
-				continue
-			}
-			exps = append(exps, exp)
-		}
-	}
-
-	return exps
-}
-
-func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo, ignores []*regexp.Regexp) filepath.WalkFunc {
+func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFunc {
 	return func(p string, info os.FileInfo, err error) error {
 		if err != nil {
 			if debug {
@@ -221,7 +107,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo, ignores []*regex
 			return nil
 		}
 
-		if sn := filepath.Base(rn); sn == w.IgnoreFile || sn == ".stversions" || w.ignoreFile(ignores, rn) {
+		if sn := filepath.Base(rn); sn == ".stignore" || sn == ".stversions" || w.Ignores.Match(rn) {
 			// An ignored file
 			if debug {
 				l.Debugln("ignored:", rn)
@@ -305,18 +191,6 @@ func (w *Walker) cleanTempFile(path string, info os.FileInfo, err error) error {
 	return nil
 }
 
-func (w *Walker) ignoreFile(patterns []*regexp.Regexp, file string) bool {
-	for _, pattern := range patterns {
-		if pattern.MatchString(file) {
-			if debug {
-				l.Debugf("%q matches %v", file, pattern)
-			}
-			return true
-		}
-	}
-	return false
-}
-
 func checkDir(dir string) error {
 	if info, err := os.Lstat(dir); err != nil {
 		return err
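The walker no longer reads ignore files itself; callers load the patterns once and pass them in through the new Ignores field. Used roughly like the updated callers do (repo path invented, error handling trimmed):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/syncthing/syncthing/ignore"
	"github.com/syncthing/syncthing/scanner"
)

func main() {
	dir := "/path/to/repo" // illustrative
	ignores, _ := ignore.Load(filepath.Join(dir, ".stignore"))

	w := scanner.Walker{
		Dir:       dir,
		BlockSize: scanner.StandardBlockSize,
		Ignores:   ignores, // replaces the old IgnoreFile: ".stignore"
	}
	fchan, err := w.Walk()
	if err != nil {
		fmt.Println(err)
		return
	}
	for f := range fchan {
		fmt.Println(f.Name)
	}
}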
@@ -13,6 +13,7 @@ import (
 	"sort"
 	"testing"
 
+	"github.com/syncthing/syncthing/ignore"
 	"github.com/syncthing/syncthing/protocol"
 )
 
@@ -30,9 +31,8 @@ var testdata = testfileList{
 	{filepath.Join("dir1", "dfile"), 5, "49ae93732fcf8d63fe1cce759664982dbd5b23161f007dba8561862adc96d063"},
 	{"dir2", 128, ""},
 	{filepath.Join("dir2", "cfile"), 4, "bf07a7fbb825fc0aae7bf4a1177b2b31fcf8a3feeaf7092761e18c859ee52a9c"},
-	{"excludes", 78, "1f5ac95d9e6fb2516629a029d788d27953c7bb2f4dc09184b660fdda0c8f2f04"},
+	{"excludes", 37, "df90b52f0c55dba7a7a940affe482571563b1ac57bd5be4d8a0291e7de928e06"},
 	{"further-excludes", 5, "7eb0a548094fa6295f7fd9200d69973e5f5ec5c04f2a86d998080ac43ecf89f1"},
-	{"loop-excludes", 18, "2db057aa82a8b8fe4b1367ccc875259ed4b8020255820d4e3d4bfe78f0dd3f2a"},
 }
 
 var correctIgnores = map[string][]string{
@@ -47,11 +47,16 @@ func init() {
 }
 
 func TestWalkSub(t *testing.T) {
+	ignores, err := ignore.Load("testdata/.stignore")
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	w := Walker{
 		Dir:       "testdata",
 		Sub:       "dir2",
 		BlockSize: 128 * 1024,
-		IgnoreFile: ".stignore",
+		Ignores:   ignores,
 	}
 	fchan, err := w.Walk()
 	var files []protocol.FileInfo
@@ -77,10 +82,16 @@ func TestWalkSub(t *testing.T) {
 }
 
 func TestWalk(t *testing.T) {
+	ignores, err := ignore.Load("testdata/.stignore")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(ignores)
+
 	w := Walker{
 		Dir:       "testdata",
 		BlockSize: 128 * 1024,
-		IgnoreFile: ".stignore",
+		Ignores:   ignores,
 	}
 
 	fchan, err := w.Walk()
@@ -102,9 +113,8 @@ func TestWalk(t *testing.T) {
 
 func TestWalkError(t *testing.T) {
 	w := Walker{
 		Dir:       "testdata-missing",
 		BlockSize: 128 * 1024,
-		IgnoreFile: ".stignore",
 	}
 	_, err := w.Walk()
 
@@ -113,9 +123,8 @@ func TestWalkError(t *testing.T) {
 	}
 
 	w = Walker{
 		Dir:       "testdata/bar",
 		BlockSize: 128 * 1024,
-		IgnoreFile: ".stignore",
 	}
 	_, err = w.Walk()
 
@@ -124,67 +133,6 @@ func TestWalkError(t *testing.T) {
 	}
 }
 
-func TestIgnore(t *testing.T) {
-	patStr := bytes.NewBufferString(`
-t2
-/t3
-sub/dir/*
-*/other/test
-**/deep
-`)
-	patterns := parseIgnoreFile(patStr, "", "", make(map[string]map[string]bool))
-
-	patStr = bytes.NewBufferString(`
-bar
-z*
-q[abc]x
-`)
-	patterns = append(patterns, parseIgnoreFile(patStr, "foo", "", make(map[string]map[string]bool))...)
-
-	patStr = bytes.NewBufferString(`
-quux
-.*
-`)
-	patterns = append(patterns, parseIgnoreFile(patStr, "foo/baz", "", make(map[string]map[string]bool))...)
-
-	var tests = []struct {
-		f string
-		r bool
-	}{
-		{filepath.Join("foo", "bar"), true},
-		{filepath.Join("t3"), true},
-		{filepath.Join("foofoo"), false},
-		{filepath.Join("foo", "quux"), false},
-		{filepath.Join("foo", "zuux"), true},
-		{filepath.Join("foo", "qzuux"), false},
-		{filepath.Join("foo", "baz", "t1"), false},
-		{filepath.Join("foo", "baz", "t2"), true},
-		{filepath.Join("foo", "baz", "t3"), false},
-		{filepath.Join("foo", "baz", "bar"), true},
-		{filepath.Join("foo", "baz", "quuxa"), false},
-		{filepath.Join("foo", "baz", "aquux"), false},
-		{filepath.Join("foo", "baz", ".quux"), true},
-		{filepath.Join("foo", "baz", "zquux"), true},
-		{filepath.Join("foo", "baz", "quux"), true},
-		{filepath.Join("foo", "bazz", "quux"), false},
-		{filepath.Join("sub", "dir", "hej"), true},
-		{filepath.Join("deeper", "sub", "dir", "hej"), true},
-		{filepath.Join("other", "test"), false},
-		{filepath.Join("sub", "other", "test"), true},
-		{filepath.Join("deeper", "sub", "other", "test"), true},
-		{filepath.Join("deep"), true},
-		{filepath.Join("deeper", "deep"), true},
-		{filepath.Join("deeper", "deeper", "deep"), true},
-	}
-
-	w := Walker{}
-	for i, tc := range tests {
-		if r := w.ignoreFile(patterns, tc.f); r != tc.r {
-			t.Errorf("Incorrect ignoreFile() #%d (%s); E: %v, A: %v", i, tc.f, tc.r, r)
-		}
-	}
-}
-
 type fileList []protocol.FileInfo
 
 func (f fileList) Len() int {