// Copyright (C) 2016 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package watchaggregator

import (
	"context"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
)

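// TestMain lowers the aggregation limits so that they can be exceeded with
// few events in the tests below, and raises them again before exiting.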
func TestMain(m *testing.M) {
	maxFiles = 32
	maxFilesPerDir = 8

	ret := m.Run()

	maxFiles = 512
	maxFilesPerDir = 128

	os.Exit(ret)
}

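// Timing parameters for the tests: the folder's watcher delay (in seconds),
// the notify timeout set on the aggregator, and the maximum time allowed
// between parts of a single expected batch.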
const (
	testNotifyDelayS   = 1
	testNotifyTimeout  = 2 * time.Second
	timeoutWithinBatch = time.Second
)

var (
	folderRoot       = filepath.Clean("/home/someuser/syncthing")
	defaultFolderCfg = config.FolderConfiguration{
		FilesystemType:  fs.FilesystemTypeBasic,
		Path:            folderRoot,
		FSWatcherDelayS: testNotifyDelayS,
	}
	defaultCfg = config.Wrap("", config.Configuration{
		Folders: []config.FolderConfiguration{defaultFolderCfg},
	})
)

// expectedBatch represents the paths expected from aggregation, possibly
// split over several consecutive parts (e.g. for different event types)
// that should be received back to back, within the given interval in
// milliseconds after the start of the test.
type expectedBatch struct {
	paths    [][]string
	afterMs  int
	beforeMs int
}

// TestAggregate checks that more than maxFilesPerDir events in a single dir
// are aggregated to the parent dir, and that the same happens at the folder
// root when maxFiles is exceeded.
func TestAggregate(t *testing.T) {
	inProgress := make(map[string]struct{})

	folderCfg := defaultFolderCfg.Copy()
	folderCfg.ID = "Aggregate"
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	a := newAggregator(folderCfg, ctx)

	// checks whether maxFilesPerDir events in one dir are kept as is
	for i := 0; i < maxFilesPerDir; i++ {
		a.newEvent(fs.Event{
			Name: filepath.Join("parent", strconv.Itoa(i)),
			Type: fs.NonRemove,
		}, inProgress)
	}
	if l := len(getEventPaths(a.root, ".", a)); l != maxFilesPerDir {
		t.Errorf("Unexpected number of events stored, got %v, expected %v", l, maxFilesPerDir)
	}

	// checks whether maxFilesPerDir+1 events in one dir are aggregated to parent dir
	a.newEvent(fs.Event{
		Name: filepath.Join("parent", "new"),
		Type: fs.NonRemove,
	}, inProgress)
	compareBatchToExpectedDirect(t, getEventPaths(a.root, ".", a), []string{"parent"})

	// checks that adding an event below "parent" does not change anything
	a.newEvent(fs.Event{
		Name: filepath.Join("parent", "extra"),
		Type: fs.NonRemove,
	}, inProgress)
	compareBatchToExpectedDirect(t, getEventPaths(a.root, ".", a), []string{"parent"})

	// again test aggregation in "parent", but with an event in a subdir
	a = newAggregator(folderCfg, ctx)
	for i := 0; i < maxFilesPerDir; i++ {
		a.newEvent(fs.Event{
			Name: filepath.Join("parent", strconv.Itoa(i)),
			Type: fs.NonRemove,
		}, inProgress)
	}
	a.newEvent(fs.Event{
		Name: filepath.Join("parent", "sub", "new"),
		Type: fs.NonRemove,
	}, inProgress)
	compareBatchToExpectedDirect(t, getEventPaths(a.root, ".", a), []string{"parent"})

	// test aggregation in root
	a = newAggregator(folderCfg, ctx)
	for i := 0; i < maxFiles; i++ {
		a.newEvent(fs.Event{
			Name: strconv.Itoa(i),
			Type: fs.NonRemove,
		}, inProgress)
	}
	if len(getEventPaths(a.root, ".", a)) != maxFiles {
		t.Errorf("Unexpected number of events stored in root")
	}
	a.newEvent(fs.Event{
		Name: filepath.Join("parent", "sub", "new"),
		Type: fs.NonRemove,
	}, inProgress)
	compareBatchToExpectedDirect(t, getEventPaths(a.root, ".", a), []string{"."})

	// checks that adding an event when "." is already stored is a noop
	a.newEvent(fs.Event{
		Name: "anythingelse",
		Type: fs.NonRemove,
	}, inProgress)
	compareBatchToExpectedDirect(t, getEventPaths(a.root, ".", a), []string{"."})

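	// checks that events spread over many dirs are aggregated to the root
	// when their total exceeds maxFiles, even though no single dir exceeds
	// maxFilesPerDir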
	a = newAggregator(folderCfg, ctx)
	filesPerDir := maxFilesPerDir / 2
	dirs := make([]string, maxFiles/filesPerDir+1)
	for i := 0; i < maxFiles/filesPerDir+1; i++ {
		dirs[i] = "dir" + strconv.Itoa(i)
	}
	for _, dir := range dirs {
		for i := 0; i < filesPerDir; i++ {
			a.newEvent(fs.Event{
				Name: filepath.Join(dir, strconv.Itoa(i)),
				Type: fs.NonRemove,
			}, inProgress)
		}
	}
	compareBatchToExpectedDirect(t, getEventPaths(a.root, ".", a), []string{"."})
}

// TestInProgress checks that ignoring files currently edited by Syncthing works
func TestInProgress(t *testing.T) {
	testCase := func(c chan<- fs.Event) {
		events.Default.Log(events.ItemStarted, map[string]string{
			"item": "inprogress",
		})
		sleepMs(100)
		c <- fs.Event{Name: "inprogress", Type: fs.NonRemove}
		sleepMs(1000)
		events.Default.Log(events.ItemFinished, map[string]interface{}{
			"item": "inprogress",
		})
		sleepMs(100)
		c <- fs.Event{Name: "notinprogress", Type: fs.NonRemove}
		sleepMs(800)
	}

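	// only the path that was not in progress when its event arrived is
	// expected to be passed on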
	expectedBatches := []expectedBatch{
		{[][]string{{"notinprogress"}}, 2000, 3500},
	}

	testScenario(t, "InProgress", testCase, expectedBatches)
}

// TestDelay checks that recurring changes to the same path are delayed
// and that different event types are separated and ordered correctly
func TestDelay(t *testing.T) {
	file := filepath.Join("parent", "file")
	delayed := "delayed"
	del := "deleted"
	delAfter := "deletedAfter"
	both := filepath.Join("parent", "sub", "both")
	testCase := func(c chan<- fs.Event) {
		sleepMs(200)
		c <- fs.Event{Name: file, Type: fs.NonRemove}
		delay := time.Duration(300) * time.Millisecond
		timer := time.NewTimer(delay)
		<-timer.C
		timer.Reset(delay)
		c <- fs.Event{Name: delayed, Type: fs.NonRemove}
		c <- fs.Event{Name: both, Type: fs.NonRemove}
		c <- fs.Event{Name: both, Type: fs.Remove}
		c <- fs.Event{Name: del, Type: fs.Remove}
		for i := 0; i < 9; i++ {
			<-timer.C
			timer.Reset(delay)
			c <- fs.Event{Name: delayed, Type: fs.NonRemove}
		}
		c <- fs.Event{Name: delAfter, Type: fs.Remove}
		<-timer.C
	}

	// batches that we expect to receive, with their time intervals in milliseconds
	expectedBatches := []expectedBatch{
		{[][]string{{file}}, 500, 2500},
		{[][]string{{delayed}, {both}, {del}}, 2500, 4500},
		{[][]string{{delayed}, {delAfter}}, 3600, 7000},
	}

	testScenario(t, "Delay", testCase, expectedBatches)
}

// TestNoDelay checks that no delay occurs if there are no non-remove events
func TestNoDelay(t *testing.T) {
	mixed := "foo"
	del := "bar"
	testCase := func(c chan<- fs.Event) {
		c <- fs.Event{Name: mixed, Type: fs.NonRemove}
		c <- fs.Event{Name: mixed, Type: fs.Remove}
		c <- fs.Event{Name: del, Type: fs.Remove}
	}

	expectedBatches := []expectedBatch{
		{[][]string{{mixed}, {del}}, 500, 2000},
	}

	testScenario(t, "NoDelay", testCase, expectedBatches)
}

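// getEventPaths recursively collects the paths of all events currently
// stored in the event tree below dir, joined onto dirPath.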
func getEventPaths(dir *eventDir, dirPath string, a *aggregator) []string {
	var paths []string
	for childName, childDir := range dir.dirs {
		for _, path := range getEventPaths(childDir, filepath.Join(dirPath, childName), a) {
			paths = append(paths, path)
		}
	}
	for name := range dir.events {
		paths = append(paths, filepath.Join(dirPath, name))
	}
	return paths
}

func sleepMs(ms int) {
	time.Sleep(time.Duration(ms) * time.Millisecond)
}

func durationMs(ms int) time.Duration {
	return time.Duration(ms) * time.Millisecond
}

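// compareBatchToExpected returns which expected paths are missing from the
// received batch and which received paths were not expected.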
func compareBatchToExpected(batch []string, expectedPaths []string) (missing []string, unexpected []string) {
	for _, expected := range expectedPaths {
		expected = filepath.Clean(expected)
		found := false
		for i, received := range batch {
			if expected == received {
				found = true
				batch = append(batch[:i], batch[i+1:]...)
				break
			}
		}
		if !found {
			missing = append(missing, expected)
		}
	}
	for _, received := range batch {
		unexpected = append(unexpected, received)
	}
	return missing, unexpected
}

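// compareBatchToExpectedDirect fails the test for every expected path that
// was not received and every received path that was not expected.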
func compareBatchToExpectedDirect(t *testing.T, batch []string, expectedPaths []string) {
	t.Helper()
	missing, unexpected := compareBatchToExpected(batch, expectedPaths)
	for _, p := range missing {
		t.Errorf("Did not receive event %s", p)
	}
	for _, p := range unexpected {
		t.Errorf("Received unexpected event %s", p)
	}
}

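// testScenario runs an aggregator for a folder named after the test, feeds
// it the events produced by testCase and verifies the batches sent to the
// watch channel against expectedBatches.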
func testScenario(t *testing.T, name string, testCase func(c chan<- fs.Event), expectedBatches []expectedBatch) {
	t.Helper()
	ctx, cancel := context.WithCancel(context.Background())
	eventChan := make(chan fs.Event)
	watchChan := make(chan []string)

	folderCfg := defaultFolderCfg.Copy()
	folderCfg.ID = name
	a := newAggregator(folderCfg, ctx)
	a.notifyTimeout = testNotifyTimeout

	startTime := time.Now()
	go a.mainLoop(eventChan, watchChan, defaultCfg)

	sleepMs(20)

	go testCase(eventChan)

	testAggregatorOutput(t, watchChan, expectedBatches, startTime)

	cancel()
}

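// testAggregatorOutput receives batches from fsWatchChan and checks their
// contents and, except on darwin, their timing against expectedBatches.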
func testAggregatorOutput(t *testing.T, fsWatchChan <-chan []string, expectedBatches []expectedBatch, startTime time.Time) {
	t.Helper()
	var received []string
	var elapsedTime time.Duration
	var batchIndex, innerIndex int
	timeout := time.NewTimer(10 * time.Second)
	for {
		select {
		case <-timeout.C:
			t.Errorf("Timeout: Received only %d batches (%d expected)", batchIndex, len(expectedBatches))
			return
		case received = <-fsWatchChan:
		}

		if batchIndex >= len(expectedBatches) {
			t.Errorf("Received batch %d, expected only %d", batchIndex+1, len(expectedBatches))
			continue
		}

		if runtime.GOOS != "darwin" {
			now := time.Since(startTime)
			if innerIndex == 0 {
				switch {
				case now < durationMs(expectedBatches[batchIndex].afterMs):
					t.Errorf("Received batch %d after %v (too soon)", batchIndex+1, now)

				case now > durationMs(expectedBatches[batchIndex].beforeMs):
					t.Errorf("Received batch %d after %v (too late)", batchIndex+1, now)
				}
			} else if innerTime := now - elapsedTime; innerTime > timeoutWithinBatch {
				t.Errorf("Received part %d of batch %d after %v (too late)", innerIndex+1, batchIndex+1, innerTime)
			}
			elapsedTime = now
		}

		expected := expectedBatches[batchIndex].paths[innerIndex]

		if len(received) != len(expected) {
			t.Errorf("Received %v events instead of %v for batch %v", len(received), len(expected), batchIndex+1)
		}
		missing, unexpected := compareBatchToExpected(received, expected)
		for _, p := range missing {
			t.Errorf("Did not receive event %s in batch %d (%d)", p, batchIndex+1, innerIndex+1)
		}
		for _, p := range unexpected {
			t.Errorf("Received unexpected event %s in batch %d (%d)", p, batchIndex+1, innerIndex+1)
		}

		if innerIndex == len(expectedBatches[batchIndex].paths)-1 {
			if batchIndex == len(expectedBatches)-1 {
				// received everything we expected to
				return
			}
			innerIndex = 0
			batchIndex++
		} else {
			innerIndex++
		}
	}
}