// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"runtime"
	"runtime/pprof"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/db/backend"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	protocolmocks "github.com/syncthing/syncthing/lib/protocol/mocks"
	srand "github.com/syncthing/syncthing/lib/rand"
	"github.com/syncthing/syncthing/lib/testutils"
	"github.com/syncthing/syncthing/lib/util"
	"github.com/syncthing/syncthing/lib/versioner"
)

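// testDataExpected mirrors the files in testdata/; init below patches in the
// on-disk permissions, modification time and size so comparisons match reality.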
var testDataExpected = map[string]protocol.FileInfo{
	"foo": {
		Name:      "foo",
		Type:      protocol.FileInfoTypeFile,
		ModifiedS: 0,
		Blocks:    []protocol.BlockInfo{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
	},
	"empty": {
		Name:      "empty",
		Type:      protocol.FileInfoTypeFile,
		ModifiedS: 0,
		Blocks:    []protocol.BlockInfo{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
	},
	"bar": {
		Name:      "bar",
		Type:      protocol.FileInfoTypeFile,
		ModifiedS: 0,
		Blocks:    []protocol.BlockInfo{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
	},
}

func init() {
	// Fix expected test data to match reality
	for n, f := range testDataExpected {
		fi, _ := os.Stat("testdata/" + n)
		f.Permissions = uint32(fi.Mode())
		f.ModifiedS = fi.ModTime().Unix()
		f.Size = fi.Size()
		testDataExpected[n] = f
	}
}

func TestMain(m *testing.M) {
	tmpName, err := prepareTmpFile(defaultFs)
	if err != nil {
		panic(err)
	}

	exitCode := m.Run()

	defaultCfgWrapperCancel()
	os.Remove(defaultCfgWrapper.ConfigPath())
	defaultFs.Remove(tmpName)
	defaultFs.RemoveAll(config.DefaultMarkerName)

	os.Exit(exitCode)
}

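// prepareTmpFile copies testdata/tmpfile to a temporary name on the given
// filesystem and pushes its modification time an hour into the future.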
func prepareTmpFile(to fs.Filesystem) (string, error) {
	tmpName := fs.TempName("file")
	in, err := defaultFs.Open("tmpfile")
	if err != nil {
		return "", err
	}
	defer in.Close()
	out, err := to.Create(tmpName)
	if err != nil {
		return "", err
	}
	defer out.Close()
	if _, err = io.Copy(out, in); err != nil {
		return "", err
	}
	future := time.Now().Add(time.Hour)
	if err := os.Chtimes(filepath.Join("testdata", tmpName), future, future); err != nil {
		return "", err
	}
	return tmpName, nil
}

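// newState creates a model from the given configuration, backed by a temporary
// config wrapper, and adds a fake connection for every configured device. The
// returned CancelFunc releases the temporary wrapper.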
func newState(t testing.TB, cfg config.Configuration) (*testModel, context.CancelFunc) {
	wcfg, cancel := createTmpWrapper(cfg)

	m := setupModel(t, wcfg)

	for _, dev := range cfg.Devices {
		m.AddConnection(newFakeConnection(dev.DeviceID, m), protocol.Hello{})
	}

	return m, cancel
}

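// createClusterConfig returns a ClusterConfig announcing one folder per given
// ID, each shared between the local device and the remote. Typical use in the
// tests below: m.ClusterConfig(device1, createClusterConfig(device1, id)).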
func createClusterConfig(remote protocol.DeviceID, ids ...string) protocol.ClusterConfig {
	cc := protocol.ClusterConfig{
		Folders: make([]protocol.Folder, len(ids)),
	}
	for i, id := range ids {
		cc.Folders[i] = protocol.Folder{
			ID:    id,
			Label: id,
		}
	}
	return addFolderDevicesToClusterConfig(cc, remote)
}

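// addFolderDevicesToClusterConfig lists the local device and the remote as the
// members of every folder in the given cluster config.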
func addFolderDevicesToClusterConfig(cc protocol.ClusterConfig, remote protocol.DeviceID) protocol.ClusterConfig {
	for i := range cc.Folders {
		cc.Folders[i].Devices = []protocol.Device{
			{ID: myID},
			{ID: remote},
		}
	}
	return cc
}

func TestRequest(t *testing.T) {
	m := setupModel(t, defaultCfgWrapper)
	defer cleanupModel(m)

	// Existing, shared file
	res, err := m.Request(device1, "default", "foo", 0, 6, 0, nil, 0, false)
	if err != nil {
		t.Fatal(err)
	}
	bs := res.Data()
	if !bytes.Equal(bs, []byte("foobar")) {
		t.Errorf("Incorrect data from request: %q", string(bs))
	}

	// Existing, nonshared file
	_, err = m.Request(device2, "default", "foo", 0, 6, 0, nil, 0, false)
	if err == nil {
		t.Error("Unexpected nil error on insecure file read")
	}

	// Nonexistent file
	_, err = m.Request(device1, "default", "nonexistent", 0, 6, 0, nil, 0, false)
	if err == nil {
		t.Error("Unexpected nil error on insecure file read")
	}

	// Shared folder, but disallowed file name
	_, err = m.Request(device1, "default", "../walk.go", 0, 6, 0, nil, 0, false)
	if err == nil {
		t.Error("Unexpected nil error on insecure file read")
	}

	// Negative offset
	_, err = m.Request(device1, "default", "foo", 0, -4, 0, nil, 0, false)
	if err == nil {
		t.Error("Unexpected nil error on insecure file read")
	}

	// Larger block than available
	_, err = m.Request(device1, "default", "foo", 0, 42, 0, []byte("hash necessary but not checked"), 0, false)
	if err == nil {
		t.Error("Unexpected nil error on read past end of file")
	}
	_, err = m.Request(device1, "default", "foo", 0, 42, 0, nil, 0, false)
	if err != nil {
		t.Error("Unexpected error when large read should be permitted")
	}
}

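// genFiles returns n synthetic FileInfos with one dummy block each and
// sequence numbers 1..n.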
func genFiles(n int) []protocol.FileInfo {
	files := make([]protocol.FileInfo, n)
	t := time.Now().Unix()
	for i := 0; i < n; i++ {
		files[i] = protocol.FileInfo{
			Name:      fmt.Sprintf("file%d", i),
			ModifiedS: t,
			Sequence:  int64(i + 1),
			Blocks:    []protocol.BlockInfo{{Offset: 0, Size: 100, Hash: []byte("some hash bytes")}},
			Version:   protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}},
		}
	}

	return files
}

func BenchmarkIndex_10000(b *testing.B) {
	benchmarkIndex(b, 10000)
}

func BenchmarkIndex_100(b *testing.B) {
	benchmarkIndex(b, 100)
}

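// benchmarkIndex measures handling of a full index of nfiles files sent
// repeatedly by the same device.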
func benchmarkIndex(b *testing.B, nfiles int) {
	m, _, fcfg, wcfgCancel := setupModelWithConnection(b)
	defer wcfgCancel()
	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())

	files := genFiles(nfiles)
	must(b, m.Index(device1, fcfg.ID, files))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		must(b, m.Index(device1, fcfg.ID, files))
	}
	b.ReportAllocs()
}

func BenchmarkIndexUpdate_10000_10000(b *testing.B) {
	benchmarkIndexUpdate(b, 10000, 10000)
}

func BenchmarkIndexUpdate_10000_100(b *testing.B) {
	benchmarkIndexUpdate(b, 10000, 100)
}

func BenchmarkIndexUpdate_10000_1(b *testing.B) {
	benchmarkIndexUpdate(b, 10000, 1)
}

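// benchmarkIndexUpdate sends an initial index of nfiles files, then measures
// repeated index updates of nufiles files.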
func benchmarkIndexUpdate(b *testing.B, nfiles, nufiles int) {
	m, _, fcfg, wcfgCancel := setupModelWithConnection(b)
	defer wcfgCancel()
	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())

	files := genFiles(nfiles)
	ufiles := genFiles(nufiles)

	must(b, m.Index(device1, fcfg.ID, files))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		must(b, m.IndexUpdate(device1, fcfg.ID, ufiles))
	}
	b.ReportAllocs()
}

func BenchmarkRequestOut(b *testing.B) {
	m := setupModel(b, defaultCfgWrapper)
	defer cleanupModel(m)

	const n = 1000
	files := genFiles(n)

	fc := newFakeConnection(device1, m)
	for _, f := range files {
		fc.addFile(f.Name, 0644, protocol.FileInfoTypeFile, []byte("some data to return"))
	}
	m.AddConnection(fc, protocol.Hello{})
	must(b, m.Index(device1, "default", files))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		data, err := m.requestGlobal(context.Background(), device1, "default", files[i%n].Name, 0, 0, 32, nil, 0, false)
		if err != nil {
			b.Error(err)
		}
		if data == nil {
			b.Error("nil data")
		}
	}
}

func BenchmarkRequestInSingleFile(b *testing.B) {
	m := setupModel(b, defaultCfgWrapper)
	defer cleanupModel(m)

	buf := make([]byte, 128<<10)
	rand.Read(buf)
	mustRemove(b, defaultFs.RemoveAll("request"))
	defer func() { mustRemove(b, defaultFs.RemoveAll("request")) }()
	must(b, defaultFs.MkdirAll("request/for/a/file/in/a/couple/of/dirs", 0755))
	writeFile(b, defaultFs, "request/for/a/file/in/a/couple/of/dirs/128k", buf)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if _, err := m.Request(device1, "default", "request/for/a/file/in/a/couple/of/dirs/128k", 0, 128<<10, 0, nil, 0, false); err != nil {
			b.Error(err)
		}
	}

	b.SetBytes(128 << 10)
}

func TestDeviceRename(t *testing.T) {
	hello := protocol.Hello{
		ClientName:    "syncthing",
		ClientVersion: "v0.9.4",
	}

	rawCfg := config.New(device1)
	rawCfg.Devices = []config.DeviceConfiguration{
		{
			DeviceID: device1,
		},
	}
	cfg, cfgCancel := createTmpWrapper(rawCfg)
	defer cfgCancel()

	m := newModel(t, cfg, myID, "syncthing", "dev", nil)

	if cfg.Devices()[device1].Name != "" {
		t.Errorf("Device already has a name")
	}

	conn := newFakeConnection(device1, m)

	m.AddConnection(conn, hello)

	m.ServeBackground()
	defer cleanupModel(m)

	if cfg.Devices()[device1].Name != "" {
		t.Errorf("Device already has a name")
	}

	m.Closed(conn.ID(), protocol.ErrTimeout)
	hello.DeviceName = "tester"
	m.AddConnection(conn, hello)

	if cfg.Devices()[device1].Name != "tester" {
		t.Errorf("Device did not get a name")
	}

	m.Closed(conn.ID(), protocol.ErrTimeout)
	hello.DeviceName = "tester2"
	m.AddConnection(conn, hello)

	if cfg.Devices()[device1].Name != "tester" {
		t.Errorf("Device name got overwritten")
	}

	must(t, cfg.Save())
	cfgw, _, err := config.Load(cfg.ConfigPath(), myID, events.NoopLogger)
	if err != nil {
		t.Error(err)
		return
	}
	if cfgw.Devices()[device1].Name != "tester" {
		t.Errorf("Device name not saved in config")
	}

	m.Closed(conn.ID(), protocol.ErrTimeout)

	waiter, err := cfg.Modify(func(cfg *config.Configuration) {
		cfg.Options.OverwriteRemoteDevNames = true
	})
	must(t, err)
	waiter.Wait()

	hello.DeviceName = "tester2"
	m.AddConnection(conn, hello)

	if cfg.Devices()[device1].Name != "tester2" {
		t.Errorf("Device name not overwritten")
	}
}

func TestClusterConfig(t *testing.T) {
	cfg := config.New(device1)
	cfg.Devices = []config.DeviceConfiguration{
		{
			DeviceID:   device1,
			Introducer: true,
		},
		{
			DeviceID: device2,
		},
	}
	cfg.Folders = []config.FolderConfiguration{
		{
			ID:   "folder1",
			Path: "testdata1",
			Devices: []config.FolderDeviceConfiguration{
				{DeviceID: device1},
				{DeviceID: device2},
			},
		},
		{
			ID:     "folder2",
			Path:   "testdata2",
			Paused: true, // should still be included
			Devices: []config.FolderDeviceConfiguration{
				{DeviceID: device1},
				{DeviceID: device2},
			},
		},
		{
			ID:   "folder3",
			Path: "testdata3",
			Devices: []config.FolderDeviceConfiguration{
				{DeviceID: device1},
				// should not be included, does not include device2
			},
		},
	}

	wrapper, cancel := createTmpWrapper(cfg)
	defer cancel()
	m := newModel(t, wrapper, myID, "syncthing", "dev", nil)
	m.ServeBackground()
	defer cleanupModel(m)

	cm, _ := m.generateClusterConfig(device2)

	if l := len(cm.Folders); l != 2 {
		t.Fatalf("Incorrect number of folders %d != 2", l)
	}

	r := cm.Folders[0]
	if r.ID != "folder1" {
		t.Errorf("Incorrect folder %q != folder1", r.ID)
	}
	if l := len(r.Devices); l != 2 {
		t.Errorf("Incorrect number of devices %d != 2", l)
	}
	if id := r.Devices[0].ID; id != device1 {
		t.Errorf("Incorrect device ID %s != %s", id, device1)
	}
	if !r.Devices[0].Introducer {
		t.Error("Device1 should be flagged as Introducer")
	}
	if id := r.Devices[1].ID; id != device2 {
		t.Errorf("Incorrect device ID %s != %s", id, device2)
	}
	if r.Devices[1].Introducer {
		t.Error("Device2 should not be flagged as Introducer")
	}

	r = cm.Folders[1]
	if r.ID != "folder2" {
		t.Errorf("Incorrect folder %q != folder2", r.ID)
	}
	if l := len(r.Devices); l != 2 {
		t.Errorf("Incorrect number of devices %d != 2", l)
	}
	if id := r.Devices[0].ID; id != device1 {
		t.Errorf("Incorrect device ID %s != %s", id, device1)
	}
	if !r.Devices[0].Introducer {
		t.Error("Device1 should be flagged as Introducer")
	}
	if id := r.Devices[1].ID; id != device2 {
		t.Errorf("Incorrect device ID %s != %s", id, device2)
	}
	if r.Devices[1].Introducer {
		t.Error("Device2 should not be flagged as Introducer")
	}
}

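// TestIntroducer checks that devices and folder shares introduced by an
// introducer device are added, and removed again, as the introducer's cluster
// config changes, honouring the Introducer and SkipIntroductionRemovals flags.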
func TestIntroducer(t *testing.T) {
	var introducedByAnyone protocol.DeviceID

	// LocalDeviceID is a magic value meaning don't check introducer
	contains := func(cfg config.FolderConfiguration, id, introducedBy protocol.DeviceID) bool {
		for _, dev := range cfg.Devices {
			if dev.DeviceID.Equals(id) {
				if introducedBy.Equals(introducedByAnyone) {
					return true
				}
				return dev.IntroducedBy.Equals(introducedBy)
			}
		}
		return false
	}

	m, cancel := newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:   device1,
				Introducer: true,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
				},
			},
			{
				ID:   "folder2",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
				},
			},
		},
	})
	cc := basicClusterConfig(myID, device1, "folder1", "folder2")
	cc.Folders[0].Devices = append(cc.Folders[0].Devices, protocol.Device{
		ID:                       device2,
		Introducer:               true,
		SkipIntroductionRemovals: true,
	})
	cc.Folders[1].Devices = append(cc.Folders[1].Devices, protocol.Device{
		ID:                       device2,
		Introducer:               true,
		SkipIntroductionRemovals: true,
		EncryptionPasswordToken:  []byte("faketoken"),
	})
	m.ClusterConfig(device1, cc)

	if newDev, ok := m.cfg.Device(device2); !ok || !newDev.Introducer || !newDev.SkipIntroductionRemovals {
		t.Error("device 2 missing or wrong flags")
	}

	if !contains(m.cfg.Folders()["folder1"], device2, device1) {
		t.Error("expected folder 1 to have device2 introduced by device 1")
	}

	for _, devCfg := range m.cfg.Folders()["folder2"].Devices {
		if devCfg.DeviceID == device2 {
			t.Error("Device was added even though it's untrusted")
		}
	}

	cleanupModel(m)
	cancel()
	m, cancel = newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:   device1,
				Introducer: true,
			},
			{
				DeviceID:     device2,
				IntroducedBy: device1,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
			{
				ID:   "folder2",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
				},
			},
		},
	})
	cc = basicClusterConfig(myID, device1, "folder2")
	cc.Folders[0].Devices = append(cc.Folders[0].Devices, protocol.Device{
		ID:                       device2,
		Introducer:               true,
		SkipIntroductionRemovals: true,
	})
	m.ClusterConfig(device1, cc)

	// Should not get introducer, as it's already unset, and it's an existing device.
	if newDev, ok := m.cfg.Device(device2); !ok || newDev.Introducer || newDev.SkipIntroductionRemovals {
		t.Error("device 2 missing or changed flags")
	}

	if contains(m.cfg.Folders()["folder1"], device2, introducedByAnyone) {
		t.Error("expected device 2 to be removed from folder 1")
	}

	if !contains(m.cfg.Folders()["folder2"], device2, device1) {
		t.Error("expected device 2 to be added to folder 2")
	}

	cleanupModel(m)
	cancel()
	m, cancel = newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:   device1,
				Introducer: true,
			},
			{
				DeviceID:     device2,
				IntroducedBy: device1,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
			{
				ID:   "folder2",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
		},
	})
	m.ClusterConfig(device1, protocol.ClusterConfig{})

	if _, ok := m.cfg.Device(device2); ok {
		t.Error("device 2 should have been removed")
	}

	if contains(m.cfg.Folders()["folder1"], device2, introducedByAnyone) {
		t.Error("expected device 2 to be removed from folder 1")
	}

	if contains(m.cfg.Folders()["folder2"], device2, introducedByAnyone) {
		t.Error("expected device 2 to be removed from folder 2")
	}

	// Two cases when removals should not happen
	// 1. Introducer flag no longer set on device

	cleanupModel(m)
	cancel()
	m, cancel = newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:   device1,
				Introducer: false,
			},
			{
				DeviceID:     device2,
				IntroducedBy: device1,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
			{
				ID:   "folder2",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
		},
	})
	m.ClusterConfig(device1, protocol.ClusterConfig{})

	if _, ok := m.cfg.Device(device2); !ok {
		t.Error("device 2 should not have been removed")
	}

	if !contains(m.cfg.Folders()["folder1"], device2, device1) {
		t.Error("expected device 2 not to be removed from folder 1")
	}

	if !contains(m.cfg.Folders()["folder2"], device2, device1) {
		t.Error("expected device 2 not to be removed from folder 2")
	}

	// 2. SkipIntroductionRemovals is set

	cleanupModel(m)
	cancel()
	m, cancel = newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:                 device1,
				Introducer:               true,
				SkipIntroductionRemovals: true,
			},
			{
				DeviceID:     device2,
				IntroducedBy: device1,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
			{
				ID:   "folder2",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
				},
			},
		},
	})
	cc = basicClusterConfig(myID, device1, "folder2")
	cc.Folders[0].Devices = append(cc.Folders[0].Devices, protocol.Device{
		ID:                       device2,
		Introducer:               true,
		SkipIntroductionRemovals: true,
	})
	m.ClusterConfig(device1, cc)

	if _, ok := m.cfg.Device(device2); !ok {
		t.Error("device 2 should not have been removed")
	}

	if !contains(m.cfg.Folders()["folder1"], device2, device1) {
		t.Error("expected device 2 not to be removed from folder 1")
	}

	if !contains(m.cfg.Folders()["folder2"], device2, device1) {
		t.Error("expected device 2 not to be added to folder 2")
	}

	// Test device not being removed as it's shared without an introducer.

	cleanupModel(m)
	cancel()
	m, cancel = newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:   device1,
				Introducer: true,
			},
			{
				DeviceID:     device2,
				IntroducedBy: device1,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
			{
				ID:   "folder2",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2},
				},
			},
		},
	})
	m.ClusterConfig(device1, protocol.ClusterConfig{})

	if _, ok := m.cfg.Device(device2); !ok {
		t.Error("device 2 should not have been removed")
	}

	if contains(m.cfg.Folders()["folder1"], device2, introducedByAnyone) {
		t.Error("expected device 2 to be removed from folder 1")
	}

	if !contains(m.cfg.Folders()["folder2"], device2, introducedByAnyone) {
		t.Error("expected device 2 not to be removed from folder 2")
	}

	// Test device not being removed as it's shared by a different introducer.

	cleanupModel(m)
	cancel()
	m, cancel = newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:   device1,
				Introducer: true,
			},
			{
				DeviceID:     device2,
				IntroducedBy: device1,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: device1},
				},
			},
			{
				ID:   "folder2",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
					{DeviceID: device2, IntroducedBy: myID},
				},
			},
		},
	})
	defer cleanupModel(m)
	defer cancel()
	m.ClusterConfig(device1, protocol.ClusterConfig{})

	if _, ok := m.cfg.Device(device2); !ok {
		t.Error("device 2 should not have been removed")
	}

	if contains(m.cfg.Folders()["folder1"], device2, introducedByAnyone) {
		t.Error("expected device 2 to be removed from folder 1")
	}

	if !contains(m.cfg.Folders()["folder2"], device2, introducedByAnyone) {
		t.Error("expected device 2 not to be removed from folder 2")
	}
}

func TestIssue4897(t *testing.T) {
	m, cancel := newState(t, config.Configuration{
		Devices: []config.DeviceConfiguration{
			{
				DeviceID:   device1,
				Introducer: true,
			},
		},
		Folders: []config.FolderConfiguration{
			{
				ID:   "folder1",
				Path: "testdata",
				Devices: []config.FolderDeviceConfiguration{
					{DeviceID: device1},
				},
				Paused: true,
			},
		},
	})
	defer cleanupModel(m)
	cancel()

	cm, _ := m.generateClusterConfig(device1)
	if l := len(cm.Folders); l != 1 {
		t.Errorf("Cluster config contains %v folders, expected 1", l)
	}
}

// TestIssue5063 is about a panic when modifying the config in quick
// succession, related to auto-accepted folders. It's unclear what exactly goes
// wrong; a relevant bit seems to be here:
// PR comments: https://github.com/syncthing/syncthing/pull/5069/files#r203146546
// Issue: https://github.com/syncthing/syncthing/pull/5509
func TestIssue5063(t *testing.T) {
	m, cancel := newState(t, defaultAutoAcceptCfg)
	defer cleanupModel(m)
	defer cancel()

	m.pmut.Lock()
	for _, c := range m.conn {
		conn := c.(*fakeConnection)
		conn.CloseCalls(func(_ error) {})
		defer m.Closed(c.ID(), errStopped) // to unblock deferred m.Stop()
	}
	m.pmut.Unlock()

	wg := sync.WaitGroup{}

	addAndVerify := func(id string) {
		m.ClusterConfig(device1, createClusterConfig(device1, id))
		if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
			t.Error("expected shared", id)
		}
		wg.Done()
	}

	reps := 10
	ids := make([]string, reps)
	for i := 0; i < reps; i++ {
		wg.Add(1)
		ids[i] = srand.String(8)
		go addAndVerify(ids[i])
	}
	defer func() {
		for _, id := range ids {
			os.RemoveAll(id)
		}
	}()

	finished := make(chan struct{})
	go func() {
		wg.Wait()
		close(finished)
	}()
	select {
	case <-finished:
	case <-time.After(10 * time.Second):
		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		t.Fatal("Timed out before all devices were added")
	}
}

func TestAutoAcceptRejected(t *testing.T) {
	// Nothing happens if AutoAcceptFolders not set
	tcfg := defaultAutoAcceptCfg.Copy()
	for i := range tcfg.Devices {
		tcfg.Devices[i].AutoAcceptFolders = false
	}
	m, cancel := newState(t, tcfg)
	defer cleanupModel(m)
	defer cancel()
	id := srand.String(8)
	defer os.RemoveAll(id)
	m.ClusterConfig(device1, createClusterConfig(device1, id))

	if cfg, ok := m.cfg.Folder(id); ok && cfg.SharedWith(device1) {
		t.Error("unexpected shared", id)
	}
}

func TestAutoAcceptNewFolder(t *testing.T) {
	// New folder
	m, cancel := newState(t, defaultAutoAcceptCfg)
	defer cleanupModel(m)
	defer cancel()
	id := srand.String(8)
	defer os.RemoveAll(id)
	m.ClusterConfig(device1, createClusterConfig(device1, id))
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
		t.Error("expected shared", id)
	}
}

func TestAutoAcceptNewFolderFromTwoDevices(t *testing.T) {
	m, cancel := newState(t, defaultAutoAcceptCfg)
	defer cleanupModel(m)
	defer cancel()
	id := srand.String(8)
	defer os.RemoveAll(id)
	m.ClusterConfig(device1, createClusterConfig(device1, id))
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
		t.Error("expected shared", id)
	}
	if fcfg, ok := m.cfg.Folder(id); !ok || fcfg.SharedWith(device2) {
		t.Error("unexpected shared with device2", id)
	}
	m.ClusterConfig(device2, createClusterConfig(device2, id))
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device2) {
		t.Error("expected shared", id)
	}
}

func TestAutoAcceptNewFolderFromOnlyOneDevice(t *testing.T) {
	modifiedCfg := defaultAutoAcceptCfg.Copy()
	modifiedCfg.Devices[2].AutoAcceptFolders = false
	m, cancel := newState(t, modifiedCfg)
	id := srand.String(8)
	defer os.RemoveAll(id)
	defer cleanupModel(m)
	defer cancel()
	m.ClusterConfig(device1, createClusterConfig(device1, id))
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
		t.Error("expected shared", id)
	}
	if fcfg, ok := m.cfg.Folder(id); !ok || fcfg.SharedWith(device2) {
		t.Error("unexpected shared with device2", id)
	}
	m.ClusterConfig(device2, createClusterConfig(device2, id))
	if fcfg, ok := m.cfg.Folder(id); !ok || fcfg.SharedWith(device2) {
		t.Error("unexpected shared", id)
	}
}

func TestAutoAcceptNewFolderPremutationsNoPanic(t *testing.T) {
	if testing.Short() {
		t.Skip("short tests only")
	}

	testOs := &fatalOs{t}

	id := srand.String(8)
	label := srand.String(8)
	premutations := []protocol.Folder{
		{ID: id, Label: id},
		{ID: id, Label: label},
		{ID: label, Label: id},
		{ID: label, Label: label},
	}
	localFolders := append(premutations, protocol.Folder{})
	for _, localFolder := range localFolders {
		for _, localFolderPaused := range []bool{false, true} {
			for _, dev1folder := range premutations {
				for _, dev2folder := range premutations {
					cfg := defaultAutoAcceptCfg.Copy()
					if localFolder.Label != "" {
						fcfg := newFolderConfiguration(defaultCfgWrapper, localFolder.ID, localFolder.Label, fs.FilesystemTypeBasic, localFolder.ID)
						fcfg.Paused = localFolderPaused
						cfg.Folders = append(cfg.Folders, fcfg)
					}
					m, cancel := newState(t, cfg)
					m.ClusterConfig(device1, protocol.ClusterConfig{
						Folders: []protocol.Folder{dev1folder},
					})
					m.ClusterConfig(device2, protocol.ClusterConfig{
						Folders: []protocol.Folder{dev2folder},
					})
					cleanupModel(m)
					cancel()
					testOs.RemoveAll(id)
					testOs.RemoveAll(label)
				}
			}
		}
	}
}

func TestAutoAcceptMultipleFolders(t *testing.T) {
	// Multiple new folders
	id1 := srand.String(8)
	defer os.RemoveAll(id1)
	id2 := srand.String(8)
	defer os.RemoveAll(id2)
	m, cancel := newState(t, defaultAutoAcceptCfg)
	defer cleanupModel(m)
	defer cancel()
	m.ClusterConfig(device1, createClusterConfig(device1, id1, id2))
	if fcfg, ok := m.cfg.Folder(id1); !ok || !fcfg.SharedWith(device1) {
		t.Error("expected shared", id1)
	}
	if fcfg, ok := m.cfg.Folder(id2); !ok || !fcfg.SharedWith(device1) {
		t.Error("expected shared", id2)
	}
}

func TestAutoAcceptExistingFolder(t *testing.T) {
	// Existing folder
	id := srand.String(8)
	idOther := srand.String(8) // To check that path does not get changed.
	defer os.RemoveAll(id)
	defer os.RemoveAll(idOther)

	tcfg := defaultAutoAcceptCfg.Copy()
	tcfg.Folders = []config.FolderConfiguration{
		{
			ID:   id,
			Path: idOther, // To check that path does not get changed.
		},
	}
	m, cancel := newState(t, tcfg)
	defer cleanupModel(m)
	defer cancel()
	if fcfg, ok := m.cfg.Folder(id); !ok || fcfg.SharedWith(device1) {
		t.Error("missing folder, or shared", id)
	}
	m.ClusterConfig(device1, createClusterConfig(device1, id))

	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) || fcfg.Path != idOther {
		t.Error("missing folder, or unshared, or path changed", id)
	}
}

func TestAutoAcceptNewAndExistingFolder(t *testing.T) {
	// New and existing folder
	id1 := srand.String(8)
	defer os.RemoveAll(id1)
	id2 := srand.String(8)
	defer os.RemoveAll(id2)

	tcfg := defaultAutoAcceptCfg.Copy()
	tcfg.Folders = []config.FolderConfiguration{
		{
			ID:   id1,
			Path: id1, // from previous test case, to verify that path doesn't get changed.
		},
	}
	m, cancel := newState(t, tcfg)
	defer cleanupModel(m)
	defer cancel()
	if fcfg, ok := m.cfg.Folder(id1); !ok || fcfg.SharedWith(device1) {
		t.Error("missing folder, or shared", id1)
	}
	m.ClusterConfig(device1, createClusterConfig(device1, id1, id2))

	for i, id := range []string{id1, id2} {
		if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
			t.Error("missing folder, or unshared", i, id)
		}
	}
}

func TestAutoAcceptAlreadyShared(t *testing.T) {
	// Already shared
	id := srand.String(8)
	defer os.RemoveAll(id)
	tcfg := defaultAutoAcceptCfg.Copy()
	tcfg.Folders = []config.FolderConfiguration{
		{
			ID:   id,
			Path: id,
			Devices: []config.FolderDeviceConfiguration{
				{
					DeviceID: device1,
				},
			},
		},
	}
	m, cancel := newState(t, tcfg)
	defer cleanupModel(m)
	defer cancel()
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
		t.Error("missing folder, or not shared", id)
	}
	m.ClusterConfig(device1, createClusterConfig(device1, id))

	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
		t.Error("missing folder, or not shared", id)
	}
}

func TestAutoAcceptNameConflict(t *testing.T) {
	testOs := &fatalOs{t}

	id := srand.String(8)
	label := srand.String(8)
	testOs.MkdirAll(id, 0777)
	testOs.MkdirAll(label, 0777)
	defer os.RemoveAll(id)
	defer os.RemoveAll(label)
	m, cancel := newState(t, defaultAutoAcceptCfg)
	defer cleanupModel(m)
	defer cancel()
	m.ClusterConfig(device1, protocol.ClusterConfig{
		Folders: []protocol.Folder{
			{
				ID:    id,
				Label: label,
			},
		},
	})
	if fcfg, ok := m.cfg.Folder(id); ok && fcfg.SharedWith(device1) {
		t.Error("unexpected folder", id)
	}
}

func TestAutoAcceptPrefersLabel(t *testing.T) {
	// Prefers label, falls back to ID.
	m, cancel := newState(t, defaultAutoAcceptCfg)
	id := srand.String(8)
	label := srand.String(8)
	defer os.RemoveAll(id)
	defer os.RemoveAll(label)
	defer cleanupModel(m)
	defer cancel()
	m.ClusterConfig(device1, addFolderDevicesToClusterConfig(protocol.ClusterConfig{
		Folders: []protocol.Folder{
			{
				ID:    id,
				Label: label,
			},
		},
	}, device1))
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) || !strings.HasSuffix(fcfg.Path, label) {
		t.Error("expected shared, or wrong path", id, label, fcfg.Path)
	}
}

func TestAutoAcceptFallsBackToID(t *testing.T) {
	testOs := &fatalOs{t}

	// Prefers label, falls back to ID.
	m, cancel := newState(t, defaultAutoAcceptCfg)
	id := srand.String(8)
	label := srand.String(8)
	t.Log(id, label)
	testOs.MkdirAll(label, 0777)
	defer os.RemoveAll(label)
	defer os.RemoveAll(id)
	defer cleanupModel(m)
	defer cancel()
	m.ClusterConfig(device1, addFolderDevicesToClusterConfig(protocol.ClusterConfig{
		Folders: []protocol.Folder{
			{
				ID:    id,
				Label: label,
			},
		},
	}, device1))
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) || !strings.HasSuffix(fcfg.Path, id) {
		t.Error("expected shared, or wrong path", id, label, fcfg.Path)
	}
}

func TestAutoAcceptPausedWhenFolderConfigChanged(t *testing.T) {
	// Existing folder
	id := srand.String(8)
	idOther := srand.String(8) // To check that path does not get changed.
	defer os.RemoveAll(id)
	defer os.RemoveAll(idOther)

	tcfg := defaultAutoAcceptCfg.Copy()
	fcfg := newFolderConfiguration(defaultCfgWrapper, id, "", fs.FilesystemTypeBasic, idOther)
	fcfg.Paused = true
	// The order of devices here is wrong (cfg.clean() sorts them), which will cause the folder to restart.
	// Because of the restart, the folder gets removed from m.deviceFolder, which means that generateClusterConfig will not panic.
	// This wasn't an issue before; the test case is kept to prove that it still isn't.
	fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{
		DeviceID: device1,
	})
	tcfg.Folders = []config.FolderConfiguration{fcfg}
	m, cancel := newState(t, tcfg)
	defer cleanupModel(m)
	defer cancel()
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
		t.Error("missing folder, or not shared", id)
	}
	if _, ok := m.folderRunners[id]; ok {
		t.Fatal("folder running?")
	}

	m.ClusterConfig(device1, createClusterConfig(device1, id))
	m.generateClusterConfig(device1)

	if fcfg, ok := m.cfg.Folder(id); !ok {
		t.Error("missing folder")
	} else if fcfg.Path != idOther {
		t.Error("folder path changed")
	} else {
		for _, dev := range fcfg.DeviceIDs() {
			if dev == device1 {
				return
			}
		}
		t.Error("device missing")
	}

	if _, ok := m.folderRunners[id]; ok {
		t.Error("folder started")
	}
}

func TestAutoAcceptPausedWhenFolderConfigNotChanged(t *testing.T) {
	// Existing folder
	id := srand.String(8)
	idOther := srand.String(8) // To check that path does not get changed.
	defer os.RemoveAll(id)
	defer os.RemoveAll(idOther)

	tcfg := defaultAutoAcceptCfg.Copy()
	fcfg := newFolderConfiguration(defaultCfgWrapper, id, "", fs.FilesystemTypeBasic, idOther)
	fcfg.Paused = true
	// The new folder is exactly the same as the one constructed by handleAutoAccept, which means
	// the folder will not be restarted (even if it's paused), yet handleAutoAccept used to add the
	// folder to m.deviceFolders, which caused panics when calling generateClusterConfig, as the
	// folder did not have a file set.
	fcfg.Devices = append([]config.FolderDeviceConfiguration{
		{
			DeviceID: device1,
		},
	}, fcfg.Devices...) // Need to ensure this device order to avoid folder restart.
	tcfg.Folders = []config.FolderConfiguration{fcfg}
	m, cancel := newState(t, tcfg)
	defer cleanupModel(m)
	defer cancel()
	if fcfg, ok := m.cfg.Folder(id); !ok || !fcfg.SharedWith(device1) {
		t.Error("missing folder, or not shared", id)
	}
	if _, ok := m.folderRunners[id]; ok {
		t.Fatal("folder running?")
	}

	m.ClusterConfig(device1, createClusterConfig(device1, id))
	m.generateClusterConfig(device1)

	if fcfg, ok := m.cfg.Folder(id); !ok {
		t.Error("missing folder")
	} else if fcfg.Path != idOther {
		t.Error("folder path changed")
	} else {
		for _, dev := range fcfg.DeviceIDs() {
			if dev == device1 {
				return
			}
		}
		t.Error("device missing")
	}

	if _, ok := m.folderRunners[id]; ok {
		t.Error("folder started")
	}
}

func TestAutoAcceptEnc(t *testing.T) {
|
|
|
|
tcfg := defaultAutoAcceptCfg.Copy()
|
2021-01-15 14:43:34 +00:00
|
|
|
m, cancel := newState(t, tcfg)
|
2020-11-09 14:33:32 +00:00
|
|
|
defer cleanupModel(m)
|
2021-01-15 14:43:34 +00:00
|
|
|
defer cancel()
|
2020-11-09 14:33:32 +00:00
|
|
|
|
|
|
|
id := srand.String(8)
|
|
|
|
defer os.RemoveAll(id)
|
|
|
|
|
|
|
|
token := []byte("token")
|
|
|
|
basicCC := func() protocol.ClusterConfig {
|
|
|
|
return protocol.ClusterConfig{
|
|
|
|
Folders: []protocol.Folder{{
|
|
|
|
ID: id,
|
|
|
|
Label: id,
|
|
|
|
}}}
|
|
|
|
}
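// For reference (inferred from usage below): basicCC announces the folder with
// no device list at all, whereas createClusterConfig, used for the accepted
// offers further down, populates Folders[0].Devices (Devices[1] is indexed
// below, so at least two entries, presumably the local and the remote device).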
|
|
|
|
|
|
|
|
// Earlier tests might cause the connection to get closed, in which case
// ClusterConfig would panic; add the connection anew before each call.
|
|
|
|
clusterConfig := func(deviceID protocol.DeviceID, cm protocol.ClusterConfig) {
|
2021-03-03 07:53:50 +00:00
|
|
|
m.AddConnection(newFakeConnection(deviceID, m), protocol.Hello{})
|
2020-11-09 14:33:32 +00:00
|
|
|
m.ClusterConfig(deviceID, cm)
|
|
|
|
}
|
|
|
|
|
|
|
|
clusterConfig(device1, basicCC())
|
|
|
|
if _, ok := m.cfg.Folder(id); ok {
|
|
|
|
t.Fatal("unexpected added")
|
|
|
|
}
|
|
|
|
cc := basicCC()
|
|
|
|
cc.Folders[0].Devices = []protocol.Device{{ID: device1}}
|
|
|
|
clusterConfig(device1, cc)
|
|
|
|
if _, ok := m.cfg.Folder(id); ok {
|
|
|
|
t.Fatal("unexpected added")
|
|
|
|
}
|
|
|
|
cc = basicCC()
|
|
|
|
cc.Folders[0].Devices = []protocol.Device{{ID: myID}}
|
|
|
|
clusterConfig(device1, cc)
|
|
|
|
if _, ok := m.cfg.Folder(id); ok {
|
|
|
|
t.Fatal("unexpected added")
|
|
|
|
}
|
|
|
|
|
|
|
|
// New folder, encrypted -> add as enc
|
|
|
|
|
|
|
|
cc = createClusterConfig(device1, id)
|
|
|
|
cc.Folders[0].Devices[1].EncryptionPasswordToken = token
|
|
|
|
clusterConfig(device1, cc)
|
|
|
|
if cfg, ok := m.cfg.Folder(id); !ok {
|
|
|
|
t.Fatal("unexpected unadded")
|
|
|
|
} else {
|
|
|
|
if !cfg.SharedWith(device1) {
|
|
|
|
t.Fatal("unexpected unshared")
|
|
|
|
}
|
|
|
|
if cfg.Type != config.FolderTypeReceiveEncrypted {
|
|
|
|
t.Fatal("Folder not added as receiveEncrypted")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// New device, unencrypted on encrypted folder -> reject
|
|
|
|
|
|
|
|
clusterConfig(device2, createClusterConfig(device2, id))
|
|
|
|
if cfg, _ := m.cfg.Folder(id); cfg.SharedWith(device2) {
|
|
|
|
t.Fatal("unexpected shared")
|
|
|
|
}
|
|
|
|
|
|
|
|
// New device, encrypted on encrypted folder -> share
|
|
|
|
|
|
|
|
cc = createClusterConfig(device2, id)
|
|
|
|
cc.Folders[0].Devices[1].EncryptionPasswordToken = token
|
|
|
|
clusterConfig(device2, cc)
|
|
|
|
if cfg, _ := m.cfg.Folder(id); !cfg.SharedWith(device2) {
|
|
|
|
t.Fatal("unexpected unshared")
|
|
|
|
}
|
|
|
|
|
|
|
|
// New folder, no encrypted -> add "normal"
|
|
|
|
|
|
|
|
id = srand.String(8)
|
|
|
|
defer os.RemoveAll(id)
|
|
|
|
|
|
|
|
clusterConfig(device1, createClusterConfig(device1, id))
|
|
|
|
if cfg, ok := m.cfg.Folder(id); !ok {
|
|
|
|
t.Fatal("unexpected unadded")
|
|
|
|
} else {
|
|
|
|
if !cfg.SharedWith(device1) {
|
|
|
|
t.Fatal("unexpected unshared")
|
|
|
|
}
|
|
|
|
if cfg.Type != config.FolderTypeSendReceive {
|
|
|
|
t.Fatal("Folder not added as send-receive")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// New device, encrypted on unencrypted folder -> reject
|
|
|
|
|
|
|
|
cc = createClusterConfig(device2, id)
|
|
|
|
cc.Folders[0].Devices[1].EncryptionPasswordToken = token
|
|
|
|
clusterConfig(device2, cc)
|
|
|
|
if cfg, _ := m.cfg.Folder(id); cfg.SharedWith(device2) {
|
|
|
|
t.Fatal("unexpected shared")
|
|
|
|
}
|
|
|
|
|
|
|
|
// New device, unencrypted on unencrypted folder -> share
|
|
|
|
|
|
|
|
clusterConfig(device2, createClusterConfig(device2, id))
|
|
|
|
if cfg, _ := m.cfg.Folder(id); !cfg.SharedWith(device2) {
|
|
|
|
t.Fatal("unexpected unshared")
|
|
|
|
}
|
|
|
|
}
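// The auto-accept cases exercised above, summarised:
//
//	offer listing neither or only one of the two devices    -> ignored
//	new folder, offer with encryption password token        -> added as receive-encrypted
//	new folder, plain offer                                  -> added as send-receive
//	receive-encrypted folder, plain offer from a new device  -> not shared
//	receive-encrypted folder, encrypted offer                -> shared
//	plain folder, encrypted offer from a new device          -> not shared
//	plain folder, plain offer                                 -> shared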
|
|
|
|
|
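// changeIgnores loads the ignore patterns of the "default" folder, checks that
// they equal expected, round-trips a modified pattern list through SetIgnores
// and LoadIgnores, and finally restores expected again.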
func changeIgnores(t *testing.T, m *testModel, expected []string) {
	arrEqual := func(a, b []string) bool {
		if len(a) != len(b) {
			return false
		}

		for i := range a {
			if a[i] != b[i] {
				return false
			}
		}
		return true
	}

	ignores, _, err := m.LoadIgnores("default")
	if err != nil {
		t.Error(err)
	}

	if !arrEqual(ignores, expected) {
		t.Errorf("Incorrect ignores: %v != %v", ignores, expected)
	}

	ignores = append(ignores, "pox")

	err = m.SetIgnores("default", ignores)
	if err != nil {
		t.Error(err)
	}

	ignores2, _, err := m.LoadIgnores("default")
	if err != nil {
		t.Error(err)
	}

	if !arrEqual(ignores, ignores2) {
		t.Errorf("Incorrect ignores: %v != %v", ignores2, ignores)
	}

	if runtime.GOOS == "darwin" {
		// see above
		time.Sleep(time.Second)
	} else {
		time.Sleep(time.Millisecond)
	}

	err = m.SetIgnores("default", expected)
	if err != nil {
		t.Error(err)
	}

	ignores, _, err = m.LoadIgnores("default")
	if err != nil {
		t.Error(err)
	}

	if !arrEqual(ignores, expected) {
		t.Errorf("Incorrect ignores: %v != %v", ignores, expected)
	}
}

2017-04-01 09:58:06 +00:00
|
|
|
func TestIgnores(t *testing.T) {
|
|
|
|
// Assure a clean start state
|
2020-07-28 09:13:15 +00:00
|
|
|
mustRemove(t, defaultFs.RemoveAll(config.DefaultMarkerName))
|
|
|
|
mustRemove(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644))
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, defaultFs, ".stignore", []byte(".*\nquux\n"))
|
2017-04-01 09:58:06 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, defaultCfgWrapper)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2017-04-01 09:58:06 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
folderIgnoresAlwaysReload(t, m, defaultFolderConfig)
|
2017-06-11 10:27:12 +00:00
|
|
|
|
2017-04-01 09:58:06 +00:00
|
|
|
// Make sure the initial scan has finished (ScanFolders is blocking)
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
expected := []string{
|
|
|
|
".*",
|
|
|
|
"quux",
|
|
|
|
}
|
|
|
|
|
|
|
|
changeIgnores(t, m, expected)
|
|
|
|
|
2021-01-12 15:25:21 +00:00
|
|
|
_, _, err := m.LoadIgnores("doesnotexist")
|
2014-09-20 22:29:33 +00:00
|
|
|
if err == nil {
|
|
|
|
t.Error("No error")
|
|
|
|
}
|
|
|
|
|
|
|
|
err = m.SetIgnores("doesnotexist", expected)
|
|
|
|
if err == nil {
|
|
|
|
t.Error("No error")
|
|
|
|
}
|
|
|
|
|
2015-12-30 21:30:47 +00:00
|
|
|
// Invalid path, marker should be missing, hence returns an error.
|
2020-05-06 06:34:54 +00:00
|
|
|
fcfg := config.FolderConfiguration{ID: "fresh", Path: "XXX"}
|
2020-08-18 07:26:33 +00:00
|
|
|
ignores := ignore.New(fcfg.Filesystem(), ignore.WithCache(m.cfg.Options().CacheIgnoredFiles))
|
2020-05-06 06:34:54 +00:00
|
|
|
m.fmut.Lock()
|
|
|
|
m.folderCfgs[fcfg.ID] = fcfg
|
|
|
|
m.folderIgnores[fcfg.ID] = ignores
|
|
|
|
m.fmut.Unlock()
|
|
|
|
|
2021-01-12 15:25:21 +00:00
|
|
|
_, _, err = m.LoadIgnores("fresh")
|
2015-12-30 21:30:47 +00:00
|
|
|
if err == nil {
|
|
|
|
t.Error("No error")
|
2014-09-20 22:29:33 +00:00
|
|
|
}
|
2017-04-01 09:58:06 +00:00
|
|
|
|
|
|
|
// Repeat tests with paused folder
|
|
|
|
pausedDefaultFolderConfig := defaultFolderConfig
|
|
|
|
pausedDefaultFolderConfig.Paused = true
|
|
|
|
|
2020-08-18 07:26:33 +00:00
|
|
|
m.restartFolder(defaultFolderConfig, pausedDefaultFolderConfig, false)
|
2017-04-01 09:58:06 +00:00
|
|
|
// Here folder initialization is not an issue as a paused folder isn't
|
|
|
|
// added to the model and thus there is no initial scan happening.
|
|
|
|
|
|
|
|
changeIgnores(t, m, expected)
|
2017-08-22 06:48:25 +00:00
|
|
|
|
|
|
|
// Make sure that a missing .stignore file is considered valid (no patterns)
|
2019-01-11 12:56:05 +00:00
|
|
|
defer func() {
|
2020-07-28 09:13:15 +00:00
|
|
|
must(t, defaultFs.Rename(".stignore.bak", ".stignore"))
|
2019-01-11 12:56:05 +00:00
|
|
|
}()
|
2020-07-28 09:13:15 +00:00
|
|
|
must(t, defaultFs.Rename(".stignore", ".stignore.bak"))
|
2017-08-22 06:48:25 +00:00
|
|
|
changeIgnores(t, m, []string{})
|
2014-09-20 22:29:33 +00:00
|
|
|
}
|
2015-01-25 23:11:23 +00:00
|
|
|
|
func TestEmptyIgnores(t *testing.T) {
	// Assure a clean start state
	mustRemove(t, defaultFs.RemoveAll(config.DefaultMarkerName))
	must(t, defaultFs.MkdirAll(config.DefaultMarkerName, 0644))

	m := setupModel(t, defaultCfgWrapper)
	defer cleanupModel(m)

	if err := m.SetIgnores("default", []string{}); err != nil {
		t.Error(err)
	}
	if _, err := os.Stat("testdata/.stignore"); err == nil {
		t.Error(".stignore was created despite being empty")
	}

	if err := m.SetIgnores("default", []string{".*", "quux"}); err != nil {
		t.Error(err)
	}
	if _, err := os.Stat("testdata/.stignore"); os.IsNotExist(err) {
		t.Error(".stignore does not exist")
	}

	if err := m.SetIgnores("default", []string{}); err != nil {
		t.Error(err)
	}
	if _, err := os.Stat("testdata/.stignore"); err == nil {
		t.Error(".stignore should have been deleted because it is empty")
	}
}

|
func waitForState(t *testing.T, sub events.Subscription, folder, expected string) {
	t.Helper()
	timeout := time.After(5 * time.Second)
	var errStr string
	for {
		select {
		case ev := <-sub.C():
			data := ev.Data.(map[string]interface{})
			if data["folder"].(string) == folder {
				if data["error"] == nil {
					errStr = ""
				} else {
					errStr = data["error"].(string)
				}
				if errStr == expected {
					return
				}
			}
		case <-timeout:
			t.Fatalf("Timed out waiting for status: %s, current status: %v", expected, errStr)
		}
	}
}
|
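// The pattern used by the recovery tests below (a sketch, not a new test):
//
//	sub := m.evLogger.Subscribe(events.StateChanged)
//	defer sub.Unsubscribe()
//	m.ServeBackground()
//	waitForState(t, sub, "default", "folder path missing") // wait for a specific error text
//	waitForState(t, sub, "default", "")                    // empty string means healthy again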
|
|
|
|
2015-03-28 14:25:42 +00:00
|
|
|
func TestROScanRecovery(t *testing.T) {
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs := &fatalOs{t}
|
|
|
|
|
2015-03-28 14:25:42 +00:00
|
|
|
fcfg := config.FolderConfiguration{
|
|
|
|
ID: "default",
|
2019-01-11 12:56:05 +00:00
|
|
|
Path: "rotestfolder",
|
2016-12-16 22:23:35 +00:00
|
|
|
Type: config.FolderTypeSendOnly,
|
2015-03-28 14:25:42 +00:00
|
|
|
RescanIntervalS: 1,
|
2017-11-05 12:18:05 +00:00
|
|
|
MarkerName: config.DefaultMarkerName,
|
2015-03-28 14:25:42 +00:00
|
|
|
}
|
2021-01-15 14:43:34 +00:00
|
|
|
cfg, cancel := createTmpWrapper(config.Configuration{
|
2015-03-28 14:25:42 +00:00
|
|
|
Folders: []config.FolderConfiguration{fcfg},
|
|
|
|
Devices: []config.DeviceConfiguration{
|
|
|
|
{
|
|
|
|
DeviceID: device1,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
})
|
2021-01-15 14:43:34 +00:00
|
|
|
defer cancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := newModel(t, cfg, myID, "syncthing", "dev", nil)
|
|
|
|
|
|
|
|
set := newFileSet(t, "default", defaultFs, m.db)
|
|
|
|
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
|
|
|
|
{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
|
|
|
|
})
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.RemoveAll(fcfg.Path)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
sub := m.evLogger.Subscribe(events.StateChanged)
|
|
|
|
defer sub.Unsubscribe()
|
2015-06-20 21:00:33 +00:00
|
|
|
m.ServeBackground()
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "folder path missing")
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.Mkdir(fcfg.Path, 0700)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-16 14:30:29 +00:00
|
|
|
waitForState(t, sub, "default", config.ErrMarkerMissing.Error())
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-02-05 18:01:05 +00:00
|
|
|
fd := testOs.Create(filepath.Join(fcfg.Path, config.DefaultMarkerName))
|
2015-03-28 14:25:42 +00:00
|
|
|
fd.Close()
|
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "")
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.Remove(filepath.Join(fcfg.Path, config.DefaultMarkerName))
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-16 14:30:29 +00:00
|
|
|
waitForState(t, sub, "default", config.ErrMarkerMissing.Error())
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.Remove(fcfg.Path)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "folder path missing")
|
2015-03-28 14:25:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestRWScanRecovery(t *testing.T) {
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs := &fatalOs{t}
|
|
|
|
|
2015-03-28 14:25:42 +00:00
|
|
|
fcfg := config.FolderConfiguration{
|
|
|
|
ID: "default",
|
2019-01-11 12:56:05 +00:00
|
|
|
Path: "rwtestfolder",
|
2016-12-16 22:23:35 +00:00
|
|
|
Type: config.FolderTypeSendReceive,
|
2015-03-28 14:25:42 +00:00
|
|
|
RescanIntervalS: 1,
|
2017-11-05 12:18:05 +00:00
|
|
|
MarkerName: config.DefaultMarkerName,
|
2015-03-28 14:25:42 +00:00
|
|
|
}
|
2021-01-15 14:43:34 +00:00
|
|
|
cfg, cancel := createTmpWrapper(config.Configuration{
|
2015-03-28 14:25:42 +00:00
|
|
|
Folders: []config.FolderConfiguration{fcfg},
|
|
|
|
Devices: []config.DeviceConfiguration{
|
|
|
|
{
|
|
|
|
DeviceID: device1,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
})
|
2021-01-15 14:43:34 +00:00
|
|
|
defer cancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := newModel(t, cfg, myID, "syncthing", "dev", nil)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.RemoveAll(fcfg.Path)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
set := newFileSet(t, "default", defaultFs, m.db)
|
|
|
|
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
|
|
|
|
{Name: "dummyfile", Version: protocol.Vector{Counters: []protocol.Counter{{ID: 42, Value: 1}}}},
|
|
|
|
})
|
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
sub := m.evLogger.Subscribe(events.StateChanged)
|
|
|
|
defer sub.Unsubscribe()
|
2015-06-20 21:00:33 +00:00
|
|
|
m.ServeBackground()
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "folder path missing")
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.Mkdir(fcfg.Path, 0700)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-16 14:30:29 +00:00
|
|
|
waitForState(t, sub, "default", config.ErrMarkerMissing.Error())
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-02-05 18:01:05 +00:00
|
|
|
fd := testOs.Create(filepath.Join(fcfg.Path, config.DefaultMarkerName))
|
2015-03-28 14:25:42 +00:00
|
|
|
fd.Close()
|
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "")
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.Remove(filepath.Join(fcfg.Path, config.DefaultMarkerName))
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-16 14:30:29 +00:00
|
|
|
waitForState(t, sub, "default", config.ErrMarkerMissing.Error())
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.Remove(fcfg.Path)
|
2015-03-28 14:25:42 +00:00
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "folder path missing")
|
2015-03-28 14:25:42 +00:00
|
|
|
}
|
|
|
|
|
2015-02-07 10:52:42 +00:00
|
|
|
func TestGlobalDirectoryTree(t *testing.T) {
|
2021-06-03 12:58:50 +00:00
|
|
|
m, _, fcfg, wCancel := setupModelWithConnection(t)
|
2021-01-15 14:43:34 +00:00
|
|
|
defer wCancel()
|
2020-10-03 18:46:17 +00:00
|
|
|
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
|
2015-02-07 10:52:42 +00:00
|
|
|
|
|
|
|
b := func(isfile bool, path ...string) protocol.FileInfo {
|
2016-07-04 10:40:29 +00:00
|
|
|
typ := protocol.FileInfoTypeDirectory
|
2015-02-07 10:52:42 +00:00
|
|
|
blocks := []protocol.BlockInfo{}
|
|
|
|
if isfile {
|
2016-07-04 10:40:29 +00:00
|
|
|
typ = protocol.FileInfoTypeFile
|
2015-02-07 10:52:42 +00:00
|
|
|
blocks = []protocol.BlockInfo{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}}
|
|
|
|
}
|
|
|
|
return protocol.FileInfo{
|
2016-08-06 13:05:59 +00:00
|
|
|
Name: filepath.Join(path...),
|
|
|
|
Type: typ,
|
|
|
|
ModifiedS: 0x666,
|
|
|
|
Blocks: blocks,
|
|
|
|
Size: 0xa,
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
}
|
2021-02-01 08:27:34 +00:00
|
|
|
f := func(name string) *TreeEntry {
|
|
|
|
return &TreeEntry{
|
|
|
|
Name: name,
|
|
|
|
ModTime: time.Unix(0x666, 0),
|
|
|
|
Size: 0xa,
|
|
|
|
Type: protocol.FileInfoTypeFile,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
d := func(name string, entries ...*TreeEntry) *TreeEntry {
|
|
|
|
return &TreeEntry{
|
|
|
|
Name: name,
|
|
|
|
ModTime: time.Unix(0x666, 0),
|
|
|
|
Size: 128,
|
|
|
|
Type: protocol.FileInfoTypeDirectory,
|
|
|
|
Children: entries,
|
|
|
|
}
|
|
|
|
}
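// Example of what these helpers build (values from the helpers above):
// d("another", f("afile")) is a directory *TreeEntry named "another" with
// ModTime unix 0x666 and Size 128, whose Children hold a file entry "afile" of
// Size 0xa; mm() below compares the JSON rendering of such trees.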
|
2015-02-07 10:52:42 +00:00
|
|
|
|
|
|
|
testdata := []protocol.FileInfo{
|
|
|
|
b(false, "another"),
|
|
|
|
b(false, "another", "directory"),
|
|
|
|
b(true, "another", "directory", "afile"),
|
|
|
|
b(false, "another", "directory", "with"),
|
|
|
|
b(false, "another", "directory", "with", "a"),
|
|
|
|
b(true, "another", "directory", "with", "a", "file"),
|
|
|
|
b(true, "another", "directory", "with", "file"),
|
|
|
|
b(true, "another", "file"),
|
|
|
|
|
|
|
|
b(false, "other"),
|
|
|
|
b(false, "other", "rand"),
|
|
|
|
b(false, "other", "random"),
|
|
|
|
b(false, "other", "random", "dir"),
|
|
|
|
b(false, "other", "random", "dirx"),
|
|
|
|
b(false, "other", "randomx"),
|
|
|
|
|
|
|
|
b(false, "some"),
|
|
|
|
b(false, "some", "directory"),
|
|
|
|
b(false, "some", "directory", "with"),
|
|
|
|
b(false, "some", "directory", "with", "a"),
|
|
|
|
b(true, "some", "directory", "with", "a", "file"),
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
b(true, "zzrootfile"),
|
|
|
|
}
|
|
|
|
expectedResult := []*TreeEntry{
|
|
|
|
d("another",
|
|
|
|
d("directory",
|
|
|
|
f("afile"),
|
|
|
|
d("with",
|
|
|
|
d("a",
|
|
|
|
f("file"),
|
|
|
|
),
|
|
|
|
f("file"),
|
|
|
|
),
|
|
|
|
),
|
|
|
|
f("file"),
|
|
|
|
),
|
|
|
|
d("other",
|
|
|
|
d("rand"),
|
|
|
|
d("random",
|
|
|
|
d("dir"),
|
|
|
|
d("dirx"),
|
|
|
|
),
|
|
|
|
d("randomx"),
|
|
|
|
),
|
|
|
|
d("some",
|
|
|
|
d("directory",
|
|
|
|
d("with",
|
|
|
|
d("a",
|
|
|
|
f("file"),
|
|
|
|
),
|
|
|
|
),
|
|
|
|
),
|
|
|
|
),
|
|
|
|
f("zzrootfile"),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
mm := func(data interface{}) string {
|
2021-02-01 08:27:34 +00:00
|
|
|
bytes, err := json.MarshalIndent(data, "", " ")
|
2015-02-07 10:52:42 +00:00
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return string(bytes)
|
|
|
|
}
|
|
|
|
|
2021-05-16 15:23:27 +00:00
|
|
|
must(t, m.Index(device1, "default", testdata))
|
2015-02-07 10:52:42 +00:00
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ := m.GlobalDirectoryTree("default", "", -1, false)
|
2015-02-07 10:52:42 +00:00
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(expectedResult) {
|
2021-02-01 08:27:34 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n============\n%s", mm(result), mm(expectedResult))
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "another", -1, false)
|
2015-02-07 10:52:42 +00:00
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
if mm(result) != mm(findByName(expectedResult, "another").Children) {
|
|
|
|
t.Errorf("Does not match:\n%s\n============\n%s", mm(result), mm(findByName(expectedResult, "another").Children))
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "", 0, false)
|
|
|
|
currentResult := []*TreeEntry{
|
|
|
|
d("another"),
|
|
|
|
d("other"),
|
|
|
|
d("some"),
|
|
|
|
f("zzrootfile"),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2021-02-01 08:27:34 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n============\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
|
|
|
result, _ = m.GlobalDirectoryTree("default", "", 1, false)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("another",
|
|
|
|
d("directory"),
|
|
|
|
f("file"),
|
|
|
|
),
|
|
|
|
d("other",
|
|
|
|
d("rand"),
|
|
|
|
d("random"),
|
|
|
|
d("randomx"),
|
|
|
|
),
|
|
|
|
d("some",
|
|
|
|
d("directory"),
|
|
|
|
),
|
|
|
|
f("zzrootfile"),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "", -1, true)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("another",
|
|
|
|
d("directory",
|
|
|
|
d("with",
|
|
|
|
d("a"),
|
|
|
|
),
|
|
|
|
),
|
|
|
|
),
|
|
|
|
d("other",
|
|
|
|
d("rand"),
|
|
|
|
d("random",
|
|
|
|
d("dir"),
|
|
|
|
d("dirx"),
|
|
|
|
),
|
|
|
|
d("randomx"),
|
|
|
|
),
|
|
|
|
d("some",
|
|
|
|
d("directory",
|
|
|
|
d("with",
|
|
|
|
d("a"),
|
|
|
|
),
|
|
|
|
),
|
|
|
|
),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "", 1, true)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("another",
|
|
|
|
d("directory"),
|
|
|
|
),
|
|
|
|
d("other",
|
|
|
|
d("rand"),
|
|
|
|
d("random"),
|
|
|
|
d("randomx"),
|
|
|
|
),
|
|
|
|
d("some",
|
|
|
|
d("directory"),
|
|
|
|
),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "another", 0, false)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("directory"),
|
|
|
|
f("file"),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "some/directory", 0, false)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("with"),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "some/directory", 1, false)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("with",
|
|
|
|
d("a"),
|
|
|
|
),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "some/directory", 2, false)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("with",
|
|
|
|
d("a",
|
|
|
|
f("file"),
|
|
|
|
),
|
|
|
|
),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "another", -1, true)
|
|
|
|
currentResult = []*TreeEntry{
|
|
|
|
d("directory",
|
|
|
|
d("with",
|
|
|
|
d("a"),
|
|
|
|
),
|
|
|
|
),
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
|
|
|
|
// No prefix matching!
|
2021-02-01 08:27:34 +00:00
|
|
|
result, _ = m.GlobalDirectoryTree("default", "som", -1, false)
|
|
|
|
currentResult = []*TreeEntry{}
|
2015-02-07 10:52:42 +00:00
|
|
|
|
2015-04-20 13:37:04 +00:00
|
|
|
if mm(result) != mm(currentResult) {
|
2015-02-07 10:52:42 +00:00
|
|
|
t.Errorf("Does not match:\n%s\n%s", mm(result), mm(currentResult))
|
|
|
|
}
|
|
|
|
}
|
func genDeepFiles(n, d int) []protocol.FileInfo {
	rand.Seed(int64(n))
	files := make([]protocol.FileInfo, n)
	t := time.Now().Unix()
	for i := 0; i < n; i++ {
		path := ""
		for i := 0; i <= d; i++ {
			path = filepath.Join(path, strconv.Itoa(rand.Int()))
		}

		sofar := ""
		for _, path := range filepath.SplitList(path) {
			sofar = filepath.Join(sofar, path)
			files[i] = protocol.FileInfo{
				Name: sofar,
			}
			i++
		}

		files[i].ModifiedS = t
		files[i].Blocks = []protocol.BlockInfo{{Offset: 0, Size: 100, Hash: []byte("some hash bytes")}}
	}

	return files
}

func BenchmarkTree_10000_50(b *testing.B) {
	benchmarkTree(b, 10000, 50)
}

func BenchmarkTree_100_50(b *testing.B) {
	benchmarkTree(b, 100, 50)
}

func BenchmarkTree_100_10(b *testing.B) {
	benchmarkTree(b, 100, 10)
}

func benchmarkTree(b *testing.B, n1, n2 int) {
	m, _, fcfg, wcfgCancel := setupModelWithConnection(b)
	defer wcfgCancel()
	defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())

	m.ScanFolder(fcfg.ID)
	files := genDeepFiles(n1, n2)

	must(b, m.Index(device1, fcfg.ID, files))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m.GlobalDirectoryTree(fcfg.ID, "", -1, false)
	}
	b.ReportAllocs()
}
|
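// Suggested invocation for just these benchmarks (the package path is an
// assumption; adjust to wherever this file lives, normally lib/model):
//
//	go test -run NONE -bench 'BenchmarkTree_' -benchmem ./lib/model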
2015-07-21 11:14:33 +00:00
|
|
|
|
2016-05-09 12:56:21 +00:00
|
|
|
func TestIssue3028(t *testing.T) {
|
|
|
|
// Create two files that we'll delete, one with a name that is a prefix of the other.
|
|
|
|
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, defaultFs, "testrm", []byte("Hello"))
|
|
|
|
writeFile(t, defaultFs, "testrm2", []byte("Hello"))
|
2020-07-28 09:13:15 +00:00
|
|
|
defer func() {
|
|
|
|
mustRemove(t, defaultFs.Remove("testrm"))
|
|
|
|
mustRemove(t, defaultFs.Remove("testrm2"))
|
|
|
|
}()
|
2016-05-09 12:56:21 +00:00
|
|
|
|
|
|
|
// Create a model and default folder
|
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, defaultCfgWrapper)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2016-05-09 12:56:21 +00:00
|
|
|
|
|
|
|
// Get a count of how many files are there now
|
|
|
|
|
2020-01-21 17:23:08 +00:00
|
|
|
locorigfiles := localSize(t, m, "default").Files
|
|
|
|
globorigfiles := globalSize(t, m, "default").Files
|
2016-05-09 12:56:21 +00:00
|
|
|
|
|
|
|
// Delete and rescan specifically these two
|
|
|
|
|
2020-07-28 09:13:15 +00:00
|
|
|
must(t, defaultFs.Remove("testrm"))
|
|
|
|
must(t, defaultFs.Remove("testrm2"))
|
2016-06-29 06:37:34 +00:00
|
|
|
m.ScanFolderSubdirs("default", []string{"testrm", "testrm2"})
|
2016-05-09 12:56:21 +00:00
|
|
|
|
|
|
|
// Verify that the number of files decreased by two and the number of
// deleted files increased by two
|
|
|
|
|
2020-01-21 17:23:08 +00:00
|
|
|
loc := localSize(t, m, "default")
|
|
|
|
glob := globalSize(t, m, "default")
|
2016-10-17 12:10:17 +00:00
|
|
|
if loc.Files != locorigfiles-2 {
|
|
|
|
t.Errorf("Incorrect local accounting; got %d current files, expected %d", loc.Files, locorigfiles-2)
|
2016-05-09 12:56:21 +00:00
|
|
|
}
|
2016-10-17 12:10:17 +00:00
|
|
|
if glob.Files != globorigfiles-2 {
|
|
|
|
t.Errorf("Incorrect global accounting; got %d current files, expected %d", glob.Files, globorigfiles-2)
|
2016-05-09 12:56:21 +00:00
|
|
|
}
|
2016-10-17 12:10:17 +00:00
|
|
|
if loc.Deleted != 2 {
|
|
|
|
t.Errorf("Incorrect local accounting; got %d deleted files, expected 2", loc.Deleted)
|
2016-05-09 12:56:21 +00:00
|
|
|
}
|
2016-10-17 12:10:17 +00:00
|
|
|
if glob.Deleted != 2 {
|
|
|
|
t.Errorf("Incorrect global accounting; got %d deleted files, expected 2", glob.Deleted)
|
2016-05-09 12:56:21 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-09 15:08:59 +00:00
|
|
|
func TestIssue4357(t *testing.T) {
|
2018-03-13 13:03:10 +00:00
|
|
|
cfg := defaultCfgWrapper.RawCopy()
|
2017-11-04 07:20:11 +00:00
|
|
|
// Create a separate wrapper not to pollute other tests.
|
2021-01-15 14:43:34 +00:00
|
|
|
wrapper, cancel := createTmpWrapper(config.Configuration{})
|
|
|
|
defer cancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := newModel(t, wrapper, myID, "syncthing", "dev", nil)
|
2017-09-09 15:08:59 +00:00
|
|
|
m.ServeBackground()
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2017-10-03 22:53:02 +00:00
|
|
|
|
|
|
|
// Force the model to wire itself and add the folders
|
2021-01-15 14:43:34 +00:00
|
|
|
replace(t, wrapper, cfg)
|
2017-09-09 15:08:59 +00:00
|
|
|
|
|
|
|
if _, ok := m.folderCfgs["default"]; !ok {
|
|
|
|
t.Error("Folder should be running")
|
|
|
|
}
|
|
|
|
|
2017-10-03 22:53:02 +00:00
|
|
|
newCfg := wrapper.RawCopy()
|
2017-09-09 15:08:59 +00:00
|
|
|
newCfg.Folders[0].Paused = true
|
2017-10-03 22:53:02 +00:00
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
replace(t, wrapper, newCfg)
|
2017-09-09 15:08:59 +00:00
|
|
|
|
|
|
|
if _, ok := m.folderCfgs["default"]; ok {
|
|
|
|
t.Error("Folder should not be running")
|
|
|
|
}
|
|
|
|
|
2017-10-03 22:53:02 +00:00
|
|
|
if _, ok := m.cfg.Folder("default"); !ok {
|
|
|
|
t.Error("should still have folder in config")
|
|
|
|
}
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
replace(t, wrapper, config.Configuration{})
|
2017-10-03 22:53:02 +00:00
|
|
|
|
|
|
|
if _, ok := m.cfg.Folder("default"); ok {
|
|
|
|
t.Error("should not have folder in config")
|
|
|
|
}
|
2017-09-09 15:08:59 +00:00
|
|
|
|
|
|
|
// Add the folder back, should be running
|
2021-01-15 14:43:34 +00:00
|
|
|
replace(t, wrapper, cfg)
|
2017-09-09 15:08:59 +00:00
|
|
|
|
|
|
|
if _, ok := m.folderCfgs["default"]; !ok {
|
|
|
|
t.Error("Folder should be running")
|
|
|
|
}
|
2017-10-03 22:53:02 +00:00
|
|
|
if _, ok := m.cfg.Folder("default"); !ok {
|
|
|
|
t.Error("should still have folder in config")
|
|
|
|
}
|
2017-09-09 15:08:59 +00:00
|
|
|
|
|
|
|
// Should not panic when removing a running folder.
|
2021-01-15 14:43:34 +00:00
|
|
|
replace(t, wrapper, config.Configuration{})
|
2017-10-03 22:53:02 +00:00
|
|
|
|
2017-09-09 15:08:59 +00:00
|
|
|
if _, ok := m.folderCfgs["default"]; ok {
|
|
|
|
t.Error("Folder should not be running")
|
|
|
|
}
|
2017-10-03 22:53:02 +00:00
|
|
|
if _, ok := m.cfg.Folder("default"); ok {
|
|
|
|
t.Error("should not have folder in config")
|
|
|
|
}
|
2017-09-09 15:08:59 +00:00
|
|
|
}
|
|
|
|
|
2016-06-26 10:17:20 +00:00
|
|
|
func TestIssue2782(t *testing.T) {
|
2017-10-24 07:58:55 +00:00
|
|
|
// CheckHealth should accept a symlinked folder when using a tilde-expanded path.
|
2016-06-26 10:17:20 +00:00
|
|
|
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("not reliable on Windows")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
home := os.Getenv("HOME")
|
|
|
|
if home == "" {
|
|
|
|
t.Skip("no home")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the test env. Needs to be based on $HOME as tilde expansion is
|
|
|
|
// part of the issue. Skip the test if any of this fails, as we are a
|
|
|
|
// bit outside of our stated domain here...
|
|
|
|
|
|
|
|
testName := ".syncthing-test." + srand.String(16)
|
|
|
|
testDir := filepath.Join(home, testName)
|
2016-08-16 10:01:58 +00:00
|
|
|
if err := os.RemoveAll(testDir); err != nil {
|
2016-06-26 10:17:20 +00:00
|
|
|
t.Skip(err)
|
|
|
|
}
|
2017-08-19 14:36:56 +00:00
|
|
|
if err := os.MkdirAll(testDir+"/syncdir", 0755); err != nil {
|
2016-06-26 10:17:20 +00:00
|
|
|
t.Skip(err)
|
|
|
|
}
|
|
|
|
if err := ioutil.WriteFile(testDir+"/syncdir/file", []byte("hello, world\n"), 0644); err != nil {
|
|
|
|
t.Skip(err)
|
|
|
|
}
|
|
|
|
if err := os.Symlink("syncdir", testDir+"/synclink"); err != nil {
|
|
|
|
t.Skip(err)
|
|
|
|
}
|
2019-02-13 18:54:04 +00:00
|
|
|
defer os.RemoveAll(testDir)
|
2016-06-26 10:17:20 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, defaultCfgWrapper)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2016-06-26 10:17:20 +00:00
|
|
|
|
|
|
|
if err := m.ScanFolder("default"); err != nil {
|
|
|
|
t.Error("scan error:", err)
|
|
|
|
}
|
|
|
|
|
2017-10-24 07:58:55 +00:00
|
|
|
m.fmut.Lock()
|
2019-02-02 11:09:07 +00:00
|
|
|
runner := m.folderRunners["default"]
|
2017-10-24 07:58:55 +00:00
|
|
|
m.fmut.Unlock()
|
2020-04-21 08:15:59 +00:00
|
|
|
if _, _, err := runner.getState(); err != nil {
|
|
|
|
t.Error("folder error:", err)
|
2016-06-26 10:17:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-07 16:21:59 +00:00
|
|
|
func TestIndexesForUnknownDevicesDropped(t *testing.T) {
|
2020-12-21 11:59:22 +00:00
|
|
|
m := newModel(t, defaultCfgWrapper, myID, "syncthing", "dev", nil)
|
2016-08-07 16:21:59 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
files := newFileSet(t, "default", defaultFs, m.db)
|
2017-11-12 20:20:34 +00:00
|
|
|
files.Drop(device1)
|
|
|
|
files.Update(device1, genFiles(1))
|
|
|
|
files.Drop(device2)
|
|
|
|
files.Update(device2, genFiles(1))
|
2016-08-07 16:21:59 +00:00
|
|
|
|
|
|
|
if len(files.ListDevices()) != 2 {
|
|
|
|
t.Error("expected two devices")
|
|
|
|
}
|
|
|
|
|
2020-08-18 07:26:33 +00:00
|
|
|
m.newFolder(defaultFolderConfig, false)
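// Starting the folder is what is expected to drop index data for devices the
// folder is not shared with (device2 here), so the file set recreated below
// should list only one remote device.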
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2016-08-07 16:21:59 +00:00
|
|
|
|
|
|
|
// Remote sequence is cached, hence the file set needs to be recreated.
|
2020-12-21 11:59:22 +00:00
|
|
|
files = newFileSet(t, "default", defaultFs, m.db)
|
2016-08-07 16:21:59 +00:00
|
|
|
|
2019-11-22 20:30:16 +00:00
|
|
|
if l := len(files.ListDevices()); l != 1 {
|
|
|
|
t.Errorf("Expected one device got %v", l)
|
2016-08-07 16:21:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestSharedWithClearedOnDisconnect(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, cancel := createTmpWrapper(defaultCfg)
|
|
|
|
defer cancel()
|
|
|
|
addDevice2(t, wcfg, wcfg.FolderList()[0])
|
|
|
|
|
2019-02-13 18:54:04 +00:00
|
|
|
defer os.Remove(wcfg.ConfigPath())
|
2016-08-07 16:21:59 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2016-08-07 16:21:59 +00:00
|
|
|
|
2021-03-03 07:53:50 +00:00
|
|
|
conn1 := newFakeConnection(device1, m)
|
2020-09-29 11:17:38 +00:00
|
|
|
m.AddConnection(conn1, protocol.Hello{})
|
2021-03-03 07:53:50 +00:00
|
|
|
conn2 := newFakeConnection(device2, m)
|
2020-09-29 11:17:38 +00:00
|
|
|
m.AddConnection(conn2, protocol.Hello{})
|
2016-08-07 16:21:59 +00:00
|
|
|
|
|
|
|
m.ClusterConfig(device1, protocol.ClusterConfig{
|
|
|
|
Folders: []protocol.Folder{
|
|
|
|
{
|
|
|
|
ID: "default",
|
|
|
|
Devices: []protocol.Device{
|
2019-02-06 08:32:03 +00:00
|
|
|
{ID: myID},
|
2016-10-29 21:56:24 +00:00
|
|
|
{ID: device1},
|
|
|
|
{ID: device2},
|
2016-08-07 16:21:59 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
m.ClusterConfig(device2, protocol.ClusterConfig{
|
|
|
|
Folders: []protocol.Folder{
|
|
|
|
{
|
|
|
|
ID: "default",
|
|
|
|
Devices: []protocol.Device{
|
2019-02-06 08:32:03 +00:00
|
|
|
{ID: myID},
|
2016-10-29 21:56:24 +00:00
|
|
|
{ID: device1},
|
|
|
|
{ID: device2},
|
2016-08-07 16:21:59 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
|
2018-06-06 21:34:11 +00:00
|
|
|
if fcfg, ok := m.cfg.Folder("default"); !ok || !fcfg.SharedWith(device1) {
|
2016-08-07 16:21:59 +00:00
|
|
|
t.Error("not shared with device1")
|
|
|
|
}
|
2018-06-06 21:34:11 +00:00
|
|
|
if fcfg, ok := m.cfg.Folder("default"); !ok || !fcfg.SharedWith(device2) {
|
2016-08-07 16:21:59 +00:00
|
|
|
t.Error("not shared with device2")
|
|
|
|
}
|
|
|
|
|
2021-06-17 11:57:44 +00:00
|
|
|
select {
|
|
|
|
case <-conn2.Closed():
|
2016-08-07 16:21:59 +00:00
|
|
|
t.Error("conn already closed")
|
2021-06-17 11:57:44 +00:00
|
|
|
default:
|
2016-08-07 16:21:59 +00:00
|
|
|
}
|
|
|
|
|
2019-02-06 08:32:03 +00:00
|
|
|
if _, err := wcfg.RemoveDevice(device2); err != nil {
|
2016-08-07 16:21:59 +00:00
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
time.Sleep(100 * time.Millisecond) // Committer notification happens in a separate routine
|
|
|
|
|
2018-06-06 21:34:11 +00:00
|
|
|
fcfg, ok := m.cfg.Folder("default")
|
|
|
|
if !ok {
|
|
|
|
t.Fatal("default folder missing")
|
|
|
|
}
|
|
|
|
if !fcfg.SharedWith(device1) {
|
2016-08-07 16:21:59 +00:00
|
|
|
t.Error("not shared with device1")
|
|
|
|
}
|
2018-06-06 21:34:11 +00:00
|
|
|
if fcfg.SharedWith(device2) {
|
2016-08-07 16:21:59 +00:00
|
|
|
t.Error("shared with device2")
|
|
|
|
}
|
2018-06-06 21:34:11 +00:00
|
|
|
for _, dev := range fcfg.Devices {
|
|
|
|
if dev.DeviceID == device2 {
|
|
|
|
t.Error("still there")
|
|
|
|
}
|
|
|
|
}
|
2016-08-07 16:21:59 +00:00
|
|
|
|
2021-06-17 11:57:44 +00:00
|
|
|
select {
|
|
|
|
case <-conn2.Closed():
|
|
|
|
default:
|
2016-08-07 16:21:59 +00:00
|
|
|
t.Error("connection not closed")
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := wcfg.Devices()[device2]; ok {
|
|
|
|
t.Error("device still in config")
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := m.conn[device2]; ok {
|
|
|
|
t.Error("conn not missing")
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := m.helloMessages[device2]; ok {
|
|
|
|
t.Error("hello not missing")
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := m.deviceDownloads[device2]; ok {
|
|
|
|
t.Error("downloads not missing")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-02 06:45:46 +00:00
|
|
|
func TestIssue3496(t *testing.T) {
|
2016-12-19 11:26:26 +00:00
|
|
|
t.Skip("This test deletes files that the other test depend on. Needs fixing.")
|
|
|
|
|
2016-09-02 06:45:46 +00:00
|
|
|
// It seems like lots of deleted files can cause negative completion
|
|
|
|
// percentages. Let's make sure that doesn't happen. Also do some general
|
|
|
|
// checks on the completion calculation stuff.
|
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, defaultCfgWrapper)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2016-09-02 06:45:46 +00:00
|
|
|
|
|
|
|
m.ScanFolder("default")
|
|
|
|
|
2021-05-16 15:23:27 +00:00
|
|
|
addFakeConn(m, device1, "default")
|
|
|
|
addFakeConn(m, device2, "default")
|
2016-09-02 06:45:46 +00:00
|
|
|
|
|
|
|
// Reach into the model and grab the current file list...
|
|
|
|
|
|
|
|
m.fmut.RLock()
|
|
|
|
fs := m.folderFiles["default"]
|
|
|
|
m.fmut.RUnlock()
|
|
|
|
var localFiles []protocol.FileInfo
|
2021-03-07 12:43:22 +00:00
|
|
|
snap := fsetSnapshot(t, fs)
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
2016-09-02 06:45:46 +00:00
|
|
|
localFiles = append(localFiles, i.(protocol.FileInfo))
|
|
|
|
return true
|
|
|
|
})
|
2020-01-21 17:23:08 +00:00
|
|
|
snap.Release()
|
2016-09-02 06:45:46 +00:00
|
|
|
|
|
|
|
// Mark all files as deleted and fake it as update from device1
|
|
|
|
|
|
|
|
for i := range localFiles {
|
|
|
|
localFiles[i].Deleted = true
|
|
|
|
localFiles[i].Version = localFiles[i].Version.Update(device1.Short())
|
|
|
|
localFiles[i].Blocks = nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Also add a small file that we're supposed to need, or the global size
|
|
|
|
// stuff will bail out early due to the entire folder being zero size.
|
|
|
|
|
|
|
|
localFiles = append(localFiles, protocol.FileInfo{
|
|
|
|
Name: "fake",
|
|
|
|
Size: 1234,
|
|
|
|
Type: protocol.FileInfoTypeFile,
|
|
|
|
Version: protocol.Vector{Counters: []protocol.Counter{{ID: device1.Short(), Value: 42}}},
|
|
|
|
})
|
|
|
|
|
2021-05-16 15:23:27 +00:00
|
|
|
must(t, m.IndexUpdate(device1, "default", localFiles))
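// Shape of the checks below: completion is derived from NeedBytes versus
// GlobalBytes, so NeedBytes must not exceed GlobalBytes and CompletionPct must
// be neither negative nor a full 100%. The deletions only add needed delete
// items; the only needed bytes come from the synthetic 1234-byte file above.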
|
2016-09-02 06:45:46 +00:00
|
|
|
|
|
|
|
// Check that the completion percentage for us makes sense
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
comp := m.testCompletion(protocol.LocalDeviceID, "default")
|
2016-09-02 06:45:46 +00:00
|
|
|
if comp.NeedBytes > comp.GlobalBytes {
|
|
|
|
t.Errorf("Need more bytes than exist, not possible: %d > %d", comp.NeedBytes, comp.GlobalBytes)
|
|
|
|
}
|
|
|
|
if comp.CompletionPct < 0 {
|
|
|
|
t.Errorf("Less than zero percent complete, not possible: %.02f%%", comp.CompletionPct)
|
|
|
|
}
|
|
|
|
if comp.NeedBytes == 0 {
|
|
|
|
t.Error("Need no bytes even though some files are deleted")
|
|
|
|
}
|
|
|
|
if comp.CompletionPct == 100 {
|
|
|
|
t.Errorf("Fully complete, not possible: %.02f%%", comp.CompletionPct)
|
|
|
|
}
|
|
|
|
t.Log(comp)
|
2016-09-02 08:45:30 +00:00
|
|
|
|
|
|
|
// Check that NeedSize does the correct thing
|
2021-06-05 15:01:23 +00:00
|
|
|
need := needSizeLocal(t, m, "default")
|
2016-10-17 12:10:17 +00:00
|
|
|
if need.Files != 1 || need.Bytes != 1234 {
|
2016-09-02 08:45:30 +00:00
|
|
|
// The one we added synthetically above
|
2016-10-17 12:10:17 +00:00
|
|
|
t.Errorf("Incorrect need size; %d, %d != 1, 1234", need.Files, need.Bytes)
|
2016-09-02 08:45:30 +00:00
|
|
|
}
|
2017-12-14 09:51:17 +00:00
|
|
|
if int(need.Deleted) != len(localFiles)-1 {
|
2016-09-02 08:45:30 +00:00
|
|
|
// The rest
|
2016-10-17 12:10:17 +00:00
|
|
|
t.Errorf("Incorrect need deletes; %d != %d", need.Deleted, len(localFiles)-1)
|
2016-09-02 08:45:30 +00:00
|
|
|
}
|
2016-09-02 06:45:46 +00:00
|
|
|
}
|
|
|
|
|
func TestIssue3804(t *testing.T) {
	m := setupModel(t, defaultCfgWrapper)
	defer cleanupModel(m)

	// Subdirs ending in slash should be accepted
	if err := m.ScanFolderSubdirs("default", []string{"baz/", "foo"}); err != nil {
		t.Error("Unexpected error:", err)
	}
}

func TestIssue3829(t *testing.T) {
	m := setupModel(t, defaultCfgWrapper)
	defer cleanupModel(m)

	// Empty subdirs should be accepted
	if err := m.ScanFolderSubdirs("default", []string{""}); err != nil {
		t.Error("Unexpected error:", err)
	}
}

2016-12-21 18:41:25 +00:00
|
|
|
func TestNoRequestsFromPausedDevices(t *testing.T) {
|
2017-01-01 12:27:18 +00:00
|
|
|
t.Skip("broken, fails randomly, #3843")
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, cancel := createTmpWrapper(defaultCfg)
|
|
|
|
defer cancel()
|
|
|
|
addDevice2(t, wcfg, wcfg.FolderList()[0])
|
2016-12-21 18:41:25 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2016-12-21 18:41:25 +00:00
|
|
|
|
|
|
|
file := testDataExpected["foo"]
|
|
|
|
files := m.folderFiles["default"]
|
|
|
|
files.Update(device1, []protocol.FileInfo{file})
|
|
|
|
files.Update(device2, []protocol.FileInfo{file})
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
avail := m.testAvailability("default", file, file.Blocks[0])
|
2016-12-21 18:41:25 +00:00
|
|
|
if len(avail) != 0 {
|
|
|
|
t.Errorf("should not be available, no connections")
|
|
|
|
}
|
|
|
|
|
2021-05-16 15:23:27 +00:00
|
|
|
addFakeConn(m, device1, "default")
|
|
|
|
addFakeConn(m, device2, "default")
|
2016-12-21 18:41:25 +00:00
|
|
|
|
|
|
|
// !!! This is not what I'd expect to happen, as we don't even know if the peer has the original index !!!
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
avail = m.testAvailability("default", file, file.Blocks[0])
|
2016-12-21 18:41:25 +00:00
|
|
|
if len(avail) != 2 {
|
|
|
|
t.Errorf("should have two available")
|
|
|
|
}
|
|
|
|
|
|
|
|
cc := protocol.ClusterConfig{
|
|
|
|
Folders: []protocol.Folder{
|
|
|
|
{
|
|
|
|
ID: "default",
|
|
|
|
Devices: []protocol.Device{
|
|
|
|
{ID: device1},
|
|
|
|
{ID: device2},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
m.ClusterConfig(device1, cc)
|
|
|
|
m.ClusterConfig(device2, cc)
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
avail = m.testAvailability("default", file, file.Blocks[0])
|
2016-12-21 18:41:25 +00:00
|
|
|
if len(avail) != 2 {
|
|
|
|
t.Errorf("should have two available")
|
|
|
|
}
|
|
|
|
|
2021-03-22 20:50:19 +00:00
|
|
|
m.Closed(device1, errDeviceUnknown)
|
|
|
|
m.Closed(device2, errDeviceUnknown)
|
2016-12-21 18:41:25 +00:00
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
avail = m.testAvailability("default", file, file.Blocks[0])
|
2016-12-21 18:41:25 +00:00
|
|
|
if len(avail) != 0 {
|
|
|
|
t.Errorf("should have no available")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that remote paused folders are not used.
|
|
|
|
|
2021-05-16 15:23:27 +00:00
|
|
|
addFakeConn(m, device1, "default")
|
|
|
|
addFakeConn(m, device2, "default")
|
2016-12-21 18:41:25 +00:00
|
|
|
|
|
|
|
m.ClusterConfig(device1, cc)
|
|
|
|
ccp := cc
|
|
|
|
ccp.Folders[0].Paused = true
|
|
|
|
m.ClusterConfig(device1, ccp)
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
avail = m.testAvailability("default", file, file.Blocks[0])
|
2016-12-21 18:41:25 +00:00
|
|
|
if len(avail) != 1 {
|
|
|
|
t.Errorf("should have one available")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-25 08:27:54 +00:00
|
|
|
// TestIssue2571 tests replacing a directory that has content with a symlink
|
|
|
|
func TestIssue2571(t *testing.T) {
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("Scanning symlinks isn't supported on windows")
|
|
|
|
}
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2019-03-08 20:29:09 +00:00
|
|
|
testFs := fcfg.Filesystem()
|
2019-05-19 12:29:07 +00:00
|
|
|
defer os.RemoveAll(testFs.URI())
|
2018-02-25 08:27:54 +00:00
|
|
|
|
|
|
|
for _, dir := range []string{"toLink", "linkTarget"} {
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, testFs.MkdirAll(dir, 0775))
|
2018-02-25 08:27:54 +00:00
|
|
|
fd, err := testFs.Create(filepath.Join(dir, "a"))
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, err)
|
2018-02-25 08:27:54 +00:00
|
|
|
fd.Close()
|
|
|
|
}
|
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, w)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2018-02-25 08:27:54 +00:00
|
|
|
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, testFs.RemoveAll("toLink"))
|
2018-02-25 08:27:54 +00:00
|
|
|
|
2020-07-28 09:13:15 +00:00
|
|
|
must(t, fs.DebugSymlinkForTestsOnly(testFs, testFs, "linkTarget", "toLink"))
|
2018-02-25 08:27:54 +00:00
|
|
|
|
|
|
|
m.ScanFolder("default")
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
if dir, ok := m.testCurrentFolderFile("default", "toLink"); !ok {
|
2018-02-25 08:27:54 +00:00
|
|
|
t.Fatalf("Dir missing in db")
|
|
|
|
} else if !dir.IsSymlink() {
|
|
|
|
t.Errorf("Dir wasn't changed to symlink")
|
|
|
|
}
|
2021-03-07 12:43:22 +00:00
|
|
|
if file, ok := m.testCurrentFolderFile("default", filepath.Join("toLink", "a")); !ok {
|
2018-02-25 08:27:54 +00:00
|
|
|
t.Fatalf("File missing in db")
|
|
|
|
} else if !file.Deleted {
|
|
|
|
t.Errorf("File below symlink has not been marked as deleted")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestIssue4573 tests that contents of an unavailable dir aren't marked deleted
|
|
|
|
func TestIssue4573(t *testing.T) {
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("Can't make the dir inaccessible on windows")
|
|
|
|
}
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2019-03-08 20:29:09 +00:00
|
|
|
testFs := fcfg.Filesystem()
|
2019-05-19 12:29:07 +00:00
|
|
|
defer os.RemoveAll(testFs.URI())
|
2019-02-12 15:04:04 +00:00
|
|
|
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, testFs.MkdirAll("inaccessible", 0755))
|
2019-02-12 15:04:04 +00:00
|
|
|
defer testFs.Chmod("inaccessible", 0777)
|
2018-02-25 08:27:54 +00:00
|
|
|
|
|
|
|
file := filepath.Join("inaccessible", "a")
|
2019-02-12 15:04:04 +00:00
|
|
|
fd, err := testFs.Create(file)
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, err)
|
2018-02-25 08:27:54 +00:00
|
|
|
fd.Close()
|
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, w)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2018-02-25 08:27:54 +00:00
|
|
|
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, testFs.Chmod("inaccessible", 0000))
|
2018-02-25 08:27:54 +00:00
|
|
|
|
|
|
|
m.ScanFolder("default")
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
if file, ok := m.testCurrentFolderFile("default", file); !ok {
|
2018-02-25 08:27:54 +00:00
|
|
|
t.Fatalf("File missing in db")
|
|
|
|
} else if file.Deleted {
|
|
|
|
t.Errorf("Inaccessible file has been marked as deleted.")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestInternalScan checks whether various fs operations are correctly represented
|
|
|
|
// in the db after scanning.
|
|
|
|
func TestInternalScan(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2019-03-08 20:29:09 +00:00
|
|
|
testFs := fcfg.Filesystem()
|
2019-05-19 12:29:07 +00:00
|
|
|
defer os.RemoveAll(testFs.URI())
|
2018-02-25 08:27:54 +00:00
|
|
|
|
|
|
|
testCases := map[string]func(protocol.FileInfo) bool{
|
|
|
|
"removeDir": func(f protocol.FileInfo) bool {
|
|
|
|
return !f.Deleted
|
|
|
|
},
|
|
|
|
"dirToFile": func(f protocol.FileInfo) bool {
|
|
|
|
return f.Deleted || f.IsDirectory()
|
|
|
|
},
|
|
|
|
}
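// Note the inversion: each cond is a *failure* predicate. The loop at the end
// reports "Incorrect db entry" when cond(f) returns true, e.g. "removeDir"
// (removed below) is wrong if its db entry is not marked deleted.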
|
|
|
|
|
|
|
|
baseDirs := []string{"dirToFile", "removeDir"}
|
|
|
|
for _, dir := range baseDirs {
|
|
|
|
sub := filepath.Join(dir, "subDir")
|
|
|
|
for _, dir := range []string{dir, sub} {
|
2019-03-09 18:45:36 +00:00
|
|
|
if err := testFs.MkdirAll(dir, 0775); err != nil {
|
2018-02-25 08:27:54 +00:00
|
|
|
t.Fatalf("%v: %v", dir, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
testCases[sub] = func(f protocol.FileInfo) bool {
|
|
|
|
return !f.Deleted
|
|
|
|
}
|
|
|
|
for _, dir := range []string{dir, sub} {
|
|
|
|
file := filepath.Join(dir, "a")
|
|
|
|
fd, err := testFs.Create(file)
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, err)
|
2018-02-25 08:27:54 +00:00
|
|
|
fd.Close()
|
|
|
|
testCases[file] = func(f protocol.FileInfo) bool {
|
|
|
|
return !f.Deleted
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, w)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2018-02-25 08:27:54 +00:00
|
|
|
|
|
|
|
for _, dir := range baseDirs {
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, testFs.RemoveAll(dir))
|
2018-02-25 08:27:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fd, err := testFs.Create("dirToFile")
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, err)
|
2018-02-25 08:27:54 +00:00
|
|
|
fd.Close()
|
|
|
|
|
|
|
|
m.ScanFolder("default")
|
|
|
|
|
|
|
|
for path, cond := range testCases {
|
2021-03-07 12:43:22 +00:00
|
|
|
if f, ok := m.testCurrentFolderFile("default", path); !ok {
|
2018-02-25 08:27:54 +00:00
|
|
|
t.Fatalf("%v missing in db", path)
|
|
|
|
} else if cond(f) {
|
|
|
|
t.Errorf("Incorrect db entry for %v", path)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-05 12:18:05 +00:00
|
|
|
func TestCustomMarkerName(t *testing.T) {
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs := &fatalOs{t}
|
|
|
|
|
2020-07-28 09:13:15 +00:00
|
|
|
fcfg := testFolderConfigTmp()
|
|
|
|
fcfg.ID = "default"
|
|
|
|
fcfg.RescanIntervalS = 1
|
|
|
|
fcfg.MarkerName = "myfile"
|
2021-01-15 14:43:34 +00:00
|
|
|
cfg, cancel := createTmpWrapper(config.Configuration{
|
2017-11-05 12:18:05 +00:00
|
|
|
Folders: []config.FolderConfiguration{fcfg},
|
|
|
|
Devices: []config.DeviceConfiguration{
|
|
|
|
{
|
|
|
|
DeviceID: device1,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
})
|
2021-01-15 14:43:34 +00:00
|
|
|
defer cancel()
|
2017-11-05 12:18:05 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.RemoveAll(fcfg.Path)
|
2017-11-05 12:18:05 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := newModel(t, cfg, myID, "syncthing", "dev", nil)
|
|
|
|
set := newFileSet(t, "default", defaultFs, m.db)
|
|
|
|
set.Update(protocol.LocalDeviceID, []protocol.FileInfo{
|
|
|
|
{Name: "dummyfile"},
|
|
|
|
})
|
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
sub := m.evLogger.Subscribe(events.StateChanged)
|
|
|
|
defer sub.Unsubscribe()
|
2017-11-05 12:18:05 +00:00
|
|
|
m.ServeBackground()
|
2020-07-28 09:13:15 +00:00
|
|
|
defer cleanupModelAndRemoveDir(m, fcfg.Path)
|
2017-11-05 12:18:05 +00:00
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "folder path missing")
|
2017-11-05 12:18:05 +00:00
|
|
|
|
2019-01-11 12:56:05 +00:00
|
|
|
testOs.Mkdir(fcfg.Path, 0700)
|
2019-02-05 18:01:05 +00:00
|
|
|
fd := testOs.Create(filepath.Join(fcfg.Path, "myfile"))
|
2017-11-05 12:18:05 +00:00
|
|
|
fd.Close()
|
|
|
|
|
2020-01-11 07:14:05 +00:00
|
|
|
waitForState(t, sub, "default", "")
|
2017-11-05 12:18:05 +00:00
|
|
|
}
|
|
|
|
|
2017-12-07 08:42:03 +00:00
|
|
|
func TestRemoveDirWithContent(t *testing.T) {
    m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
    defer wcfgCancel()
    tfs := fcfg.Filesystem()
    defer cleanupModelAndRemoveDir(m, tfs.URI())

    tfs.MkdirAll("dirwith", 0755)
    content := filepath.Join("dirwith", "content")
    fd, err := tfs.Create(content)
    must(t, err)
    fd.Close()

    must(t, m.ScanFolder(fcfg.ID))

    dir, ok := m.testCurrentFolderFile(fcfg.ID, "dirwith")
    if !ok {
        t.Fatalf("Can't get dir \"dirwith\" after initial scan")
    }
    dir.Deleted = true
    dir.Version = dir.Version.Update(device1.Short()).Update(device1.Short())

    file, ok := m.testCurrentFolderFile(fcfg.ID, content)
    if !ok {
        t.Fatalf("Can't get file \"%v\" after initial scan", content)
    }
    file.Deleted = true
    file.Version = file.Version.Update(device1.Short()).Update(device1.Short())

    must(t, m.IndexUpdate(device1, fcfg.ID, []protocol.FileInfo{dir, file}))

    // Is there something we could trigger on instead of just waiting?
    timeout := time.NewTimer(5 * time.Second)
    for {
        dir, ok := m.testCurrentFolderFile(fcfg.ID, "dirwith")
        if !ok {
            t.Fatalf("Can't get dir \"dirwith\" after index update")
        }
        file, ok := m.testCurrentFolderFile(fcfg.ID, content)
        if !ok {
            t.Fatalf("Can't get file \"%v\" after index update", content)
        }
        if dir.Deleted && file.Deleted {
            return
        }

        select {
        case <-timeout.C:
            if !dir.Deleted && !file.Deleted {
                t.Errorf("Neither the dir nor its content was deleted before timing out.")
            } else if !dir.Deleted {
                t.Errorf("The dir was not deleted before timing out.")
            } else {
                t.Errorf("The content of the dir was not deleted before timing out.")
            }
            return
        default:
            time.Sleep(100 * time.Millisecond)
        }
    }
}

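// The index update above bumps dir.Version twice with Update(device1.Short())
// so that the incoming deletion is strictly newer than the locally scanned
// entry and therefore wins. As a rough illustration only (a simplified
// sketch, not the protocol.Vector implementation), a version vector is a
// per-device counter map keyed by the device's short ID: Update increments
// the sender's counter, and vector A is newer than B when A is at least as
// large in every counter and strictly larger in at least one.
type exampleVector map[uint64]uint64

func (v exampleVector) update(shortID uint64) exampleVector {
    v[shortID]++ // bump the counter belonging to the updating device (assumes a non-nil map)
    return v
}

func (v exampleVector) newerThan(o exampleVector) bool {
    greater := false
    for id, c := range v {
        if c > o[id] {
            greater = true
        }
    }
    for id, c := range o {
        if c > v[id] {
            return false // o is ahead somewhere: concurrent or newer, not older
        }
    }
    return greater
}
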
func TestIssue4475(t *testing.T) {
    m, conn, fcfg, wcfgCancel := setupModelWithConnection(t)
    defer wcfgCancel()
    defer cleanupModel(m)
    testFs := fcfg.Filesystem()

    // Scenario: Dir is deleted locally and, before syncing/index exchange
    // happens, a file is created in that dir on the remote.
    // This should result in the directory being recreated and added to the
    // db locally.

    must(t, testFs.MkdirAll("delDir", 0755))

    m.ScanFolder("default")

    if fcfg, ok := m.cfg.Folder("default"); !ok || !fcfg.SharedWith(device1) {
        t.Fatal("not shared with device1")
    }

    fileName := filepath.Join("delDir", "file")
    conn.addFile(fileName, 0644, protocol.FileInfoTypeFile, nil)
    conn.sendIndexUpdate()

    // Is there something we could trigger on instead of just waiting?
    timeout := time.NewTimer(5 * time.Second)
    created := false
    for {
        if !created {
            if _, ok := m.testCurrentFolderFile("default", fileName); ok {
                created = true
            }
        } else {
            dir, ok := m.testCurrentFolderFile("default", "delDir")
            if !ok {
                t.Fatalf("can't get dir from db")
            }
            if !dir.Deleted {
                return
            }
        }

        select {
        case <-timeout.C:
            if created {
                t.Errorf("Timed out before file from remote was created")
            } else {
                t.Errorf("Timed out before directory was resurrected in db")
            }
            return
        default:
            time.Sleep(100 * time.Millisecond)
        }
    }
}

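// Both tests above poll the database in a 100 ms loop guarded by a 5 s timer,
// and the comments note the lack of anything better to trigger on. A small
// helper like this hypothetical one (not used by the tests, shown only as a
// sketch of the pattern) keeps the deadline handling in one place.
func examplePollUntil(t *testing.T, timeout, interval time.Duration, cond func() bool, msg string) {
    t.Helper()
    deadline := time.Now().Add(timeout)
    for !cond() {
        if time.Now().After(deadline) {
            t.Fatal("timed out:", msg)
            return
        }
        time.Sleep(interval)
    }
}
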
func TestVersionRestore(t *testing.T) {
|
|
|
|
// We create a bunch of files which we restore
|
|
|
|
// In each file, we write the filename as the content
|
|
|
|
// We verify that the content matches at the expected filenames
|
|
|
|
// after the restore operation.
|
|
|
|
dir, err := ioutil.TempDir("", "")
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, err)
|
2019-02-13 18:54:04 +00:00
|
|
|
defer os.RemoveAll(dir)
|
2018-01-01 14:39:23 +00:00
|
|
|
|
2021-02-04 20:10:41 +00:00
|
|
|
fcfg := newFolderConfiguration(defaultCfgWrapper, "default", "default", fs.FilesystemTypeBasic, dir)
|
2018-01-01 14:39:23 +00:00
|
|
|
fcfg.Versioning.Type = "simple"
|
|
|
|
fcfg.FSWatcherEnabled = false
|
|
|
|
filesystem := fcfg.Filesystem()
|
|
|
|
|
|
|
|
rawConfig := config.Configuration{
|
|
|
|
Folders: []config.FolderConfiguration{fcfg},
|
|
|
|
}
|
2021-01-15 14:43:34 +00:00
|
|
|
cfg, cancel := createTmpWrapper(rawConfig)
|
|
|
|
defer cancel()
|
2018-01-01 14:39:23 +00:00
|
|
|
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, cfg)
|
2019-05-19 12:29:07 +00:00
|
|
|
defer cleanupModel(m)
|
2018-01-01 14:39:23 +00:00
|
|
|
m.ScanFolder("default")
|
|
|
|
|
2019-06-25 05:56:11 +00:00
|
|
|
sentinel, err := time.ParseInLocation(versioner.TimeFormat, "20180101-010101", time.Local)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2018-01-01 14:39:23 +00:00
|
|
|
|
|
|
|
for _, file := range []string{
|
|
|
|
// Versions directory
|
|
|
|
".stversions/file~20171210-040404.txt", // will be restored
|
|
|
|
".stversions/existing~20171210-040404", // exists, should expect to be archived.
|
|
|
|
".stversions/something~20171210-040404", // will become directory, hence error
|
|
|
|
".stversions/dir/file~20171210-040404.txt",
|
|
|
|
".stversions/dir/file~20171210-040405.txt",
|
|
|
|
".stversions/dir/file~20171210-040406.txt",
|
|
|
|
".stversions/very/very/deep/one~20171210-040406.txt", // lives deep down, no directory exists.
|
|
|
|
".stversions/dir/existing~20171210-040406.txt", // exists, should expect to be archived.
|
2019-04-28 22:30:16 +00:00
|
|
|
".stversions/dir/cat", // untagged which was used by trashcan, supported
|
2018-01-01 14:39:23 +00:00
|
|
|
|
|
|
|
// "file.txt" will be restored
|
|
|
|
"existing",
|
|
|
|
"something/file", // Becomes directory
|
|
|
|
"dir/file.txt",
|
|
|
|
"dir/existing.txt",
|
|
|
|
} {
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
file = filepath.FromSlash(file)
|
|
|
|
}
|
|
|
|
dir := filepath.Dir(file)
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, filesystem.MkdirAll(dir, 0755))
|
2018-01-01 14:39:23 +00:00
|
|
|
if fd, err := filesystem.Create(file); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else if _, err := fd.Write([]byte(file)); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else if err := fd.Close(); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else if err := filesystem.Chtimes(file, sentinel, sentinel); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
versions, err := m.GetFolderVersions("default")
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, err)
|
2018-01-01 14:39:23 +00:00
|
|
|
expectedVersions := map[string]int{
|
|
|
|
"file.txt": 1,
|
|
|
|
"existing": 1,
|
|
|
|
"something": 1,
|
2019-06-25 05:56:11 +00:00
|
|
|
"dir/file.txt": 3,
|
2018-01-01 14:39:23 +00:00
|
|
|
"dir/existing.txt": 1,
|
|
|
|
"very/very/deep/one.txt": 1,
|
2019-04-28 22:30:16 +00:00
|
|
|
"dir/cat": 1,
|
2018-01-01 14:39:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for name, vers := range versions {
|
|
|
|
cnt, ok := expectedVersions[name]
|
|
|
|
if !ok {
|
|
|
|
t.Errorf("unexpected %s", name)
|
|
|
|
}
|
|
|
|
if len(vers) != cnt {
|
2018-01-12 07:26:33 +00:00
|
|
|
t.Errorf("%s: %d != %d", name, cnt, len(vers))
|
2018-01-01 14:39:23 +00:00
|
|
|
}
|
|
|
|
// Delete, so we can check if we didn't hit something we expect afterwards.
|
|
|
|
delete(expectedVersions, name)
|
|
|
|
}
|
|
|
|
|
|
|
|
for name := range expectedVersions {
|
|
|
|
t.Errorf("not found expected %s", name)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Restoring non existing folder fails.
|
|
|
|
_, err = m.RestoreFolderVersions("does not exist", nil)
|
|
|
|
if err == nil {
|
|
|
|
t.Errorf("expected an error")
|
|
|
|
}
|
|
|
|
|
|
|
|
makeTime := func(s string) time.Time {
|
2019-06-11 07:16:55 +00:00
|
|
|
tm, err := time.ParseInLocation(versioner.TimeFormat, s, time.Local)
|
2018-01-01 14:39:23 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
return tm.Truncate(time.Second)
|
|
|
|
}
|
|
|
|
|
|
|
|
restore := map[string]time.Time{
|
|
|
|
"file.txt": makeTime("20171210-040404"),
|
|
|
|
"existing": makeTime("20171210-040404"),
|
|
|
|
"something": makeTime("20171210-040404"),
|
|
|
|
"dir/file.txt": makeTime("20171210-040406"),
|
|
|
|
"dir/existing.txt": makeTime("20171210-040406"),
|
|
|
|
"very/very/deep/one.txt": makeTime("20171210-040406"),
|
|
|
|
}
|
|
|
|
|
2019-06-25 05:56:11 +00:00
|
|
|
beforeRestore := time.Now().Truncate(time.Second)
|
|
|
|
|
2018-01-01 14:39:23 +00:00
|
|
|
ferr, err := m.RestoreFolderVersions("default", restore)
|
2019-03-09 18:45:36 +00:00
|
|
|
must(t, err)
|
2018-01-01 14:39:23 +00:00
|
|
|
|
2021-02-12 19:30:51 +00:00
|
|
|
if err, ok := ferr["something"]; len(ferr) > 1 || !ok || !errors.Is(err, versioner.ErrDirectory) {
|
2018-01-01 14:39:23 +00:00
|
|
|
t.Fatalf("incorrect error or count: %d %s", len(ferr), ferr)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Failed items are not expected to be restored.
|
|
|
|
// Remove them from expectations
|
|
|
|
for name := range ferr {
|
|
|
|
delete(restore, name)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that content of files matches to the version they've been restored.
|
|
|
|
for file, version := range restore {
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
file = filepath.FromSlash(file)
|
|
|
|
}
|
2019-06-11 07:16:55 +00:00
|
|
|
tag := version.In(time.Local).Truncate(time.Second).Format(versioner.TimeFormat)
|
2018-01-01 14:39:23 +00:00
|
|
|
taggedName := filepath.Join(".stversions", versioner.TagFilename(file, tag))
|
|
|
|
fd, err := filesystem.Open(file)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
defer fd.Close()
|
|
|
|
|
|
|
|
content, err := ioutil.ReadAll(fd)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
if !bytes.Equal(content, []byte(taggedName)) {
|
|
|
|
t.Errorf("%s: %s != %s", file, string(content), taggedName)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-25 05:56:11 +00:00
|
|
|
// The simple versioner uses the current time when it tags archived files,
// so we can check that existing files were archived (as opposed to deleted)
// during the restore: their version time must be at or after beforeRestore.
|
2018-01-01 14:39:23 +00:00
|
|
|
expectArchived := map[string]struct{}{
|
|
|
|
"existing": {},
|
|
|
|
"dir/file.txt": {},
|
|
|
|
"dir/existing.txt": {},
|
|
|
|
}
|
|
|
|
|
2019-06-25 05:56:11 +00:00
|
|
|
allFileVersions, err := m.GetFolderVersions("default")
|
|
|
|
must(t, err)
|
|
|
|
for file, versions := range allFileVersions {
|
|
|
|
key := file
|
2018-01-01 14:39:23 +00:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
file = filepath.FromSlash(file)
|
|
|
|
}
|
2019-06-25 05:56:11 +00:00
|
|
|
for _, version := range versions {
|
|
|
|
if version.VersionTime.Equal(beforeRestore) || version.VersionTime.After(beforeRestore) {
|
|
|
|
fd, err := filesystem.Open(".stversions/" + versioner.TagFilename(file, version.VersionTime.Format(versioner.TimeFormat)))
|
|
|
|
must(t, err)
|
|
|
|
defer fd.Close()
|
|
|
|
|
|
|
|
content, err := ioutil.ReadAll(fd)
|
|
|
|
if err != nil {
|
|
|
|
t.Error(err)
|
|
|
|
}
|
|
|
|
// Even if they are at the archived path, content should have the non
|
|
|
|
// archived name.
|
|
|
|
if !bytes.Equal(content, []byte(file)) {
|
|
|
|
t.Errorf("%s (%s): %s != %s", file, fd.Name(), string(content), file)
|
|
|
|
}
|
|
|
|
_, ok := expectArchived[key]
|
|
|
|
if !ok {
|
|
|
|
t.Error("unexpected archived file with future timestamp", file, version.VersionTime)
|
|
|
|
}
|
|
|
|
delete(expectArchived, key)
|
|
|
|
}
|
2018-01-01 14:39:23 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-25 05:56:11 +00:00
|
|
|
if len(expectArchived) != 0 {
|
|
|
|
t.Fatal("missed some archived files", expectArchived)
|
|
|
|
}
|
2018-01-01 14:39:23 +00:00
|
|
|
}
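
// The restore test above works on names like "file~20171210-040404.txt", i.e.
// the original name with a "~"-separated timestamp tag inserted before the
// extension, parsed with versioner.TimeFormat. Splitting such a tag back out
// amounts to the sketch below (illustrative only; the versioner package has
// its own helpers for tagging and untagging).
func exampleSplitVersionTag(name string) (tag string, when time.Time, ok bool) {
    ext := filepath.Ext(name)             // ".txt" or "" for untagged names like "dir/cat"
    base := strings.TrimSuffix(name, ext) // "file~20171210-040404"
    idx := strings.LastIndex(base, "~")
    if idx < 0 {
        return "", time.Time{}, false // untagged, e.g. trashcan-style versions
    }
    tag = base[idx+1:]
    when, err := time.ParseInLocation(versioner.TimeFormat, tag, time.Local)
    if err != nil {
        return "", time.Time{}, false
    }
    return tag, when, true
}
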
|
|
|
|
|
2017-12-15 20:01:56 +00:00
|
|
|
func TestPausedFolders(t *testing.T) {
    // Create a separate wrapper not to pollute other tests.
    wrapper, cancel := createTmpWrapper(defaultCfgWrapper.RawCopy())
    defer cancel()
    m := setupModel(t, wrapper)
    defer cleanupModel(m)

    if err := m.ScanFolder("default"); err != nil {
        t.Error(err)
    }

    pausedConfig := wrapper.RawCopy()
    pausedConfig.Folders[0].Paused = true
    replace(t, wrapper, pausedConfig)

    if err := m.ScanFolder("default"); err != ErrFolderPaused {
        t.Errorf("Expected folder paused error, received: %v", err)
    }

    if err := m.ScanFolder("nonexistent"); err != ErrFolderMissing {
        t.Errorf("Expected missing folder error, received: %v", err)
    }
}

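// The comparisons above use ==, which works because the model returns the
// sentinel errors ErrFolderPaused and ErrFolderMissing unwrapped. If a call
// path ever wraps them, errors.Is still matches; a minimal sketch:
func exampleIsFolderPaused(err error) bool {
    // errors.Is walks the wrap chain, so both a bare ErrFolderPaused and
    // something like fmt.Errorf("scanning: %w", ErrFolderPaused) are recognised.
    return errors.Is(err, ErrFolderPaused)
}
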
func TestIssue4094(t *testing.T) {
    testOs := &fatalOs{t}

    // Create a separate wrapper not to pollute other tests.
    wrapper, cancel := createTmpWrapper(config.Configuration{})
    defer cancel()
    m := newModel(t, wrapper, myID, "syncthing", "dev", nil)
    m.ServeBackground()
    defer cleanupModel(m)

    // Force the model to wire itself and add the folders
    folderPath := "nonexistent"
    defer testOs.RemoveAll(folderPath)
    cfg := defaultCfgWrapper.RawCopy()
    fcfg := config.FolderConfiguration{
        ID:     "folder1",
        Path:   folderPath,
        Paused: true,
        Devices: []config.FolderDeviceConfiguration{
            {DeviceID: device1},
        },
    }
    cfg.Folders = []config.FolderConfiguration{fcfg}
    replace(t, wrapper, cfg)

    if err := m.SetIgnores(fcfg.ID, []string{"foo"}); err != nil {
        t.Fatalf("failed setting ignores: %v", err)
    }

    if _, err := fcfg.Filesystem().Lstat(".stignore"); err != nil {
        t.Fatalf("failed stating .stignore: %v", err)
    }
}

func TestIssue4903(t *testing.T) {
    testOs := &fatalOs{t}

    wrapper, cancel := createTmpWrapper(config.Configuration{})
    defer cancel()
    m := setupModel(t, wrapper)
    defer cleanupModel(m)

    // Force the model to wire itself and add the folders
    folderPath := "nonexistent"
    defer testOs.RemoveAll(folderPath)
    cfg := defaultCfgWrapper.RawCopy()
    fcfg := config.FolderConfiguration{
        ID:     "folder1",
        Path:   folderPath,
        Paused: true,
        Devices: []config.FolderDeviceConfiguration{
            {DeviceID: device1},
        },
    }
    cfg.Folders = []config.FolderConfiguration{fcfg}
    replace(t, wrapper, cfg)

    if err := fcfg.CheckPath(); err != config.ErrPathMissing {
        t.Fatalf("expected path missing error, got: %v", err)
    }

    if _, err := fcfg.Filesystem().Lstat("."); !fs.IsNotExist(err) {
        t.Fatalf("Expected missing path error, got: %v", err)
    }
}

func TestIssue5002(t *testing.T) {
    // recheckFile should not panic when given an index equal to the number of blocks
    m := setupModel(t, defaultCfgWrapper)
    defer cleanupModel(m)

    if err := m.ScanFolder("default"); err != nil {
        t.Error(err)
    }

    file, ok := m.testCurrentFolderFile("default", "foo")
    if !ok {
        t.Fatal("test file should exist")
    }
    blockSize := int32(file.BlockSize())

    m.recheckFile(protocol.LocalDeviceID, "default", "foo", file.Size-int64(blockSize), []byte{1, 2, 3, 4}, 0)
    m.recheckFile(protocol.LocalDeviceID, "default", "foo", file.Size, []byte{1, 2, 3, 4}, 0) // panic
    m.recheckFile(protocol.LocalDeviceID, "default", "foo", file.Size+int64(blockSize), []byte{1, 2, 3, 4}, 0)
}

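// The three recheckFile calls above probe offsets one block below the file
// size, exactly at it, and one block above it; the regression was a block
// index computed from the offset landing one past the last block. The guard
// is the usual bounds check, sketched here for illustration only (this is
// not the model's actual recheck code):
func exampleBlockForOffset(blocks []protocol.BlockInfo, offset int64, blockSize int) (protocol.BlockInfo, bool) {
    if blockSize <= 0 || offset < 0 {
        return protocol.BlockInfo{}, false
    }
    idx := int(offset / int64(blockSize))
    if idx >= len(blocks) { // offset at or beyond EOF: no such block
        return protocol.BlockInfo{}, false
    }
    return blocks[idx], true
}
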
func TestParentOfUnignored(t *testing.T) {
    m, cancel := newState(t, defaultCfg)
    defer cleanupModel(m)
    defer cancel()
    defer defaultFolderConfig.Filesystem().Remove(".stignore")

    m.SetIgnores("default", []string{"!quux", "*"})

    if parent, ok := m.testCurrentFolderFile("default", "baz"); !ok {
        t.Errorf(`Directory "baz" missing in db`)
    } else if parent.IsIgnored() {
        t.Errorf(`Directory "baz" is ignored`)
    }
}

// TestFolderRestartZombies reproduces issue 5233, where multiple concurrent folder
// restarts would leave more than one folder runner alive.
func TestFolderRestartZombies(t *testing.T) {
    wrapper, cancel := createTmpWrapper(defaultCfg.Copy())
    defer cancel()
    waiter, err := wrapper.Modify(func(cfg *config.Configuration) {
        cfg.Options.RawMaxFolderConcurrency = -1
        _, i, _ := cfg.Folder("default")
        cfg.Folders[i].FilesystemType = fs.FilesystemTypeFake
    })
    must(t, err)
    waiter.Wait()

    folderCfg, _ := wrapper.Folder("default")

    m := setupModel(t, wrapper)
    defer cleanupModel(m)

    // Make sure the folder is up and running, because we want to count it.
    m.ScanFolder("default")

    // Check how many folders are running before the test.
    if r := atomic.LoadInt32(&m.foldersRunning); r != 1 {
        t.Error("Expected one running folder, not", r)
    }

    // Run a few parallel configuration changers for one second. Each waits
    // for the commit to complete, but there are many of them.
    var wg sync.WaitGroup
    for i := 0; i < 25; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            t0 := time.Now()
            for time.Since(t0) < time.Second {
                fcfg := folderCfg.Copy()
                fcfg.MaxConflicts = rand.Int() // safe change that should cause a folder restart
                setFolder(t, wrapper, fcfg)
            }
        }()
    }

    // Wait for the above to complete and check how many folders we have
    // running now. It should not have increased.
    wg.Wait()
    // Make sure the folder is up and running, because we want to count it.
    m.ScanFolder("default")
    if r := atomic.LoadInt32(&m.foldersRunning); r != 1 {
        t.Error("Expected one running folder, not", r)
    }
}

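// The zombie check above relies on m.foldersRunning being incremented when a
// folder runner starts and decremented when it exits. The general shape of
// such an atomically maintained counter, shown as an illustrative sketch
// (this is not the model's folder runner):
type exampleRunnerPool struct {
    running int32
    wg      sync.WaitGroup
}

func (p *exampleRunnerPool) start(work func()) {
    p.wg.Add(1)
    go func() {
        atomic.AddInt32(&p.running, 1)
        defer atomic.AddInt32(&p.running, -1) // always undone, even on early return
        defer p.wg.Done()
        work()
    }()
}

func (p *exampleRunnerPool) runningCount() int32 {
    return atomic.LoadInt32(&p.running)
}
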
func TestRequestLimit(t *testing.T) {
    wrapper, cancel := createTmpWrapper(defaultCfg.Copy())
    defer cancel()
    waiter, err := wrapper.Modify(func(cfg *config.Configuration) {
        _, i, _ := cfg.Device(device1)
        cfg.Devices[i].MaxRequestKiB = 1
    })
    must(t, err)
    waiter.Wait()
    m, _ := setupModelWithConnectionFromWrapper(t, wrapper)
    defer cleanupModel(m)

    file := "tmpfile"
    befReq := time.Now()
    first, err := m.Request(device1, "default", file, 0, 2000, 0, nil, 0, false)
    if err != nil {
        t.Fatalf("First request failed: %v", err)
    }
    reqDur := time.Since(befReq)
    returned := make(chan struct{})
    go func() {
        second, err := m.Request(device1, "default", file, 0, 2000, 0, nil, 0, false)
        if err != nil {
            t.Errorf("Second request failed: %v", err)
        }
        close(returned)
        second.Close()
    }()
    time.Sleep(10 * reqDur)
    select {
    case <-returned:
        t.Fatalf("Second request returned before first was done")
    default:
    }
    first.Close()
    select {
    case <-returned:
    case <-time.After(time.Second):
        t.Fatalf("Second request did not return after first was done")
    }
}

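// With MaxRequestKiB set to 1, the first 2000-byte request consumes the whole
// per-device byte budget, so the second request cannot proceed until the
// first response is Closed and its bytes are handed back. A minimal byte
// semaphore showing that take/give behaviour (a sketch only, not the lib/util
// limiter the model actually uses; a production limiter would also need to
// cap requests larger than the capacity so they cannot block forever):
type exampleByteSem struct {
    mut   sync.Mutex
    cond  *sync.Cond
    avail int
}

func newExampleByteSem(capacity int) *exampleByteSem {
    s := &exampleByteSem{avail: capacity}
    s.cond = sync.NewCond(&s.mut)
    return s
}

func (s *exampleByteSem) take(n int) {
    s.mut.Lock()
    defer s.mut.Unlock()
    for s.avail < n { // block until enough bytes have been returned
        s.cond.Wait()
    }
    s.avail -= n
}

func (s *exampleByteSem) give(n int) {
    s.mut.Lock()
    defer s.mut.Unlock()
    s.avail += n
    s.cond.Broadcast() // wake anyone waiting in take
}
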
// TestConnCloseOnRestart checks that there is no deadlock when calling Close
|
|
|
|
// on a protocol connection that has a blocking reader (blocking writer can't
|
|
|
|
// be done as the test requires clusterconfigs to go through).
|
|
|
|
func TestConnCloseOnRestart(t *testing.T) {
|
2019-06-05 06:01:59 +00:00
|
|
|
oldCloseTimeout := protocol.CloseTimeout
|
|
|
|
protocol.CloseTimeout = 100 * time.Millisecond
|
|
|
|
defer func() {
|
|
|
|
protocol.CloseTimeout = oldCloseTimeout
|
|
|
|
}()
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, w)
|
2019-05-25 14:00:32 +00:00
|
|
|
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
|
2019-05-18 06:53:59 +00:00
|
|
|
|
|
|
|
br := &testutils.BlockingRW{}
|
|
|
|
nw := &testutils.NoopRW{}
|
2021-03-22 20:50:19 +00:00
|
|
|
m.AddConnection(protocol.NewConnection(device1, br, nw, testutils.NoopCloser{}, m, new(protocolmocks.ConnectionInfo), protocol.CompressionNever, nil), protocol.Hello{})
|
2019-05-27 09:58:09 +00:00
|
|
|
m.pmut.RLock()
|
|
|
|
if len(m.closed) != 1 {
|
|
|
|
t.Fatalf("Expected just one conn (len(m.conn) == %v)", len(m.conn))
|
|
|
|
}
|
|
|
|
closed := m.closed[device1]
|
|
|
|
m.pmut.RUnlock()
|
2019-05-18 06:53:59 +00:00
|
|
|
|
2020-10-02 09:49:51 +00:00
|
|
|
waiter, err := w.RemoveDevice(device1)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2019-05-18 06:53:59 +00:00
|
|
|
done := make(chan struct{})
|
|
|
|
go func() {
|
2020-10-02 09:49:51 +00:00
|
|
|
waiter.Wait()
|
2019-05-18 06:53:59 +00:00
|
|
|
close(done)
|
|
|
|
}()
|
|
|
|
select {
|
|
|
|
case <-done:
|
|
|
|
case <-time.After(5 * time.Second):
|
2020-10-02 09:49:51 +00:00
|
|
|
t.Fatal("Timed out before config took effect")
|
2019-05-18 06:53:59 +00:00
|
|
|
}
|
2019-05-27 09:58:09 +00:00
|
|
|
select {
|
|
|
|
case <-closed:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatal("Timed out before connection was closed")
|
2019-05-18 06:53:59 +00:00
|
|
|
}
|
|
|
|
}
|
2019-07-19 17:37:29 +00:00
|
|
|
|
2019-07-23 19:48:53 +00:00
|
|
|
func TestModTimeWindow(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2019-07-23 19:48:53 +00:00
|
|
|
tfs := fcfg.Filesystem()
|
|
|
|
fcfg.RawModTimeWindowS = 2
|
2021-01-15 14:43:34 +00:00
|
|
|
setFolder(t, w, fcfg)
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, w)
|
2019-07-23 19:48:53 +00:00
|
|
|
defer cleanupModelAndRemoveDir(m, tfs.URI())
|
|
|
|
|
|
|
|
name := "foo"
|
|
|
|
|
|
|
|
fd, err := tfs.Create(name)
|
|
|
|
must(t, err)
|
|
|
|
stat, err := fd.Stat()
|
|
|
|
must(t, err)
|
|
|
|
modTime := stat.ModTime()
|
|
|
|
fd.Close()
|
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
2020-06-16 04:31:55 +00:00
|
|
|
// Get current version
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
fi, ok := m.testCurrentFolderFile("default", name)
|
2019-07-23 19:48:53 +00:00
|
|
|
if !ok {
|
|
|
|
t.Fatal("File missing")
|
|
|
|
}
|
2020-06-16 04:31:55 +00:00
|
|
|
v := fi.Version
|
|
|
|
|
|
|
|
// Update time on disk 1s
|
2019-07-23 19:48:53 +00:00
|
|
|
|
|
|
|
err = tfs.Chtimes(name, time.Now(), modTime.Add(time.Second))
|
|
|
|
must(t, err)
|
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
2020-06-16 04:31:55 +00:00
|
|
|
// No change due to within window
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
fi, _ = m.testCurrentFolderFile("default", name)
|
2019-07-23 19:48:53 +00:00
|
|
|
if !fi.Version.Equal(v) {
|
|
|
|
t.Fatalf("Got version %v, expected %v", fi.Version, v)
|
|
|
|
}
|
|
|
|
|
2020-06-16 04:31:55 +00:00
|
|
|
// Update to be outside window
|
|
|
|
|
2019-07-23 19:48:53 +00:00
|
|
|
err = tfs.Chtimes(name, time.Now(), modTime.Add(2*time.Second))
|
|
|
|
must(t, err)
|
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
2020-06-16 04:31:55 +00:00
|
|
|
// Version should have updated
|
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
fi, _ = m.testCurrentFolderFile("default", name)
|
2020-06-16 04:31:55 +00:00
|
|
|
if fi.Version.Compare(v) != protocol.Greater {
|
|
|
|
t.Fatalf("Got result %v, expected %v", fi.Version.Compare(v), protocol.Greater)
|
2019-07-23 19:48:53 +00:00
|
|
|
}
|
|
|
|
}
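
// With RawModTimeWindowS = 2, the 1 s Chtimes above is treated as the same
// modification time and the version is not bumped, while the 2 s change falls
// outside the window and is. The comparison amounts to the sketch below
// (illustrative only; it assumes the window is applied symmetrically):
func exampleModTimeEqual(a, b time.Time, window time.Duration) bool {
    d := a.Sub(b)
    if d < 0 {
        d = -d
    }
    return d < window // differences smaller than the window are ignored
}
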
|
|
|
|
|
2019-07-19 17:37:29 +00:00
|
|
|
func TestDevicePause(t *testing.T) {
    m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
    defer wcfgCancel()
    defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())

    sub := m.evLogger.Subscribe(events.DevicePaused)
    defer sub.Unsubscribe()

    m.pmut.RLock()
    closed := m.closed[device1]
    m.pmut.RUnlock()

    pauseDevice(t, m.cfg, device1, true)

    timeout := time.NewTimer(5 * time.Second)
    select {
    case <-sub.C():
        select {
        case <-closed:
        case <-timeout.C:
            t.Fatal("Timed out before connection was closed")
        }
    case <-timeout.C:
        t.Fatal("Timed out before device was paused")
    }
}

func TestDeviceWasSeen(t *testing.T) {
    m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
    defer wcfgCancel()
    defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())

    m.deviceWasSeen(device1)

    stats, err := m.DeviceStatistics()
    if err != nil {
        t.Error("Unexpected error:", err)
    }
    entry := stats[device1]
    if time.Since(entry.LastSeen) > time.Second {
        t.Error("device should have been seen now")
    }
}

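// deviceWasSeen records "now" for the device and DeviceStatistics reports it
// back as LastSeen. The general shape of such a registry is a mutex-guarded
// map of timestamps; an illustrative sketch, not the statistics package:
type exampleSeenRegistry struct {
    mut  sync.Mutex
    seen map[protocol.DeviceID]time.Time
}

func (r *exampleSeenRegistry) sawNow(id protocol.DeviceID) {
    r.mut.Lock()
    defer r.mut.Unlock()
    if r.seen == nil {
        r.seen = make(map[protocol.DeviceID]time.Time)
    }
    r.seen[id] = time.Now()
}

func (r *exampleSeenRegistry) lastSeen(id protocol.DeviceID) time.Time {
    r.mut.Lock()
    defer r.mut.Unlock()
    return r.seen[id] // zero time if the device was never seen
}
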
func TestNewLimitedRequestResponse(t *testing.T) {
    l0 := util.NewSemaphore(0)
    l1 := util.NewSemaphore(1024)
    l2 := (*util.Semaphore)(nil)

    // Should take 500 bytes from any non-unlimited non-nil limiters.
    res := newLimitedRequestResponse(500, l0, l1, l2)

    if l1.Available() != 1024-500 {
        t.Error("should have taken bytes from limited limiter")
    }

    // Closing the result should return the bytes.
    res.Close()

    // Try to take 1024 bytes to make sure the bytes were returned.
    done := make(chan struct{})
    go func() {
        l1.Take(1024)
        close(done)
    }()
    select {
    case <-done:
    case <-time.After(time.Second):
        t.Error("Bytes weren't returned in a timely fashion")
    }
}

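// newLimitedRequestResponse takes the requested byte count from every real
// limiter up front and must hand it back exactly once when the response is
// closed, skipping nil or unlimited limiters. The compose-and-release idea in
// isolation, using a hypothetical giver interface rather than the model's
// types (sketch only):
type exampleGiver interface{ give(n int) }

func exampleReleaseAll(n int, limiters ...exampleGiver) func() {
    var once sync.Once
    return func() {
        once.Do(func() { // guard against a double Close releasing bytes twice
            for _, l := range limiters {
                if l != nil {
                    l.give(n)
                }
            }
        })
    }
}
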
func TestSummaryPausedNoError(t *testing.T) {
    wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
    defer wcfgCancel()
    pauseFolder(t, wcfg, fcfg.ID, true)
    m := setupModel(t, wcfg)
    defer cleanupModel(m)

    fss := NewFolderSummaryService(wcfg, m, myID, events.NoopLogger)
    if _, err := fss.Summary(fcfg.ID); err != nil {
        t.Error("Expected no error getting a summary for a paused folder:", err)
    }
}

func TestFolderAPIErrors(t *testing.T) {
    wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
    defer wcfgCancel()
    pauseFolder(t, wcfg, fcfg.ID, true)
    m := setupModel(t, wcfg)
    defer cleanupModel(m)

    methods := []func(folder string) error{
        m.ScanFolder,
        func(folder string) error {
            return m.ScanFolderSubdirs(folder, nil)
        },
        func(folder string) error {
            _, err := m.GetFolderVersions(folder)
            return err
        },
        func(folder string) error {
            _, err := m.RestoreFolderVersions(folder, nil)
            return err
        },
    }

    for i, method := range methods {
        if err := method(fcfg.ID); err != ErrFolderPaused {
            t.Errorf(`Expected "%v", got "%v" (method no %v)`, ErrFolderPaused, err, i)
        }
        if err := method("notexisting"); err != ErrFolderMissing {
            t.Errorf(`Expected "%v", got "%v" (method no %v)`, ErrFolderMissing, err, i)
        }
    }
}

func TestRenameSequenceOrder(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
|
|
|
|
defer wcfgCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2020-05-11 18:15:11 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
|
|
|
numFiles := 20
|
|
|
|
|
|
|
|
ffs := fcfg.Filesystem()
|
|
|
|
for i := 0; i < numFiles; i++ {
|
|
|
|
v := fmt.Sprintf("%d", i)
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, ffs, v, []byte(v))
|
2020-05-11 18:15:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
count := 0
|
|
|
|
snap := dbSnapshot(t, m, "default")
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
2020-05-11 18:15:11 +00:00
|
|
|
count++
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
snap.Release()
|
|
|
|
|
|
|
|
if count != numFiles {
|
|
|
|
t.Errorf("Unexpected count: %d != %d", count, numFiles)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Modify all the files, other than the ones we expect to rename
|
|
|
|
for i := 0; i < numFiles; i++ {
|
|
|
|
if i == 3 || i == 17 || i == 16 || i == 4 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
v := fmt.Sprintf("%d", i)
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, ffs, v, []byte(v+"-new"))
|
2020-05-11 18:15:11 +00:00
|
|
|
}
|
|
|
|
// Rename
|
|
|
|
must(t, ffs.Rename("3", "17"))
|
|
|
|
must(t, ffs.Rename("16", "4"))
|
|
|
|
|
|
|
|
// Scan
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
// Verify that each rename shows up as the new name appearing, immediately followed by the old name being recorded as deleted.
|
|
|
|
snap = dbSnapshot(t, m, "default")
|
|
|
|
defer snap.Release()
|
|
|
|
|
|
|
|
var firstExpectedSequence int64
|
|
|
|
var secondExpectedSequence int64
|
|
|
|
failed := false
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithHaveSequence(0, func(i protocol.FileIntf) bool {
|
2020-05-11 18:15:11 +00:00
|
|
|
t.Log(i)
|
|
|
|
if i.FileName() == "17" {
|
|
|
|
firstExpectedSequence = i.SequenceNo() + 1
|
|
|
|
}
|
|
|
|
if i.FileName() == "4" {
|
|
|
|
secondExpectedSequence = i.SequenceNo() + 1
|
|
|
|
}
|
|
|
|
if i.FileName() == "3" {
|
|
|
|
failed = i.SequenceNo() != firstExpectedSequence || failed
|
|
|
|
}
|
|
|
|
if i.FileName() == "16" {
|
|
|
|
failed = i.SequenceNo() != secondExpectedSequence || failed
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
if failed {
|
|
|
|
t.Fail()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-16 12:39:27 +00:00
|
|
|
func TestRenameSameFile(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
|
|
|
|
defer wcfgCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2020-05-16 12:39:27 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
|
|
|
ffs := fcfg.Filesystem()
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, ffs, "file", []byte("file"))
|
2020-05-16 12:39:27 +00:00
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
count := 0
|
|
|
|
snap := dbSnapshot(t, m, "default")
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
2020-05-16 12:39:27 +00:00
|
|
|
count++
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
snap.Release()
|
|
|
|
|
|
|
|
if count != 1 {
|
|
|
|
t.Errorf("Unexpected count: %d != %d", count, 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
must(t, ffs.Rename("file", "file1"))
|
2020-06-18 06:15:47 +00:00
|
|
|
must(t, osutil.Copy(fs.CopyRangeMethodStandard, ffs, ffs, "file1", "file0"))
|
|
|
|
must(t, osutil.Copy(fs.CopyRangeMethodStandard, ffs, ffs, "file1", "file2"))
|
|
|
|
must(t, osutil.Copy(fs.CopyRangeMethodStandard, ffs, ffs, "file1", "file3"))
|
|
|
|
must(t, osutil.Copy(fs.CopyRangeMethodStandard, ffs, ffs, "file1", "file4"))
|
2020-05-16 12:39:27 +00:00
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
snap = dbSnapshot(t, m, "default")
|
|
|
|
defer snap.Release()
|
|
|
|
|
|
|
|
prevSeq := int64(0)
|
|
|
|
seen := false
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithHaveSequence(0, func(i protocol.FileIntf) bool {
|
2020-05-16 12:39:27 +00:00
|
|
|
if i.SequenceNo() <= prevSeq {
|
|
|
|
t.Fatalf("non-increasing sequences: %d <= %d", i.SequenceNo(), prevSeq)
|
|
|
|
}
|
|
|
|
if i.FileName() == "file" {
|
|
|
|
if seen {
|
|
|
|
t.Fatal("already seen file")
|
|
|
|
}
|
|
|
|
seen = true
|
|
|
|
}
|
|
|
|
prevSeq = i.SequenceNo()
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestRenameEmptyFile(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
|
|
|
|
defer wcfgCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2020-05-16 12:39:27 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
|
|
|
ffs := fcfg.Filesystem()
|
|
|
|
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, ffs, "file", []byte("data"))
|
|
|
|
writeFile(t, ffs, "empty", nil)
|
2020-05-16 12:39:27 +00:00
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
snap := dbSnapshot(t, m, "default")
|
|
|
|
defer snap.Release()
|
|
|
|
empty, eok := snap.Get(protocol.LocalDeviceID, "empty")
|
|
|
|
if !eok {
|
|
|
|
t.Fatal("failed to find empty file")
|
|
|
|
}
|
|
|
|
file, fok := snap.Get(protocol.LocalDeviceID, "file")
|
|
|
|
if !fok {
|
|
|
|
t.Fatal("failed to find non-empty file")
|
|
|
|
}
|
|
|
|
|
|
|
|
count := 0
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithBlocksHash(empty.BlocksHash, func(_ protocol.FileIntf) bool {
|
2020-05-16 12:39:27 +00:00
|
|
|
count++
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
if count != 0 {
|
|
|
|
t.Fatalf("Found %d entries for empty file, expected 0", count)
|
|
|
|
}
|
|
|
|
|
|
|
|
count = 0
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithBlocksHash(file.BlocksHash, func(_ protocol.FileIntf) bool {
|
2020-05-16 12:39:27 +00:00
|
|
|
count++
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
if count != 1 {
|
|
|
|
t.Fatalf("Found %d entries for non-empty file, expected 1", count)
|
|
|
|
}
|
|
|
|
|
|
|
|
must(t, ffs.Rename("file", "new-file"))
|
|
|
|
must(t, ffs.Rename("empty", "new-empty"))
|
|
|
|
|
|
|
|
// Scan
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
snap = dbSnapshot(t, m, "default")
|
|
|
|
defer snap.Release()
|
|
|
|
|
|
|
|
count = 0
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithBlocksHash(empty.BlocksHash, func(_ protocol.FileIntf) bool {
|
2020-05-16 12:39:27 +00:00
|
|
|
count++
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
if count != 0 {
|
|
|
|
t.Fatalf("Found %d entries for empty file, expected 0", count)
|
|
|
|
}
|
|
|
|
|
|
|
|
count = 0
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithBlocksHash(file.BlocksHash, func(i protocol.FileIntf) bool {
|
2020-05-16 12:39:27 +00:00
|
|
|
count++
|
|
|
|
if i.FileName() != "new-file" {
|
|
|
|
t.Fatalf("unexpected file name %s, expected new-file", i.FileName())
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
if count != 1 {
|
|
|
|
t.Fatalf("Found %d entries for non-empty file, expected 1", count)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-11 18:15:11 +00:00
|
|
|
func TestBlockListMap(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
|
|
|
|
defer wcfgCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2020-05-11 18:15:11 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
|
|
|
ffs := fcfg.Filesystem()
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, ffs, "one", []byte("content"))
|
|
|
|
writeFile(t, ffs, "two", []byte("content"))
|
|
|
|
writeFile(t, ffs, "three", []byte("content"))
|
|
|
|
writeFile(t, ffs, "four", []byte("content"))
|
|
|
|
writeFile(t, ffs, "five", []byte("content"))
|
2020-05-11 18:15:11 +00:00
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
snap := dbSnapshot(t, m, "default")
|
|
|
|
defer snap.Release()
|
|
|
|
fi, ok := snap.Get(protocol.LocalDeviceID, "one")
|
|
|
|
if !ok {
|
|
|
|
t.Error("failed to find existing file")
|
|
|
|
}
|
|
|
|
var paths []string
|
|
|
|
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithBlocksHash(fi.BlocksHash, func(fi protocol.FileIntf) bool {
|
2020-05-11 18:15:11 +00:00
|
|
|
paths = append(paths, fi.FileName())
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
snap.Release()
|
|
|
|
|
|
|
|
expected := []string{"one", "two", "three", "four", "five"}
|
|
|
|
if !equalStringsInAnyOrder(paths, expected) {
|
|
|
|
t.Errorf("expected %q got %q", expected, paths)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fudge the files around
|
|
|
|
// Remove
|
|
|
|
must(t, ffs.Remove("one"))
|
|
|
|
|
|
|
|
// Modify
|
|
|
|
must(t, ffs.Remove("two"))
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, ffs, "two", []byte("mew-content"))
|
2020-05-11 18:15:11 +00:00
|
|
|
|
|
|
|
// Rename
|
|
|
|
must(t, ffs.Rename("three", "new-three"))
|
|
|
|
|
|
|
|
// Change type
|
|
|
|
must(t, ffs.Remove("four"))
|
|
|
|
must(t, ffs.Mkdir("four", 0644))
|
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
// Check we're left with 2 of the 5
|
|
|
|
snap = dbSnapshot(t, m, "default")
|
|
|
|
defer snap.Release()
|
|
|
|
|
|
|
|
paths = paths[:0]
|
2020-05-30 07:50:23 +00:00
|
|
|
snap.WithBlocksHash(fi.BlocksHash, func(fi protocol.FileIntf) bool {
|
2020-05-11 18:15:11 +00:00
|
|
|
paths = append(paths, fi.FileName())
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
snap.Release()
|
|
|
|
|
|
|
|
expected = []string{"new-three", "five"}
|
|
|
|
if !equalStringsInAnyOrder(paths, expected) {
|
|
|
|
t.Errorf("expected %q got %q", expected, paths)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-28 09:13:15 +00:00
|
|
|
func TestScanRenameCaseOnly(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
|
|
|
|
defer wcfgCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2020-07-28 09:13:15 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
|
|
|
ffs := fcfg.Filesystem()
|
|
|
|
name := "foo"
|
2021-11-10 08:46:21 +00:00
|
|
|
writeFile(t, ffs, name, []byte("contents"))
|
2020-07-28 09:13:15 +00:00
|
|
|
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
snap := dbSnapshot(t, m, fcfg.ID)
|
|
|
|
defer snap.Release()
|
|
|
|
found := false
|
|
|
|
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
|
|
|
if found {
|
|
|
|
t.Fatal("got more than one file")
|
|
|
|
}
|
|
|
|
if i.FileName() != name {
|
|
|
|
t.Fatalf("got file %v, expected %v", i.FileName(), name)
|
|
|
|
}
|
|
|
|
found = true
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
snap.Release()
|
|
|
|
|
|
|
|
upper := strings.ToUpper(name)
|
|
|
|
must(t, ffs.Rename(name, upper))
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
snap = dbSnapshot(t, m, fcfg.ID)
|
|
|
|
defer snap.Release()
|
|
|
|
found = false
|
|
|
|
snap.WithHave(protocol.LocalDeviceID, func(i protocol.FileIntf) bool {
|
|
|
|
if i.FileName() == name {
|
|
|
|
if i.IsDeleted() {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
t.Fatal("renamed file not deleted")
|
|
|
|
}
|
|
|
|
if i.FileName() != upper {
|
|
|
|
t.Fatalf("got file %v, expected %v", i.FileName(), upper)
|
|
|
|
}
|
|
|
|
if found {
|
|
|
|
t.Fatal("got more than the expected files")
|
|
|
|
}
|
|
|
|
found = true
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-10-02 09:49:51 +00:00
|
|
|
func TestClusterConfigOnFolderAdd(t *testing.T) {
    testConfigChangeTriggersClusterConfigs(t, false, true, nil, func(wrapper config.Wrapper) {
        fcfg := testFolderConfigTmp()
        fcfg.ID = "second"
        fcfg.Label = "second"
        fcfg.Devices = []config.FolderDeviceConfiguration{{
            DeviceID:     device2,
            IntroducedBy: protocol.EmptyDeviceID,
        }}
        setFolder(t, wrapper, fcfg)
    })
}

func TestClusterConfigOnFolderShare(t *testing.T) {
    testConfigChangeTriggersClusterConfigs(t, true, true, nil, func(cfg config.Wrapper) {
        fcfg := cfg.FolderList()[0]
        fcfg.Devices = []config.FolderDeviceConfiguration{{
            DeviceID:     device2,
            IntroducedBy: protocol.EmptyDeviceID,
        }}
        setFolder(t, cfg, fcfg)
    })
}

func TestClusterConfigOnFolderUnshare(t *testing.T) {
    testConfigChangeTriggersClusterConfigs(t, true, false, nil, func(cfg config.Wrapper) {
        fcfg := cfg.FolderList()[0]
        fcfg.Devices = nil
        setFolder(t, cfg, fcfg)
    })
}

func TestClusterConfigOnFolderRemove(t *testing.T) {
    testConfigChangeTriggersClusterConfigs(t, true, false, nil, func(cfg config.Wrapper) {
        rcfg := cfg.RawCopy()
        rcfg.Folders = nil
        replace(t, cfg, rcfg)
    })
}

func TestClusterConfigOnFolderPause(t *testing.T) {
    testConfigChangeTriggersClusterConfigs(t, true, false, nil, func(cfg config.Wrapper) {
        pauseFolder(t, cfg, cfg.FolderList()[0].ID, true)
    })
}

func TestClusterConfigOnFolderUnpause(t *testing.T) {
    testConfigChangeTriggersClusterConfigs(t, true, false, func(cfg config.Wrapper) {
        pauseFolder(t, cfg, cfg.FolderList()[0].ID, true)
    }, func(cfg config.Wrapper) {
        pauseFolder(t, cfg, cfg.FolderList()[0].ID, false)
    })
}

func TestAddFolderCompletion(t *testing.T) {
    // Empty folders are always 100% complete.
    comp := newFolderCompletion(db.Counts{}, db.Counts{}, 0)
    comp.add(newFolderCompletion(db.Counts{}, db.Counts{}, 0))
    if comp.CompletionPct != 100 {
        t.Error(comp.CompletionPct)
    }

    // Completion is calculated over the whole, combined folder.
    comp = newFolderCompletion(db.Counts{Bytes: 100}, db.Counts{}, 0)             // 100% complete
    comp.add(newFolderCompletion(db.Counts{Bytes: 400}, db.Counts{Bytes: 50}, 0)) // 87.5% complete
    if comp.CompletionPct != 90 { // 100 * (1 - 50/500)
        t.Error(comp.CompletionPct)
    }
}

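// Folder completions add by summing the byte counts and recomputing the
// percentage as 100 * (1 - need/global): 100+400 global bytes with 0+50
// needed bytes gives the 90 % asserted above, while the second summand on its
// own is 87.5 %. The arithmetic in isolation (a sketch, not the
// newFolderCompletion implementation):
func exampleCompletionPct(globalBytes, needBytes int64) float64 {
    if globalBytes == 0 {
        return 100 // an empty folder is always fully complete
    }
    return 100 * (1 - float64(needBytes)/float64(globalBytes))
}
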
func TestScanDeletedROChangedOnSR(t *testing.T) {
    m, _, fcfg, wCancel := setupModelWithConnection(t)
    ffs := fcfg.Filesystem()
    defer wCancel()
    defer cleanupModelAndRemoveDir(m, ffs.URI())
    fcfg.Type = config.FolderTypeReceiveOnly
    setFolder(t, m.cfg, fcfg)

    name := "foo"

    writeFile(t, ffs, name, []byte(name))
    m.ScanFolders()

    file, ok := m.testCurrentFolderFile(fcfg.ID, name)
    if !ok {
        t.Fatal("file missing in db")
    }
    // A remote must have the file, otherwise the deletion below is
    // automatically resolved as not a ro-changed item.
    must(t, m.IndexUpdate(device1, fcfg.ID, []protocol.FileInfo{file}))

    must(t, ffs.Remove(name))
    m.ScanFolders()

    if receiveOnlyChangedSize(t, m, fcfg.ID).Deleted != 1 {
        t.Fatal("expected one receive only changed deleted item")
    }

    fcfg.Type = config.FolderTypeSendReceive
    setFolder(t, m.cfg, fcfg)
    m.ScanFolders()

    if receiveOnlyChangedSize(t, m, fcfg.ID).Deleted != 0 {
        t.Fatal("expected no receive only changed deleted item")
    }
    if localSize(t, m, fcfg.ID).Deleted != 1 {
        t.Fatal("expected one local deleted item")
    }
}

func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSecond bool, pre func(config.Wrapper), fn func(config.Wrapper)) {
|
2020-06-22 20:26:26 +00:00
|
|
|
t.Helper()
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, _, wcfgCancel := tmpDefaultWrapper()
|
|
|
|
defer wcfgCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, wcfg)
|
2020-06-22 20:26:26 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
2021-02-04 20:10:41 +00:00
|
|
|
setDevice(t, wcfg, newDeviceConfiguration(wcfg.DefaultDevice(), device2, "device2"))
|
2020-06-22 20:26:26 +00:00
|
|
|
|
|
|
|
if pre != nil {
|
|
|
|
pre(wcfg)
|
|
|
|
}
|
|
|
|
|
2020-10-02 09:49:51 +00:00
|
|
|
cc1 := make(chan struct{}, 1)
|
|
|
|
cc2 := make(chan struct{}, 1)
|
2021-03-03 07:53:50 +00:00
|
|
|
fc1 := newFakeConnection(device1, m)
|
|
|
|
fc1.ClusterConfigCalls(func(_ protocol.ClusterConfig) {
|
|
|
|
cc1 <- struct{}{}
|
|
|
|
})
|
|
|
|
fc2 := newFakeConnection(device2, m)
|
|
|
|
fc2.ClusterConfigCalls(func(_ protocol.ClusterConfig) {
|
|
|
|
cc2 <- struct{}{}
|
|
|
|
})
|
2020-09-29 11:17:38 +00:00
|
|
|
m.AddConnection(fc1, protocol.Hello{})
|
|
|
|
m.AddConnection(fc2, protocol.Hello{})
|
2020-06-22 20:26:26 +00:00
|
|
|
|
2020-10-02 09:49:51 +00:00
|
|
|
// Initial CCs
|
|
|
|
select {
|
|
|
|
case <-cc1:
|
|
|
|
default:
|
|
|
|
t.Fatal("missing initial CC from device1")
|
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-cc2:
|
|
|
|
default:
|
|
|
|
t.Fatal("missing initial CC from device2")
|
|
|
|
}
|
|
|
|
|
2020-06-22 20:26:26 +00:00
|
|
|
t.Log("Applying config change")
|
|
|
|
|
|
|
|
fn(wcfg)
|
|
|
|
|
2020-10-02 09:49:51 +00:00
|
|
|
timeout := time.NewTimer(time.Second)
|
|
|
|
if expectFirst {
|
|
|
|
select {
|
|
|
|
case <-cc1:
|
|
|
|
case <-timeout.C:
|
|
|
|
t.Errorf("timed out before receiving cluste rconfig for first device")
|
|
|
|
}
|
2020-06-22 20:26:26 +00:00
|
|
|
}
|
2020-10-02 09:49:51 +00:00
|
|
|
if expectSecond {
|
|
|
|
select {
|
|
|
|
case <-cc2:
|
|
|
|
case <-timeout.C:
|
|
|
|
t.Errorf("timed out before receiving cluste rconfig for second device")
|
|
|
|
}
|
2020-06-22 20:26:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-07 18:18:25 +00:00
|
|
|
// The end result of the tested scenario is that the global version entry has an
|
|
|
|
// empty version vector and is not deleted, while everything is actually deleted.
|
|
|
|
// That then causes these files to be considered as needed, while they are not.
|
|
|
|
// https://github.com/syncthing/syncthing/issues/6961
|
|
|
|
func TestIssue6961(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
wcfg, fcfg, wcfgCancel := tmpDefaultWrapper()
|
|
|
|
defer wcfgCancel()
|
2020-09-07 18:18:25 +00:00
|
|
|
tfs := fcfg.Filesystem()
|
2021-01-15 14:43:34 +00:00
|
|
|
waiter, err := wcfg.Modify(func(cfg *config.Configuration) {
|
2021-02-04 20:10:41 +00:00
|
|
|
cfg.SetDevice(newDeviceConfiguration(cfg.Defaults.Device, device2, "device2"))
|
2021-01-15 14:43:34 +00:00
|
|
|
fcfg.Type = config.FolderTypeReceiveOnly
|
|
|
|
fcfg.Devices = append(fcfg.Devices, config.FolderDeviceConfiguration{DeviceID: device2})
|
|
|
|
cfg.SetFolder(fcfg)
|
|
|
|
})
|
|
|
|
must(t, err)
|
|
|
|
waiter.Wait()
|
2020-09-10 08:54:41 +00:00
|
|
|
// Always recalc/repair when opening a fileset.
|
2020-12-21 11:59:22 +00:00
|
|
|
m := newModel(t, wcfg, myID, "syncthing", "dev", nil)
|
|
|
|
m.db.Close()
|
|
|
|
m.db, err = db.NewLowlevel(backend.OpenMemory(), m.evLogger, db.WithRecheckInterval(time.Millisecond))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2020-09-10 08:54:41 +00:00
|
|
|
m.ServeBackground()
|
|
|
|
defer cleanupModelAndRemoveDir(m, tfs.URI())
|
2021-06-03 12:58:50 +00:00
|
|
|
addFakeConn(m, device1, fcfg.ID)
|
|
|
|
addFakeConn(m, device2, fcfg.ID)
|
2020-09-10 08:54:41 +00:00
|
|
|
m.ScanFolders()
|
2020-09-07 18:18:25 +00:00
|
|
|
|
|
|
|
name := "foo"
|
|
|
|
version := protocol.Vector{}.Update(device1.Short())
|
|
|
|
|
|
|
|
// Remote, valid and existing file
|
2021-05-16 15:23:27 +00:00
|
|
|
must(t, m.Index(device1, fcfg.ID, []protocol.FileInfo{{Name: name, Version: version, Sequence: 1}}))
|
2020-09-07 18:18:25 +00:00
|
|
|
// Remote, invalid (receive-only) and existing file
|
2021-05-16 15:23:27 +00:00
|
|
|
must(t, m.Index(device2, fcfg.ID, []protocol.FileInfo{{Name: name, RawInvalid: true, Sequence: 1}}))
|
2020-09-07 18:18:25 +00:00
|
|
|
// Create a local file
|
|
|
|
if fd, err := tfs.OpenFile(name, fs.OptCreate, 0666); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else {
|
|
|
|
fd.Close()
|
|
|
|
}
|
|
|
|
if info, err := tfs.Lstat(name); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else {
|
|
|
|
l.Infoln("intest", info.Mode)
|
|
|
|
}
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
// Get rid of valid global
|
2021-01-15 14:43:34 +00:00
|
|
|
waiter, err = wcfg.RemoveDevice(device1)
|
2020-09-07 18:18:25 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
waiter.Wait()
|
|
|
|
|
|
|
|
// Delete the local file
|
|
|
|
must(t, tfs.Remove(name))
|
|
|
|
m.ScanFolders()
|
|
|
|
|
|
|
|
// Drop the remote index, add some other file.
|
2021-05-16 15:23:27 +00:00
|
|
|
must(t, m.Index(device2, fcfg.ID, []protocol.FileInfo{{Name: "bar", RawInvalid: true, Sequence: 1}}))
|
2020-09-07 18:18:25 +00:00
|
|
|
|
2020-09-10 08:54:41 +00:00
|
|
|
// Pause and unpause folder to create new db.FileSet and thus recalculate everything
|
2021-01-15 14:43:34 +00:00
|
|
|
pauseFolder(t, wcfg, fcfg.ID, true)
|
|
|
|
pauseFolder(t, wcfg, fcfg.ID, false)
|
2020-09-07 18:18:25 +00:00
|
|
|
|
2021-03-07 12:43:22 +00:00
|
|
|
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedDeletes != 0 {
|
2020-09-07 18:18:25 +00:00
|
|
|
t.Error("Expected 0 needed deletes, got", comp.NeedDeletes)
|
|
|
|
} else {
|
|
|
|
t.Log(comp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-07 18:03:18 +00:00
|
|
|
func TestCompletionEmptyGlobal(t *testing.T) {
    m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
    defer wcfgCancel()
    defer cleanupModelAndRemoveDir(m, fcfg.Filesystem().URI())
    files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID.Short()), Sequence: 1}}
    m.fmut.Lock()
    m.folderFiles[fcfg.ID].Update(protocol.LocalDeviceID, files)
    m.fmut.Unlock()
    files[0].Deleted = true
    files[0].Version = files[0].Version.Update(device1.Short())
    must(t, m.IndexUpdate(device1, fcfg.ID, files))
    comp := m.testCompletion(protocol.LocalDeviceID, fcfg.ID)
    if comp.CompletionPct != 95 {
        t.Error("Expected completion of 95%, got", comp.CompletionPct)
    }
}

func TestNeedMetaAfterIndexReset(t *testing.T) {
    w, fcfg, wCancel := tmpDefaultWrapper()
    defer wCancel()
    addDevice2(t, w, fcfg)
    m := setupModel(t, w)
    defer cleanupModelAndRemoveDir(m, fcfg.Path)
    addFakeConn(m, device1, fcfg.ID)
    addFakeConn(m, device2, fcfg.ID)

    var seq int64 = 1
    files := []protocol.FileInfo{{Name: "foo", Size: 10, Version: protocol.Vector{}.Update(device1.Short()), Sequence: seq}}

    // Start with two remotes having one file, then both deleting it, then
    // only one adding it again.
    must(t, m.Index(device1, fcfg.ID, files))
    must(t, m.Index(device2, fcfg.ID, files))
    seq++
    files[0].SetDeleted(device2.Short())
    files[0].Sequence = seq
    must(t, m.IndexUpdate(device2, fcfg.ID, files))
    must(t, m.IndexUpdate(device1, fcfg.ID, files))
    seq++
    files[0].Deleted = false
    files[0].Size = 20
    files[0].Version = files[0].Version.Update(device1.Short())
    files[0].Sequence = seq
    must(t, m.IndexUpdate(device1, fcfg.ID, files))

    if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
        t.Error("Expected one needed item for device2, got", comp.NeedItems)
    }

    // Pretend we had an index reset on device 1
    must(t, m.Index(device1, fcfg.ID, files))
    if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
        t.Error("Expected one needed item for device2, got", comp.NeedItems)
    }
}

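// In the BEP protocol an Index message replaces whatever index the device had
// previously announced for the folder, while IndexUpdate only amends it; the
// test re-sends the same files as a full Index to emulate such a reset and
// expects the need accounting to survive. Tracking that boundary per device
// boils down to the bookkeeping sketched below (illustrative only, not the
// model's index handler):
type exampleIndexTracker struct {
    mut     sync.Mutex
    highest map[protocol.DeviceID]int64 // highest sequence seen per device
}

func (x *exampleIndexTracker) onIndex(id protocol.DeviceID, full bool, maxSeq int64) {
    x.mut.Lock()
    defer x.mut.Unlock()
    if x.highest == nil {
        x.highest = make(map[protocol.DeviceID]int64)
    }
    if full {
        // A full Index restarts the device's sequence numbering, so drop the
        // old high-water mark before recording the new one.
        x.highest[id] = 0
    }
    if maxSeq > x.highest[id] {
        x.highest[id] = maxSeq
    }
}
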
func TestCcCheckEncryption(t *testing.T) {
|
2021-02-08 16:13:28 +00:00
|
|
|
if testing.Short() {
|
|
|
|
t.Skip("skipping on short testing - generating encryption tokens is slow")
|
|
|
|
}
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, w)
|
2020-11-17 12:19:04 +00:00
|
|
|
m.cancel()
|
2020-11-09 14:33:32 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
|
|
|
pw := "foo"
|
|
|
|
token := protocol.PasswordToken(fcfg.ID, pw)
|
|
|
|
m.folderEncryptionPasswordTokens[fcfg.ID] = token
|
|
|
|
|
|
|
|
testCases := []struct {
|
|
|
|
tokenRemote, tokenLocal []byte
|
|
|
|
isEncryptedRemote, isEncryptedLocal bool
|
|
|
|
expectedErr error
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
tokenRemote: token,
|
|
|
|
tokenLocal: token,
|
|
|
|
expectedErr: errEncryptionInvConfigRemote,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
isEncryptedRemote: true,
|
|
|
|
isEncryptedLocal: true,
|
|
|
|
expectedErr: errEncryptionInvConfigLocal,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: token,
|
|
|
|
tokenLocal: nil,
|
|
|
|
isEncryptedRemote: false,
|
|
|
|
isEncryptedLocal: false,
|
|
|
|
expectedErr: errEncryptionNotEncryptedLocal,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: token,
|
|
|
|
tokenLocal: nil,
|
|
|
|
isEncryptedRemote: true,
|
|
|
|
isEncryptedLocal: false,
|
|
|
|
expectedErr: nil,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: token,
|
|
|
|
tokenLocal: nil,
|
|
|
|
isEncryptedRemote: false,
|
|
|
|
isEncryptedLocal: true,
|
|
|
|
expectedErr: nil,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: nil,
|
|
|
|
tokenLocal: token,
|
|
|
|
isEncryptedRemote: true,
|
|
|
|
isEncryptedLocal: false,
|
|
|
|
expectedErr: nil,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: nil,
|
|
|
|
tokenLocal: token,
|
|
|
|
isEncryptedRemote: false,
|
|
|
|
isEncryptedLocal: true,
|
|
|
|
expectedErr: nil,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: nil,
|
|
|
|
tokenLocal: token,
|
|
|
|
isEncryptedRemote: false,
|
|
|
|
isEncryptedLocal: false,
|
|
|
|
expectedErr: errEncryptionNotEncryptedLocal,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: nil,
|
|
|
|
tokenLocal: nil,
|
|
|
|
isEncryptedRemote: true,
|
|
|
|
isEncryptedLocal: false,
|
2021-05-11 05:55:44 +00:00
|
|
|
expectedErr: errEncryptionPlainForRemoteEncrypted,
|
2020-11-09 14:33:32 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: nil,
|
|
|
|
tokenLocal: nil,
|
|
|
|
isEncryptedRemote: false,
|
|
|
|
isEncryptedLocal: true,
|
2021-05-11 05:55:44 +00:00
|
|
|
expectedErr: errEncryptionPlainForReceiveEncrypted,
|
2020-11-09 14:33:32 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
tokenRemote: nil,
|
|
|
|
tokenLocal: nil,
|
|
|
|
isEncryptedRemote: false,
|
|
|
|
isEncryptedLocal: false,
|
|
|
|
expectedErr: nil,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, tc := range testCases {
|
|
|
|
tfcfg := fcfg.Copy()
|
|
|
|
if tc.isEncryptedLocal {
|
|
|
|
tfcfg.Type = config.FolderTypeReceiveEncrypted
|
|
|
|
m.folderEncryptionPasswordTokens[fcfg.ID] = token
|
|
|
|
}
|
|
|
|
dcfg := config.FolderDeviceConfiguration{DeviceID: device1}
|
|
|
|
if tc.isEncryptedRemote {
|
|
|
|
dcfg.EncryptionPassword = pw
|
|
|
|
}
|
|
|
|
|
2021-06-03 12:58:50 +00:00
|
|
|
deviceInfos := &clusterConfigDeviceInfo{
|
2020-11-09 14:33:32 +00:00
|
|
|
remote: protocol.Device{ID: device1, EncryptionPasswordToken: tc.tokenRemote},
|
|
|
|
local: protocol.Device{ID: myID, EncryptionPasswordToken: tc.tokenLocal},
|
|
|
|
}
|
|
|
|
err := m.ccCheckEncryption(tfcfg, dcfg, deviceInfos, false)
|
|
|
|
if err != tc.expectedErr {
|
|
|
|
t.Errorf("Testcase %v: Expected error %v, got %v", i, tc.expectedErr, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
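// For combinations that are accepted, run the check again with the device
// treated as untrusted: then at least one side must use encryption, and a
// plaintext setup on both sides is rejected.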
if tc.expectedErr == nil {
|
|
|
|
err := m.ccCheckEncryption(tfcfg, dcfg, deviceInfos, true)
|
|
|
|
if tc.isEncryptedRemote || tc.isEncryptedLocal {
|
|
|
|
if err != nil {
|
|
|
|
t.Errorf("Testcase %v: Expected no error, got %v", i, err)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if err != errEncryptionNotEncryptedUntrusted {
|
|
|
|
t.Errorf("Testcase %v: Expected error %v, got %v", i, errEncryptionNotEncryptedUntrusted, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil || (!tc.isEncryptedRemote && !tc.isEncryptedLocal) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
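// Finally make the stored token and the configured password disagree and
// expect the password mismatch error.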
if tc.isEncryptedLocal {
|
|
|
|
m.folderEncryptionPasswordTokens[fcfg.ID] = []byte("notAMatch")
|
|
|
|
} else {
|
|
|
|
dcfg.EncryptionPassword = "notAMatch"
|
|
|
|
}
|
|
|
|
err = m.ccCheckEncryption(tfcfg, dcfg, deviceInfos, false)
|
|
|
|
if err != errEncryptionPassword {
|
|
|
|
t.Errorf("Testcase %v: Expected error %v, got %v", i, errEncryptionPassword, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-20 13:13:50 +00:00
|
|
|
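// TestCCFolderNotRunning checks that a configured folder which hasn't been
// started yet still shows up in the generated ClusterConfig, and that a
// non-paused folder entry carries a non-zero index ID for the local device.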
func TestCCFolderNotRunning(t *testing.T) {
|
|
|
|
// Create the folder, but don't start it.
|
2021-01-15 14:43:34 +00:00
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2020-11-20 13:13:50 +00:00
|
|
|
tfs := fcfg.Filesystem()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := newModel(t, w, myID, "syncthing", "dev", nil)
|
2020-11-20 13:13:50 +00:00
|
|
|
defer cleanupModelAndRemoveDir(m, tfs.URI())
|
|
|
|
|
|
|
|
// A connection can happen before all the folders are started.
|
2021-06-03 13:39:49 +00:00
|
|
|
cc, _ := m.generateClusterConfig(device1)
|
2020-11-20 13:13:50 +00:00
|
|
|
if l := len(cc.Folders); l != 1 {
|
|
|
|
t.Fatalf("Expected 1 folder in CC, got %v", l)
|
|
|
|
}
|
|
|
|
folder := cc.Folders[0]
|
|
|
|
if id := folder.ID; id != fcfg.ID {
|
|
|
|
t.Fatalf("Expected folder %v, got %v", fcfg.ID, id)
|
|
|
|
}
|
|
|
|
if l := len(folder.Devices); l != 2 {
|
|
|
|
t.Fatalf("Expected 2 devices in CC, got %v", l)
|
|
|
|
}
|
|
|
|
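// The order of devices in the folder entry isn't fixed, so locate our own
// entry before checking its index ID.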
local := folder.Devices[1]
|
|
|
|
if local.ID != myID {
|
|
|
|
local = folder.Devices[0]
|
|
|
|
}
|
|
|
|
if !folder.Paused && local.IndexID == 0 {
|
|
|
|
t.Errorf("Folder isn't paused, but index-id is zero")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-17 18:54:31 +00:00
|
|
|
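// TestPendingFolder checks PendingFolders: recorded offers show up for the
// offering devices, the result can be filtered by device, and entries are
// cleaned up again when the offering device or the folder itself is removed
// from the configuration.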
func TestPendingFolder(t *testing.T) {
|
2021-01-15 14:43:34 +00:00
|
|
|
w, _, wCancel := tmpDefaultWrapper()
|
|
|
|
defer wCancel()
|
2020-12-21 11:59:22 +00:00
|
|
|
m := setupModel(t, w)
|
2020-12-17 18:54:31 +00:00
|
|
|
defer cleanupModel(m)
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
setDevice(t, w, config.DeviceConfiguration{DeviceID: device2})
|
2020-12-17 18:54:31 +00:00
|
|
|
pfolder := "default"
|
2021-04-11 13:24:08 +00:00
|
|
|
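// Record a pending offer for the folder from device2 directly in the
// database, much like the model does when a ClusterConfig announces a
// folder we don't have yet.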
of := db.ObservedFolder{
|
|
|
|
Time: time.Now().Truncate(time.Second),
|
|
|
|
Label: pfolder,
|
|
|
|
}
|
|
|
|
if err := m.db.AddOrUpdatePendingFolder(pfolder, of, device2); err != nil {
|
2020-12-17 18:54:31 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
deviceFolders, err := m.PendingFolders(protocol.EmptyDeviceID)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else if pf, ok := deviceFolders[pfolder]; !ok {
|
|
|
|
t.Errorf("folder %v not pending", pfolder)
|
|
|
|
} else if _, ok := pf.OfferedBy[device2]; !ok {
|
|
|
|
t.Errorf("folder %v not pending for device %v", pfolder, device2)
|
|
|
|
} else if len(pf.OfferedBy) > 1 {
|
|
|
|
t.Errorf("folder %v pending for too many devices %v", pfolder, pf.OfferedBy)
|
|
|
|
}
|
|
|
|
|
|
|
|
device3, err := protocol.DeviceIDFromString("AIBAEAQ-CAIBAEC-AQCAIBA-EAQCAIA-BAEAQCA-IBAEAQC-CAIBAEA-QCAIBA7")
|
2021-03-21 09:32:17 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2021-01-15 14:43:34 +00:00
|
|
|
setDevice(t, w, config.DeviceConfiguration{DeviceID: device3})
|
2021-04-11 13:24:08 +00:00
|
|
|
if err := m.db.AddOrUpdatePendingFolder(pfolder, of, device3); err != nil {
|
2020-12-17 18:54:31 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
deviceFolders, err = m.PendingFolders(device2)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else if pf, ok := deviceFolders[pfolder]; !ok {
|
|
|
|
t.Errorf("folder %v not pending when filtered", pfolder)
|
|
|
|
} else if _, ok := pf.OfferedBy[device2]; !ok {
|
|
|
|
t.Errorf("folder %v not pending for device %v when filtered", pfolder, device2)
|
|
|
|
} else if _, ok := pf.OfferedBy[device3]; ok {
|
|
|
|
t.Errorf("folder %v pending for device %v, but not filtered out", pfolder, device3)
|
|
|
|
}
|
|
|
|
|
2021-01-15 14:43:34 +00:00
|
|
|
waiter, err := w.RemoveDevice(device3)
|
2020-12-17 18:54:31 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
waiter.Wait()
|
|
|
|
deviceFolders, err = m.PendingFolders(protocol.EmptyDeviceID)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else if pf, ok := deviceFolders[pfolder]; !ok {
|
|
|
|
t.Errorf("folder %v not pending", pfolder)
|
|
|
|
} else if _, ok := pf.OfferedBy[device3]; ok {
|
|
|
|
t.Errorf("folder %v pending for removed device %v", pfolder, device3)
|
|
|
|
}
|
|
|
|
|
|
|
|
waiter, err = w.RemoveFolder(pfolder)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
waiter.Wait()
|
|
|
|
deviceFolders, err = m.PendingFolders(protocol.EmptyDeviceID)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
} else if _, ok := deviceFolders[pfolder]; ok {
|
|
|
|
t.Errorf("folder %v still pending after local removal", pfolder)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-10 08:46:21 +00:00
|
|
|
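// The following two tests run the same scenario for receive-only and
// receive-encrypted folders respectively.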
func TestDeletedNotLocallyChangedReceiveOnly(t *testing.T) {
|
|
|
|
deletedNotLocallyChanged(t, config.FolderTypeReceiveOnly)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDeletedNotLocallyChangedReceiveEncrypted(t *testing.T) {
|
|
|
|
deletedNotLocallyChanged(t, config.FolderTypeReceiveEncrypted)
|
|
|
|
}
|
|
|
|
|
|
|
|
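// deletedNotLocallyChanged checks that a file which only ever existed as a
// local, receive-only-changed addition is dropped from the database
// entirely once it is deleted and rescanned, rather than lingering as a
// locally changed delete entry.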
func deletedNotLocallyChanged(t *testing.T, ft config.FolderType) {
|
|
|
|
w, fcfg, wCancel := tmpDefaultWrapper()
|
|
|
|
tfs := fcfg.Filesystem()
|
|
|
|
fcfg.Type = ft
|
|
|
|
setFolder(t, w, fcfg)
|
|
|
|
defer wCancel()
|
|
|
|
m := setupModel(t, w)
|
|
|
|
defer cleanupModelAndRemoveDir(m, tfs.URI())
|
|
|
|
|
|
|
|
name := "foo"
|
|
|
|
writeFile(t, tfs, name, nil)
|
|
|
|
must(t, m.ScanFolder(fcfg.ID))
|
|
|
|
|
|
|
|
fi, ok, err := m.CurrentFolderFile(fcfg.ID, name)
|
|
|
|
must(t, err)
|
|
|
|
if !ok {
|
|
|
|
t.Fatal("File hasn't been added")
|
|
|
|
}
|
|
|
|
if !fi.IsReceiveOnlyChanged() {
|
|
|
|
t.Fatal("File isn't receive-only-changed")
|
|
|
|
}
|
|
|
|
|
|
|
|
must(t, tfs.Remove(name))
|
|
|
|
must(t, m.ScanFolder(fcfg.ID))
|
|
|
|
|
|
|
|
_, ok, err = m.CurrentFolderFile(fcfg.ID, name)
|
|
|
|
must(t, err)
|
|
|
|
if ok {
|
|
|
|
t.Error("Expected file to be removed from db")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-11 18:15:11 +00:00
|
|
|
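// equalStringsInAnyOrder reports whether a and b contain the same strings,
// ignoring order. Note that it sorts both slices in place, so the callers'
// slices are reordered as a side effect. Hypothetical example:
//
//	if !equalStringsInAnyOrder(got, []string{"a", "b"}) {
//		t.Errorf("unexpected set: %v", got)
//	}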
func equalStringsInAnyOrder(a, b []string) bool {
|
|
|
|
if len(a) != len(b) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
sort.Strings(a)
|
|
|
|
sort.Strings(b)
|
|
|
|
for i := range a {
|
|
|
|
if a[i] != b[i] {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|