Jakob Borg 2023-05-24 12:23:05 +02:00
parent 72f3c5647b
commit 463b5d7c6c
8 changed files with 82 additions and 73 deletions


@@ -14,6 +14,7 @@ import (
"github.com/syncthing/syncthing/lib/protocol"
protocolmocks "github.com/syncthing/syncthing/lib/protocol/mocks"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/scanner"
)
@@ -33,6 +34,7 @@ func newFakeConnection(id protocol.DeviceID, model Model) *fakeConnection {
return f.fileData[name], nil
})
f.IDReturns(id)
f.ConnectionIDReturns(rand.String(16))
f.CloseCalls(func(err error) {
f.closeOnce.Do(func() {
close(f.closed)


@@ -30,7 +30,7 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
defer wcfgCancel()
ffs := f.Filesystem(nil)
defer cleanupModel(m)
addFakeConn(m, device1, f.ID)
conn := addFakeConn(m, device1, f.ID)
// Create some test data
@@ -45,7 +45,7 @@ func TestRecvOnlyRevertDeletes(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(device1Conn, "ro", knownFiles))
must(t, m.Index(conn, "ro", knownFiles))
f.updateLocalsFromScanning(knownFiles)
size := globalSize(t, m, "ro")
@@ -112,7 +112,7 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
defer wcfgCancel()
ffs := f.Filesystem(nil)
defer cleanupModel(m)
addFakeConn(m, device1, f.ID)
conn := addFakeConn(m, device1, f.ID)
// Create some test data
@@ -122,7 +122,7 @@ func TestRecvOnlyRevertNeeds(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(device1Conn, "ro", knownFiles))
must(t, m.Index(conn, "ro", knownFiles))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@@ -202,7 +202,7 @@ func TestRecvOnlyUndoChanges(t *testing.T) {
defer wcfgCancel()
ffs := f.Filesystem(nil)
defer cleanupModel(m)
addFakeConn(m, device1, f.ID)
conn := addFakeConn(m, device1, f.ID)
// Create some test data
@@ -212,7 +212,7 @@ func TestRecvOnlyUndoChanges(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(device1Conn, "ro", knownFiles))
must(t, m.Index(conn, "ro", knownFiles))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@@ -272,7 +272,7 @@ func TestRecvOnlyDeletedRemoteDrop(t *testing.T) {
defer wcfgCancel()
ffs := f.Filesystem(nil)
defer cleanupModel(m)
addFakeConn(m, device1, f.ID)
conn := addFakeConn(m, device1, f.ID)
// Create some test data
@@ -282,7 +282,7 @@ func TestRecvOnlyDeletedRemoteDrop(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(device1Conn, "ro", knownFiles))
must(t, m.Index(conn, "ro", knownFiles))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@@ -337,7 +337,7 @@ func TestRecvOnlyRemoteUndoChanges(t *testing.T) {
defer wcfgCancel()
ffs := f.Filesystem(nil)
defer cleanupModel(m)
addFakeConn(m, device1, f.ID)
conn := addFakeConn(m, device1, f.ID)
// Create some test data
@@ -347,7 +347,7 @@ func TestRecvOnlyRemoteUndoChanges(t *testing.T) {
// Send an index update for the known stuff
must(t, m.Index(device1Conn, "ro", knownFiles))
must(t, m.Index(conn, "ro", knownFiles))
f.updateLocalsFromScanning(knownFiles)
// Scan the folder.
@@ -402,7 +402,7 @@ func TestRecvOnlyRemoteUndoChanges(t *testing.T) {
return true
})
snap.Release()
must(t, m.IndexUpdate(device1Conn, "ro", files))
must(t, m.IndexUpdate(conn, "ro", files))
// Ensure the pull to resolve conflicts (content identical) happened
must(t, f.doInSync(func() error {
@@ -427,7 +427,7 @@ func TestRecvOnlyRevertOwnID(t *testing.T) {
defer wcfgCancel()
ffs := f.Filesystem(nil)
defer cleanupModel(m)
addFakeConn(m, device1, f.ID)
conn := addFakeConn(m, device1, f.ID)
// Create some test data
@@ -470,7 +470,7 @@ func TestRecvOnlyRevertOwnID(t *testing.T) {
}()
// Receive an index update with an older version, but valid and then revert
must(t, m.Index(device1Conn, f.ID, []protocol.FileInfo{fi}))
must(t, m.Index(conn, f.ID, []protocol.FileInfo{fi}))
f.Revert()
select {


@@ -1278,7 +1278,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
m, f, wcfgCancel := setupSendReceiveFolder(t)
defer wcfgCancel()
addFakeConn(m, device1, f.ID)
conn := addFakeConn(m, device1, f.ID)
name := "foo"
if fd, err := f.mtimefs.Create(name); err != nil {
@@ -1296,7 +1296,7 @@ func TestPullSymlinkOverExistingWindows(t *testing.T) {
if !ok {
t.Fatal("file missing")
}
must(t, m.Index(device1Conn, f.ID, []protocol.FileInfo{{Name: name, Type: protocol.FileInfoTypeSymlink, Version: file.Version.Update(device1.Short())}}))
must(t, m.Index(conn, f.ID, []protocol.FileInfo{{Name: name, Type: protocol.FileInfoTypeSymlink, Version: file.Version.Update(device1.Short())}}))
scanChan := make(chan string)


@@ -572,7 +572,7 @@ func (r *indexHandlerRegistry) ReceiveIndex(folder string, fs []protocol.FileInf
is, isOk := r.indexHandlers[folder]
if !isOk {
l.Infof("%v for nonexistent or paused folder %q", op, folder)
return ErrFolderMissing
return fmt.Errorf("%s: %w", folder, ErrFolderMissing)
}
return is.receive(fs, update, op)
}


@@ -255,8 +255,9 @@ func NewModel(cfg config.Wrapper, id protocol.DeviceID, clientName, clientVersio
remoteFolderStates: make(map[protocol.DeviceID]map[string]remoteFolderState),
indexHandlers: make(map[protocol.DeviceID]*indexHandlerRegistry),
}
for devID := range cfg.Devices() {
for devID, cfg := range cfg.Devices() {
m.deviceStatRefs[devID] = stats.NewDeviceStatisticsReference(m.db, devID)
m.setConnRequestLimiters(cfg)
}
m.Add(m.progressEmitter)
m.Add(svcutil.AsService(m.serve, m.String()))
@@ -1359,16 +1360,16 @@ func (m *model) ensureIndexHandler(conn protocol.Connection) *indexHandlerRegist
// the other side has decided to start using a new primary
// connection but we haven't seen it close yet. Ideally it will
// close shortly by itself...
l.Infoln("Abandoning old index handler for", deviceID)
l.Infof("Abandoning old index handler for %s (%s) in favour of %s", deviceID.Short(), indexHandlerRegistry.conn.ConnectionID(), connID)
}
// Create a new index handler for this device.
indexHandlerRegistry = newIndexHandlerRegistry(conn, m.deviceDownloads[deviceID], m.closed[connID], m.Supervisor, m.evLogger)
for id, fcfg := range m.folderCfgs {
l.Infoln("Registering folder", id, "for", deviceID.Short())
indexHandlerRegistry.RegisterFolderState(fcfg, m.folderFiles[id], m.folderRunners[id])
}
m.indexHandlers[deviceID] = indexHandlerRegistry
m.deviceDownloads[deviceID] = newDeviceDownloadState()
return indexHandlerRegistry
}
@@ -1880,7 +1881,6 @@ func (m *model) Closed(conn protocol.Connection, err error) {
m.progressEmitter.temporaryIndexUnsubscribe(conn)
if idxh, ok := m.indexHandlers[deviceID]; ok && idxh.conn.ConnectionID() == connID {
delete(m.indexHandlers, deviceID)
delete(m.deviceDownloads, deviceID)
}
m.scheduleConnectionPromotion()
}
@@ -1890,6 +1890,7 @@ func (m *model) Closed(conn protocol.Connection, err error) {
delete(m.connRequestLimiters, deviceID)
delete(m.helloMessages, deviceID)
delete(m.remoteFolderStates, deviceID)
delete(m.deviceDownloads, deviceID)
m.deviceDidClose(deviceID, time.Since(conn.EstablishedAt()))
} else {
// Some connections remain
@@ -2346,6 +2347,9 @@ func (m *model) AddConnection(conn protocol.Connection, hello protocol.Hello) {
m.closed[connID] = closed
m.helloMessages[deviceID] = hello
m.deviceConns[deviceID] = append(m.deviceConns[deviceID], connID)
if m.deviceDownloads[deviceID] == nil {
m.deviceDownloads[deviceID] = newDeviceDownloadState()
}
event := map[string]string{
"id": deviceID.String(),
@@ -3075,15 +3079,9 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
m.evLogger.Log(events.DeviceResumed, map[string]string{"device": deviceID.String()})
}
// 0: default, <0: no limiting
m.pmut.Lock()
switch {
case toCfg.MaxRequestKiB > 0:
m.connRequestLimiters[deviceID] = util.NewSemaphore(1024 * toCfg.MaxRequestKiB)
case toCfg.MaxRequestKiB == 0:
m.connRequestLimiters[deviceID] = util.NewSemaphore(1024 * defaultPullerPendingKiB)
if toCfg.MaxRequestKiB != fromCfg.MaxRequestKiB {
m.setConnRequestLimiters(toCfg)
}
m.pmut.Unlock()
}
// Clean up after removed devices
removedDevices := make([]protocol.DeviceID, 0, len(fromDevices))
@@ -3133,6 +3131,18 @@ func (m *model) CommitConfiguration(from, to config.Configuration) bool {
return true
}
func (m *model) setConnRequestLimiters(cfg config.DeviceConfiguration) {
m.pmut.Lock()
// 0: default, <0: no limiting
switch {
case cfg.MaxRequestKiB > 0:
m.connRequestLimiters[cfg.DeviceID] = util.NewSemaphore(1024 * cfg.MaxRequestKiB)
case cfg.MaxRequestKiB == 0:
m.connRequestLimiters[cfg.DeviceID] = util.NewSemaphore(1024 * defaultPullerPendingKiB)
}
m.pmut.Unlock()
}
func (m *model) cleanPending(existingDevices map[protocol.DeviceID]config.DeviceConfiguration, existingFolders map[string]config.FolderConfiguration, ignoredDevices deviceIDSet, removedFolders map[string]struct{}) {
var removedPendingFolders []map[string]string
pendingFolders, err := m.db.PendingFolders()


@@ -1703,7 +1703,7 @@ func TestRWScanRecovery(t *testing.T) {
}
func TestGlobalDirectoryTree(t *testing.T) {
m, _, fcfg, wCancel := setupModelWithConnection(t)
m, conn, fcfg, wCancel := setupModelWithConnection(t)
defer wCancel()
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem(nil).URI())
@@ -1806,7 +1806,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
return string(bytes)
}
must(t, m.Index(device1Conn, "default", testdata))
must(t, m.Index(conn, "default", testdata))
result, _ := m.GlobalDirectoryTree("default", "", -1, false)
@@ -2159,7 +2159,7 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
conn2 := newFakeConnection(device2, m)
m.AddConnection(conn2, protocol.Hello{})
m.ClusterConfig(device1Conn, protocol.ClusterConfig{
m.ClusterConfig(conn1, protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: "default",
@@ -2171,7 +2171,7 @@ func TestSharedWithClearedOnDisconnect(t *testing.T) {
},
},
})
m.ClusterConfig(device2Conn, protocol.ClusterConfig{
m.ClusterConfig(conn2, protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: "default",
@@ -2397,7 +2397,7 @@ func TestCustomMarkerName(t *testing.T) {
}
func TestRemoveDirWithContent(t *testing.T) {
m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
m, conn, fcfg, wcfgCancel := setupModelWithConnection(t)
defer wcfgCancel()
tfs := fcfg.Filesystem(nil)
defer cleanupModelAndRemoveDir(m, tfs.URI())
@@ -2424,7 +2424,7 @@ func TestRemoveDirWithContent(t *testing.T) {
file.Deleted = true
file.Version = file.Version.Update(device1.Short()).Update(device1.Short())
must(t, m.IndexUpdate(device1Conn, fcfg.ID, []protocol.FileInfo{dir, file}))
must(t, m.IndexUpdate(conn, fcfg.ID, []protocol.FileInfo{dir, file}))
// Is there something we could trigger on instead of just waiting?
timeout := time.NewTimer(5 * time.Second)
@@ -2914,19 +2914,19 @@ func TestRequestLimit(t *testing.T) {
})
must(t, err)
waiter.Wait()
m, _ := setupModelWithConnectionFromWrapper(t, wrapper)
m, conn := setupModelWithConnectionFromWrapper(t, wrapper)
defer cleanupModel(m)
m.ScanFolder("default")
befReq := time.Now()
first, err := m.Request(device1Conn, "default", file, 0, 2000, 0, nil, 0, false)
first, err := m.Request(conn, "default", file, 0, 2000, 0, nil, 0, false)
if err != nil {
t.Fatalf("First request failed: %v", err)
}
reqDur := time.Since(befReq)
returned := make(chan struct{})
go func() {
second, err := m.Request(device1Conn, "default", file, 0, 2000, 0, nil, 0, false)
second, err := m.Request(conn, "default", file, 0, 2000, 0, nil, 0, false)
if err != nil {
t.Errorf("Second request failed: %v", err)
}
@@ -3568,7 +3568,7 @@ func TestAddFolderCompletion(t *testing.T) {
}
func TestScanDeletedROChangedOnSR(t *testing.T) {
m, _, fcfg, wCancel := setupModelWithConnection(t)
m, conn, fcfg, wCancel := setupModelWithConnection(t)
ffs := fcfg.Filesystem(nil)
defer wCancel()
defer cleanupModelAndRemoveDir(m, ffs.URI())
@@ -3586,7 +3586,7 @@ func TestScanDeletedROChangedOnSR(t *testing.T) {
}
// A remote must have the file, otherwise the deletion below is
// automatically resolved as not a ro-changed item.
must(t, m.IndexUpdate(device1Conn, fcfg.ID, []protocol.FileInfo{file}))
must(t, m.IndexUpdate(conn, fcfg.ID, []protocol.FileInfo{file}))
must(t, ffs.Remove(name))
m.ScanFolders()
@@ -3632,6 +3632,7 @@ func testConfigChangeTriggersClusterConfigs(t *testing.T, expectFirst, expectSec
})
m.AddConnection(fc1, protocol.Hello{})
m.AddConnection(fc2, protocol.Hello{})
m.promoteConnections()
// Initial CCs
select {
@@ -3691,17 +3692,17 @@ func TestIssue6961(t *testing.T) {
}
m.ServeBackground()
defer cleanupModelAndRemoveDir(m, tfs.URI())
addFakeConn(m, device1, fcfg.ID)
addFakeConn(m, device2, fcfg.ID)
conn1 := addFakeConn(m, device1, fcfg.ID)
conn2 := addFakeConn(m, device2, fcfg.ID)
m.ScanFolders()
name := "foo"
version := protocol.Vector{}.Update(device1.Short())
// Remote, valid and existing file
must(t, m.Index(device1Conn, fcfg.ID, []protocol.FileInfo{{Name: name, Version: version, Sequence: 1}}))
must(t, m.Index(conn1, fcfg.ID, []protocol.FileInfo{{Name: name, Version: version, Sequence: 1}}))
// Remote, invalid (receive-only) and existing file
must(t, m.Index(device2Conn, fcfg.ID, []protocol.FileInfo{{Name: name, RawInvalid: true, Sequence: 1}}))
must(t, m.Index(conn2, fcfg.ID, []protocol.FileInfo{{Name: name, RawInvalid: true, Sequence: 1}}))
// Create a local file
if fd, err := tfs.OpenFile(name, fs.OptCreate, 0o666); err != nil {
t.Fatal(err)
@@ -3727,7 +3728,7 @@ func TestIssue6961(t *testing.T) {
m.ScanFolders()
// Drop the remote index, add some other file.
must(t, m.Index(device2Conn, fcfg.ID, []protocol.FileInfo{{Name: "bar", RawInvalid: true, Sequence: 1}}))
must(t, m.Index(conn2, fcfg.ID, []protocol.FileInfo{{Name: "bar", RawInvalid: true, Sequence: 1}}))
// Pause and unpause folder to create new db.FileSet and thus recalculate everything
pauseFolder(t, wcfg, fcfg.ID, true)
@@ -3741,7 +3742,7 @@ func TestIssue6961(t *testing.T) {
}
func TestCompletionEmptyGlobal(t *testing.T) {
m, _, fcfg, wcfgCancel := setupModelWithConnection(t)
m, conn, fcfg, wcfgCancel := setupModelWithConnection(t)
defer wcfgCancel()
defer cleanupModelAndRemoveDir(m, fcfg.Filesystem(nil).URI())
files := []protocol.FileInfo{{Name: "foo", Version: protocol.Vector{}.Update(myID.Short()), Sequence: 1}}
@@ -3750,7 +3751,7 @@ func TestCompletionEmptyGlobal(t *testing.T) {
m.fmut.Unlock()
files[0].Deleted = true
files[0].Version = files[0].Version.Update(device1.Short())
must(t, m.IndexUpdate(device1Conn, fcfg.ID, files))
must(t, m.IndexUpdate(conn, fcfg.ID, files))
comp := m.testCompletion(protocol.LocalDeviceID, fcfg.ID)
if comp.CompletionPct != 95 {
t.Error("Expected completion of 95%, got", comp.CompletionPct)
@@ -3763,34 +3764,34 @@ func TestNeedMetaAfterIndexReset(t *testing.T) {
addDevice2(t, w, fcfg)
m := setupModel(t, w)
defer cleanupModelAndRemoveDir(m, fcfg.Path)
addFakeConn(m, device1, fcfg.ID)
addFakeConn(m, device2, fcfg.ID)
conn1 := addFakeConn(m, device1, fcfg.ID)
conn2 := addFakeConn(m, device2, fcfg.ID)
var seq int64 = 1
files := []protocol.FileInfo{{Name: "foo", Size: 10, Version: protocol.Vector{}.Update(device1.Short()), Sequence: seq}}
// Start with two remotes having one file, then both deleting it, then
// only one adding it again.
must(t, m.Index(device1Conn, fcfg.ID, files))
must(t, m.Index(device2Conn, fcfg.ID, files))
must(t, m.Index(conn1, fcfg.ID, files))
must(t, m.Index(conn2, fcfg.ID, files))
seq++
files[0].SetDeleted(device2.Short())
files[0].Sequence = seq
must(t, m.IndexUpdate(device2Conn, fcfg.ID, files))
must(t, m.IndexUpdate(device1Conn, fcfg.ID, files))
must(t, m.IndexUpdate(conn1, fcfg.ID, files))
must(t, m.IndexUpdate(conn2, fcfg.ID, files))
seq++
files[0].Deleted = false
files[0].Size = 20
files[0].Version = files[0].Version.Update(device1.Short())
files[0].Sequence = seq
must(t, m.IndexUpdate(device1Conn, fcfg.ID, files))
must(t, m.IndexUpdate(conn1, fcfg.ID, files))
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
t.Error("Expected one needed item for device2, got", comp.NeedItems)
}
// Pretend we had an index reset on device 1
must(t, m.Index(device1Conn, fcfg.ID, files))
must(t, m.Index(conn1, fcfg.ID, files))
if comp := m.testCompletion(device2, fcfg.ID); comp.NeedItems != 1 {
t.Error("Expected one needed item for device2, got", comp.NeedItems)
}


@@ -1122,7 +1122,7 @@ func TestRequestIndexSenderPause(t *testing.T) {
cc := basicClusterConfig(device1, myID, fcfg.ID)
cc.Folders[0].Paused = true
m.ClusterConfig(device1Conn, cc)
m.ClusterConfig(fc, cc)
seq++
files[0].Sequence = seq
@@ -1143,7 +1143,7 @@ func TestRequestIndexSenderPause(t *testing.T) {
// Remote unpaused
cc.Folders[0].Paused = false
m.ClusterConfig(device1Conn, cc)
m.ClusterConfig(fc, cc)
select {
case <-time.After(5 * time.Second):
t.Fatal("timed out before receiving index")
@@ -1168,12 +1168,12 @@ func TestRequestIndexSenderPause(t *testing.T) {
// Local and remote paused, then first resume remote, then local
cc.Folders[0].Paused = true
m.ClusterConfig(device1Conn, cc)
m.ClusterConfig(fc, cc)
pauseFolder(t, m.cfg, fcfg.ID, true)
cc.Folders[0].Paused = false
m.ClusterConfig(device1Conn, cc)
m.ClusterConfig(fc, cc)
pauseFolder(t, m.cfg, fcfg.ID, false)
@@ -1190,7 +1190,7 @@ func TestRequestIndexSenderPause(t *testing.T) {
// Folder removed on remote
cc = protocol.ClusterConfig{}
m.ClusterConfig(device1Conn, cc)
m.ClusterConfig(fc, cc)
seq++
files[0].Sequence = seq
@@ -1304,7 +1304,7 @@ func TestRequestReceiveEncrypted(t *testing.T) {
return nil
})
m.AddConnection(fc, protocol.Hello{})
m.ClusterConfig(device1Conn, protocol.ClusterConfig{
m.ClusterConfig(fc, protocol.ClusterConfig{
Folders: []protocol.Folder{
{
ID: "default",
@@ -1354,7 +1354,7 @@ func TestRequestReceiveEncrypted(t *testing.T) {
}
// Simulate request from device that is untrusted too, i.e. with non-empty, but garbage hash
_, err := m.Request(device1Conn, fcfg.ID, name, 0, 1064, 0, []byte("garbage"), 0, false)
_, err := m.Request(fc, fcfg.ID, name, 0, 1064, 0, []byte("garbage"), 0, false)
must(t, err)
changed, err := m.LocalChangedFolderFiles(fcfg.ID, 1, 10)
@@ -1380,7 +1380,7 @@ func TestRequestGlobalInvalidToValid(t *testing.T) {
})
must(t, err)
waiter.Wait()
addFakeConn(m, device2, fcfg.ID)
conn := addFakeConn(m, device2, fcfg.ID)
tfs := fcfg.Filesystem(nil)
defer cleanupModelAndRemoveDir(m, tfs.URI())
@@ -1405,7 +1405,7 @@ func TestRequestGlobalInvalidToValid(t *testing.T) {
file := fc.files[0]
fc.mut.Unlock()
file.SetIgnored()
m.IndexUpdate(device2Conn, fcfg.ID, []protocol.FileInfo{prepareFileInfoForIndex(file)})
m.IndexUpdate(conn, fcfg.ID, []protocol.FileInfo{prepareFileInfoForIndex(file)})
// Wait for the ignored file to be received and possible pulled
timeout := time.After(10 * time.Second)


@@ -30,22 +30,18 @@ var (
defaultFolderConfig config.FolderConfiguration
defaultCfg config.Configuration
defaultAutoAcceptCfg config.Configuration
device1Conn = &mocks.Connection{
IDStub: func() protocol.DeviceID {
return device1
},
}
device2Conn = &mocks.Connection{
IDStub: func() protocol.DeviceID {
return device2
},
}
device1Conn = &mocks.Connection{}
device2Conn = &mocks.Connection{}
)
func init() {
myID, _ = protocol.DeviceIDFromString("ZNWFSWE-RWRV2BD-45BLMCV-LTDE2UR-4LJDW6J-R5BPWEB-TXD27XJ-IZF5RA4")
device1, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR")
device2, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY")
device1Conn.IDReturns(device1)
device1Conn.ConnectionIDReturns(rand.String(16))
device2Conn.IDReturns(device2)
device2Conn.ConnectionIDReturns(rand.String(16))
cfg := config.New(myID)
cfg.Options.MinHomeDiskFree.Value = 0 // avoids unnecessary free space checks