// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package model

import (
	"bytes"
	"context"
	"crypto/rand"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/sync"
)
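
// blocks holds the block hashes used as test data throughout this file. Each
// block is 0x20000 (128 KiB) bytes; index 0 is a zero'ed out block, and the
// remaining entries carry consecutive offsets into the same test file.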
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}

var folders = []string{"default"}
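
// diffTestData drives TestDiff and BenchmarkDiff: a and b are file contents,
// s is the block size, and d is the list of blocks expected to need pulling
// to turn a into b.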
var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}}},
	{"contents", "", 3, []protocol.BlockInfo{{Offset: 0, Size: 0}}},
	{"", "contents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"con", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{Offset: 3, Size: 1}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
}
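
// setupFile returns a FileInfo with the given name and with Blocks picked
// from the global blocks slice according to blockNumbers.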
func setupFile(filename string, blockNumbers []int) protocol.FileInfo {
	// Create existing file
	existingBlocks := make([]protocol.BlockInfo, len(blockNumbers))
	for i := range blockNumbers {
		existingBlocks[i] = blocks[blockNumbers[i]]
	}

	return protocol.FileInfo{
		Name:   filename,
		Blocks: existingBlocks,
	}
}
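
// createFile creates an empty file with the given name on the filesystem and
// returns the FileInfo obtained by scanning it.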
func createFile(t *testing.T, name string, fs fs.Filesystem) protocol.FileInfo {
	t.Helper()

	f, err := fs.Create(name)
	must(t, err)
	f.Close()
	fi, err := fs.Stat(name)
	must(t, err)
	file, err := scanner.CreateFileInfo(fi, name, fs)
	must(t, err)
	return file
}
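
// setupSendReceiveFolder creates a model with a single folder backed by a
// temporary directory and returns both. Any given files are added to the
// folder's index as if they had been scanned locally; callers should clean up
// with cleanupSRFolder.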
func setupSendReceiveFolder(files ...protocol.FileInfo) (*model, *sendReceiveFolder) {
	w := createTmpWrapper(defaultCfg)
	model := newModel(w, myID, "syncthing", "dev", db.OpenMemory(), nil)
	fcfg := testFolderConfigTmp()
	model.addFolder(fcfg)

	f := &sendReceiveFolder{
		folder: folder{
			stateTracker:        newStateTracker("default", model.evLogger),
			model:               model,
			fset:                model.folderFiles[fcfg.ID],
			initialScanFinished: make(chan struct{}),
			ctx:                 context.TODO(),
			FolderConfiguration: fcfg,
		},

		queue:         newJobQueue(),
		pullErrors:    make(map[string]string),
		pullErrorsMut: sync.NewMutex(),
	}
	f.fs = fs.NewMtimeFS(f.Filesystem(), db.NewNamespacedKV(model.db, "mtime"))

	// Update index
	if files != nil {
		f.updateLocalsFromScanning(files)
	}

	// Folders are never actually started, so no initial scan will be done
	close(f.initialScanFinished)

	return model, f
}
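
// cleanupSRFolder stops the model's event logger and removes the temporary
// config file and folder directory created by setupSendReceiveFolder.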
func cleanupSRFolder(f *sendReceiveFolder, m *model) {
	m.evLogger.Stop()
	os.Remove(m.cfg.ConfigPath())
	os.RemoveAll(f.Filesystem().URI())
}

// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk

func TestHandleFile(t *testing.T) {
	// After the diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setupFile("filex", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m, f := setupSendReceiveFolder(existingFile)
	defer cleanupSRFolder(f, m)

	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 8 {
		t.Errorf("Unexpected count of copy blocks: %d != 8", len(toCopy.blocks))
	}

	for _, block := range blocks[1:] {
		found := false
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestHandleFileWithTemp(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 2, 5, 8
	// Pull: 1, 3, 4, 6, 7
	// After dropping out blocks already on the temp file we should:
	// Copy: 5, 8
	// Pull: 1, 6

	existingBlocks := []int{0, 2, 0, 0, 5, 0, 0, 8}
	existingFile := setupFile("file", existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]

	m, f := setupSendReceiveFolder(existingFile)
	defer cleanupSRFolder(f, m)

	if _, err := prepareTmpFile(f.Filesystem()); err != nil {
		t.Fatal(err)
	}

	copyChan := make(chan copyBlocksState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleFile(requiredFile, copyChan, dbUpdateChan)

	// Receive the results
	toCopy := <-copyChan

	if len(toCopy.blocks) != 4 {
		t.Errorf("Unexpected count of copy blocks: %d != 4", len(toCopy.blocks))
	}

	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, toCopyBlock := range toCopy.blocks {
			if string(toCopyBlock.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
	}
}

func TestCopierFinder(t *testing.T) {
	// After diff between required and existing we should:
	// Copy: 1, 2, 3, 4, 6, 7, 8
	// Since there is no existing file, nor a temp file

	// After dropping out blocks found locally:
	// Pull: 1, 5, 6, 8

	tempFile := fs.TempName("file2")

	existingBlocks := []int{0, 2, 3, 4, 0, 0, 7, 0}
	existingFile := setupFile(fs.TempName("file"), existingBlocks)
	requiredFile := existingFile
	requiredFile.Blocks = blocks[1:]
	requiredFile.Name = "file2"

	m, f := setupSendReceiveFolder(existingFile)
	defer cleanupSRFolder(f, m)

	if _, err := prepareTmpFile(f.Filesystem()); err != nil {
		t.Fatal(err)
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, 4)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go f.copierRoutine(copyChan, pullChan, finisherChan)

	f.handleFile(requiredFile, copyChan, dbUpdateChan)

	timeout := time.After(10 * time.Second)
	pulls := make([]pullBlockState, 4)
	for i := 0; i < 4; i++ {
		select {
		case pulls[i] = <-pullChan:
		case <-timeout:
			t.Fatalf("Timed out before receiving all 4 states on pullChan (already got %v)", i)
		}
	}
	var finish *sharedPullerState
	select {
	case finish = <-finisherChan:
	case <-timeout:
		t.Fatal("Timed out before receiving the state on finisherChan")
	}

	defer cleanupSharedPullerState(finish)

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	// Verify that the right blocks went into the pull list.
	// They are pulled in random order.
	for _, idx := range []int{1, 5, 6, 8} {
		found := false
		block := blocks[idx]
		for _, pulledBlock := range pulls {
			if string(pulledBlock.block.Hash) == string(block.Hash) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Did not find block %s", block.String())
		}
		if string(finish.file.Blocks[idx-1].Hash) != string(blocks[idx].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", idx, finish.file.Blocks[idx-1].String(), blocks[idx].String())
		}
	}

	// Verify that the fetched blocks have actually been written to the temp file
	blks, err := scanner.HashFile(context.TODO(), f.Filesystem(), tempFile, protocol.MinBlockSize, nil, false)
	if err != nil {
		t.Log(err)
	}

	for _, eq := range []int{2, 3, 4, 7} {
		if string(blks[eq-1].Hash) != string(blocks[eq].Hash) {
			t.Errorf("Block %d mismatch: %s != %s", eq, blks[eq-1].String(), blocks[eq].String())
		}
	}
}
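
// TestWeakHash checks that a file whose data is shifted by a few bytes is
// fully re-pulled when weak hashing is disabled, but that only the blocks
// covering the shifted prefix are pulled when weak hashing is enabled, the
// rest being copied from the existing file.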
func TestWeakHash(t *testing.T) {
	// Setup the model/pull environment
	model, fo := setupSendReceiveFolder()
	defer cleanupSRFolder(fo, model)
	ffs := fo.Filesystem()

	tempFile := fs.TempName("weakhash")
	var shift int64 = 10
	var size int64 = 1 << 20
	expectBlocks := int(size / protocol.MinBlockSize)
	expectPulls := int(shift / protocol.MinBlockSize)
	if shift > 0 {
		expectPulls++
	}

	f, err := ffs.Create("weakhash")
	must(t, err)
	defer f.Close()
	_, err = io.CopyN(f, rand.Reader, size)
	if err != nil {
		t.Error(err)
	}
	info, err := f.Stat()
	if err != nil {
		t.Error(err)
	}

	// Create two files, second file has `shifted` bytes random prefix, yet
	// both are of the same length, for example:
	// File 1: abcdefgh
	// File 2: xyabcdef
	f.Seek(0, os.SEEK_SET)
	existing, err := scanner.Blocks(context.TODO(), f, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	f.Seek(0, os.SEEK_SET)
	remainder := io.LimitReader(f, size-shift)
	prefix := io.LimitReader(rand.Reader, shift)
	nf := io.MultiReader(prefix, remainder)
	desired, err := scanner.Blocks(context.TODO(), nf, protocol.MinBlockSize, size, nil, true)
	if err != nil {
		t.Error(err)
	}

	existingFile := protocol.FileInfo{
		Name:       "weakhash",
		Blocks:     existing,
		Size:       size,
		ModifiedS:  info.ModTime().Unix(),
		ModifiedNs: int32(info.ModTime().Nanosecond()),
	}
	desiredFile := protocol.FileInfo{
		Name:      "weakhash",
		Size:      size,
		Blocks:    desired,
		ModifiedS: info.ModTime().Unix() + 1,
	}

	fo.updateLocalsFromScanning([]protocol.FileInfo{existingFile})

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState, expectBlocks)
	finisherChan := make(chan *sharedPullerState, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	// Run a single fetcher routine
	go fo.copierRoutine(copyChan, pullChan, finisherChan)

	// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
	fo.WeakHashThresholdPct = 101
	fo.handleFile(desiredFile, copyChan, dbUpdateChan)

	var pulls []pullBlockState
	for len(pulls) < expectBlocks {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectBlocks)
		}
	}
	finish := <-finisherChan

	select {
	case <-pullChan:
		t.Fatal("Pull channel has data to be read")
	case <-finisherChan:
		t.Fatal("Finisher channel has data to be read")
	default:
	}

	cleanupSharedPullerState(finish)
	if err := ffs.Remove(tempFile); err != nil {
		t.Fatal(err)
	}

	// Test 2 - using weak hash, expectPulls blocks pulled.
	fo.WeakHashThresholdPct = -1
	fo.handleFile(desiredFile, copyChan, dbUpdateChan)

	pulls = pulls[:0]
	for len(pulls) < expectPulls {
		select {
		case pull := <-pullChan:
			pulls = append(pulls, pull)
		case <-time.After(10 * time.Second):
			t.Errorf("timed out, got %d pulls expected %d", len(pulls), expectPulls)
		}
	}

	finish = <-finisherChan
	cleanupSharedPullerState(finish)

	expectShifted := expectBlocks - expectPulls
	if finish.copyOriginShifted != expectShifted {
		t.Errorf("did not copy %d shifted", expectShifted)
	}
}

// Test that updating a file removes its old blocks from the blockmap
func TestCopierCleanup(t *testing.T) {
	iterFn := func(folder, file string, index int32) bool {
		return true
	}

	// Create a file
	file := setupFile("test", []int{0})
	m, f := setupSendReceiveFolder(file)
	defer cleanupSRFolder(f, m)

	file.Blocks = []protocol.BlockInfo{blocks[1]}
	file.Version = file.Version.Update(myID.Short())
	// Update index (removing old blocks)
	f.updateLocalsFromScanning([]protocol.FileInfo{file})

	if m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Unexpected block found")
	}

	if !m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	file.Blocks = []protocol.BlockInfo{blocks[0]}
	file.Version = file.Version.Update(myID.Short())
	// Update index (removing old blocks)
	f.updateLocalsFromScanning([]protocol.FileInfo{file})

	if !m.finder.Iterate(folders, blocks[0].Hash, iterFn) {
		t.Error("Expected block not found")
	}

	if m.finder.Iterate(folders, blocks[1].Hash, iterFn) {
		t.Error("Unexpected block found")
	}
}
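
// TestDeregisterOnFailInCopy checks that a file that fails in the copier is
// deregistered from the progress emitter and the job queue once the finisher
// has handled it.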
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)

	// Set up our event subscription early
	s := m.evLogger.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, dbUpdateChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan

	// Fail the puller state, simulating an error on further access
	toPull.sharedPullerState.fail(os.ErrNotExist)

	// Unblock copier
	go func() {
		for range pullChan {
		}
	}()

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		state.mut.Lock()
		stateWriter := state.writer
		state.mut.Unlock()
		if stateWriter != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
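
// TestDeregisterOnFailInPull checks the same deregistration as
// TestDeregisterOnFailInCopy, but for a failure in the puller rather than in
// the copier.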
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setupFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)

	// Set up our event subscription early
	s := m.evLogger.Subscribe(events.ItemFinished)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.pullerRoutine(pullChan, finisherBufferChan)
	go f.finisherRoutine(finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, dbUpdateChan)

	// Receive at finisher; we should error out as the puller has nowhere to
	// pull from.
	timeout = time.Second
	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		t0 := time.Now()
		if ev, err := s.Poll(time.Minute); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}
		t.Log("event took", time.Since(t0))

		state.mut.Lock()
		stateWriter := state.writer
		state.mut.Unlock()
		if stateWriter != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state

		if _, err := s.Poll(time.Second); err != events.ErrTimeout {
			t.Fatal("Expected timeout, not another event", err)
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
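
// TestIssue3164 checks that deleting a directory succeeds even when it still
// contains content that is ignored with the (?d) (delete) prefix.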
func TestIssue3164(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()
	tmpDir := ffs.URI()

	ignDir := filepath.Join("issue3164", "oktodelete")
	subDir := filepath.Join(ignDir, "foobar")
	must(t, ffs.MkdirAll(subDir, 0777))
	must(t, ioutil.WriteFile(filepath.Join(tmpDir, subDir, "file"), []byte("Hello"), 0644))
	must(t, ioutil.WriteFile(filepath.Join(tmpDir, ignDir, "file"), []byte("Hello"), 0644))

	file := protocol.FileInfo{
		Name: "issue3164",
	}

	matcher := ignore.New(ffs)
	must(t, matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""))
	f.ignores = matcher

	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.deleteDir(file, dbUpdateChan, make(chan string))

	if _, err := ffs.Stat("issue3164"); !fs.IsNotExist(err) {
		t.Fatal(err)
	}
}
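
// TestDiff checks blockDiff against the table-driven cases in diffTestData.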
func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		_, d := blockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		} else {
			for j := range test.d {
				if d[j].Offset != test.d[j].Offset {
					t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
				}
				if d[j].Size != test.d[j].Size {
					t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
				}
			}
		}
	}
}

func BenchmarkDiff(b *testing.B) {
	testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
	for _, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{a, b})
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			blockDiff(tc.a, tc.b)
		}
	}
}

func TestDiffEmpty(t *testing.T) {
	emptyCases := []struct {
		a    []protocol.BlockInfo
		b    []protocol.BlockInfo
		need int
		have int
	}{
		{nil, nil, 0, 0},
		{[]protocol.BlockInfo{{Offset: 3, Size: 1}}, nil, 0, 0},
		{nil, []protocol.BlockInfo{{Offset: 3, Size: 1}}, 1, 0},
	}
	for _, emptyCase := range emptyCases {
		h, n := blockDiff(emptyCase.a, emptyCase.b)
		if len(h) != emptyCase.have {
			t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
		}
		if len(n) != emptyCase.need {
			t.Errorf("incorrect need: %d != %d", len(n), emptyCase.need)
		}
	}
}

// TestDeleteIgnorePerms checks that a file gets deleted when the IgnorePerms
// option is true and the permissions do not match between the file on disk and
// in the db.
func TestDeleteIgnorePerms(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()
	f.IgnorePerms = true

	name := "deleteIgnorePerms"
	file, err := ffs.Create(name)
	if err != nil {
		t.Error(err)
	}
	defer file.Close()

	stat, err := file.Stat()
	must(t, err)
	fi, err := scanner.CreateFileInfo(stat, name, ffs)
	must(t, err)
	ffs.Chmod(name, 0600)
	scanChan := make(chan string)
	finished := make(chan struct{})
	go func() {
		err = f.checkToBeDeleted(fi, scanChan)
		close(finished)
	}()
	select {
	case <-scanChan:
		<-finished
	case <-finished:
	}
	must(t, err)
}

func TestCopyOwner(t *testing.T) {
	// Verifies that owner and group are copied from the parent, for both
	// files and directories.

	if runtime.GOOS == "windows" {
		t.Skip("copying owner not supported on Windows")
	}

	const (
		expOwner = 1234
		expGroup = 5678
	)

	// Set up a folder with the CopyParentOwner bit and backed by a fake
	// filesystem.

	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	f.folder.FolderConfiguration = config.NewFolderConfiguration(m.id, f.ID, f.Label, fs.FilesystemTypeFake, "/TestCopyOwner")
	f.folder.FolderConfiguration.CopyOwnershipFromParent = true

	f.fs = f.Filesystem()

	// Create a parent dir with a certain owner/group.

	f.fs.Mkdir("foo", 0755)
	f.fs.Lchown("foo", expOwner, expGroup)

	dir := protocol.FileInfo{
		Name:        "foo/bar",
		Type:        protocol.FileInfoTypeDirectory,
		Permissions: 0755,
	}

	// Have the folder create a subdirectory, verify that it's the correct
	// owner/group.

	dbUpdateChan := make(chan dbUpdateJob, 1)
	defer close(dbUpdateChan)
	f.handleDir(dir, dbUpdateChan, nil)
	<-dbUpdateChan // empty the channel for later

	info, err := f.fs.Lstat("foo/bar")
	if err != nil {
		t.Fatal("Unexpected error (dir):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected dir owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a file, verify it's the correct owner/group.
	// File is zero sized to avoid having to handle copies/pulls.

	file := protocol.FileInfo{
		Name:        "foo/bar/baz",
		Type:        protocol.FileInfoTypeFile,
		Permissions: 0644,
	}

	// Wire some stuff. The flow here is handleFile() -[copierChan]->
	// copierRoutine() -[finisherChan]-> finisherRoutine() -[dbUpdateChan]->
	// back to us and we're done. The copier routine doesn't do anything,
	// but it's the way data is passed around. When the database update
	// comes the finisher is done.

	finisherChan := make(chan *sharedPullerState)
	defer close(finisherChan)
	copierChan := make(chan copyBlocksState)
	defer close(copierChan)
	go f.copierRoutine(copierChan, nil, finisherChan)
	go f.finisherRoutine(finisherChan, dbUpdateChan, nil)
	f.handleFile(file, copierChan, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/baz")
	if err != nil {
		t.Fatal("Unexpected error (file):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected file owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}

	// Have the folder create a symlink. Verify it accordingly.
	symlink := protocol.FileInfo{
		Name:          "foo/bar/sym",
		Type:          protocol.FileInfoTypeSymlink,
		Permissions:   0644,
		SymlinkTarget: "over the rainbow",
	}

	f.handleSymlink(symlink, dbUpdateChan, nil)
	<-dbUpdateChan

	info, err = f.fs.Lstat("foo/bar/sym")
	if err != nil {
		t.Fatal("Unexpected error (symlink):", err)
	}
	if info.Owner() != expOwner || info.Group() != expGroup {
		t.Fatalf("Expected symlink owner/group to be %d/%d, not %d/%d", expOwner, expGroup, info.Owner(), info.Group())
	}
}

// TestSRConflictReplaceFileByDir checks that a conflict is created when an existing file
// is replaced with a directory and versions are conflicting
func TestSRConflictReplaceFileByDir(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()

	name := "foo"

	// create local file
	file := createFile(t, name, ffs)
	file.Version = protocol.Vector{}.Update(myID.Short())
	f.updateLocalsFromScanning([]protocol.FileInfo{file})

	// Simulate remote creating a dir with the same name
	file.Type = protocol.FileInfoTypeDirectory
	rem := device1.Short()
	file.Version = protocol.Vector{}.Update(rem)
	file.ModifiedBy = rem

	dbUpdateChan := make(chan dbUpdateJob, 1)
	scanChan := make(chan string, 1)

	f.handleDir(file, dbUpdateChan, scanChan)

	if confls := existingConflicts(name, ffs); len(confls) != 1 {
		t.Fatal("Expected one conflict, got", len(confls))
	} else if scan := <-scanChan; confls[0] != scan {
		t.Fatal("Expected request to scan", confls[0], "got", scan)
	}
}

// TestSRConflictReplaceFileByLink checks that a conflict is created when an existing file
// is replaced with a link and versions are conflicting
func TestSRConflictReplaceFileByLink(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()

	name := "foo"

	// create local file
	file := createFile(t, name, ffs)
	file.Version = protocol.Vector{}.Update(myID.Short())
	f.updateLocalsFromScanning([]protocol.FileInfo{file})

	// Simulate remote creating a symlink with the same name
	file.Type = protocol.FileInfoTypeSymlink
	file.SymlinkTarget = "bar"
	rem := device1.Short()
	file.Version = protocol.Vector{}.Update(rem)
	file.ModifiedBy = rem

	dbUpdateChan := make(chan dbUpdateJob, 1)
	scanChan := make(chan string, 1)

	f.handleSymlink(file, dbUpdateChan, scanChan)

	if confls := existingConflicts(name, ffs); len(confls) != 1 {
		t.Fatal("Expected one conflict, got", len(confls))
	} else if scan := <-scanChan; confls[0] != scan {
		t.Fatal("Expected request to scan", confls[0], "got", scan)
	}
}

// TestDeleteBehindSymlink checks that we don't delete or schedule a scan
// when trying to delete a file behind a symlink.
func TestDeleteBehindSymlink(t *testing.T) {
	m, f := setupSendReceiveFolder()
	defer cleanupSRFolder(f, m)
	ffs := f.Filesystem()

	destDir := createTmpDir()
	defer os.RemoveAll(destDir)
	destFs := fs.NewFilesystem(fs.FilesystemTypeBasic, destDir)

	link := "link"
	file := filepath.Join(link, "file")
	must(t, ffs.MkdirAll(link, 0755))
	fi := createFile(t, file, ffs)
	f.updateLocalsFromScanning([]protocol.FileInfo{fi})
	must(t, osutil.RenameOrCopy(ffs, destFs, file, "file"))
	must(t, ffs.RemoveAll(link))

	if err := osutil.DebugSymlinkForTestsOnly(destFs.URI(), filepath.Join(ffs.URI(), link)); err != nil {
		if runtime.GOOS == "windows" {
			// Probably we require permissions we don't have.
			t.Skip("Need admin permissions or developer mode to run symlink test on Windows: " + err.Error())
		} else {
			t.Fatal(err)
		}
	}

	fi.Deleted = true
	fi.Version = fi.Version.Update(device1.Short())
	scanChan := make(chan string, 1)
	dbUpdateChan := make(chan dbUpdateJob, 1)
	f.deleteFile(fi, dbUpdateChan, scanChan)
	select {
	case f := <-scanChan:
		t.Fatalf("Received %v on scanChan", f)
	case u := <-dbUpdateChan:
		if u.jobType != dbUpdateDeleteFile {
			t.Errorf("Expected jobType %v, got %v", dbUpdateDeleteFile, u.jobType)
		}
		if u.file.Name != fi.Name {
			t.Errorf("Expected update for %v, got %v", fi.Name, u.file.Name)
		}
	default:
		t.Fatalf("No db update received")
	}
	if _, err := destFs.Stat("file"); err != nil {
		t.Errorf("Expected no error when stating file behind symlink, got %v", err)
	}
}
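
// cleanupSharedPullerState closes the temp file writer held by the given
// state, if any, so that the temporary file does not stay open after the
// test.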
func cleanupSharedPullerState(s *sharedPullerState) {
	s.mut.Lock()
	defer s.mut.Unlock()
	if s.writer == nil {
		return
	}
	s.writer.mut.Lock()
	s.writer.fd.Close()
	s.writer.mut.Unlock()
}