2014-11-16 20:13:20 +00:00
// Copyright (C) 2014 The Syncthing Authors.
2014-10-04 14:48:33 +00:00
//
2015-03-07 20:36:35 +00:00
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
2017-02-09 06:52:18 +00:00
// You can obtain one at https://mozilla.org/MPL/2.0/.
2014-10-04 14:48:33 +00:00
package model
import (
2017-12-07 08:42:03 +00:00
"bytes"
2017-04-26 00:15:23 +00:00
"context"
2016-12-14 23:30:29 +00:00
"crypto/rand"
"io"
2017-12-07 08:42:03 +00:00
"io/ioutil"
2014-10-10 23:27:17 +00:00
"os"
"path/filepath"
2014-10-04 14:48:33 +00:00
"testing"
2014-12-09 23:58:58 +00:00
"time"
2014-10-04 14:48:33 +00:00
2018-02-25 09:14:02 +00:00
"github.com/syncthing/syncthing/lib/config"
2015-10-31 11:31:25 +00:00
"github.com/syncthing/syncthing/lib/db"
2018-05-14 19:01:35 +00:00
"github.com/syncthing/syncthing/lib/events"
2016-12-14 23:30:29 +00:00
"github.com/syncthing/syncthing/lib/fs"
2017-12-07 08:42:03 +00:00
"github.com/syncthing/syncthing/lib/ignore"
2015-09-22 17:38:46 +00:00
"github.com/syncthing/syncthing/lib/protocol"
2015-08-06 09:29:25 +00:00
"github.com/syncthing/syncthing/lib/scanner"
"github.com/syncthing/syncthing/lib/sync"
2014-10-04 14:48:33 +00:00
)
// blocks is the hash material for the test files used throughout this file.
// Each non-zero entry covers a consecutive 128 KiB (0x20000) region of a
// 1 MiB file; index 0 is the hash of an all-zero block and is used as a
// placeholder for "missing" blocks in the test layouts below.
var blocks = []protocol.BlockInfo{
	{Hash: []uint8{0xfa, 0x43, 0x23, 0x9b, 0xce, 0xe7, 0xb9, 0x7c, 0xa6, 0x2f, 0x0, 0x7c, 0xc6, 0x84, 0x87, 0x56, 0xa, 0x39, 0xe1, 0x9f, 0x74, 0xf3, 0xdd, 0xe7, 0x48, 0x6d, 0xb3, 0xf9, 0x8d, 0xf8, 0xe4, 0x71}}, // Zero'ed out block
	{Offset: 0, Size: 0x20000, Hash: []uint8{0x7e, 0xad, 0xbc, 0x36, 0xae, 0xbb, 0xcf, 0x74, 0x43, 0xe2, 0x7a, 0x5a, 0x4b, 0xb8, 0x5b, 0xce, 0xe6, 0x9e, 0x1e, 0x10, 0xf9, 0x8a, 0xbc, 0x77, 0x95, 0x2, 0x29, 0x60, 0x9e, 0x96, 0xae, 0x6c}},
	{Offset: 131072, Size: 0x20000, Hash: []uint8{0x3c, 0xc4, 0x20, 0xf4, 0xb, 0x2e, 0xcb, 0xb9, 0x5d, 0xce, 0x34, 0xa8, 0xc3, 0x92, 0xea, 0xf3, 0xda, 0x88, 0x33, 0xee, 0x7a, 0xb6, 0xe, 0xf1, 0x82, 0x5e, 0xb0, 0xa9, 0x26, 0xa9, 0xc0, 0xef}},
	{Offset: 262144, Size: 0x20000, Hash: []uint8{0x76, 0xa8, 0xc, 0x69, 0xd7, 0x5c, 0x52, 0xfd, 0xdf, 0x55, 0xef, 0x44, 0xc1, 0xd6, 0x25, 0x48, 0x4d, 0x98, 0x48, 0x4d, 0xaa, 0x50, 0xf6, 0x6b, 0x32, 0x47, 0x55, 0x81, 0x6b, 0xed, 0xee, 0xfb}},
	{Offset: 393216, Size: 0x20000, Hash: []uint8{0x44, 0x1e, 0xa4, 0xf2, 0x8d, 0x1f, 0xc3, 0x1b, 0x9d, 0xa5, 0x18, 0x5e, 0x59, 0x1b, 0xd8, 0x5c, 0xba, 0x7d, 0xb9, 0x8d, 0x70, 0x11, 0x5c, 0xea, 0xa1, 0x57, 0x4d, 0xcb, 0x3c, 0x5b, 0xf8, 0x6c}},
	{Offset: 524288, Size: 0x20000, Hash: []uint8{0x8, 0x40, 0xd0, 0x5e, 0x80, 0x0, 0x0, 0x7c, 0x8b, 0xb3, 0x8b, 0xf7, 0x7b, 0x23, 0x26, 0x28, 0xab, 0xda, 0xcf, 0x86, 0x8f, 0xc2, 0x8a, 0x39, 0xc6, 0xe6, 0x69, 0x59, 0x97, 0xb6, 0x1a, 0x43}},
	{Offset: 655360, Size: 0x20000, Hash: []uint8{0x38, 0x8e, 0x44, 0xcb, 0x30, 0xd8, 0x90, 0xf, 0xce, 0x7, 0x4b, 0x58, 0x86, 0xde, 0xce, 0x59, 0xa2, 0x46, 0xd2, 0xf9, 0xba, 0xaf, 0x35, 0x87, 0x38, 0xdf, 0xd2, 0xd, 0xf9, 0x45, 0xed, 0x91}},
	{Offset: 786432, Size: 0x20000, Hash: []uint8{0x32, 0x28, 0xcd, 0xf, 0x37, 0x21, 0xe5, 0xd4, 0x1e, 0x58, 0x87, 0x73, 0x8e, 0x36, 0xdf, 0xb2, 0x70, 0x78, 0x56, 0xc3, 0x42, 0xff, 0xf7, 0x8f, 0x37, 0x95, 0x0, 0x26, 0xa, 0xac, 0x54, 0x72}},
	{Offset: 917504, Size: 0x20000, Hash: []uint8{0x96, 0x6b, 0x15, 0x6b, 0xc4, 0xf, 0x19, 0x18, 0xca, 0xbb, 0x5f, 0xd6, 0xbb, 0xa2, 0xc6, 0x2a, 0xac, 0xbb, 0x8a, 0xb9, 0xce, 0xec, 0x4c, 0xdb, 0x78, 0xec, 0x57, 0x5d, 0x33, 0xf9, 0x8e, 0xaf}},
}
2015-09-04 10:01:00 +00:00
// folders is the folder list passed to finder.Iterate in the tests below.
var folders = []string{"default"}
2018-01-14 14:30:11 +00:00
// diffTestData drives TestDiff and BenchmarkDiff. Each case gives two file
// contents (a, b), the block size s used to hash them, and the expected set
// of differing blocks d (offsets/sizes relative to b).
var diffTestData = []struct {
	a string
	b string
	s int
	d []protocol.BlockInfo
}{
	{"contents", "contents", 1024, []protocol.BlockInfo{}},
	{"", "", 1024, []protocol.BlockInfo{}},
	{"contents", "contents", 3, []protocol.BlockInfo{}},
	{"contents", "cantents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}}},
	{"contents", "contants", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}}},
	{"contents", "cantants", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}}},
	{"contents", "", 3, []protocol.BlockInfo{{Offset: 0, Size: 0}}},
	{"", "contents", 3, []protocol.BlockInfo{{Offset: 0, Size: 3}, {Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"con", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
	{"contents", "con", 3, nil},
	{"contents", "cont", 3, []protocol.BlockInfo{{Offset: 3, Size: 1}}},
	{"cont", "contents", 3, []protocol.BlockInfo{{Offset: 3, Size: 3}, {Offset: 6, Size: 2}}},
}
2015-12-21 21:47:26 +00:00
func setUpFile ( filename string , blockNumbers [ ] int ) protocol . FileInfo {
2014-10-10 23:27:17 +00:00
// Create existing file
2015-12-21 21:47:26 +00:00
existingBlocks := make ( [ ] protocol . BlockInfo , len ( blockNumbers ) )
for i := range blockNumbers {
existingBlocks [ i ] = blocks [ blockNumbers [ i ] ]
}
return protocol . FileInfo {
2016-07-04 10:40:29 +00:00
Name : filename ,
Blocks : existingBlocks ,
2014-10-04 14:48:33 +00:00
}
2015-12-21 21:47:26 +00:00
}
2014-10-04 14:48:33 +00:00
2018-07-12 08:15:57 +00:00
func setUpModel ( files ... protocol . FileInfo ) * Model {
2015-10-31 11:31:25 +00:00
db := db . OpenMemory ( )
2018-03-13 13:03:10 +00:00
model := NewModel ( defaultCfgWrapper , protocol . LocalDeviceID , "syncthing" , "dev" , db , nil )
2015-12-21 21:47:26 +00:00
model . AddFolder ( defaultFolderConfig )
2014-10-10 23:27:17 +00:00
// Update index
2018-07-12 08:15:57 +00:00
model . updateLocalsFromScanning ( "default" , files )
2015-12-21 21:47:26 +00:00
return model
}
2014-10-04 14:48:33 +00:00
2017-04-01 09:58:06 +00:00
func setUpSendReceiveFolder ( model * Model ) * sendReceiveFolder {
f := & sendReceiveFolder {
2016-04-26 20:19:30 +00:00
folder : folder {
2017-04-20 00:20:34 +00:00
stateTracker : newStateTracker ( "default" ) ,
model : model ,
initialScanFinished : make ( chan struct { } ) ,
2017-04-26 00:15:23 +00:00
ctx : context . TODO ( ) ,
2018-02-25 09:14:02 +00:00
FolderConfiguration : config . FolderConfiguration {
PullerMaxPendingKiB : defaultPullerPendingKiB ,
} ,
2016-04-26 20:19:30 +00:00
} ,
2016-12-14 23:30:29 +00:00
2017-08-19 14:36:56 +00:00
fs : fs . NewMtimeFS ( fs . NewFilesystem ( fs . FilesystemTypeBasic , "testdata" ) , db . NewNamespacedKV ( model . db , "mtime" ) ) ,
2016-04-26 20:19:30 +00:00
queue : newJobQueue ( ) ,
2015-06-26 11:31:30 +00:00
errors : make ( map [ string ] string ) ,
errorsMut : sync . NewMutex ( ) ,
2014-10-04 14:48:33 +00:00
}
2017-04-01 09:58:06 +00:00
// Folders are never actually started, so no initial scan will be done
2017-04-20 00:20:34 +00:00
close ( f . initialScanFinished )
2017-04-01 09:58:06 +00:00
return f
2015-12-21 21:47:26 +00:00
}
// Layout of the files: (indexes from the above array)
// 12345678 - Required file
// 02005008 - Existing file (currently in the index)
// 02340070 - Temp file on the disk
func TestHandleFile ( t * testing . T ) {
// After the diff between required and existing we should:
// Copy: 2, 5, 8
// Pull: 1, 3, 4, 6, 7
2014-10-04 14:48:33 +00:00
2015-12-21 21:47:26 +00:00
existingBlocks := [ ] int { 0 , 2 , 0 , 0 , 5 , 0 , 0 , 8 }
existingFile := setUpFile ( "filex" , existingBlocks )
requiredFile := existingFile
requiredFile . Blocks = blocks [ 1 : ]
m := setUpModel ( existingFile )
2016-12-16 22:23:35 +00:00
f := setUpSendReceiveFolder ( m )
2014-10-08 22:41:23 +00:00
copyChan := make ( chan copyBlocksState , 1 )
2017-12-07 08:42:03 +00:00
dbUpdateChan := make ( chan dbUpdateJob , 1 )
2014-10-04 14:48:33 +00:00
2017-12-07 08:42:03 +00:00
f . handleFile ( requiredFile , copyChan , nil , dbUpdateChan )
2014-10-04 14:48:33 +00:00
// Receive the results
toCopy := <- copyChan
2014-10-08 22:41:23 +00:00
if len ( toCopy . blocks ) != 8 {
t . Errorf ( "Unexpected count of copy blocks: %d != 8" , len ( toCopy . blocks ) )
2014-10-04 14:48:33 +00:00
}
2016-04-15 10:59:41 +00:00
for _ , block := range blocks [ 1 : ] {
found := false
for _ , toCopyBlock := range toCopy . blocks {
if string ( toCopyBlock . Hash ) == string ( block . Hash ) {
found = true
break
}
}
if ! found {
t . Errorf ( "Did not find block %s" , block . String ( ) )
2014-10-04 14:48:33 +00:00
}
}
}
func TestHandleFileWithTemp ( t * testing . T ) {
// After diff between required and existing we should:
// Copy: 2, 5, 8
// Pull: 1, 3, 4, 6, 7
// After dropping out blocks already on the temp file we should:
// Copy: 5, 8
// Pull: 1, 6
2015-12-21 21:47:26 +00:00
existingBlocks := [ ] int { 0 , 2 , 0 , 0 , 5 , 0 , 0 , 8 }
existingFile := setUpFile ( "file" , existingBlocks )
2014-10-04 14:48:33 +00:00
requiredFile := existingFile
requiredFile . Blocks = blocks [ 1 : ]
2015-12-21 21:47:26 +00:00
m := setUpModel ( existingFile )
2016-12-16 22:23:35 +00:00
f := setUpSendReceiveFolder ( m )
2014-10-08 22:41:23 +00:00
copyChan := make ( chan copyBlocksState , 1 )
2017-12-07 08:42:03 +00:00
dbUpdateChan := make ( chan dbUpdateJob , 1 )
2014-10-04 14:48:33 +00:00
2017-12-07 08:42:03 +00:00
f . handleFile ( requiredFile , copyChan , nil , dbUpdateChan )
2014-10-04 14:48:33 +00:00
// Receive the results
toCopy := <- copyChan
2014-10-08 22:41:23 +00:00
if len ( toCopy . blocks ) != 4 {
t . Errorf ( "Unexpected count of copy blocks: %d != 4" , len ( toCopy . blocks ) )
2014-10-04 14:48:33 +00:00
}
2016-04-15 10:59:41 +00:00
for _ , idx := range [ ] int { 1 , 5 , 6 , 8 } {
found := false
block := blocks [ idx ]
for _ , toCopyBlock := range toCopy . blocks {
if string ( toCopyBlock . Hash ) == string ( block . Hash ) {
found = true
break
}
}
if ! found {
t . Errorf ( "Did not find block %s" , block . String ( ) )
2014-10-04 14:48:33 +00:00
}
}
}
2014-10-10 23:27:17 +00:00
func TestCopierFinder ( t * testing . T ) {
// After diff between required and existing we should:
// Copy: 1, 2, 3, 4, 6, 7, 8
// Since there is no existing file, nor a temp file
// After dropping out blocks found locally:
// Pull: 1, 5, 6, 8
2017-09-02 05:52:38 +00:00
tempFile := filepath . Join ( "testdata" , fs . TempName ( "file2" ) )
2014-10-10 23:27:17 +00:00
err := os . Remove ( tempFile )
if err != nil && ! os . IsNotExist ( err ) {
t . Error ( err )
}
2018-03-13 13:03:10 +00:00
defer os . Remove ( tempFile )
2014-10-10 23:27:17 +00:00
2015-12-21 21:47:26 +00:00
existingBlocks := [ ] int { 0 , 2 , 3 , 4 , 0 , 0 , 7 , 0 }
2017-09-02 05:52:38 +00:00
existingFile := setUpFile ( fs . TempName ( "file" ) , existingBlocks )
2014-10-10 23:27:17 +00:00
requiredFile := existingFile
requiredFile . Blocks = blocks [ 1 : ]
requiredFile . Name = "file2"
2015-12-21 21:47:26 +00:00
m := setUpModel ( existingFile )
2016-12-16 22:23:35 +00:00
f := setUpSendReceiveFolder ( m )
2014-10-10 23:27:17 +00:00
copyChan := make ( chan copyBlocksState )
pullChan := make ( chan pullBlockState , 4 )
finisherChan := make ( chan * sharedPullerState , 1 )
2017-12-07 08:42:03 +00:00
dbUpdateChan := make ( chan dbUpdateJob , 1 )
2014-10-10 23:27:17 +00:00
// Run a single fetcher routine
2016-04-26 20:19:30 +00:00
go f . copierRoutine ( copyChan , pullChan , finisherChan )
2014-10-10 23:27:17 +00:00
2017-12-07 08:42:03 +00:00
f . handleFile ( requiredFile , copyChan , finisherChan , dbUpdateChan )
2014-10-10 23:27:17 +00:00
pulls := [ ] pullBlockState { <- pullChan , <- pullChan , <- pullChan , <- pullChan }
finish := <- finisherChan
select {
case <- pullChan :
2016-12-14 23:30:29 +00:00
t . Fatal ( "Pull channel has data to be read" )
2014-10-10 23:27:17 +00:00
case <- finisherChan :
t . Fatal ( "Finisher channel has data to be read" )
default :
}
2016-04-15 10:59:41 +00:00
// Verify that the right blocks went into the pull list.
// They are pulled in random order.
for _ , idx := range [ ] int { 1 , 5 , 6 , 8 } {
found := false
block := blocks [ idx ]
for _ , pulledBlock := range pulls {
if string ( pulledBlock . block . Hash ) == string ( block . Hash ) {
found = true
break
}
}
if ! found {
t . Errorf ( "Did not find block %s" , block . String ( ) )
2014-10-10 23:27:17 +00:00
}
2016-04-15 10:59:41 +00:00
if string ( finish . file . Blocks [ idx - 1 ] . Hash ) != string ( blocks [ idx ] . Hash ) {
t . Errorf ( "Block %d mismatch: %s != %s" , idx , finish . file . Blocks [ idx - 1 ] . String ( ) , blocks [ idx ] . String ( ) )
2014-10-10 23:27:17 +00:00
}
}
// Verify that the fetched blocks have actually been written to the temp file
2018-04-16 18:08:50 +00:00
blks , err := scanner . HashFile ( context . TODO ( ) , fs . NewFilesystem ( fs . FilesystemTypeBasic , "." ) , tempFile , protocol . MinBlockSize , nil , false )
2014-10-10 23:27:17 +00:00
if err != nil {
t . Log ( err )
}
for _ , eq := range [ ] int { 2 , 3 , 4 , 7 } {
if string ( blks [ eq - 1 ] . Hash ) != string ( blocks [ eq ] . Hash ) {
t . Errorf ( "Block %d mismatch: %s != %s" , eq , blks [ eq - 1 ] . String ( ) , blocks [ eq ] . String ( ) )
}
}
finish . fd . Close ( )
}
2014-10-22 14:24:11 +00:00
2016-12-14 23:30:29 +00:00
func TestWeakHash ( t * testing . T ) {
2017-09-02 05:52:38 +00:00
tempFile := filepath . Join ( "testdata" , fs . TempName ( "weakhash" ) )
2016-12-14 23:30:29 +00:00
var shift int64 = 10
var size int64 = 1 << 20
2018-04-16 18:08:50 +00:00
expectBlocks := int ( size / protocol . MinBlockSize )
expectPulls := int ( shift / protocol . MinBlockSize )
2016-12-14 23:30:29 +00:00
if shift > 0 {
expectPulls ++
}
cleanup := func ( ) {
for _ , path := range [ ] string { tempFile , "testdata/weakhash" } {
os . Remove ( path )
}
}
cleanup ( )
defer cleanup ( )
f , err := os . Create ( "testdata/weakhash" )
if err != nil {
t . Error ( err )
}
defer f . Close ( )
_ , err = io . CopyN ( f , rand . Reader , size )
if err != nil {
t . Error ( err )
}
info , err := f . Stat ( )
if err != nil {
t . Error ( err )
}
// Create two files, second file has `shifted` bytes random prefix, yet
// both are of the same length, for example:
// File 1: abcdefgh
// File 2: xyabcdef
f . Seek ( 0 , os . SEEK_SET )
2018-04-16 18:08:50 +00:00
existing , err := scanner . Blocks ( context . TODO ( ) , f , protocol . MinBlockSize , size , nil , true )
2016-12-14 23:30:29 +00:00
if err != nil {
t . Error ( err )
}
f . Seek ( 0 , os . SEEK_SET )
remainder := io . LimitReader ( f , size - shift )
prefix := io . LimitReader ( rand . Reader , shift )
nf := io . MultiReader ( prefix , remainder )
2018-04-16 18:08:50 +00:00
desired , err := scanner . Blocks ( context . TODO ( ) , nf , protocol . MinBlockSize , size , nil , true )
2016-12-14 23:30:29 +00:00
if err != nil {
t . Error ( err )
}
existingFile := protocol . FileInfo {
Name : "weakhash" ,
Blocks : existing ,
Size : size ,
ModifiedS : info . ModTime ( ) . Unix ( ) ,
ModifiedNs : int32 ( info . ModTime ( ) . Nanosecond ( ) ) ,
}
desiredFile := protocol . FileInfo {
Name : "weakhash" ,
Size : size ,
Blocks : desired ,
ModifiedS : info . ModTime ( ) . Unix ( ) + 1 ,
}
// Setup the model/pull environment
m := setUpModel ( existingFile )
2016-12-16 22:23:35 +00:00
fo := setUpSendReceiveFolder ( m )
2016-12-14 23:30:29 +00:00
copyChan := make ( chan copyBlocksState )
pullChan := make ( chan pullBlockState , expectBlocks )
finisherChan := make ( chan * sharedPullerState , 1 )
2017-12-07 08:42:03 +00:00
dbUpdateChan := make ( chan dbUpdateJob , 1 )
2016-12-14 23:30:29 +00:00
// Run a single fetcher routine
go fo . copierRoutine ( copyChan , pullChan , finisherChan )
// Test 1 - no weak hashing, file gets fully repulled (`expectBlocks` pulls).
2017-01-04 21:04:13 +00:00
fo . WeakHashThresholdPct = 101
2017-12-07 08:42:03 +00:00
fo . handleFile ( desiredFile , copyChan , finisherChan , dbUpdateChan )
2016-12-14 23:30:29 +00:00
var pulls [ ] pullBlockState
for len ( pulls ) < expectBlocks {
select {
case pull := <- pullChan :
pulls = append ( pulls , pull )
2016-12-16 12:05:27 +00:00
case <- time . After ( 10 * time . Second ) :
t . Errorf ( "timed out, got %d pulls expected %d" , len ( pulls ) , expectPulls )
2016-12-14 23:30:29 +00:00
}
}
finish := <- finisherChan
select {
case <- pullChan :
t . Fatal ( "Pull channel has data to be read" )
case <- finisherChan :
t . Fatal ( "Finisher channel has data to be read" )
default :
}
finish . fd . Close ( )
if err := os . Remove ( tempFile ) ; err != nil && ! os . IsNotExist ( err ) {
t . Error ( err )
}
// Test 2 - using weak hash, expectPulls blocks pulled.
2017-01-04 21:04:13 +00:00
fo . WeakHashThresholdPct = - 1
2017-12-07 08:42:03 +00:00
fo . handleFile ( desiredFile , copyChan , finisherChan , dbUpdateChan )
2016-12-14 23:30:29 +00:00
pulls = pulls [ : 0 ]
for len ( pulls ) < expectPulls {
select {
case pull := <- pullChan :
pulls = append ( pulls , pull )
2016-12-16 12:05:27 +00:00
case <- time . After ( 10 * time . Second ) :
t . Errorf ( "timed out, got %d pulls expected %d" , len ( pulls ) , expectPulls )
2016-12-14 23:30:29 +00:00
}
}
finish = <- finisherChan
finish . fd . Close ( )
expectShifted := expectBlocks - expectPulls
if finish . copyOriginShifted != expectShifted {
t . Errorf ( "did not copy %d shifted" , expectShifted )
}
}
2017-11-04 07:20:11 +00:00
// Test that updating a file removes its old blocks from the blockmap
2014-10-22 14:24:11 +00:00
func TestCopierCleanup ( t * testing . T ) {
2015-01-18 01:12:06 +00:00
iterFn := func ( folder , file string , index int32 ) bool {
2014-10-22 14:24:11 +00:00
return true
}
// Create a file
2015-12-21 21:47:26 +00:00
file := setUpFile ( "test" , [ ] int { 0 } )
m := setUpModel ( file )
2014-10-22 14:24:11 +00:00
file . Blocks = [ ] protocol . BlockInfo { blocks [ 1 ] }
2015-03-25 21:37:23 +00:00
file . Version = file . Version . Update ( protocol . LocalDeviceID . Short ( ) )
2014-10-22 14:24:11 +00:00
// Update index (removing old blocks)
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( "default" , [ ] protocol . FileInfo { file } )
2014-10-22 14:24:11 +00:00
2015-09-04 10:01:00 +00:00
if m . finder . Iterate ( folders , blocks [ 0 ] . Hash , iterFn ) {
2014-10-22 14:24:11 +00:00
t . Error ( "Unexpected block found" )
}
2015-09-04 10:01:00 +00:00
if ! m . finder . Iterate ( folders , blocks [ 1 ] . Hash , iterFn ) {
2014-10-22 14:24:11 +00:00
t . Error ( "Expected block not found" )
}
file . Blocks = [ ] protocol . BlockInfo { blocks [ 0 ] }
2015-03-25 21:37:23 +00:00
file . Version = file . Version . Update ( protocol . LocalDeviceID . Short ( ) )
2014-10-22 14:24:11 +00:00
// Update index (removing old blocks)
2016-05-19 00:19:26 +00:00
m . updateLocalsFromScanning ( "default" , [ ] protocol . FileInfo { file } )
2014-10-22 14:24:11 +00:00
2015-09-04 10:01:00 +00:00
if ! m . finder . Iterate ( folders , blocks [ 0 ] . Hash , iterFn ) {
2014-10-22 14:24:11 +00:00
t . Error ( "Unexpected block found" )
}
2015-09-04 10:01:00 +00:00
if m . finder . Iterate ( folders , blocks [ 1 ] . Hash , iterFn ) {
2014-10-22 14:24:11 +00:00
t . Error ( "Expected block not found" )
}
}
2014-10-24 22:20:08 +00:00
2015-01-07 23:12:12 +00:00
// TestDeregisterOnFailInCopy checks that when a file fails during the copy
// phase, the finisher routine deregisters it from both the progress emitter
// and the job queue, closes its temp file, and that feeding the same failed
// state to the finisher a second time has no further effect.
func TestDeregisterOnFailInCopy(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + fs.TempName("filex"))

	db := db.OpenMemory()

	m := NewModel(defaultCfgWrapper, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	// All channels unbuffered (except dbUpdateChan) so the routines block
	// at each hand-off and the test controls the sequencing.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive a block at puller, to indicate that at least a single copier
	// loop has been performed.
	toPull := <-pullChan

	// Wait until copier is trying to pass something down to the puller again
	time.Sleep(100 * time.Millisecond)

	// Close the file
	toPull.sharedPullerState.fail("test", os.ErrNotExist)

	// Unblock copier
	<-pullChan

	// Subscribe before handing the state to the finisher, so the
	// ItemFinished event cannot be missed. `timeout` is a package-level
	// variable (declared elsewhere in this package).
	s := events.Default.Subscribe(events.ItemFinished)
	timeout = time.Second

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		// The finisher must emit an ItemFinished event for exactly this file.
		if ev, err := s.Poll(timeout); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}

		time.Sleep(100 * time.Millisecond)

		// The finisher should have closed the temp file (fd set to nil).
		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
// TestDeregisterOnFailInPull checks that when a file fails during the pull
// phase (no peers to pull from), the finisher routine deregisters it from
// the progress emitter and the job queue, closes its temp file, and that a
// repeated hand-off of the same failed state has no further effect.
func TestDeregisterOnFailInPull(t *testing.T) {
	file := setUpFile("filex", []int{0, 2, 0, 0, 5, 0, 0, 8})
	defer os.Remove("testdata/" + fs.TempName("filex"))

	db := db.OpenMemory()
	m := NewModel(defaultCfgWrapper, protocol.LocalDeviceID, "syncthing", "dev", db, nil)
	m.AddFolder(defaultFolderConfig)

	f := setUpSendReceiveFolder(m)

	// queue.Done should be called by the finisher routine
	f.queue.Push("filex", 0, time.Time{})
	f.queue.Pop()

	if f.queue.lenProgress() != 1 {
		t.Fatal("Expected file in progress")
	}

	// All channels unbuffered (except dbUpdateChan) so the routines block
	// at each hand-off and the test controls the sequencing.
	copyChan := make(chan copyBlocksState)
	pullChan := make(chan pullBlockState)
	finisherBufferChan := make(chan *sharedPullerState)
	finisherChan := make(chan *sharedPullerState)
	dbUpdateChan := make(chan dbUpdateJob, 1)

	go f.copierRoutine(copyChan, pullChan, finisherBufferChan)
	go f.pullerRoutine(pullChan, finisherBufferChan)
	go f.finisherRoutine(ignore.New(defaultFs), finisherChan, dbUpdateChan, make(chan string))

	f.handleFile(file, copyChan, finisherChan, dbUpdateChan)

	// Receive at finisher, we should error out as puller has nowhere to pull
	// from.
	// Subscribe before the failing state arrives so the ItemFinished event
	// cannot be missed. `timeout` is a package-level variable (declared
	// elsewhere in this package).
	s := events.Default.Subscribe(events.ItemFinished)
	timeout = time.Second

	select {
	case state := <-finisherBufferChan:
		// At this point the file should still be registered with both the job
		// queue, and the progress emitter. Verify this.
		if f.model.progressEmitter.lenRegistry() != 1 || f.queue.lenProgress() != 1 || f.queue.lenQueued() != 0 {
			t.Fatal("Could not find file")
		}

		// Pass the file down the real finisher, and give it time to consume
		finisherChan <- state

		// The finisher must emit an ItemFinished event for exactly this file.
		if ev, err := s.Poll(timeout); err != nil {
			t.Fatal("Got error waiting for ItemFinished event:", err)
		} else if n := ev.Data.(map[string]interface{})["item"]; n != state.file.Name {
			t.Fatal("Got ItemFinished event for wrong file:", n)
		}

		time.Sleep(100 * time.Millisecond)

		// The finisher should have closed the temp file (fd set to nil).
		state.mut.Lock()
		stateFd := state.fd
		state.mut.Unlock()
		if stateFd != nil {
			t.Fatal("File not closed?")
		}

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}

		// Doing it again should have no effect
		finisherChan <- state
		time.Sleep(100 * time.Millisecond)

		if f.model.progressEmitter.lenRegistry() != 0 || f.queue.lenProgress() != 0 || f.queue.lenQueued() != 0 {
			t.Fatal("Still registered", f.model.progressEmitter.lenRegistry(), f.queue.lenProgress(), f.queue.lenQueued())
		}
	case <-time.After(time.Second):
		t.Fatal("Didn't get anything to the finisher")
	}
}
2017-12-07 08:42:03 +00:00
// TestIssue3164 checks that handleDeleteDir removes a directory that still
// contains files matched by a (?d) "delete-ok" ignore pattern.
func TestIssue3164(t *testing.T) {
	m := setUpModel(protocol.FileInfo{})
	f := setUpSendReceiveFolder(m)

	defaultFs.RemoveAll("issue3164")
	defer defaultFs.RemoveAll("issue3164")

	if err := defaultFs.MkdirAll("issue3164/oktodelete/foobar", 0777); err != nil {
		t.Fatal(err)
	}
	// Populate the directory with content that only a (?d) pattern allows
	// to be deleted.
	for _, name := range []string{"testdata/issue3164/oktodelete/foobar/file", "testdata/issue3164/oktodelete/file"} {
		if err := ioutil.WriteFile(name, []byte("Hello"), 0644); err != nil {
			t.Fatal(err)
		}
	}

	file := protocol.FileInfo{Name: "issue3164"}

	matcher := ignore.New(defaultFs)
	if err := matcher.Parse(bytes.NewBufferString("(?d)oktodelete"), ""); err != nil {
		t.Fatal(err)
	}

	dbUpdateChan := make(chan dbUpdateJob, 1)

	f.handleDeleteDir(file, matcher, dbUpdateChan, make(chan string))

	if _, err := defaultFs.Stat("testdata/issue3164"); !fs.IsNotExist(err) {
		t.Fatal(err)
	}
}
2018-01-14 14:30:11 +00:00
// TestDiff runs blockDiff over every case in diffTestData and checks the
// returned "need" list against the expected offsets and sizes.
func TestDiff(t *testing.T) {
	for i, test := range diffTestData {
		a, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		b, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		_, d := blockDiff(a, b)
		if len(d) != len(test.d) {
			t.Fatalf("Incorrect length for diff %d; %d != %d", i, len(d), len(test.d))
		}
		// Fatalf above aborts the test, so no else branch is needed here.
		for j := range test.d {
			if d[j].Offset != test.d[j].Offset {
				t.Errorf("Incorrect offset for diff %d block %d; %d != %d", i, j, d[j].Offset, test.d[j].Offset)
			}
			if d[j].Size != test.d[j].Size {
				t.Errorf("Incorrect length for diff %d block %d; %d != %d", i, j, d[j].Size, test.d[j].Size)
			}
		}
	}
}
2018-01-14 21:52:41 +00:00
// BenchmarkDiff measures blockDiff over the pre-hashed diffTestData cases;
// hashing happens once, outside the timed loop.
func BenchmarkDiff(b *testing.B) {
	testCases := make([]struct{ a, b []protocol.BlockInfo }, 0, len(diffTestData))
	for _, test := range diffTestData {
		first, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.a), test.s, -1, nil, false)
		second, _ := scanner.Blocks(context.TODO(), bytes.NewBufferString(test.b), test.s, -1, nil, false)
		testCases = append(testCases, struct{ a, b []protocol.BlockInfo }{first, second})
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			blockDiff(tc.a, tc.b)
		}
	}
}
2018-01-14 14:30:11 +00:00
// TestDiffEmpty checks blockDiff's behavior on nil/empty block lists: the
// "have" and "need" result lengths must match the expectations per case.
func TestDiffEmpty(t *testing.T) {
	emptyCases := []struct {
		a    []protocol.BlockInfo
		b    []protocol.BlockInfo
		need int
		have int
	}{
		{nil, nil, 0, 0},
		{[]protocol.BlockInfo{{Offset: 3, Size: 1}}, nil, 0, 0},
		{nil, []protocol.BlockInfo{{Offset: 3, Size: 1}}, 1, 0},
	}
	for _, emptyCase := range emptyCases {
		h, n := blockDiff(emptyCase.a, emptyCase.b)
		if len(h) != emptyCase.have {
			t.Errorf("incorrect have: %d != %d", len(h), emptyCase.have)
		}
		if len(n) != emptyCase.need {
			// Fixed: this check is about "need" but previously reported a
			// copy of the "have" message and values.
			t.Errorf("incorrect need: %d != %d", len(n), emptyCase.need)
		}
	}
}