Refactor out file scanner into separate package

Jakob Borg 2014-03-08 23:02:01 +01:00
parent d6c9afd07f
commit 1448cfe66a
21 changed files with 471 additions and 338 deletions

View File

@ -11,6 +11,7 @@ import (
"time"
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/scanner"
)
type fileMonitor struct {
@ -18,8 +19,8 @@ type fileMonitor struct {
path string // full path
writeDone sync.WaitGroup
model *Model
global File
localBlocks []Block
global scanner.File
localBlocks []scanner.Block
copyError error
writeError error
}
@ -29,7 +30,7 @@ func (m *fileMonitor) FileBegins(cc <-chan content) error {
log.Printf("FILE: FileBegins: " + m.name)
}
tmp := tempName(m.path, m.global.Modified)
tmp := defTempNamer.TempName(m.path)
dir := path.Dir(tmp)
_, err := os.Stat(dir)
@ -115,7 +116,7 @@ func (m *fileMonitor) FileDone() error {
m.writeDone.Wait()
tmp := tempName(m.path, m.global.Modified)
tmp := defTempNamer.TempName(m.path)
defer os.Remove(tmp)
if m.copyError != nil {
@ -149,14 +150,14 @@ func (m *fileMonitor) FileDone() error {
return nil
}
func hashCheck(name string, correct []Block) error {
func hashCheck(name string, correct []scanner.Block) error {
rf, err := os.Open(name)
if err != nil {
return err
}
defer rf.Close()
current, err := Blocks(rf, BlockSize)
current, err := scanner.Blocks(rf, BlockSize)
if err != nil {
return err
}
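
hashCheck now delegates block hashing to the new scanner package. The hunk above ends before the comparison logic, so the following is only a sketch of the overall pattern, assuming the remainder of the function compares the freshly computed blocks against the expected ones hash by hash; verifyBlocks is a hypothetical name, and the helper needs the standard bytes, fmt and os imports plus github.com/calmh/syncthing/scanner.

func verifyBlocks(name string, correct []scanner.Block, blockSize int) error {
	rf, err := os.Open(name)
	if err != nil {
		return err
	}
	defer rf.Close()

	// Re-hash the file on disk with the same block size used originally.
	current, err := scanner.Blocks(rf, blockSize)
	if err != nil {
		return err
	}
	if len(current) != len(correct) {
		return fmt.Errorf("%s: incorrect number of blocks", name)
	}
	for i := range current {
		if !bytes.Equal(current[i].Hash, correct[i].Hash) {
			return fmt.Errorf("%s: hash mismatch in block %d", name, i)
		}
	}
	return nil
}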

View File

@ -5,6 +5,8 @@ import (
"sort"
"sync"
"time"
"github.com/calmh/syncthing/scanner"
)
type Monitor interface {
@ -23,7 +25,7 @@ type FileQueue struct {
type queuedFile struct {
name string
blocks []Block
blocks []scanner.Block
activeBlocks []bool
given int
remaining int
@ -54,7 +56,7 @@ func (l queuedFileList) Less(a, b int) bool {
type queuedBlock struct {
name string
block Block
block scanner.Block
index int
}
@ -65,7 +67,7 @@ func NewFileQueue() *FileQueue {
}
}
func (q *FileQueue) Add(name string, blocks []Block, monitor Monitor) {
func (q *FileQueue) Add(name string, blocks []scanner.Block, monitor Monitor) {
q.fmut.Lock()
defer q.fmut.Unlock()

View File

@ -5,6 +5,8 @@ import (
"sync"
"sync/atomic"
"testing"
"github.com/calmh/syncthing/scanner"
)
func TestFileQueueAdd(t *testing.T) {
@ -17,8 +19,8 @@ func TestFileQueueAddSorting(t *testing.T) {
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ := q.Get("nodeID")
if b.name != "aaa" {
t.Errorf("Incorrectly sorted get: %+v", b)
@ -28,12 +30,12 @@ func TestFileQueueAddSorting(t *testing.T) {
q.SetAvailable("zzz", []string{"nodeID"})
q.SetAvailable("aaa", []string{"nodeID"})
q.Add("zzz", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID") // Start on zzz
if b.name != "zzz" {
t.Errorf("Incorrectly sorted get: %+v", b)
}
q.Add("aaa", []Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
b, _ = q.Get("nodeID")
if b.name != "zzz" {
// Continue rather than starting a new file
@ -56,12 +58,12 @@ func TestFileQueueGet(t *testing.T) {
q.SetAvailable("foo", []string{"nodeID"})
q.SetAvailable("bar", []string{"nodeID"})
q.Add("foo", []Block{
q.Add("foo", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("bar", []Block{
q.Add("bar", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
@ -70,7 +72,7 @@ func TestFileQueueGet(t *testing.T) {
expected := queuedBlock{
name: "bar",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
@ -89,7 +91,7 @@ func TestFileQueueGet(t *testing.T) {
expected = queuedBlock{
name: "bar",
block: Block{
block: scanner.Block{
Offset: 128,
Size: 128,
Hash: []byte("some other bar hash bytes"),
@ -109,7 +111,7 @@ func TestFileQueueGet(t *testing.T) {
expected = queuedBlock{
name: "foo",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
@ -150,7 +152,7 @@ func TestFileQueueDone(t *testing.T) {
}()
q := FileQueue{resolver: fakeResolver{}}
q.Add("foo", []Block{
q.Add("foo", []scanner.Block{
{Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
}, ch)
@ -181,19 +183,19 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
q.SetAvailable("a-foo", []string{"nodeID", "a"})
q.SetAvailable("b-bar", []string{"nodeID", "b"})
q.Add("a-foo", []Block{
q.Add("a-foo", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
}, nil)
q.Add("b-bar", []Block{
q.Add("b-bar", []scanner.Block{
{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
}, nil)
expected := queuedBlock{
name: "b-bar",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some bar hash bytes"),
@ -209,7 +211,7 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
expected = queuedBlock{
name: "a-foo",
block: Block{
block: scanner.Block{
Offset: 0,
Size: 128,
Hash: []byte("some foo hash bytes"),
@ -225,7 +227,7 @@ func TestFileQueueGetNodeIDs(t *testing.T) {
expected = queuedBlock{
name: "a-foo",
block: Block{
block: scanner.Block{
Offset: 128,
Size: 128,
Hash: []byte("some other foo hash bytes"),
@ -246,9 +248,9 @@ func TestFileQueueThreadHandling(t *testing.T) {
const n = 100
var total int
var blocks []Block
var blocks []scanner.Block
for i := 1; i <= n; i++ {
blocks = append(blocks, Block{Offset: int64(i), Size: 1})
blocks = append(blocks, scanner.Block{Offset: int64(i), Size: 1})
total += i
}

View File

@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/calmh/syncthing/scanner"
"github.com/codegangsta/martini"
)
@ -107,7 +108,7 @@ func restPostRestart(req *http.Request) {
restart()
}
type guiFile File
type guiFile scanner.File
func (f guiFile) MarshalJSON() ([]byte, error) {
type t struct {
@ -116,7 +117,7 @@ func (f guiFile) MarshalJSON() ([]byte, error) {
}
return json.Marshal(t{
Name: f.Name,
Size: File(f).Size,
Size: scanner.File(f).Size,
})
}

View File

@ -21,8 +21,11 @@ import (
"github.com/calmh/ini"
"github.com/calmh/syncthing/discover"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
const BlockSize = 128 * 1024
var cfg Configuration
var Version = "unknown-dev"
@ -217,7 +220,17 @@ func main() {
infoln("Populating repository index")
}
loadIndex(m)
updateLocalModel(m)
sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}
w := &scanner.Walker{
Dir: m.dir,
IgnoreFile: ".stignore",
FollowSymlinks: cfg.Options.FollowSymlinks,
BlockSize: BlockSize,
Suppressor: sup,
TempNamer: defTempNamer,
}
updateLocalModel(m, w)
connOpts := map[string]string{
"clientId": "syncthing",
@ -263,7 +276,7 @@ func main() {
for {
time.Sleep(td)
if m.LocalAge() > (td / 2).Seconds() {
updateLocalModel(m)
updateLocalModel(m, w)
}
}
}()
@ -502,8 +515,8 @@ func connect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Confi
}
}
func updateLocalModel(m *Model) {
files, _ := m.Walk(cfg.Options.FollowSymlinks)
func updateLocalModel(m *Model, w *scanner.Walker) {
files, _ := w.Walk()
m.ReplaceLocal(files)
saveIndex(m)
}
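
With scanning moved out of Model, main() builds a single scanner.Walker up front and reuses it for the initial scan and the periodic rescan; updateLocalModel takes the Walker instead of calling the removed m.Walk. A condensed sketch of that wiring, reusing identifiers from the hunks above (m, cfg, sup, defTempNamer and BlockSize come from cmd/syncthing; rescanInterval stands in for the td duration computed elsewhere in main):

w := &scanner.Walker{
	Dir:            m.dir,
	IgnoreFile:     ".stignore",
	FollowSymlinks: cfg.Options.FollowSymlinks,
	BlockSize:      BlockSize,
	Suppressor:     sup,
	TempNamer:      defTempNamer,
}

updateLocalModel(m, w) // initial scan: w.Walk() + m.ReplaceLocal + saveIndex

go func() {
	for {
		time.Sleep(rescanInterval)
		if m.LocalAge() > (rescanInterval / 2).Seconds() {
			updateLocalModel(m, w) // periodic rescan reuses the same Walker
		}
	}
}()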

View File

@ -13,16 +13,17 @@ import (
"github.com/calmh/syncthing/buffers"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
type Model struct {
dir string
global map[string]File // the latest version of each file as it exists in the cluster
global map[string]scanner.File // the latest version of each file as it exists in the cluster
gmut sync.RWMutex // protects global
local map[string]File // the files we currently have locally on disk
local map[string]scanner.File // the files we currently have locally on disk
lmut sync.RWMutex // protects local
remote map[string]map[string]File
remote map[string]map[string]scanner.File
rmut sync.RWMutex // protects remote
protoConn map[string]Connection
rawConn map[string]io.Closer
@ -31,7 +32,7 @@ type Model struct {
// Queue for files to fetch. fq can call back into the model, so we must ensure
// to hold no locks when calling methods on fq.
fq *FileQueue
dq chan File // queue for files to delete
dq chan scanner.File // queue for files to delete
updatedLocal int64 // timestamp of last update to local
updateGlobal int64 // timestamp of last update to remote
@ -77,16 +78,16 @@ var (
func NewModel(dir string, maxChangeBw int) *Model {
m := &Model{
dir: dir,
global: make(map[string]File),
local: make(map[string]File),
remote: make(map[string]map[string]File),
global: make(map[string]scanner.File),
local: make(map[string]scanner.File),
remote: make(map[string]map[string]scanner.File),
protoConn: make(map[string]Connection),
rawConn: make(map[string]io.Closer),
lastIdxBcast: time.Now(),
trace: make(map[string]bool),
sup: suppressor{threshold: int64(maxChangeBw)},
fq: NewFileQueue(),
dq: make(chan File),
dq: make(chan scanner.File),
}
go m.broadcastIndexLoop()
@ -128,7 +129,6 @@ func (m *Model) StartRW(del bool, threads int) {
m.delete = del
m.parallelRequests = threads
go m.cleanTempFiles()
if del {
go m.deleteLoop()
}
@ -260,7 +260,7 @@ func (m *Model) InSyncSize() (files, bytes int64) {
}
// NeedFiles returns the list of currently needed files and the total size.
func (m *Model) NeedFiles() (files []File, bytes int64) {
func (m *Model) NeedFiles() (files []scanner.File, bytes int64) {
qf := m.fq.QueuedFiles()
m.gmut.RLock()
@ -278,7 +278,7 @@ func (m *Model) NeedFiles() (files []File, bytes int64) {
// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
var files = make([]scanner.File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
@ -290,7 +290,7 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
debugf("NET IDX(in): %s: %d files", nodeID, len(fs))
}
repo := make(map[string]File)
repo := make(map[string]scanner.File)
for _, f := range files {
m.indexUpdate(repo, f)
}
@ -306,7 +306,7 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
var files = make([]File, len(fs))
var files = make([]scanner.File, len(fs))
for i := range fs {
files[i] = fileFromFileInfo(fs[i])
}
@ -335,7 +335,7 @@ func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
m.recomputeNeedForFiles(files)
}
func (m *Model) indexUpdate(repo map[string]File, f File) {
func (m *Model) indexUpdate(repo map[string]scanner.File, f scanner.File) {
if m.trace["idx"] {
var flagComment string
if f.Flags&protocol.FlagDeleted != 0 {
@ -431,9 +431,9 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
}
// ReplaceLocal replaces the local repository index with the given list of files.
func (m *Model) ReplaceLocal(fs []File) {
func (m *Model) ReplaceLocal(fs []scanner.File) {
var updated bool
var newLocal = make(map[string]File)
var newLocal = make(map[string]scanner.File)
m.lmut.RLock()
for _, f := range fs {
@ -474,7 +474,7 @@ func (m *Model) ReplaceLocal(fs []File) {
// the local index from a cache file at startup.
func (m *Model) SeedLocal(fs []protocol.FileInfo) {
m.lmut.Lock()
m.local = make(map[string]File)
m.local = make(map[string]scanner.File)
for _, f := range fs {
m.local[f.Name] = fileFromFileInfo(f)
}
@ -628,7 +628,7 @@ func (m *Model) broadcastIndexLoop() {
}
// markDeletedLocals sets the deleted flag on files that have gone missing locally.
func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
func (m *Model) markDeletedLocals(newLocal map[string]scanner.File) bool {
// For every file in the existing local table, check if they are also
// present in the new local table. If they are not, check that we already
// had the newest version available according to the global table and if so
@ -658,7 +658,7 @@ func (m *Model) markDeletedLocals(newLocal map[string]File) bool {
return updated
}
func (m *Model) updateLocal(f File) {
func (m *Model) updateLocal(f scanner.File) {
var updated bool
m.lmut.Lock()
@ -685,7 +685,7 @@ func (m *Model) updateLocal(f File) {
/*
XXX: Not done, needs elegant handling of availability
func (m *Model) recomputeGlobalFor(files []File) bool {
func (m *Model) recomputeGlobalFor(files []scanner.File) bool {
m.gmut.Lock()
defer m.gmut.Unlock()
@ -702,7 +702,7 @@ func (m *Model) recomputeGlobalFor(files []File) bool {
*/
func (m *Model) recomputeGlobal() {
var newGlobal = make(map[string]File)
var newGlobal = make(map[string]scanner.File)
m.lmut.RLock()
for n, f := range m.local {
@ -761,12 +761,12 @@ func (m *Model) recomputeGlobal() {
type addOrder struct {
n string
remote []Block
remote []scanner.Block
fm *fileMonitor
}
func (m *Model) recomputeNeedForGlobal() {
var toDelete []File
var toDelete []scanner.File
var toAdd []addOrder
m.gmut.RLock()
@ -785,8 +785,8 @@ func (m *Model) recomputeNeedForGlobal() {
}
}
func (m *Model) recomputeNeedForFiles(files []File) {
var toDelete []File
func (m *Model) recomputeNeedForFiles(files []scanner.File) {
var toDelete []scanner.File
var toAdd []addOrder
m.gmut.RLock()
@ -805,7 +805,7 @@ func (m *Model) recomputeNeedForFiles(files []File) {
}
}
func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File) ([]addOrder, []File) {
func (m *Model) recomputeNeedForFile(gf scanner.File, toAdd []addOrder, toDelete []scanner.File) ([]addOrder, []scanner.File) {
m.lmut.RLock()
lf, ok := m.local[gf.Name]
m.lmut.RUnlock()
@ -830,7 +830,7 @@ func (m *Model) recomputeNeedForFile(gf File, toAdd []addOrder, toDelete []File)
if gf.Flags&protocol.FlagDeleted != 0 {
toDelete = append(toDelete, gf)
} else {
local, remote := BlockDiff(lf.Blocks, gf.Blocks)
local, remote := scanner.BlockDiff(lf.Blocks, gf.Blocks)
fm := fileMonitor{
name: gf.Name,
path: path.Clean(path.Join(m.dir, gf.Name)),
@ -878,18 +878,18 @@ func (m *Model) deleteLoop() {
}
}
func fileFromFileInfo(f protocol.FileInfo) File {
var blocks = make([]Block, len(f.Blocks))
func fileFromFileInfo(f protocol.FileInfo) scanner.File {
var blocks = make([]scanner.Block, len(f.Blocks))
var offset int64
for i, b := range f.Blocks {
blocks[i] = Block{
blocks[i] = scanner.Block{
Offset: offset,
Size: b.Size,
Hash: b.Hash,
}
offset += int64(b.Size)
}
return File{
return scanner.File{
Name: f.Name,
Size: offset,
Flags: f.Flags,
@ -899,7 +899,7 @@ func fileFromFileInfo(f protocol.FileInfo) File {
}
}
func fileInfoFromFile(f File) protocol.FileInfo {
func fileInfoFromFile(f scanner.File) protocol.FileInfo {
var blocks = make([]protocol.BlockInfo, len(f.Blocks))
for i, b := range f.Blocks {
blocks[i] = protocol.BlockInfo{
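
In recomputeNeedForFile above, the block-level comparison also comes from the scanner package now. A hedged annotation of that call, assuming scanner.BlockDiff keeps the pre-refactor behaviour of splitting the global file's block list by hash equality against the local copy (lf is the local file, gf the newer global one):

local, remote := scanner.BlockDiff(lf.Blocks, gf.Blocks)
// local:  blocks whose hashes already match the on-disk copy, so the copier
//         can take them straight from the existing file.
// remote: blocks that changed or were appended; they are handed to an addOrder
//         and fetched from peers through the FileQueue.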

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/calmh/syncthing/protocol"
"github.com/calmh/syncthing/scanner"
)
func TestNewModel(t *testing.T) {
@ -27,27 +28,27 @@ func TestNewModel(t *testing.T) {
}
}
var testDataExpected = map[string]File{
"foo": File{
var testDataExpected = map[string]scanner.File{
"foo": scanner.File{
Name: "foo",
Flags: 0,
Modified: 0,
Size: 7,
Blocks: []Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0x7, Hash: []uint8{0xae, 0xc0, 0x70, 0x64, 0x5f, 0xe5, 0x3e, 0xe3, 0xb3, 0x76, 0x30, 0x59, 0x37, 0x61, 0x34, 0xf0, 0x58, 0xcc, 0x33, 0x72, 0x47, 0xc9, 0x78, 0xad, 0xd1, 0x78, 0xb6, 0xcc, 0xdf, 0xb0, 0x1, 0x9f}}},
},
"empty": File{
"empty": scanner.File{
Name: "empty",
Flags: 0,
Modified: 0,
Size: 0,
Blocks: []Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0x0, Hash: []uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}},
},
"bar": File{
"bar": scanner.File{
Name: "bar",
Flags: 0,
Modified: 0,
Size: 10,
Blocks: []Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
Blocks: []scanner.Block{{Offset: 0x0, Size: 0xa, Hash: []uint8{0x2f, 0x72, 0xcc, 0x11, 0xa6, 0xfc, 0xd0, 0x27, 0x1e, 0xce, 0xf8, 0xc6, 0x10, 0x56, 0xee, 0x1e, 0xb1, 0x24, 0x3b, 0xe3, 0x80, 0x5b, 0xf9, 0xa9, 0xdf, 0x98, 0xf9, 0x2f, 0x76, 0x36, 0xb0, 0x5c}}},
},
}
@ -63,7 +64,8 @@ func init() {
func TestUpdateLocal(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if fs, _ := m.NeedFiles(); len(fs) > 0 {
@ -105,7 +107,8 @@ func TestUpdateLocal(t *testing.T) {
func TestRemoteUpdateExisting(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
@ -122,7 +125,8 @@ func TestRemoteUpdateExisting(t *testing.T) {
func TestRemoteAddNew(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
newFile := protocol.FileInfo{
@ -139,7 +143,8 @@ func TestRemoteAddNew(t *testing.T) {
func TestRemoteUpdateOld(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
oldTimeStamp := int64(1234)
@ -157,7 +162,8 @@ func TestRemoteUpdateOld(t *testing.T) {
func TestRemoteIndexUpdate(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
foo := protocol.FileInfo{
@ -190,7 +196,8 @@ func TestRemoteIndexUpdate(t *testing.T) {
func TestDelete(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
@ -201,10 +208,10 @@ func TestDelete(t *testing.T) {
}
ot := time.Now().Unix()
newFile := File{
newFile := scanner.File{
Name: "a new file",
Modified: ot,
Blocks: []Block{{0, 100, []byte("some hash bytes")}},
Blocks: []scanner.Block{{0, 100, []byte("some hash bytes")}},
}
m.updateLocal(newFile)
@ -292,7 +299,8 @@ func TestDelete(t *testing.T) {
func TestForgetNode(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
if l1, l2 := len(m.local), len(fs); l1 != l2 {
@ -345,7 +353,8 @@ func TestForgetNode(t *testing.T) {
func TestRequest(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
bs, err := m.Request("some node", "default", "foo", 0, 6)
@ -367,7 +376,8 @@ func TestRequest(t *testing.T) {
func TestIgnoreWithUnknownFlags(t *testing.T) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
valid := protocol.FileInfo{
@ -410,7 +420,8 @@ func genFiles(n int) []protocol.FileInfo {
func BenchmarkIndex10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
@ -422,7 +433,8 @@ func BenchmarkIndex10000(b *testing.B) {
func BenchmarkIndex00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(100)
@ -434,7 +446,8 @@ func BenchmarkIndex00100(b *testing.B) {
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@ -447,7 +460,8 @@ func BenchmarkIndexUpdate10000f10000(b *testing.B) {
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@ -461,7 +475,8 @@ func BenchmarkIndexUpdate10000f00100(b *testing.B) {
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
files := genFiles(10000)
m.Index("42", files)
@ -506,7 +521,8 @@ func (FakeConnection) Statistics() protocol.Statistics {
func BenchmarkRequest(b *testing.B) {
m := NewModel("testdata", 1e6)
fs, _ := m.Walk(false)
w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
fs, _ := w.Walk()
m.ReplaceLocal(fs)
const n = 1000

View File

@ -1,6 +1,7 @@
package main
import (
"os"
"sync"
"time"
)
@ -51,6 +52,11 @@ func (h *changeHistory) append(size int64, t time.Time) {
h.changes = append(h.changes, c)
}
func (s *suppressor) Suppress(name string, fi os.FileInfo) bool {
sup, _ := s.suppress(name, fi.Size(), time.Now())
return sup
}
func (s *suppressor) suppress(name string, size int64, t time.Time) (bool, bool) {
s.Lock()

cmd/syncthing/tempname.go (new file, +28)
View File

@ -0,0 +1,28 @@
package main
import (
"fmt"
"path"
"path/filepath"
"runtime"
"strings"
)
type tempNamer struct {
prefix string
}
var defTempNamer = tempNamer{".syncthing"}
func (t tempNamer) IsTemporary(name string) bool {
if runtime.GOOS == "windows" {
name = filepath.ToSlash(name)
}
return strings.HasPrefix(path.Base(name), t.prefix)
}
func (t tempNamer) TempName(name string) string {
tdir := path.Dir(name)
tname := fmt.Sprintf("%s.%s", t.prefix, path.Base(name))
return path.Join(tdir, tname)
}
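
Unlike the old tempName helper in the removed walk.go below, TempName no longer embeds the file's modification time, so a given path always maps to the same temporary name. A small illustration of the behaviour defined above (the example path is made up):

t := defTempNamer // tempNamer{".syncthing"}

fmt.Println(t.TempName("photos/IMG_001.jpg"))               // photos/.syncthing.IMG_001.jpg
fmt.Println(t.IsTemporary("photos/.syncthing.IMG_001.jpg")) // true
fmt.Println(t.IsTemporary("photos/IMG_001.jpg"))            // false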

View File

@ -1,242 +0,0 @@
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/calmh/syncthing/protocol"
)
const BlockSize = 128 * 1024
type File struct {
Name string
Flags uint32
Modified int64
Version uint32
Size int64
Blocks []Block
}
func (f File) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}",
f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
}
func (f File) Equals(o File) bool {
return f.Modified == o.Modified && f.Version == o.Version
}
func (f File) NewerThan(o File) bool {
return f.Modified > o.Modified || (f.Modified == o.Modified && f.Version > o.Version)
}
func isTempName(name string) bool {
if runtime.GOOS == "windows" {
name = filepath.ToSlash(name)
}
return strings.HasPrefix(path.Base(name), ".syncthing.")
}
func tempName(name string, modified int64) string {
tdir := path.Dir(name)
tname := fmt.Sprintf(".syncthing.%s.%d", path.Base(name), modified)
return path.Join(tdir, tname)
}
func (m *Model) loadIgnoreFiles(ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if pn, sn := path.Split(rn); sn == ".stignore" {
pn := strings.Trim(pn, "/")
bs, _ := ioutil.ReadFile(p)
lines := bytes.Split(bs, []byte("\n"))
var patterns []string
for _, line := range lines {
if len(line) > 0 {
patterns = append(patterns, string(line))
}
}
ign[pn] = patterns
}
return nil
}
}
func (m *Model) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
if isTempName(p) {
return nil
}
rn, err := filepath.Rel(m.dir, p)
if err != nil {
return nil
}
if _, sn := path.Split(rn); sn == ".stignore" {
// We never sync the .stignore files
return nil
}
if ignoreFile(ign, rn) {
if m.trace["file"] {
log.Println("FILE: IGNORE:", rn)
}
return nil
}
if info.Mode()&os.ModeType == 0 {
modified := info.ModTime().Unix()
m.lmut.RLock()
lf, ok := m.local[rn]
m.lmut.RUnlock()
if ok && lf.Modified == modified {
if nf := uint32(info.Mode()); nf != lf.Flags {
lf.Flags = nf
lf.Version++
}
*res = append(*res, lf)
} else {
if cur, prev := m.sup.suppress(rn, info.Size(), time.Now()); cur {
if m.trace["file"] {
log.Printf("FILE: SUPPRESS: %q change bw over threshold", rn)
}
if !prev {
log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", rn)
}
if ok {
lf.Flags = protocol.FlagInvalid
lf.Version++
*res = append(*res, lf)
}
return nil
} else if prev && !cur {
log.Printf("INFO: Changes to %q are no longer suppressed.", rn)
}
if m.trace["file"] {
log.Printf("FILE: Hash %q", p)
}
fd, err := os.Open(p)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
defer fd.Close()
blocks, err := Blocks(fd, BlockSize)
if err != nil {
if m.trace["file"] {
log.Printf("FILE: %q: %v", p, err)
}
return nil
}
f := File{
Name: rn,
Size: info.Size(),
Flags: uint32(info.Mode()),
Modified: modified,
Blocks: blocks,
}
*res = append(*res, f)
}
}
return nil
}
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (m *Model) Walk(followSymlinks bool) (files []File, ignore map[string][]string) {
ignore = make(map[string][]string)
hashFiles := m.walkAndHashFiles(&files, ignore)
filepath.Walk(m.dir, m.loadIgnoreFiles(ignore))
filepath.Walk(m.dir, hashFiles)
if followSymlinks {
d, err := os.Open(m.dir)
if err != nil {
return
}
defer d.Close()
fis, err := d.Readdir(-1)
if err != nil {
return
}
for _, info := range fis {
if info.Mode()&os.ModeSymlink != 0 {
dir := path.Join(m.dir, info.Name()) + "/"
filepath.Walk(dir, m.loadIgnoreFiles(ignore))
filepath.Walk(dir, hashFiles)
}
}
}
return
}
func (m *Model) cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode()&os.ModeType == 0 && isTempName(path) {
if m.trace["file"] {
log.Printf("FILE: Remove %q", path)
}
os.Remove(path)
}
return nil
}
func (m *Model) cleanTempFiles() {
filepath.Walk(m.dir, m.cleanTempFile)
}
func ignoreFile(patterns map[string][]string, file string) bool {
first, last := path.Split(file)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
for _, pattern := range pats {
if match, _ := path.Match(pattern, last); match {
return true
}
}
}
}
return false
}

View File

@ -1,4 +1,4 @@
package main
package scanner
import (
"bytes"

View File

@ -1,4 +1,4 @@
package main
package scanner
import (
"bytes"

scanner/debug.go (new file, +12)
View File

@ -0,0 +1,12 @@
package scanner
import (
"log"
"os"
"strings"
)
var (
dlog = log.New(os.Stderr, "scanner: ", log.Lmicroseconds|log.Lshortfile)
debug = strings.Contains(os.Getenv("STTRACE"), "scanner")
)

scanner/file.go (new file, +25)
View File

@ -0,0 +1,25 @@
package scanner
import "fmt"
type File struct {
Name string
Flags uint32
Modified int64
Version uint32
Size int64
Blocks []Block
}
func (f File) String() string {
return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}",
f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
}
func (f File) Equals(o File) bool {
return f.Modified == o.Modified && f.Version == o.Version
}
func (f File) NewerThan(o File) bool {
return f.Modified > o.Modified || (f.Modified == o.Modified && f.Version > o.Version)
}
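
Equals and NewerThan carry the conflict-resolution rule the model relies on: a later Modified timestamp always wins, and Version only breaks ties between equally timestamped files. An illustrative sketch with made-up values:

a := scanner.File{Name: "doc", Modified: 100, Version: 1}
b := scanner.File{Name: "doc", Modified: 100, Version: 2}
c := scanner.File{Name: "doc", Modified: 200}

fmt.Println(b.NewerThan(a)) // true: same Modified, higher Version
fmt.Println(c.NewerThan(b)) // true: later Modified, regardless of Version
fmt.Println(a.Equals(b))    // false: Equals requires identical Modified and Version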

scanner/testdata/.stignore (new file, +2)
View File

@ -0,0 +1,2 @@
.*
quux

scanner/testdata/bar (new file, +1)
View File

@ -0,0 +1 @@
foobarbaz

scanner/testdata/baz/quux (new file, +1)
View File

@ -0,0 +1 @@
baazquux

scanner/testdata/empty (new empty file)
View File

scanner/testdata/foo (new file, +1)
View File

@ -0,0 +1 @@
foobar

scanner/walk.go (new file, +259)
View File

@ -0,0 +1,259 @@
package scanner
import (
"bytes"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/calmh/syncthing/protocol"
)
type Walker struct {
// Dir is the base directory for the walk
Dir string
// If FollowSymlinks is true, symbolic links directly under Dir will be followed.
// Symbolic links at deeper levels are never followed regardless of this flag.
FollowSymlinks bool
// BlockSize controls the size of the block used when hashing.
BlockSize int
// If IgnoreFile is not empty, it is the name used for the file that holds ignore patterns.
IgnoreFile string
// If TempNamer is not nil, it is used to ignore temporary files when walking.
TempNamer TempNamer
// If Suppressor is not nil, it is queried for suppression of modified files.
Suppressor Suppressor
previous map[string]File // file name -> last seen file state
suppressed map[string]bool // file name -> suppression status
}
type TempNamer interface {
// TempName returns a temporary name for the file referred to by path.
TempName(path string) string
// IsTemporary returns true if path refers to the name of a temporary file.
IsTemporary(path string) bool
}
type Suppressor interface {
// Suppress returns true if the update to the named file should be ignored.
Suppress(name string, fi os.FileInfo) bool
}
// Walk returns the list of files found in the local repository by scanning the
// file system. Files are blockwise hashed.
func (w *Walker) Walk() (files []File, ignore map[string][]string) {
w.lazyInit()
if debug {
dlog.Println("Walk", w.Dir, w.FollowSymlinks, w.BlockSize, w.IgnoreFile)
}
t0 := time.Now()
ignore = make(map[string][]string)
hashFiles := w.walkAndHashFiles(&files, ignore)
filepath.Walk(w.Dir, w.loadIgnoreFiles(w.Dir, ignore))
filepath.Walk(w.Dir, hashFiles)
if w.FollowSymlinks {
d, err := os.Open(w.Dir)
if err != nil {
return
}
defer d.Close()
fis, err := d.Readdir(-1)
if err != nil {
return
}
for _, info := range fis {
if info.Mode()&os.ModeSymlink != 0 {
dir := path.Join(w.Dir, info.Name()) + "/"
filepath.Walk(dir, w.loadIgnoreFiles(dir, ignore))
filepath.Walk(dir, hashFiles)
}
}
}
if debug {
t1 := time.Now()
d := t1.Sub(t0).Seconds()
dlog.Printf("Walk in %.02f ms, %.0f files/s", d*1000, float64(len(files))/d)
}
return
}
// CleanTempFiles removes all files that match the temporary filename pattern.
func (w *Walker) CleanTempFiles() {
filepath.Walk(w.Dir, w.cleanTempFile)
}
func (w *Walker) lazyInit() {
if w.previous == nil {
w.previous = make(map[string]File)
w.suppressed = make(map[string]bool)
}
}
func (w *Walker) loadIgnoreFiles(dir string, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
p, err = filepath.Rel(dir, p)
if err != nil {
return nil
}
if pn, sn := path.Split(p); sn == w.IgnoreFile {
pn := strings.Trim(pn, "/")
bs, _ := ioutil.ReadFile(p)
lines := bytes.Split(bs, []byte("\n"))
var patterns []string
for _, line := range lines {
if len(line) > 0 {
patterns = append(patterns, string(line))
}
}
ign[pn] = patterns
}
return nil
}
}
func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath.WalkFunc {
return func(p string, info os.FileInfo, err error) error {
if err != nil {
if debug {
dlog.Println("error:", p, info, err)
}
return nil
}
p, err = filepath.Rel(w.Dir, p)
if err != nil {
return nil
}
if w.TempNamer != nil && w.TempNamer.IsTemporary(p) {
if debug {
dlog.Println("temporary:", p)
}
return nil
}
if _, sn := path.Split(p); sn == w.IgnoreFile {
if debug {
dlog.Println("ignorefile:", p)
}
return nil
}
if w.ignoreFile(ign, p) {
if debug {
dlog.Println("ignored:", p)
}
return nil
}
if info.Mode()&os.ModeType == 0 {
modified := info.ModTime().Unix()
pf := w.previous[p]
if pf.Modified == modified {
if nf := uint32(info.Mode()); nf != pf.Flags {
if debug {
dlog.Println("new flags:", p)
}
pf.Flags = nf
pf.Version++
w.previous[p] = pf
} else if debug {
dlog.Println("unchanged:", p)
}
*res = append(*res, pf)
return nil
}
if w.Suppressor != nil && w.Suppressor.Suppress(p, info) {
if debug {
dlog.Println("suppressed:", p)
}
if !w.suppressed[p] {
w.suppressed[p] = true
log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", p)
}
f := pf
f.Flags = protocol.FlagInvalid
f.Blocks = nil
*res = append(*res, f)
return nil
} else if w.suppressed[p] {
log.Printf("INFO: Changes to %q are no longer suppressed.", p)
delete(w.suppressed, p)
}
fd, err := os.Open(p)
if err != nil {
return nil
}
defer fd.Close()
t0 := time.Now()
blocks, err := Blocks(fd, w.BlockSize)
if err != nil {
if debug {
dlog.Println("hash error:", p, err)
}
return nil
}
if debug {
t1 := time.Now()
dlog.Println("hashed:", p, ";", len(blocks), "blocks;", info.Size(), "bytes;", int(float64(info.Size())/1024/t1.Sub(t0).Seconds()), "KB/s")
}
f := File{
Name: p,
Size: info.Size(),
Flags: uint32(info.Mode()),
Modified: modified,
Blocks: blocks,
}
w.previous[p] = f
*res = append(*res, f)
}
return nil
}
}
func (w *Walker) cleanTempFile(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.Mode()&os.ModeType == 0 && w.TempNamer.IsTemporary(path) {
os.Remove(path)
}
return nil
}
func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool {
first, last := path.Split(file)
for prefix, pats := range patterns {
if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
for _, pattern := range pats {
if match, _ := path.Match(pattern, last); match {
return true
}
}
}
}
return false
}
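
Because Walker depends only on the two small interfaces above, it can be exercised without any of the cmd/syncthing machinery. A minimal, self-contained sketch under that assumption; simpleNamer and noopSuppressor are illustrative stand-ins for defTempNamer and the model's suppressor, and the repository path is made up:

package main

import (
	"fmt"
	"os"
	"path"
	"strings"

	"github.com/calmh/syncthing/scanner"
)

// simpleNamer mirrors the behaviour of cmd/syncthing's tempNamer.
type simpleNamer struct{ prefix string }

func (n simpleNamer) TempName(p string) string {
	return path.Join(path.Dir(p), n.prefix+"."+path.Base(p))
}

func (n simpleNamer) IsTemporary(p string) bool {
	return strings.HasPrefix(path.Base(p), n.prefix)
}

// noopSuppressor never suppresses a change.
type noopSuppressor struct{}

func (noopSuppressor) Suppress(name string, fi os.FileInfo) bool { return false }

func main() {
	w := &scanner.Walker{
		Dir:        "/some/repo",
		BlockSize:  128 * 1024,
		IgnoreFile: ".stignore",
		TempNamer:  simpleNamer{".syncthing"},
		Suppressor: noopSuppressor{},
	}
	files, ignores := w.Walk()
	fmt.Println(len(files), "files,", len(ignores), "ignore pattern sets")
}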

View File

@ -1,4 +1,4 @@
package main
package scanner
import (
"fmt"
@ -22,8 +22,12 @@ var correctIgnores = map[string][]string{
}
func TestWalk(t *testing.T) {
m := NewModel("testdata", 1e6)
files, ignores := m.Walk(false)
w := Walker{
Dir: "testdata",
BlockSize: 128 * 1024,
IgnoreFile: ".stignore",
}
files, ignores := w.Walk()
if l1, l2 := len(files), len(testdata); l1 != l2 {
t.Fatalf("Incorrect number of walked files %d != %d", l1, l2)
@ -75,8 +79,9 @@ func TestIgnore(t *testing.T) {
{"foo/bazz/quux", false},
}
w := Walker{}
for i, tc := range tests {
if r := ignoreFile(patterns, tc.f); r != tc.r {
if r := w.ignoreFile(patterns, tc.f); r != tc.r {
t.Errorf("Incorrect ignoreFile() #%d; E: %v, A: %v", i, tc.r, r)
}
}