// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package model

import (
	"bufio"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	stdsync "sync"
	"time"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/stats"
	"github.com/syncthing/syncthing/lib/symlinks"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/versioner"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/thejerf/suture"
)

// How many files to send in each Index/IndexUpdate message.
const (
	indexTargetSize        = 250 * 1024 // Aim for making index messages no larger than 250 KiB (uncompressed)
	indexPerFileSize       = 250        // Each FileInfo is approximately this big, in bytes, excluding BlockInfos
	indexPerBlockSize      = 40         // Each BlockInfo is approximately this big
	indexBatchSize         = 1000       // Either way, don't include more files than this
	reqValidationTime      = time.Hour  // How long to cache validation entries for Request messages
	reqValidationCacheSize = 1000       // How many entries to aim for in the validation cache
)

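// service is the interface implemented by the per-folder runners (the
// read-only scanner and the read/write puller) that the model starts and
// controls.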
type service interface {
	Serve()
	Stop()
	Jobs() ([]string, []string) // In progress, Queued
	BringToFront(string)
	DelayScan(d time.Duration)
	IndexUpdated() // Remote index was updated notification
	Scan(subs []string) error

	setState(state folderState)
	setError(err error)
	clearError()
	getState() (folderState, time.Time, error)
}

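// Model holds the shared state for all folders and devices: configurations,
// file sets, ignore matchers, statistics references, folder runners and the
// currently open connections.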
type Model struct {
	*suture.Supervisor

	cfg               *config.Wrapper
	db                *leveldb.DB
	finder            *db.BlockFinder
	progressEmitter   *ProgressEmitter
	id                protocol.DeviceID
	shortID           uint64
	cacheIgnoredFiles bool

	deviceName    string
	clientName    string
	clientVersion string

	folderCfgs     map[string]config.FolderConfiguration                  // folder -> cfg
	folderFiles    map[string]*db.FileSet                                 // folder -> files
	folderDevices  map[string][]protocol.DeviceID                         // folder -> deviceIDs
	deviceFolders  map[protocol.DeviceID][]string                         // deviceID -> folders
	deviceStatRefs map[protocol.DeviceID]*stats.DeviceStatisticsReference // deviceID -> statsRef
	folderIgnores  map[string]*ignore.Matcher                             // folder -> matcher object
	folderRunners  map[string]service                                     // folder -> puller or scanner
	folderStatRefs map[string]*stats.FolderStatisticsReference            // folder -> statsRef
	fmut           sync.RWMutex                                           // protects the above

	conn         map[protocol.DeviceID]Connection
	deviceVer    map[protocol.DeviceID]string
	devicePaused map[protocol.DeviceID]bool
	pmut         sync.RWMutex // protects the above

	reqValidationCache map[string]time.Time // folder / file name => time when confirmed to exist
	rvmut              sync.RWMutex         // protects reqValidationCache
}

var (
	symlinkWarning = stdsync.Once{}
)

// NewModel creates and starts a new model. The model starts in read-only
// mode, where it sends index information to connected peers and responds to
// requests for file data without altering the local folder in any way.
func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *leveldb.DB) *Model {
	m := &Model{
		Supervisor: suture.New("model", suture.Spec{
			Log: func(line string) {
				l.Debugln(line)
			},
		}),
		cfg:                cfg,
		db:                 ldb,
		finder:             db.NewBlockFinder(ldb),
		progressEmitter:    NewProgressEmitter(cfg),
		id:                 id,
		shortID:            id.Short(),
		cacheIgnoredFiles:  cfg.Options().CacheIgnoredFiles,
		deviceName:         deviceName,
		clientName:         clientName,
		clientVersion:      clientVersion,
		folderCfgs:         make(map[string]config.FolderConfiguration),
		folderFiles:        make(map[string]*db.FileSet),
		folderDevices:      make(map[string][]protocol.DeviceID),
		deviceFolders:      make(map[protocol.DeviceID][]string),
		deviceStatRefs:     make(map[protocol.DeviceID]*stats.DeviceStatisticsReference),
		folderIgnores:      make(map[string]*ignore.Matcher),
		folderRunners:      make(map[string]service),
		folderStatRefs:     make(map[string]*stats.FolderStatisticsReference),
		conn:               make(map[protocol.DeviceID]Connection),
		deviceVer:          make(map[protocol.DeviceID]string),
		devicePaused:       make(map[protocol.DeviceID]bool),
		reqValidationCache: make(map[string]time.Time),
		fmut:               sync.NewRWMutex(),
		pmut:               sync.NewRWMutex(),
		rvmut:              sync.NewRWMutex(),
	}
	if cfg.Options().ProgressUpdateIntervalS > -1 {
		go m.progressEmitter.Serve()
	}

	return m
}

// StartDeadlockDetector starts a deadlock detector on the model's locks which
// causes panics in case the locks cannot be acquired within the given timeout
// period.
func (m *Model) StartDeadlockDetector(timeout time.Duration) {
	l.Infof("Starting deadlock detector with %v timeout", timeout)
	deadlockDetect(m.fmut, timeout)
	deadlockDetect(m.pmut, timeout)
}

// StartFolderRW starts read/write processing on the current model. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer devices.
func (m *Model) StartFolderRW(folder string) {
	m.fmut.Lock()
	cfg, ok := m.folderCfgs[folder]
	if !ok {
		panic("cannot start nonexistent folder " + folder)
	}

	_, ok = m.folderRunners[folder]
	if ok {
		panic("cannot start already running folder " + folder)
	}
	p := newRWFolder(m, m.shortID, cfg)
	m.folderRunners[folder] = p
	m.fmut.Unlock()

	if len(cfg.Versioning.Type) > 0 {
		factory, ok := versioner.Factories[cfg.Versioning.Type]
		if !ok {
			l.Fatalf("Requested versioning type %q that does not exist", cfg.Versioning.Type)
		}

		versioner := factory(folder, cfg.Path(), cfg.Versioning.Params)
		if service, ok := versioner.(suture.Service); ok {
			// The versioner implements the suture.Service interface, so
			// expects to be run in the background in addition to being called
			// when files are going to be archived.
			m.Add(service)
		}
		p.versioner = versioner
	}

	m.Add(p)
	l.Okln("Ready to synchronize", folder, "(read-write)")
}

// StartFolderRO starts read only processing on the current model. When in
// read only mode the model will announce files to the cluster but not pull in
// any external changes.
func (m *Model) StartFolderRO(folder string) {
	m.fmut.Lock()
	cfg, ok := m.folderCfgs[folder]
	if !ok {
		panic("cannot start nonexistent folder " + folder)
	}
	_, ok = m.folderRunners[folder]
	if ok {
		panic("cannot start already running folder " + folder)
	}
	s := newROFolder(m, folder, time.Duration(cfg.RescanIntervalS)*time.Second)
	m.folderRunners[folder] = s
	m.fmut.Unlock()

	m.Add(s)
	l.Okln("Ready to synchronize", folder, "(read only; no external updates accepted)")
}

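// ConnectionInfo describes the current state of a connection to a device, as
// reported by ConnectionStats.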
type ConnectionInfo struct {
	protocol.Statistics
	Connected     bool
	Paused        bool
	Address       string
	ClientVersion string
	Type          ConnectionType
}

func (info ConnectionInfo) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}{
		"at":            info.At,
		"inBytesTotal":  info.InBytesTotal,
		"outBytesTotal": info.OutBytesTotal,
		"connected":     info.Connected,
		"paused":        info.Paused,
		"address":       info.Address,
		"clientVersion": info.ClientVersion,
		"type":          info.Type.String(),
	})
}

// ConnectionStats returns a map with connection statistics for each connected device.
func (m *Model) ConnectionStats() map[string]interface{} {
	type remoteAddrer interface {
		RemoteAddr() net.Addr
	}

	m.pmut.RLock()
	m.fmut.RLock()

	res := make(map[string]interface{})
	devs := m.cfg.Devices()
	conns := make(map[string]ConnectionInfo, len(devs))
	for device := range devs {
		ci := ConnectionInfo{
			ClientVersion: m.deviceVer[device],
			Paused:        m.devicePaused[device],
		}
		if conn, ok := m.conn[device]; ok {
			ci.Type = conn.Type
			ci.Connected = ok
			ci.Statistics = conn.Statistics()
			if addr := conn.RemoteAddr(); addr != nil {
				ci.Address = addr.String()
			}
		}

		conns[device.String()] = ci
	}

	res["connections"] = conns

	m.fmut.RUnlock()
	m.pmut.RUnlock()

	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now(),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}

// DeviceStatistics returns statistics about each device
func (m *Model) DeviceStatistics() map[string]stats.DeviceStatistics {
	var res = make(map[string]stats.DeviceStatistics)
	for id := range m.cfg.Devices() {
		res[id.String()] = m.deviceStatRef(id).GetStatistics()
	}
	return res
}

// FolderStatistics returns statistics about each folder
func (m *Model) FolderStatistics() map[string]stats.FolderStatistics {
	var res = make(map[string]stats.FolderStatistics)
	for id := range m.cfg.Folders() {
		res[id] = m.folderStatRef(id).GetStatistics()
	}
	return res
}

// Completion returns the completion status, in percent, for the given device
// and folder.
func (m *Model) Completion(device protocol.DeviceID, folder string) float64 {
	var tot int64

	m.fmut.RLock()
	rf, ok := m.folderFiles[folder]
	m.fmut.RUnlock()
	if !ok {
		return 0 // Folder doesn't exist, so we hardly have any of it
	}

	rf.WithGlobalTruncated(func(f db.FileIntf) bool {
		if !f.IsDeleted() {
			tot += f.Size()
		}
		return true
	})

	if tot == 0 {
		return 100 // Folder is empty, so we have all of it
	}

	var need int64
	rf.WithNeedTruncated(device, func(f db.FileIntf) bool {
		if !f.IsDeleted() {
			need += f.Size()
		}
		return true
	})

	res := 100 * (1 - float64(need)/float64(tot))
	l.Debugf("%v Completion(%s, %q): %f (%d / %d)", m, device, folder, res, need, tot)

	return res
}

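// sizeOf returns the number of present files, the number of deleted files and
// the total byte size represented by the given slice of FileInfos.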
func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
	for _, f := range fs {
		fs, de, by := sizeOfFile(f)
		files += fs
		deleted += de
		bytes += by
	}
	return
}

func sizeOfFile(f db.FileIntf) (files, deleted int, bytes int64) {
	if !f.IsDeleted() {
		files++
	} else {
		deleted++
	}
	bytes += f.Size()
	return
}

// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize(folder string) (nfiles, deleted int, bytes int64) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()
	if rf, ok := m.folderFiles[folder]; ok {
		rf.WithGlobalTruncated(func(f db.FileIntf) bool {
			fs, de, by := sizeOfFile(f)
			nfiles += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local folder.
func (m *Model) LocalSize(folder string) (nfiles, deleted int, bytes int64) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()
	if rf, ok := m.folderFiles[folder]; ok {
		rf.WithHaveTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
			if f.IsInvalid() {
				return true
			}
			fs, de, by := sizeOfFile(f)
			nfiles += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// NeedSize returns the number and total size of currently needed files.
func (m *Model) NeedSize(folder string) (nfiles int, bytes int64) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()
	if rf, ok := m.folderFiles[folder]; ok {
		rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
			fs, de, by := sizeOfFile(f)
			nfiles += fs + de
			bytes += by
			return true
		})
	}
	bytes -= m.progressEmitter.BytesCompleted(folder)
	l.Debugf("%v NeedSize(%q): %d %d", m, folder, nfiles, bytes)
	return
}

// NeedFolderFiles returns a paginated list of currently needed files in
// progress, queued, and to be queued on next puller iteration, as well as the
// total number of files currently needed.
func (m *Model) NeedFolderFiles(folder string, page, perpage int) ([]db.FileInfoTruncated, []db.FileInfoTruncated, []db.FileInfoTruncated, int) {
	m.fmut.RLock()
	defer m.fmut.RUnlock()

	total := 0

	rf, ok := m.folderFiles[folder]
	if !ok {
		return nil, nil, nil, 0
	}

	var progress, queued, rest []db.FileInfoTruncated
	var seen map[string]struct{}

	skip := (page - 1) * perpage
	get := perpage

	runner, ok := m.folderRunners[folder]
	if ok {
		allProgressNames, allQueuedNames := runner.Jobs()
		var progressNames, queuedNames []string
		progressNames, skip, get = getChunk(allProgressNames, skip, get)
		queuedNames, skip, get = getChunk(allQueuedNames, skip, get)
		progress = make([]db.FileInfoTruncated, len(progressNames))
		queued = make([]db.FileInfoTruncated, len(queuedNames))
		seen = make(map[string]struct{}, len(progressNames)+len(queuedNames))
		for i, name := range progressNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				progress[i] = f
				seen[name] = struct{}{}
			}
		}
		for i, name := range queuedNames {
			if f, ok := rf.GetGlobalTruncated(name); ok {
				queued[i] = f
				seen[name] = struct{}{}
			}
		}
	}

	rest = make([]db.FileInfoTruncated, 0, perpage)
	rf.WithNeedTruncated(protocol.LocalDeviceID, func(f db.FileIntf) bool {
		total++
		if skip > 0 {
			skip--
			return true
		}
		if get > 0 {
			ft := f.(db.FileInfoTruncated)
			if _, ok := seen[ft.Name]; !ok {
				rest = append(rest, ft)
				get--
			}
		}
		return true
	})

	return progress, queued, rest, total
}

// Index is called when a new device is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo, flags uint32, options []protocol.Option) {
	if flags != 0 {
		l.Warnf("protocol error: unknown flags 0x%x in Index message", flags)
		return
	}

	l.Debugf("IDX(in): %s %q: %d files", deviceID, folder, len(fs))

	if !m.folderSharedWith(folder, deviceID) {
		events.Default.Log(events.FolderRejected, map[string]string{
			"folder": folder,
			"device": deviceID.String(),
		})
		l.Infof("Unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
		return
	}

	m.fmut.RLock()
	cfg := m.folderCfgs[folder]
	files, ok := m.folderFiles[folder]
	runner := m.folderRunners[folder]
	m.fmut.RUnlock()

	if runner != nil {
		// Runner may legitimately not be set if this is the "cleanup" Index
		// message at startup.
		defer runner.IndexUpdated()
	}

	if !ok {
		l.Fatalf("Index for nonexistent folder %q", folder)
	}

	fs = filterIndex(folder, fs, cfg.IgnoreDelete)
	files.Replace(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.LocalVersion(deviceID),
	})
}

// IndexUpdate is called for incremental updates to connected devices' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []protocol.FileInfo, flags uint32, options []protocol.Option) {
	if flags != 0 {
		l.Warnf("protocol error: unknown flags 0x%x in IndexUpdate message", flags)
		return
	}

	l.Debugf("%v IDXUP(in): %s / %q: %d files", m, deviceID, folder, len(fs))

	if !m.folderSharedWith(folder, deviceID) {
		l.Infof("Update for unexpected folder ID %q sent from device %q; ensure that the folder exists and that this device is selected under \"Share With\" in the folder configuration.", folder, deviceID)
		return
	}

	m.fmut.RLock()
	files := m.folderFiles[folder]
	cfg := m.folderCfgs[folder]
	runner, ok := m.folderRunners[folder]
	m.fmut.RUnlock()

	if !ok {
		l.Fatalf("IndexUpdate for nonexistent folder %q", folder)
	}

	fs = filterIndex(folder, fs, cfg.IgnoreDelete)
	files.Update(deviceID, fs)

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"device":  deviceID.String(),
		"folder":  folder,
		"items":   len(fs),
		"version": files.LocalVersion(deviceID),
	})

	runner.IndexUpdated()
}

func (m *Model) folderSharedWith(folder string, deviceID protocol.DeviceID) bool {
	m.fmut.RLock()
	defer m.fmut.RUnlock()
	for _, nfolder := range m.deviceFolders[deviceID] {
		if nfolder == folder {
			return true
		}
	}
	return false
}

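// ClusterConfig is called when the cluster configuration message is received
// from a connected device. It records the remote client name and version and,
// if the device is an introducer, adds the devices and folder shares it
// vouches for to our configuration.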
func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterConfigMessage) {
	m.pmut.Lock()
	if cm.ClientName == "syncthing" {
		m.deviceVer[deviceID] = cm.ClientVersion
	} else {
		m.deviceVer[deviceID] = cm.ClientName + " " + cm.ClientVersion
	}

	event := map[string]string{
		"id":            deviceID.String(),
		"deviceName":    cm.DeviceName,
		"clientName":    cm.ClientName,
		"clientVersion": cm.ClientVersion,
	}

	if conn, ok := m.conn[deviceID]; ok {
		event["type"] = conn.Type.String()
		addr := conn.RemoteAddr()
		if addr != nil {
			event["addr"] = addr.String()
		}
	}

	m.pmut.Unlock()

	events.Default.Log(events.DeviceConnected, event)

	l.Infof(`Device %s client is "%s %s named %s"`, deviceID, cm.ClientName, cm.ClientVersion, cm.DeviceName)

	var changed bool

	device, ok := m.cfg.Devices()[deviceID]
	if ok && device.Name == "" {
		device.Name = cm.DeviceName
		m.cfg.SetDevice(device)
		changed = true
	}

	if m.cfg.Devices()[deviceID].Introducer {
		// This device is an introducer. Go through the announced lists of folders
		// and devices and add what we are missing.

		for _, folder := range cm.Folders {
			// If we don't have this folder yet, skip it. Ideally, we'd
			// offer up something in the GUI to create the folder, but for the
			// moment we only handle folders that we already have.
			if _, ok := m.folderDevices[folder.ID]; !ok {
				continue
			}

		nextDevice:
			for _, device := range folder.Devices {
				var id protocol.DeviceID
				copy(id[:], device.ID)

				if _, ok := m.cfg.Devices()[id]; !ok {
					// The device is currently unknown. Add it to the config.

					addresses := []string{"dynamic"}
					for _, addr := range device.Addresses {
						if addr != "dynamic" {
							addresses = append(addresses, addr)
						}
					}

					l.Infof("Adding device %v to config (vouched for by introducer %v)", id, deviceID)
					newDeviceCfg := config.DeviceConfiguration{
						DeviceID:    id,
						Name:        device.Name,
						Compression: m.cfg.Devices()[deviceID].Compression,
						Addresses:   addresses,
						CertName:    device.CertName,
					}

					// The introducers' introducers are also our introducers.
					if device.Flags&protocol.FlagIntroducer != 0 {
						l.Infof("Device %v is now also an introducer", id)
						newDeviceCfg.Introducer = true
					}

					m.cfg.SetDevice(newDeviceCfg)
					changed = true
				}

				for _, er := range m.deviceFolders[id] {
					if er == folder.ID {
						// We already share the folder with this device, so
						// nothing to do.
						continue nextDevice
					}
				}

				// We don't yet share this folder with this device. Add the
				// device to the sharing list of the folder.

				l.Infof("Adding device %v to share %q (vouched for by introducer %v)", id, folder.ID, deviceID)

				m.deviceFolders[id] = append(m.deviceFolders[id], folder.ID)
				m.folderDevices[folder.ID] = append(m.folderDevices[folder.ID], id)

				folderCfg := m.cfg.Folders()[folder.ID]
				folderCfg.Devices = append(folderCfg.Devices, config.FolderDeviceConfiguration{
					DeviceID: id,
				})
				m.cfg.SetFolder(folderCfg)

				changed = true
			}
		}
	}

	if changed {
		m.cfg.Save()
	}
}

// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(device protocol.DeviceID, err error) {
	l.Infof("Connection to %s closed: %v", device, err)
	events.Default.Log(events.DeviceDisconnected, map[string]string{
		"id":    device.String(),
		"error": err.Error(),
	})

	m.pmut.Lock()
	m.fmut.RLock()
	for _, folder := range m.deviceFolders[device] {
		m.folderFiles[folder].Replace(device, nil)
	}
	m.fmut.RUnlock()

	conn, ok := m.conn[device]
	if ok {
		closeRawConn(conn)
	}
	delete(m.conn, device)
	delete(m.deviceVer, device)
	m.pmut.Unlock()
}

// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset int64, hash []byte, flags uint32, options []protocol.Option, buf []byte) error {
	if offset < 0 {
		return protocol.ErrNoSuchFile
	}

	if !m.folderSharedWith(folder, deviceID) {
		l.Warnf("Request from %s for file %s in unshared folder %q", deviceID, name, folder)
		return protocol.ErrNoSuchFile
	}

	if flags != 0 {
		// We don't currently support or expect any flags.
		return fmt.Errorf("protocol error: unknown flags 0x%x in Request message", flags)
	}

	// Verify that the requested file exists in the local model. We only need
	// to validate this file if we haven't done so recently, so we keep a
	// cache of successful results. "Recently" can be quite a long time, as
	// we remove validation cache entries when we detect local changes. If
	// we're out of sync here and the file actually doesn't exist any more, or
	// has shrunk or something, then we'll anyway get a read error that we
	// pass on to the other side.

	m.rvmut.RLock()
	validated := m.reqValidationCache[folder+"/"+name]
	m.rvmut.RUnlock()

	if time.Since(validated) > reqValidationTime {
		m.fmut.RLock()
		folderFiles, ok := m.folderFiles[folder]
		m.fmut.RUnlock()

		if !ok {
			l.Warnf("Request from %s for file %s in nonexistent folder %q", deviceID, name, folder)
			return protocol.ErrNoSuchFile
		}

		// This call is really expensive for large files, as we load the full
		// block list which may be megabytes and megabytes of data to allocate
		// space for, read, and deserialize.
		lf, ok := folderFiles.Get(protocol.LocalDeviceID, name)
		if !ok {
			return protocol.ErrNoSuchFile
		}

		if lf.IsInvalid() || lf.IsDeleted() {
			l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", m, deviceID, folder, name, offset, len(buf), lf)
			return protocol.ErrInvalid
		}

		if offset > lf.Size() {
			l.Debugf("%v REQ(in; nonexistent): %s: %q o=%d s=%d", m, deviceID, name, offset, len(buf))
			return protocol.ErrNoSuchFile
		}

		m.rvmut.Lock()
		m.reqValidationCache[folder+"/"+name] = time.Now()
		if len(m.reqValidationCache) > reqValidationCacheSize {
			// Don't let the cache grow infinitely
			for name, validated := range m.reqValidationCache {
				if time.Since(validated) > time.Minute {
					delete(m.reqValidationCache, name)
				}
			}

			if len(m.reqValidationCache) > reqValidationCacheSize*9/10 {
				// The first clean didn't help much, we're still over 90%
				// full; we may have synced a lot of files lately. Prune the
				// cache more aggressively by removing every other item so we
				// don't get stuck doing useless cache cleaning.
				i := 0
				for name := range m.reqValidationCache {
					if i%2 == 0 {
						delete(m.reqValidationCache, name)
					}
					i++
				}
			}
		}
		m.rvmut.Unlock()
	}

	if deviceID != protocol.LocalDeviceID {
		l.Debugf("%v REQ(in): %s: %q / %q o=%d s=%d", m, deviceID, folder, name, offset, len(buf))
	}

	m.fmut.RLock()
	fn := filepath.Join(m.folderCfgs[folder].Path(), name)
	m.fmut.RUnlock()

	var reader io.ReaderAt
	var err error
	if info, err := os.Lstat(fn); err == nil && info.Mode()&os.ModeSymlink != 0 {
		target, _, err := symlinks.Read(fn)
		if err != nil {
			return err
		}
		reader = strings.NewReader(target)
	} else {
		// Cannot easily cache fd's because we might need to delete the file
		// at any moment.
		reader, err = os.Open(fn)
		if err != nil {
			return err
		}
		defer reader.(*os.File).Close()
	}

	_, err = reader.ReadAt(buf, offset)
	if err != nil {
		return err
	}

	return nil
}

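// CurrentFolderFile returns the local version of the given file in the given
// folder, if it exists in the local file set.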
func (m *Model) CurrentFolderFile(folder string, file string) (protocol.FileInfo, bool) {
	m.fmut.RLock()
	fs, ok := m.folderFiles[folder]
	m.fmut.RUnlock()

	if !ok {
		return protocol.FileInfo{}, false
	}

	f, ok := fs.Get(protocol.LocalDeviceID, file)
	return f, ok
}

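// CurrentGlobalFile returns the global (cluster-wide latest) version of the
// given file in the given folder, if any.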
func (m *Model) CurrentGlobalFile(folder string, file string) (protocol.FileInfo, bool) {
	m.fmut.RLock()
	fs, ok := m.folderFiles[folder]
	m.fmut.RUnlock()

	if !ok {
		return protocol.FileInfo{}, false
	}

	f, ok := fs.GetGlobal(file)
	return f, ok
}

type cFiler struct {
	m *Model
	r string
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
	return cf.m.CurrentFolderFile(cf.r, file)
}

// ConnectedTo returns true if we are connected to the named device.
func (m *Model) ConnectedTo(deviceID protocol.DeviceID) bool {
	m.pmut.RLock()
	_, ok := m.conn[deviceID]
	m.pmut.RUnlock()
	if ok {
		m.deviceWasSeen(deviceID)
	}
	return ok
}

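// GetIgnores returns the raw lines of the folder's .stignore file together
// with the expanded patterns currently in effect for the folder.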
func (m *Model) GetIgnores(folder string) ([]string, []string, error) {
	var lines []string

	m.fmut.RLock()
	cfg, ok := m.folderCfgs[folder]
	m.fmut.RUnlock()
	if !ok {
		return lines, nil, fmt.Errorf("Folder %s does not exist", folder)
	}

	fd, err := os.Open(filepath.Join(cfg.Path(), ".stignore"))
	if err != nil {
		if os.IsNotExist(err) {
			return lines, nil, nil
		}
		l.Warnln("Loading .stignore:", err)
		return lines, nil, err
	}
	defer fd.Close()

	scanner := bufio.NewScanner(fd)
	for scanner.Scan() {
		lines = append(lines, strings.TrimSpace(scanner.Text()))
	}

	m.fmut.RLock()
	patterns := m.folderIgnores[folder].Patterns()
	m.fmut.RUnlock()

	return lines, patterns, nil
}

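// SetIgnores atomically writes the given lines to the folder's .stignore file
// and triggers a rescan of the folder so the new patterns take effect.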
func (m *Model) SetIgnores(folder string, content []string) error {
	cfg, ok := m.folderCfgs[folder]
	if !ok {
		return fmt.Errorf("Folder %s does not exist", folder)
	}

	path := filepath.Join(cfg.Path(), ".stignore")
	fd, err := osutil.CreateAtomic(path, 0644)
	if err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}

	for _, line := range content {
		fmt.Fprintln(fd, line)
	}

	if err := fd.Close(); err != nil {
		l.Warnln("Saving .stignore:", err)
		return err
	}
	osutil.HideFile(path)

	return m.ScanFolder(folder)
}

// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// folder changes.
func (m *Model) AddConnection(conn Connection) {
	deviceID := conn.ID()

	m.pmut.Lock()
	if _, ok := m.conn[deviceID]; ok {
		panic("add existing device")
	}
	m.conn[deviceID] = conn

	conn.Start()

	cm := m.clusterConfig(deviceID)
	conn.ClusterConfig(cm)

	m.fmut.RLock()
	for _, folder := range m.deviceFolders[deviceID] {
		fs := m.folderFiles[folder]
		go sendIndexes(conn, folder, fs, m.folderIgnores[folder])
	}
	m.fmut.RUnlock()
	m.pmut.Unlock()

	m.deviceWasSeen(deviceID)
}

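// PauseDevice marks the given device as paused and closes any existing
// connection to it.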
func (m *Model) PauseDevice(device protocol.DeviceID) {
	m.pmut.Lock()
	m.devicePaused[device] = true
	_, ok := m.conn[device]
	m.pmut.Unlock()
	if ok {
		m.Close(device, errors.New("device paused"))
	}
	events.Default.Log(events.DevicePaused, map[string]string{"device": device.String()})
}

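// ResumeDevice clears the paused flag for the given device.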
func (m *Model) ResumeDevice(device protocol.DeviceID) {
	m.pmut.Lock()
	m.devicePaused[device] = false
	m.pmut.Unlock()
	events.Default.Log(events.DeviceResumed, map[string]string{"device": device.String()})
}

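// IsPaused returns whether the given device is currently paused.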
func (m *Model) IsPaused(device protocol.DeviceID) bool {
	m.pmut.Lock()
	paused := m.devicePaused[device]
	m.pmut.Unlock()
	return paused
}

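// deviceStatRef returns the statistics reference for the given device,
// creating it if it does not yet exist.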
func (m *Model) deviceStatRef(deviceID protocol.DeviceID) *stats.DeviceStatisticsReference {
	m.fmut.Lock()
	defer m.fmut.Unlock()

	if sr, ok := m.deviceStatRefs[deviceID]; ok {
		return sr
	}

	sr := stats.NewDeviceStatisticsReference(m.db, deviceID.String())
	m.deviceStatRefs[deviceID] = sr
	return sr
}

func (m *Model) deviceWasSeen(deviceID protocol.DeviceID) {
	m.deviceStatRef(deviceID).WasSeen()
}

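// folderStatRef returns the statistics reference for the given folder,
// creating it if it does not yet exist.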
func (m *Model) folderStatRef(folder string) *stats.FolderStatisticsReference {
	m.fmut.Lock()
	defer m.fmut.Unlock()

	sr, ok := m.folderStatRefs[folder]
	if !ok {
		sr = stats.NewFolderStatisticsReference(m.db, folder)
		m.folderStatRefs[folder] = sr
	}
	return sr
}

func (m *Model) receivedFile(folder string, file protocol.FileInfo) {
	m.folderStatRef(folder).ReceivedFile(file.Name, file.IsDeleted())
}

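// sendIndexes sends the full index for the given folder to the connection and
// then keeps watching the local index, sending incremental updates to the
// remote device until an error occurs on the connection.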
func sendIndexes(conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) {
	deviceID := conn.ID()
	name := conn.Name()
	var err error

	l.Debugf("sendIndexes for %s-%s/%q starting", deviceID, name, folder)
	defer l.Debugf("sendIndexes for %s-%s/%q exiting: %v", deviceID, name, folder, err)

	minLocalVer, err := sendIndexTo(true, 0, conn, folder, fs, ignores)

	sub := events.Default.Subscribe(events.LocalIndexUpdated)
	defer events.Default.Unsubscribe(sub)

	for err == nil {
		// While we have sent a localVersion at least equal to the one
		// currently in the database, wait for the local index to update. The
		// local index may update for other folders than the one we are
		// sending for.
		if fs.LocalVersion(protocol.LocalDeviceID) <= minLocalVer {
			sub.Poll(time.Minute)
			continue
		}

		minLocalVer, err = sendIndexTo(false, minLocalVer, conn, folder, fs, ignores)

		// Wait a short amount of time before entering the next loop. If there
		// are continuous changes happening to the local index, this gives us
		// time to batch them up a little.
		time.Sleep(250 * time.Millisecond)
	}
}

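// sendIndexTo sends file infos with a local version higher than minLocalVer
// to the connection, in batches of at most indexBatchSize files or roughly
// indexTargetSize bytes. It returns the highest local version sent and the
// first error encountered, if any.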
func sendIndexTo(initial bool, minLocalVer int64, conn protocol.Connection, folder string, fs *db.FileSet, ignores *ignore.Matcher) (int64, error) {
	deviceID := conn.ID()
	name := conn.Name()
	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	currentBatchSize := 0
	maxLocalVer := int64(0)
	var err error

	fs.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
		f := fi.(protocol.FileInfo)
		if f.LocalVersion <= minLocalVer {
			return true
		}

		if f.LocalVersion > maxLocalVer {
			maxLocalVer = f.LocalVersion
		}

		if ignores.Match(f.Name) || symlinkInvalid(folder, f) {
			l.Debugln("not sending update for ignored/unsupported symlink", f)
			return true
		}

		if len(batch) == indexBatchSize || currentBatchSize > indexTargetSize {
			if initial {
				if err = conn.Index(folder, batch, 0, nil); err != nil {
					return false
				}
				l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (initial index)", deviceID, name, folder, len(batch), currentBatchSize)
				initial = false
			} else {
				if err = conn.IndexUpdate(folder, batch, 0, nil); err != nil {
					return false
				}
				l.Debugf("sendIndexes for %s-%s/%q: %d files (<%d bytes) (batched update)", deviceID, name, folder, len(batch), currentBatchSize)
			}

			batch = make([]protocol.FileInfo, 0, indexBatchSize)
			currentBatchSize = 0
		}

		batch = append(batch, f)
		currentBatchSize += indexPerFileSize + len(f.Blocks)*indexPerBlockSize
		return true
	})

	if initial && err == nil {
		err = conn.Index(folder, batch, 0, nil)
		if err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", deviceID, name, folder, len(batch))
		}
	} else if len(batch) > 0 && err == nil {
		err = conn.IndexUpdate(folder, batch, 0, nil)
		if err == nil {
			l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", deviceID, name, folder, len(batch))
		}
	}

	return maxLocalVer, err
}

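// updateLocals updates the local file set for the folder with the given file
// infos, invalidates the matching Request validation cache entries and emits
// a LocalIndexUpdated event.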
func (m *Model) updateLocals(folder string, fs []protocol.FileInfo) {
	m.fmut.RLock()
	files := m.folderFiles[folder]
	m.fmut.RUnlock()
	files.Update(protocol.LocalDeviceID, fs)

	m.rvmut.Lock()
	for _, f := range fs {
		delete(m.reqValidationCache, folder+"/"+f.Name)
	}
	m.rvmut.Unlock()

	events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
		"folder":  folder,
		"items":   len(fs),
		"version": files.LocalVersion(protocol.LocalDeviceID),
	})
}

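// requestGlobal issues a Request to the given device for the given block of
// the named file and returns the received data.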
func (m *Model) requestGlobal(deviceID protocol.DeviceID, folder, name string, offset int64, size int, hash []byte, flags uint32, options []protocol.Option) ([]byte, error) {
	m.pmut.RLock()
	nc, ok := m.conn[deviceID]
	m.pmut.RUnlock()

	if !ok {
		return nil, fmt.Errorf("requestGlobal: no such device: %s", deviceID)
	}

	l.Debugf("%v REQ(out): %s: %q / %q o=%d s=%d h=%x f=%x op=%s", m, deviceID, folder, name, offset, size, hash, flags, options)

	return nc.Request(folder, name, offset, size, hash, flags, options)
}

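// AddFolder registers the folder configuration with the model, setting up its
// file set, device mappings and ignore matcher. The folder still has to be
// started with StartFolderRW or StartFolderRO before it is scanned or
// synchronized.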
func (m *Model) AddFolder(cfg config.FolderConfiguration) {
	if len(cfg.ID) == 0 {
		panic("cannot add empty folder id")
	}

	m.fmut.Lock()
	m.folderCfgs[cfg.ID] = cfg
	m.folderFiles[cfg.ID] = db.NewFileSet(cfg.ID, m.db)

	m.folderDevices[cfg.ID] = make([]protocol.DeviceID, len(cfg.Devices))
	for i, device := range cfg.Devices {
		m.folderDevices[cfg.ID][i] = device.DeviceID
		m.deviceFolders[device.DeviceID] = append(m.deviceFolders[device.DeviceID], cfg.ID)
	}

	ignores := ignore.New(m.cacheIgnoredFiles)
	if err := ignores.Load(filepath.Join(cfg.Path(), ".stignore")); err != nil && !os.IsNotExist(err) {
		l.Warnln("Loading ignores:", err)
	}
	m.folderIgnores[cfg.ID] = ignores

	m.fmut.Unlock()
}

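// ScanFolders scans all folders in parallel and returns a map from folder ID
// to the error, if any, produced by scanning that folder.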
func ( m * Model ) ScanFolders ( ) map [ string ] error {
2014-09-28 11:39:39 +00:00
m . fmut . RLock ( )
2015-04-12 20:12:01 +00:00
folders := make ( [ ] string , 0 , len ( m . folderCfgs ) )
2014-09-28 11:00:38 +00:00
for folder := range m . folderCfgs {
folders = append ( folders , folder )
2014-03-29 17:53:48 +00:00
}
2014-09-28 11:39:39 +00:00
m . fmut . RUnlock ( )
2014-04-14 07:58:17 +00:00
2015-04-12 20:12:01 +00:00
errors := make ( map [ string ] error , len ( m . folderCfgs ) )
2015-04-22 22:54:31 +00:00
errorsMut := sync . NewMutex ( )
2015-02-11 18:52:59 +00:00
2015-04-22 22:54:31 +00:00
wg := sync . NewWaitGroup ( )
2014-09-28 11:00:38 +00:00
wg . Add ( len ( folders ) )
for _ , folder := range folders {
folder := folder
2014-05-13 23:42:12 +00:00
go func ( ) {
2014-09-28 11:00:38 +00:00
err := m . ScanFolder ( folder )
2014-05-28 04:55:30 +00:00
if err != nil {
2015-02-11 18:52:59 +00:00
errorsMut . Lock ( )
errors [ folder ] = err
errorsMut . Unlock ( )
2015-04-12 20:12:01 +00:00
2015-03-28 14:25:42 +00:00
// Potentially sets the error twice, once in the scanner just
// by doing a check, and once here, if the error returned is
// the same one as returned by CheckFolderHealth. However, a
2015-04-12 20:12:01 +00:00
// duplicate set is handled by setError.
m . fmut . RLock ( )
srv := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
srv . setError ( err )
2014-05-28 04:55:30 +00:00
}
2014-05-13 23:42:12 +00:00
wg . Done ( )
} ( )
2014-04-14 07:58:17 +00:00
}
2014-05-13 23:42:12 +00:00
wg . Wait ( )
2015-02-11 18:52:59 +00:00
return errors
2014-03-29 17:53:48 +00:00
}
2013-12-15 10:43:31 +00:00
2014-09-28 11:00:38 +00:00
func ( m * Model ) ScanFolder ( folder string ) error {
2015-03-27 08:51:18 +00:00
return m . ScanFolderSubs ( folder , nil )
2014-08-11 18:20:01 +00:00
}
2015-03-27 08:51:18 +00:00
func ( m * Model ) ScanFolderSubs ( folder string , subs [ ] string ) error {
2015-06-20 17:26:25 +00:00
m . fmut . Lock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . Unlock ( )
// Folders are added to folderRunners only when they are started. We can't
// scan them before they have started, so that's what we need to check for
// here.
if ! ok {
return errors . New ( "no such folder" )
}
return runner . Scan ( subs )
}
func ( m * Model ) internalScanFolderSubs ( folder string , subs [ ] string ) error {
2015-03-27 08:51:18 +00:00
for i , sub := range subs {
sub = osutil . NativeFilename ( sub )
if p := filepath . Clean ( filepath . Join ( folder , sub ) ) ; ! strings . HasPrefix ( p , folder ) {
return errors . New ( "invalid subpath" )
}
subs [ i ] = sub
2014-08-11 18:20:01 +00:00
}
2014-11-29 21:29:49 +00:00
m . fmut . Lock ( )
2015-03-16 20:14:19 +00:00
fs := m . folderFiles [ folder ]
2014-12-23 12:41:02 +00:00
folderCfg := m . folderCfgs [ folder ]
2014-12-23 09:05:08 +00:00
ignores := m . folderIgnores [ folder ]
2015-03-16 20:14:19 +00:00
runner , ok := m . folderRunners [ folder ]
2014-12-23 12:41:02 +00:00
m . fmut . Unlock ( )
2015-03-16 20:14:19 +00:00
// Folders are added to folderRunners only when they are started. We can't
// scan them before they have started, so that's what we need to check for
// here.
2014-12-23 12:41:02 +00:00
if ! ok {
return errors . New ( "no such folder" )
}
2015-07-16 10:52:36 +00:00
if err := m . CheckFolderHealth ( folder ) ; err != nil {
2015-09-29 16:01:19 +00:00
runner . setError ( err )
l . Infof ( "Stopping folder %s due to error: %s" , folder , err )
2015-07-16 10:52:36 +00:00
return err
}
2015-09-29 16:01:19 +00:00
if err := ignores . Load ( filepath . Join ( folderCfg . Path ( ) , ".stignore" ) ) ; err != nil && ! os . IsNotExist ( err ) {
err = fmt . Errorf ( "loading ignores: %v" , err )
runner . setError ( err )
l . Infof ( "Stopping folder %s due to error: %s" , folder , err )
return err
}
2014-09-04 20:29:53 +00:00
2015-03-08 17:33:41 +00:00
// Required to make sure that we start indexing at a directory we're already
// aware of.
2015-03-27 08:51:18 +00:00
var unifySubs [ ] string
nextSub :
for _ , sub := range subs {
for sub != "" {
2015-07-31 16:52:50 +00:00
parent := filepath . Dir ( sub )
if parent == "." || parent == string ( filepath . Separator ) {
parent = ""
2015-03-27 08:51:18 +00:00
}
2015-07-31 16:52:50 +00:00
if _ , ok = fs . Get ( protocol . LocalDeviceID , parent ) ; ok {
break
2015-03-27 08:51:18 +00:00
}
2015-07-31 16:52:50 +00:00
sub = parent
2015-03-08 17:33:41 +00:00
}
2015-03-27 08:51:18 +00:00
for _ , us := range unifySubs {
if strings . HasPrefix ( sub , us ) {
continue nextSub
}
2015-03-08 17:33:41 +00:00
}
2015-03-27 08:51:18 +00:00
unifySubs = append ( unifySubs , sub )
2015-03-08 17:33:41 +00:00
}
2015-03-27 08:51:18 +00:00
subs = unifySubs
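// Illustrative trace of the unification above (assumed paths, not from the
// original source): with subs "a/b/c" and "a/b/d" where the index already
// has "a" but not "a/b", both entries walk up to "a/b" (the deepest path
// whose parent is already tracked) and the prefix check drops the duplicate:
//
//	subs:    ["a/b/c", "a/b/d"]   index has: "a"
//	walk up: ["a/b",   "a/b"  ]   (parent "a" is known)
//	unify:   ["a/b"]              (prefix duplicate dropped)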
2015-03-08 17:33:41 +00:00
2014-03-29 17:53:48 +00:00
w := & scanner . Walker {
2015-08-26 22:49:06 +00:00
Folder : folderCfg . ID ,
Dir : folderCfg . Path ( ) ,
Subs : subs ,
Matcher : ignores ,
BlockSize : protocol . BlockSize ,
TempNamer : defTempNamer ,
TempLifetime : time . Duration ( m . cfg . Options ( ) . KeepTemporariesH ) * time . Hour ,
CurrentFiler : cFiler { m , folder } ,
MtimeRepo : db . NewVirtualMtimeRepo ( m . db , folderCfg . ID ) ,
IgnorePerms : folderCfg . IgnorePerms ,
AutoNormalize : folderCfg . AutoNormalize ,
Hashers : m . numHashers ( folder ) ,
ShortID : m . shortID ,
ProgressTickIntervalS : folderCfg . ScanProgressIntervalS ,
2014-08-11 18:20:01 +00:00
}
2014-07-15 12:27:46 +00:00
2015-03-16 20:14:19 +00:00
runner . setState ( FolderScanning )
2014-07-15 12:27:46 +00:00
2015-04-12 20:12:01 +00:00
fchan , err := w . Walk ( )
2014-05-04 16:20:25 +00:00
if err != nil {
2015-06-13 18:10:11 +00:00
// The error we get here is likely an OS level error, which might not be
// as readable as our health check errors. Check if we can get a health
// check error first, and use that if it's available.
if ferr := m . CheckFolderHealth ( folder ) ; ferr != nil {
err = ferr
}
2015-04-12 20:12:01 +00:00
runner . setError ( err )
2014-05-04 16:20:25 +00:00
return err
}
2015-04-12 20:12:01 +00:00
2015-04-17 06:19:40 +00:00
batchSizeFiles := 100
batchSizeBlocks := 2048 // about 256 MB
batch := make ( [ ] protocol . FileInfo , 0 , batchSizeFiles )
blocksHandled := 0
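// Rough arithmetic behind the thresholds above (illustrative, assuming the
// standard 128 KiB block size): 2048 blocks of file data per flush is about
//
//	batchSizeBlocks * protocol.BlockSize // 2048 * 131072 ≈ 256 MiB
//
// so large files trip the block limit while runs of small files trip the
// 100-file limit instead.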
2014-07-15 12:27:46 +00:00
for f := range fchan {
2015-04-17 06:19:40 +00:00
if len ( batch ) == batchSizeFiles || blocksHandled > batchSizeBlocks {
2015-03-28 14:25:42 +00:00
if err := m . CheckFolderHealth ( folder ) ; err != nil {
2015-03-30 21:49:16 +00:00
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folder , err )
2015-03-28 14:25:42 +00:00
return err
}
2015-04-17 05:00:56 +00:00
m . updateLocals ( folder , batch )
2014-07-15 12:27:46 +00:00
batch = batch [ : 0 ]
2015-04-17 06:19:40 +00:00
blocksHandled = 0
2014-07-15 12:27:46 +00:00
}
batch = append ( batch , f )
2015-04-17 06:19:40 +00:00
blocksHandled += len ( f . Blocks )
2014-07-15 12:27:46 +00:00
}
2015-03-28 14:25:42 +00:00
if err := m . CheckFolderHealth ( folder ) ; err != nil {
2015-03-30 21:49:16 +00:00
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folder , err )
2015-03-28 14:25:42 +00:00
return err
} else if len ( batch ) > 0 {
2015-05-14 06:56:42 +00:00
m . updateLocals ( folder , batch )
2014-07-15 12:27:46 +00:00
}
batch = batch [ : 0 ]
2014-08-11 18:20:01 +00:00
// TODO: We should limit the Have scanning to start at sub
seenPrefix := false
2015-05-27 21:46:10 +00:00
var iterError error
2015-01-12 13:50:30 +00:00
fs . WithHaveTruncated ( protocol . LocalDeviceID , func ( fi db . FileIntf ) bool {
f := fi . ( db . FileInfoTruncated )
2015-03-27 08:51:18 +00:00
hasPrefix := len ( subs ) == 0
for _ , sub := range subs {
if strings . HasPrefix ( f . Name , sub ) {
hasPrefix = true
break
}
}
// Return true so that we keep iterating, until we get to the part
// of the tree we are interested in. Then return false so we stop
// iterating when we've passed the end of the subtree.
if ! hasPrefix {
2014-08-11 18:20:01 +00:00
return ! seenPrefix
}
2014-09-04 20:29:53 +00:00
2014-08-11 18:20:01 +00:00
seenPrefix = true
2014-11-04 23:22:15 +00:00
if ! f . IsDeleted ( ) {
2014-09-04 20:29:53 +00:00
if f . IsInvalid ( ) {
return true
}
2015-04-17 06:19:40 +00:00
if len ( batch ) == batchSizeFiles {
2015-05-27 21:46:10 +00:00
if err := m . CheckFolderHealth ( folder ) ; err != nil {
iterError = err
return false
}
2015-04-17 05:00:56 +00:00
m . updateLocals ( folder , batch )
2014-07-15 12:27:46 +00:00
batch = batch [ : 0 ]
}
2014-09-04 20:29:53 +00:00
2015-06-14 22:44:24 +00:00
if ignores . Match ( f . Name ) || symlinkInvalid ( folder , f ) {
2014-11-09 04:26:52 +00:00
// The file has been ignored or is an unsupported symlink. Set the invalid bit.
2015-10-03 15:25:21 +00:00
l . Debugln ( "setting invalid bit on ignored" , f )
2014-09-04 20:29:53 +00:00
nf := protocol . FileInfo {
Name : f . Name ,
Flags : f . Flags | protocol . FlagInvalid ,
Modified : f . Modified ,
Version : f . Version , // The file is still the same, so don't bump version
}
batch = append ( batch , nf )
2015-09-30 19:40:04 +00:00
} else if _ , err := osutil . Lstat ( filepath . Join ( folderCfg . Path ( ) , f . Name ) ) ; err != nil {
2015-03-01 09:34:32 +00:00
// File has been deleted.
// We don't specifically verify that the error is
// os.IsNotExist because there is a corner case when a
// directory is suddenly transformed into a file. When that
// happens, files that were in the directory (that is now a
// file) are deleted but will return a confusing error ("not a
// directory") when we try to Lstat() them.
2014-08-12 11:53:31 +00:00
nf := protocol . FileInfo {
Name : f . Name ,
Flags : f . Flags | protocol . FlagDeleted ,
Modified : f . Modified ,
2015-03-25 21:37:35 +00:00
Version : f . Version . Update ( m . shortID ) ,
2014-08-12 11:53:31 +00:00
}
batch = append ( batch , nf )
2014-07-15 12:27:46 +00:00
}
}
return true
} )
2015-05-27 21:46:10 +00:00
if iterError != nil {
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folder , iterError )
return iterError
}
if err := m . CheckFolderHealth ( folder ) ; err != nil {
l . Infof ( "Stopping folder %s mid-scan due to folder error: %s" , folder , err )
return err
} else if len ( batch ) > 0 {
2015-04-17 05:00:56 +00:00
m . updateLocals ( folder , batch )
2014-07-15 12:27:46 +00:00
}
2015-04-12 20:12:01 +00:00
runner . setState ( FolderIdle )
2014-05-04 16:20:25 +00:00
return nil
2014-03-29 17:53:48 +00:00
}
2015-05-01 12:30:17 +00:00
func ( m * Model ) DelayScan ( folder string , next time . Duration ) {
m . fmut . Lock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . Unlock ( )
if ! ok {
return
}
runner . DelayScan ( next )
}
2015-04-29 18:46:32 +00:00
// numHashers returns the number of hasher routines to use for a given folder,
// taking into account configuration and available CPU cores.
func ( m * Model ) numHashers ( folder string ) int {
m . fmut . Lock ( )
folderCfg := m . folderCfgs [ folder ]
numFolders := len ( m . folderCfgs )
m . fmut . Unlock ( )
if folderCfg . Hashers > 0 {
// Specific value set in the config, use that.
return folderCfg . Hashers
}
2015-09-01 08:05:06 +00:00
if runtime . GOOS == "windows" || runtime . GOOS == "darwin" {
// Interactive operating systems; don't load the system too heavily by
// default.
return 1
}
// For other operating systems and architectures, let's try to get some
// work done... Divide the available CPU cores among the configured
// folders.
2015-04-29 18:46:32 +00:00
if perFolder := runtime . GOMAXPROCS ( - 1 ) / numFolders ; perFolder > 0 {
return perFolder
}
return 1
}
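// Illustrative arithmetic (assumed values, not from the original source):
// with runtime.GOMAXPROCS(-1) == 8 and three configured folders,
//
//	perFolder := 8 / 3 // == 2
//
// so each folder scans with two hashers on e.g. Linux, while Windows and
// Mac default to one to keep interactive systems responsive.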
2014-09-28 11:00:38 +00:00
// clusterConfig returns a ClusterConfigMessage that is correct for the given peer device
func ( m * Model ) clusterConfig ( device protocol . DeviceID ) protocol . ClusterConfigMessage {
2014-04-13 13:28:26 +00:00
cm := protocol . ClusterConfigMessage {
2015-09-26 13:58:53 +00:00
DeviceName : m . deviceName ,
2014-05-15 03:26:55 +00:00
ClientName : m . clientName ,
ClientVersion : m . clientVersion ,
2014-04-13 13:28:26 +00:00
}
2014-09-28 11:39:39 +00:00
m . fmut . RLock ( )
2014-09-28 11:00:38 +00:00
for _ , folder := range m . deviceFolders [ device ] {
2015-09-27 11:11:34 +00:00
folderCfg := m . cfg . Folders ( ) [ folder ]
2014-09-28 11:00:38 +00:00
cr := protocol . Folder {
ID : folder ,
2014-01-09 12:58:35 +00:00
}
2015-09-27 11:11:34 +00:00
var flags uint32
if folderCfg . ReadOnly {
flags |= protocol . FlagFolderReadOnly
}
if folderCfg . IgnorePerms {
flags |= protocol . FlagFolderIgnorePerms
}
if folderCfg . IgnoreDelete {
flags |= protocol . FlagFolderIgnoreDelete
}
cr . Flags = flags
2014-09-28 11:00:38 +00:00
for _ , device := range m . folderDevices [ folder ] {
// DeviceID is a value type, but with an underlying array. Copy it
// so we don't grab aliases to the same array later on in device[:]
device := device
2015-09-27 10:39:02 +00:00
// TODO: Set read only bit when relevant, and when we have per device
// access controls.
deviceCfg := m . cfg . Devices ( ) [ device ]
2014-09-28 11:00:38 +00:00
cn := protocol . Device {
2015-09-27 10:39:02 +00:00
ID : device [ : ] ,
Name : deviceCfg . Name ,
Addresses : deviceCfg . Addresses ,
Compression : uint32 ( deviceCfg . Compression ) ,
CertName : deviceCfg . CertName ,
Flags : protocol . FlagShareTrusted ,
2014-09-23 14:04:20 +00:00
}
2015-09-27 10:39:02 +00:00
if deviceCfg . Introducer {
2014-09-23 14:04:20 +00:00
cn . Flags |= protocol . FlagIntroducer
}
2014-09-28 11:00:38 +00:00
cr . Devices = append ( cr . Devices , cn )
2014-01-09 12:58:35 +00:00
}
2014-09-28 11:00:38 +00:00
cm . Folders = append ( cm . Folders , cr )
2013-12-30 01:33:57 +00:00
}
2014-09-28 11:39:39 +00:00
m . fmut . RUnlock ( )
2014-04-13 13:28:26 +00:00
return cm
2013-12-30 01:33:57 +00:00
}
2014-04-14 07:58:17 +00:00
2015-04-12 20:12:01 +00:00
func ( m * Model ) State ( folder string ) ( string , time . Time , error ) {
2015-03-16 20:14:19 +00:00
m . fmut . RLock ( )
runner , ok := m . folderRunners [ folder ]
m . fmut . RUnlock ( )
if ! ok {
2015-04-12 20:12:01 +00:00
// The returned error should be an actual folder error, so returning
// errors.New("does not exist") or similar here would be
// inappropriate.
return "" , time . Time { } , nil
2015-03-16 20:14:19 +00:00
}
2015-04-12 20:12:01 +00:00
state , changed , err := runner . getState ( )
return state . String ( ) , changed , err
2014-04-14 07:58:17 +00:00
}
2014-06-16 08:47:02 +00:00
2014-09-28 11:00:38 +00:00
func ( m * Model ) Override ( folder string ) {
2014-09-28 11:39:39 +00:00
m . fmut . RLock ( )
2015-04-18 13:41:47 +00:00
fs , ok := m . folderFiles [ folder ]
2015-03-16 20:14:19 +00:00
runner := m . folderRunners [ folder ]
2014-09-28 11:39:39 +00:00
m . fmut . RUnlock ( )
2015-04-18 13:41:47 +00:00
if ! ok {
return
}
2014-06-23 09:52:13 +00:00
2015-03-16 20:14:19 +00:00
runner . setState ( FolderScanning )
2014-07-15 15:54:00 +00:00
batch := make ( [ ] protocol . FileInfo , 0 , indexBatchSize )
2015-01-12 13:50:30 +00:00
fs . WithNeed ( protocol . LocalDeviceID , func ( fi db . FileIntf ) bool {
2014-08-12 11:53:31 +00:00
need := fi . ( protocol . FileInfo )
2014-07-15 15:54:00 +00:00
if len ( batch ) == indexBatchSize {
2015-06-18 08:37:50 +00:00
m . updateLocals ( folder , batch )
2014-07-15 15:54:00 +00:00
batch = batch [ : 0 ]
}
2015-01-06 21:12:45 +00:00
have , ok := fs . Get ( protocol . LocalDeviceID , need . Name )
if ! ok || have . Name != need . Name {
2014-06-16 08:47:02 +00:00
// We are missing the file
2014-07-15 15:54:00 +00:00
need . Flags |= protocol . FlagDeleted
need . Blocks = nil
2015-04-02 08:21:11 +00:00
need . Version = need . Version . Update ( m . shortID )
2014-06-16 08:47:02 +00:00
} else {
// We have the file, replace with our version
2015-04-02 08:21:11 +00:00
have . Version = have . Version . Merge ( need . Version ) . Update ( m . shortID )
2014-07-15 15:54:00 +00:00
need = have
2014-06-16 08:47:02 +00:00
}
2014-07-15 15:54:00 +00:00
need . LocalVersion = 0
batch = append ( batch , need )
return true
} )
if len ( batch ) > 0 {
2015-06-18 08:37:50 +00:00
m . updateLocals ( folder , batch )
2014-06-16 08:47:02 +00:00
}
2015-03-16 20:14:19 +00:00
runner . setState ( FolderIdle )
2014-06-16 08:47:02 +00:00
}
2014-06-19 22:27:54 +00:00
2014-09-28 11:00:38 +00:00
// CurrentLocalVersion returns the change version for the given folder.
// This is guaranteed to increment if the contents of the local folder have
2014-09-27 12:44:15 +00:00
// changed.
2015-06-24 07:52:38 +00:00
func ( m * Model ) CurrentLocalVersion ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 11:00:38 +00:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 12:44:15 +00:00
if ! ok {
2014-10-12 08:36:04 +00:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 07:52:38 +00:00
return 0 , false
2014-09-27 12:44:15 +00:00
}
2015-06-24 07:52:38 +00:00
return fs . LocalVersion ( protocol . LocalDeviceID ) , true
2014-09-27 12:44:15 +00:00
}
2014-09-28 11:00:38 +00:00
// RemoteLocalVersion returns the change version for the given folder, as
2014-09-27 12:44:15 +00:00
// sent by remote peers. This is guaranteed to increment if the contents of
2014-09-28 11:00:38 +00:00
// the remote or global folder have changed.
2015-06-24 07:52:38 +00:00
func ( m * Model ) RemoteLocalVersion ( folder string ) ( int64 , bool ) {
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
defer m . fmut . RUnlock ( )
2014-07-15 15:54:00 +00:00
2014-09-28 11:00:38 +00:00
fs , ok := m . folderFiles [ folder ]
2014-07-15 15:54:00 +00:00
if ! ok {
2014-10-24 12:54:36 +00:00
// The folder might not exist, since this can be called with a user
// specified folder name from the REST interface.
2015-06-24 07:52:38 +00:00
return 0 , false
2014-07-15 15:54:00 +00:00
}
2015-01-18 01:12:06 +00:00
var ver int64
2014-09-28 11:00:38 +00:00
for _ , n := range m . folderDevices [ folder ] {
2014-07-15 15:54:00 +00:00
ver += fs . LocalVersion ( n )
2014-06-19 22:27:54 +00:00
}
2015-06-24 07:52:38 +00:00
return ver , true
2014-06-19 22:27:54 +00:00
}
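// Worked example (assumed numbers, not from the original source): with two
// devices sharing the folder whose index local versions are 10 and 32,
// RemoteLocalVersion returns 10 + 32 == 42; any index update received from
// either device increases the sum.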
2014-09-27 12:44:15 +00:00
2015-02-07 10:52:42 +00:00
func ( m * Model ) GlobalDirectoryTree ( folder , prefix string , levels int , dirsonly bool ) map [ string ] interface { } {
m . fmut . RLock ( )
files , ok := m . folderFiles [ folder ]
m . fmut . RUnlock ( )
if ! ok {
return nil
}
output := make ( map [ string ] interface { } )
sep := string ( filepath . Separator )
prefix = osutil . NativeFilename ( prefix )
if prefix != "" && ! strings . HasSuffix ( prefix , sep ) {
prefix = prefix + sep
}
files . WithPrefixedGlobalTruncated ( prefix , func ( fi db . FileIntf ) bool {
f := fi . ( db . FileInfoTruncated )
if f . IsInvalid ( ) || f . IsDeleted ( ) || f . Name == prefix {
return true
}
f . Name = strings . Replace ( f . Name , prefix , "" , 1 )
var dir , base string
if f . IsDirectory ( ) && ! f . IsSymlink ( ) {
dir = f . Name
} else {
dir = filepath . Dir ( f . Name )
base = filepath . Base ( f . Name )
}
if levels > - 1 && strings . Count ( f . Name , sep ) > levels {
return true
}
last := output
if dir != "." {
for _ , path := range strings . Split ( dir , sep ) {
directory , ok := last [ path ]
if ! ok {
newdir := make ( map [ string ] interface { } )
last [ path ] = newdir
last = newdir
} else {
last = directory . ( map [ string ] interface { } )
}
}
}
if ! dirsonly && base != "" {
2015-04-20 13:37:04 +00:00
last [ base ] = [ ] interface { } {
time . Unix ( f . Modified , 0 ) , f . Size ( ) ,
2015-02-07 10:52:42 +00:00
}
}
return true
} )
return output
}
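// Illustrative sketch of the returned shape (assumed file names, not from
// the original source): for a folder containing "docs/readme.txt",
// GlobalDirectoryTree(folder, "", -1, false) yields roughly
//
//	map[string]interface{}{
//		"docs": map[string]interface{}{
//			"readme.txt": []interface{}{time.Unix(modified, 0), size},
//		},
//	}
//
// with directories as nested maps and files as [modified time, size] pairs.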
2015-03-17 17:52:50 +00:00
func ( m * Model ) Availability ( folder , file string ) [ ] protocol . DeviceID {
2014-10-31 23:41:18 +00:00
// Acquire this lock first, as the value returned from folderFiles can
2014-12-28 23:11:32 +00:00
// get heavily modified on Close()
2014-10-31 23:41:18 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
2014-11-03 21:02:55 +00:00
m . fmut . RLock ( )
2014-09-28 11:00:38 +00:00
fs , ok := m . folderFiles [ folder ]
2014-11-03 21:02:55 +00:00
m . fmut . RUnlock ( )
2014-09-27 12:44:15 +00:00
if ! ok {
return nil
}
2014-10-31 23:41:18 +00:00
availableDevices := [ ] protocol . DeviceID { }
for _ , device := range fs . Availability ( file ) {
2015-06-28 15:05:29 +00:00
_ , ok := m . conn [ device ]
2014-10-31 23:41:18 +00:00
if ok {
availableDevices = append ( availableDevices , device )
}
}
return availableDevices
2014-09-27 12:44:15 +00:00
}
2015-04-28 20:32:10 +00:00
// BringToFront bumps the given file's priority in the job queue.
2014-12-30 08:35:21 +00:00
func ( m * Model ) BringToFront ( folder , file string ) {
2014-12-01 19:23:06 +00:00
m . pmut . RLock ( )
defer m . pmut . RUnlock ( )
runner , ok := m . folderRunners [ folder ]
if ok {
2014-12-30 08:35:21 +00:00
runner . BringToFront ( file )
2014-12-01 19:23:06 +00:00
}
}
2015-04-28 20:32:10 +00:00
// CheckFolderHealth checks the folder for common errors and returns the
// current folder error, or nil if the folder is healthy.
2015-03-28 14:25:42 +00:00
func ( m * Model ) CheckFolderHealth ( id string ) error {
2015-09-05 07:43:07 +00:00
if minFree := m . cfg . Options ( ) . MinHomeDiskFreePct ; minFree > 0 {
2015-08-09 08:35:48 +00:00
if free , err := osutil . DiskFreePercentage ( m . cfg . ConfigPath ( ) ) ; err == nil && free < minFree {
2015-09-06 06:29:10 +00:00
return errors . New ( "home disk has insufficient free space" )
2015-08-09 08:35:48 +00:00
}
2015-07-16 10:52:36 +00:00
}
2015-03-28 14:25:42 +00:00
folder , ok := m . cfg . Folders ( ) [ id ]
if ! ok {
2015-04-12 20:12:01 +00:00
return errors . New ( "folder does not exist" )
2015-03-28 14:25:42 +00:00
}
2015-04-05 20:52:22 +00:00
fi , err := os . Stat ( folder . Path ( ) )
2015-09-06 06:29:10 +00:00
v , ok := m . CurrentLocalVersion ( id )
indexHasFiles := ok && v > 0
if indexHasFiles {
// There are files in the folder according to the index, so it must
// have existed and had a correct marker at some point. Verify that
// this is still the case.
switch {
case err != nil || ! fi . IsDir ( ) :
2015-04-12 20:12:01 +00:00
err = errors . New ( "folder path missing" )
2015-09-06 06:29:10 +00:00
case ! folder . HasMarker ( ) :
2015-04-12 20:12:01 +00:00
err = errors . New ( "folder marker missing" )
2015-09-06 06:29:10 +00:00
case ! folder . ReadOnly :
// Check for free space, if it isn't a master folder. We aren't
// going to change the contents of master folders, so we don't
// care about the amount of free space there.
if free , errDfp := osutil . DiskFreePercentage ( folder . Path ( ) ) ; errDfp == nil && free < folder . MinDiskFreePct {
err = errors . New ( "insufficient free space" )
}
2015-03-28 14:25:42 +00:00
}
2015-09-06 06:29:10 +00:00
} else {
// It's a blank folder, so this may be the first time we're looking at
// it. Attempt to create and tag with our marker as appropriate.
if os . IsNotExist ( err ) {
err = osutil . MkdirAll ( folder . Path ( ) , 0700 )
}
if err == nil && ! folder . HasMarker ( ) {
2015-03-28 14:25:42 +00:00
err = folder . CreateMarker ( )
}
}
2015-04-12 20:12:01 +00:00
m . fmut . RLock ( )
2015-04-25 06:27:45 +00:00
runner , runnerExists := m . folderRunners [ folder . ID ]
2015-04-12 20:12:01 +00:00
m . fmut . RUnlock ( )
2015-04-25 06:27:45 +00:00
var oldErr error
if runnerExists {
_ , _ , oldErr = runner . getState ( )
}
2015-03-28 14:25:42 +00:00
2015-04-12 20:12:01 +00:00
if err != nil {
if oldErr != nil && oldErr . Error ( ) != err . Error ( ) {
l . Infof ( "Folder %q error changed: %q -> %q" , folder . ID , oldErr , err )
} else if oldErr == nil {
l . Warnf ( "Stopping folder %q - %v" , folder . ID , err )
2015-03-28 14:25:42 +00:00
}
2015-04-25 06:27:45 +00:00
if runnerExists {
runner . setError ( err )
}
2015-04-12 20:12:01 +00:00
} else if oldErr != nil {
l . Infof ( "Folder %q error is cleared, restarting" , folder . ID )
2015-04-25 06:27:45 +00:00
if runnerExists {
2015-06-13 18:10:11 +00:00
runner . clearError ( )
2015-04-25 06:27:45 +00:00
}
2015-03-28 14:25:42 +00:00
}
return err
}
2015-06-21 07:35:41 +00:00
func ( m * Model ) ResetFolder ( folder string ) {
l . Infof ( "Cleaning data for folder %q" , folder )
db . DropFolder ( m . db , folder )
2015-04-03 18:06:03 +00:00
}
2014-09-27 12:44:15 +00:00
func ( m * Model ) String ( ) string {
return fmt . Sprintf ( "model@%p" , m )
}
2014-10-13 12:43:01 +00:00
2015-06-03 07:47:39 +00:00
func ( m * Model ) VerifyConfiguration ( from , to config . Configuration ) error {
return nil
}
func ( m * Model ) CommitConfiguration ( from , to config . Configuration ) bool {
// TODO: This should not use reflect, and should take more care to try to handle stuff without restart.
2015-07-22 07:02:55 +00:00
// Go through the folder configs and figure out if we need to restart or not.
fromFolders := mapFolders ( from . Folders )
toFolders := mapFolders ( to . Folders )
2015-07-23 14:13:53 +00:00
for folderID , cfg := range toFolders {
2015-07-22 07:02:55 +00:00
if _ , ok := fromFolders [ folderID ] ; ! ok {
2015-07-23 14:13:53 +00:00
// A folder was added.
2015-10-03 15:25:21 +00:00
l . Debugln ( m , "adding folder" , folderID )
2015-07-23 14:13:53 +00:00
m . AddFolder ( cfg )
if cfg . ReadOnly {
m . StartFolderRO ( folderID )
} else {
m . StartFolderRW ( folderID )
}
// Drop connections to all devices that can now share the new
// folder.
m . pmut . Lock ( )
for _ , dev := range cfg . DeviceIDs ( ) {
2015-06-28 15:05:29 +00:00
if conn , ok := m . conn [ dev ] ; ok {
2015-07-23 14:13:53 +00:00
closeRawConn ( conn )
}
2015-07-22 07:02:55 +00:00
}
2015-07-23 14:13:53 +00:00
m . pmut . Unlock ( )
2015-07-22 07:02:55 +00:00
}
2015-06-03 07:47:39 +00:00
}
2015-07-22 07:02:55 +00:00
for folderID , fromCfg := range fromFolders {
toCfg , ok := toFolders [ folderID ]
if ! ok {
// A folder was removed. Requires restart.
2015-10-03 15:25:21 +00:00
l . Debugln ( m , "requires restart, removing folder" , folderID )
2015-07-22 07:02:55 +00:00
return false
}
// This folder exists on both sides. Compare the device lists, as we
// can handle adding a device (but not currently removing one).
fromDevs := mapDevices ( fromCfg . DeviceIDs ( ) )
toDevs := mapDevices ( toCfg . DeviceIDs ( ) )
for dev := range fromDevs {
if _ , ok := toDevs [ dev ] ; ! ok {
// A device was removed. Requires restart.
2015-10-03 15:25:21 +00:00
l . Debugln ( m , "requires restart, removing device" , dev , "from folder" , folderID )
2015-07-22 07:02:55 +00:00
return false
}
}
for dev := range toDevs {
if _ , ok := fromDevs [ dev ] ; ! ok {
// A device was added. Handle it!
m . fmut . Lock ( )
m . pmut . Lock ( )
m . folderCfgs [ folderID ] = toCfg
m . folderDevices [ folderID ] = append ( m . folderDevices [ folderID ] , dev )
m . deviceFolders [ dev ] = append ( m . deviceFolders [ dev ] , folderID )
// If we already have a connection to this device, we should
// disconnect it so that we start sharing the folder with it.
// We close the underlying connection and let the normal error
// handling kick in to clean up and reconnect.
2015-06-28 15:05:29 +00:00
if conn , ok := m . conn [ dev ] ; ok {
2015-07-22 07:02:55 +00:00
closeRawConn ( conn )
}
m . pmut . Unlock ( )
m . fmut . Unlock ( )
}
}
// Check if anything else differs, apart from the device list.
fromCfg . Devices = nil
toCfg . Devices = nil
if ! reflect . DeepEqual ( fromCfg , toCfg ) {
2015-10-03 15:25:21 +00:00
l . Debugln ( m , "requires restart, folder" , folderID , "configuration differs" )
2015-07-22 07:02:55 +00:00
return false
}
2015-06-03 07:47:39 +00:00
}
2015-07-22 07:02:55 +00:00
// Removing a device requires a restart.
toDevs := mapDeviceCfgs ( to . Devices )
2015-06-03 07:47:39 +00:00
for _ , dev := range from . Devices {
if _ , ok := toDevs [ dev . DeviceID ] ; ! ok {
2015-10-03 15:25:21 +00:00
l . Debugln ( m , "requires restart, device" , dev . DeviceID , "was removed" )
2015-06-03 07:47:39 +00:00
return false
}
}
// All of the generic options require a restart.
if ! reflect . DeepEqual ( from . Options , to . Options ) {
2015-10-03 15:25:21 +00:00
l . Debugln ( m , "requires restart, options differ" )
2015-06-03 07:47:39 +00:00
return false
}
return true
}
2015-07-22 07:02:55 +00:00
// mapFolders returns a map of folder ID to folder configuration for the given
// slice of folder configurations.
func mapFolders ( folders [ ] config . FolderConfiguration ) map [ string ] config . FolderConfiguration {
m := make ( map [ string ] config . FolderConfiguration , len ( folders ) )
for _ , cfg := range folders {
m [ cfg . ID ] = cfg
}
return m
}
// mapDevices returns a map of device ID to nothing for the given slice of
// device IDs.
func mapDevices ( devices [ ] protocol . DeviceID ) map [ protocol . DeviceID ] struct { } {
m := make ( map [ protocol . DeviceID ] struct { } , len ( devices ) )
for _ , dev := range devices {
m [ dev ] = struct { } { }
}
return m
}
// mapDeviceCfgs returns a map of device ID to nothing for the given slice of
// device configurations.
func mapDeviceCfgs ( devices [ ] config . DeviceConfiguration ) map [ protocol . DeviceID ] struct { } {
m := make ( map [ protocol . DeviceID ] struct { } , len ( devices ) )
for _ , dev := range devices {
m [ dev . DeviceID ] = struct { } { }
}
return m
}
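// Illustrative sketch (hypothetical folder ID, not from the original
// source): these helpers turn configuration slices into lookup maps so
// CommitConfiguration can diff the old and new configs by key, e.g.
//
//	fromFolders := mapFolders(from.Folders)
//	if _, ok := fromFolders["default"]; !ok {
//		// the "default" folder did not exist before this config change
//	}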
2015-07-21 11:14:33 +00:00
func filterIndex ( folder string , fs [ ] protocol . FileInfo , dropDeletes bool ) [ ] protocol . FileInfo {
for i := 0 ; i < len ( fs ) ; {
if fs [ i ] . Flags &^ protocol . FlagsAll != 0 {
2015-10-03 15:25:21 +00:00
l . Debugln ( "dropping update for file with unknown bits set" , fs [ i ] )
2015-07-21 11:14:33 +00:00
fs [ i ] = fs [ len ( fs ) - 1 ]
fs = fs [ : len ( fs ) - 1 ]
} else if fs [ i ] . IsDeleted ( ) && dropDeletes {
2015-10-03 15:25:21 +00:00
l . Debugln ( "dropping update for undesired delete" , fs [ i ] )
2015-07-21 11:14:33 +00:00
fs [ i ] = fs [ len ( fs ) - 1 ]
fs = fs [ : len ( fs ) - 1 ]
} else if symlinkInvalid ( folder , fs [ i ] ) {
2015-10-03 15:25:21 +00:00
l . Debugln ( "dropping update for unsupported symlink" , fs [ i ] )
2015-07-21 11:14:33 +00:00
fs [ i ] = fs [ len ( fs ) - 1 ]
fs = fs [ : len ( fs ) - 1 ]
} else {
i ++
}
}
return fs
}
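// Illustrative sketch (assumed entries, not from the original source) of
// the swap-with-last deletion idiom used above: the unwanted element is
// overwritten by the final element and the slice is truncated, so the
// filter runs in O(n) without preserving order.
//
//	fs := []protocol.FileInfo{
//		{Name: "keep"},
//		{Name: "gone", Flags: protocol.FlagDeleted},
//	}
//	fs = filterIndex("default", fs, true) // dropDeletes: "gone" is removed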
2015-06-14 22:44:24 +00:00
func symlinkInvalid ( folder string , fi db . FileIntf ) bool {
if ! symlinks . Supported && fi . IsSymlink ( ) && ! fi . IsInvalid ( ) && ! fi . IsDeleted ( ) {
symlinkWarning . Do ( func ( ) {
2015-04-28 15:34:55 +00:00
l . Warnln ( "Symlinks are disabled, unsupported or require Administrator privileges. This might cause your folder to appear out of sync." )
2014-11-09 04:26:52 +00:00
} )
2015-06-14 22:44:24 +00:00
// We need to type switch on the concrete type to be able to access its fields.
var name string
switch fi := fi . ( type ) {
case protocol . FileInfo :
name = fi . Name
case db . FileInfoTruncated :
name = fi . Name
}
l . Infoln ( "Unsupported symlink" , name , "in folder" , folder )
2014-11-09 04:26:52 +00:00
return true
}
return false
}
2015-04-25 21:53:44 +00:00
// Skips `skip` elements and retrieves up to `get` elements from a given slice.
// Returns the resulting slice, plus how many elements are left to skip or
// copy to satisfy the values that were provided, in case the slice is not
// big enough.
func getChunk ( data [ ] string , skip , get int ) ( [ ] string , int , int ) {
l := len ( data )
if l <= skip {
return [ ] string { } , skip - l , get
} else if l < skip + get {
return data [ skip : l ] , 0 , get - ( l - skip )
}
return data [ skip : skip + get ] , 0 , 0
}
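// Worked example (not from the original source): getChunk paginates across
// slices, reporting how much of the requested window is still outstanding.
//
//	page, skipLeft, getLeft := getChunk([]string{"a", "b", "c"}, 1, 5)
//	// page == ["b", "c"], skipLeft == 0, getLeft == 3: the next slice
//	// should skip nothing and contribute up to three more elements.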
2015-07-22 07:02:55 +00:00
func closeRawConn ( conn io . Closer ) error {
if conn , ok := conn . ( * tls . Conn ) ; ok {
// If the underlying connection is a *tls.Conn, Close() does more
// than it says on the tin. Specifically, it sends a TLS alert
// message, which might block forever if the connection is dead
// and we don't have a deadline set.
conn . SetWriteDeadline ( time . Now ( ) . Add ( 250 * time . Millisecond ) )
}
return conn . Close ( )
}