// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package model
import (
	"context"
	"fmt"
	"math/rand"
	"path/filepath"
	"sort"
	"sync/atomic"
	"time"

	"github.com/pkg/errors"

	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/locations"
	"github.com/syncthing/syncthing/lib/osutil"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/scanner"
	"github.com/syncthing/syncthing/lib/stats"
	"github.com/syncthing/syncthing/lib/svcutil"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/util"
	"github.com/syncthing/syncthing/lib/versioner"
	"github.com/syncthing/syncthing/lib/watchaggregator"
)
// Arbitrary limit that triggers a warning on kqueue systems
const kqueueItemCountThreshold = 10000
type folder struct {
stateTracker
	config.FolderConfiguration
	*stats.FolderStatisticsReference
	ioLimiter *util.Semaphore

	localFlags uint32

	model         *model
	shortID       protocol.ShortID
	fset          *db.FileSet
	ignores       *ignore.Matcher
	mtimefs       fs.Filesystem
	modTimeWindow time.Duration

	ctx  context.Context // used internally, only accessible on serve lifetime
	done chan struct{}   // used externally, accessible regardless of serve

	scanInterval           time.Duration
	scanTimer              *time.Timer
	scanDelay              chan time.Duration
	initialScanFinished    chan struct{}
	scanScheduled          chan struct{}
	versionCleanupInterval time.Duration
	versionCleanupTimer    *time.Timer

	pullScheduled chan struct{}
	pullPause     time.Duration
	pullFailTimer *time.Timer

	scanErrors []FileError
	pullErrors []FileError
	errorsMut  sync.Mutex

	doInSyncChan chan syncRequest

	forcedRescanRequested chan struct{}
	forcedRescanPaths     map[string]struct{}
	forcedRescanPathsMut  sync.Mutex

	watchCancel      context.CancelFunc
	watchChan        chan []string
	restartWatchChan chan struct{}
	watchErr         error
	watchMut         sync.Mutex

	puller    puller
	versioner versioner.Versioner

	warnedKqueue bool
}

type syncRequest struct {
	fn  func() error
	err chan error
}

type puller interface {
	pull() (bool, error) // true when successful and should not be retried
}
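
// newFolder constructs the folder state common to all folder types: timers,
// channels and values derived from the configuration. The returned folder is
// intended to be embedded by the concrete folder implementations.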
func newFolder(model *model, fset *db.FileSet, ignores *ignore.Matcher, cfg config.FolderConfiguration, evLogger events.Logger, ioLimiter *util.Semaphore, ver versioner.Versioner) folder {
	f := folder{
		stateTracker:              newStateTracker(cfg.ID, evLogger),
		FolderConfiguration:       cfg,
		FolderStatisticsReference: stats.NewFolderStatisticsReference(model.db, cfg.ID),
		ioLimiter:                 ioLimiter,

		model:         model,
		shortID:       model.shortID,
		fset:          fset,
		ignores:       ignores,
		mtimefs:       cfg.Filesystem(fset),
		modTimeWindow: cfg.ModTimeWindow(),

		done: make(chan struct{}),

		scanInterval:           time.Duration(cfg.RescanIntervalS) * time.Second,
		scanTimer:              time.NewTimer(0), // The first scan should be done immediately.
		scanDelay:              make(chan time.Duration),
		initialScanFinished:    make(chan struct{}),
		scanScheduled:          make(chan struct{}, 1),
		versionCleanupInterval: time.Duration(cfg.Versioning.CleanupIntervalS) * time.Second,
		versionCleanupTimer:    time.NewTimer(time.Duration(cfg.Versioning.CleanupIntervalS) * time.Second),

		pullScheduled: make(chan struct{}, 1), // This needs to be 1-buffered so that we queue a pull if we're busy when it comes.

		errorsMut: sync.NewMutex(),

		doInSyncChan: make(chan syncRequest),

		forcedRescanRequested: make(chan struct{}, 1),
		forcedRescanPaths:     make(map[string]struct{}),
		forcedRescanPathsMut:  sync.NewMutex(),

		watchCancel:      func() {},
		restartWatchChan: make(chan struct{}, 1),
		watchMut:         sync.NewMutex(),

		versioner: ver,
	}
	f.pullPause = f.pullBasePause()
	f.pullFailTimer = time.NewTimer(0)
	<-f.pullFailTimer.C
	return f
}
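
// Serve runs the folder's service loop. It handles scheduled and requested
// scans, pulls, filesystem watcher events and restarts, version cleanup and
// forced rescans until the given context is cancelled.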
func (f *folder) Serve(ctx context.Context) error {
	atomic.AddInt32(&f.model.foldersRunning, 1)
	defer atomic.AddInt32(&f.model.foldersRunning, -1)

	f.ctx = ctx

	l.Debugln(f, "starting")
	defer l.Debugln(f, "exiting")

	defer func() {
		f.scanTimer.Stop()
		f.versionCleanupTimer.Stop()
		f.setState(FolderIdle)
	}()

	if f.FSWatcherEnabled && f.getHealthErrorAndLoadIgnores() == nil {
		f.startWatch()
	}

	// If we're configured to not do version cleanup, or we don't have a
	// versioner, cancel and drain that timer now.
	if f.versionCleanupInterval == 0 || f.versioner == nil {
		if !f.versionCleanupTimer.Stop() {
			<-f.versionCleanupTimer.C
		}
	}

	initialCompleted := f.initialScanFinished

	for {
		var err error

		select {
		case <-f.ctx.Done():
			close(f.done)
			return nil

		case <-f.pullScheduled:
			_, err = f.pull()

		case <-f.pullFailTimer.C:
			var success bool
			success, err = f.pull()
			if (err != nil || !success) && f.pullPause < 60*f.pullBasePause() {
				// Back off from retrying to pull
				f.pullPause *= 2
			}

		case <-initialCompleted:
			// Initial scan has completed, we should do a pull
			initialCompleted = nil // never hit this case again
			_, err = f.pull()

		case <-f.forcedRescanRequested:
			err = f.handleForcedRescans()

		case <-f.scanTimer.C:
			l.Debugln(f, "Scanning due to timer")
			err = f.scanTimerFired()

		case req := <-f.doInSyncChan:
			l.Debugln(f, "Running something due to request")
			err = req.fn()
			req.err <- err

		case next := <-f.scanDelay:
			l.Debugln(f, "Delaying scan")
			f.scanTimer.Reset(next)

		case <-f.scanScheduled:
			l.Debugln(f, "Scan was scheduled")
			f.scanTimer.Reset(0)

		case fsEvents := <-f.watchChan:
			l.Debugln(f, "Scan due to watcher")
			err = f.scanSubdirs(fsEvents)

		case <-f.restartWatchChan:
			l.Debugln(f, "Restart watcher")
			err = f.restartWatch()

		case <-f.versionCleanupTimer.C:
			l.Debugln(f, "Doing version cleanup")
			f.versionCleanupTimerFired()
		}

		if err != nil {
			if svcutil.IsFatal(err) {
				return err
			}
			f.setError(err)
		}
}
}
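
// BringToFront, Override and Revert are no-ops on the base folder type;
// folder types where these operations apply provide their own implementations.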
func (f *folder) BringToFront(string) {}

func (f *folder) Override() {}

func (f *folder) Revert() {}

func (f *folder) DelayScan(next time.Duration) {
	select {
	case f.scanDelay <- next:
	case <-f.done:
	}
}

func (f *folder) ScheduleScan() {
	// 1-buffered chan
	select {
	case f.scanScheduled <- struct{}{}:
	default:
	}
}
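
// ignoresUpdated must be called when the ignore patterns have changed; it
// schedules a restart of the filesystem watcher (if enabled) so that the new
// patterns take effect.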
func (f *folder) ignoresUpdated() {
	if f.FSWatcherEnabled {
		f.scheduleWatchRestart()
}
}

func (f *folder) SchedulePull() {
	select {
	case f.pullScheduled <- struct{}{}:
	default:
// We might be busy doing a pull and thus not reading from this
// channel. The channel is 1-buffered, so one notification will be
// queued to ensure we recheck after the pull, but beyond that we must
// make sure to not block index receiving.
}
}

func (f *folder) Jobs(_, _ int) ([]string, []string, int) {
	return nil, nil, 0
}
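
// Scan waits for the initial scan to complete and then scans the given
// subdirectories synchronously in the folder's service loop.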
func (f *folder) Scan(subdirs []string) error {
	<-f.initialScanFinished
	return f.doInSync(func() error { return f.scanSubdirs(subdirs) })
}

// doInSync allows running functions synchronously in the folder's Serve loop
// from exported, asynchronously called methods.
func (f *folder) doInSync(fn func() error) error {
	req := syncRequest{
		fn:  fn,
		err: make(chan error, 1),
	}

	select {
	case f.doInSyncChan <- req:
		return <-req.err
	case <-f.done:
		return context.Canceled
	}
}
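
// Reschedule arms the scan timer for the next periodic rescan, at a randomized
// interval around the configured one. It is a no-op when periodic scanning is
// disabled.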
func (f *folder) Reschedule() {
	if f.scanInterval == 0 {
		return
	}
	// Sleep a random time between 3/4 and 5/4 of the configured interval.
	sleepNanos := (f.scanInterval.Nanoseconds()*3 + rand.Int63n(2*f.scanInterval.Nanoseconds())) / 4
	interval := time.Duration(sleepNanos) * time.Nanosecond
	l.Debugln(f, "next rescan in", interval)
	f.scanTimer.Reset(interval)
}

func (f *folder) getHealthErrorAndLoadIgnores() error {
	if err := f.getHealthErrorWithoutIgnores(); err != nil {
		return err
	}
	if f.Type != config.FolderTypeReceiveEncrypted {
		if err := f.ignores.Load(".stignore"); err != nil && !fs.IsNotExist(err) {
			return errors.Wrap(err, "loading ignores")
		}
	}
	return nil
}

func (f *folder) getHealthErrorWithoutIgnores() error {
	// Check for folder errors, with the most serious and specific first and
	// generic ones like out of space on the home disk later.

	if err := f.CheckPath(); err != nil {
		return err
	}

	dbPath := locations.Get(locations.Database)
	if usage, err := fs.NewFilesystem(fs.FilesystemTypeBasic, dbPath).Usage("."); err == nil {
		if err = config.CheckFreeSpace(f.model.cfg.Options().MinHomeDiskFree, usage); err != nil {
			return fmt.Errorf("insufficient space on disk for database (%v): %w", dbPath, err)
		}
	}

return nil
}
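
// pull runs one pull attempt. It returns true when the pull completed or there
// was nothing to do, in which case it should not be retried; otherwise the
// fail timer is armed for a later retry.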
func (f *folder) pull() (success bool, err error) {
	f.pullFailTimer.Stop()
	select {
	case <-f.pullFailTimer.C:
	default:
	}

	select {
	case <-f.initialScanFinished:
	default:
		// Once the initial scan finished, a pull will be scheduled
		return true, nil
	}

	defer func() {
		if success {
			// We're good, reset the pause interval.
			f.pullPause = f.pullBasePause()
		}
	}()

	// If there is nothing to do, don't even enter sync-waiting state.
	abort := true
	snap, err := f.dbSnapshot()
	if err != nil {
		return false, err
	}
	snap.WithNeed(protocol.LocalDeviceID, func(intf protocol.FileIntf) bool {
		abort = false
		return false
	})
	snap.Release()
	if abort {
		// Clears pull failures on items that were needed before, but aren't anymore.
		f.errorsMut.Lock()
		f.pullErrors = nil
		f.errorsMut.Unlock()
		return true, nil
	}

	// Abort early (before acquiring a token) if there's a folder error
	err = f.getHealthErrorWithoutIgnores()
	if err != nil {
		l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
		return false, err
	}

	// Send only folder doesn't do any io, it only checks for out-of-sync
	// items that differ in metadata and updates those.
	if f.Type != config.FolderTypeSendOnly {
		f.setState(FolderSyncWaiting)

		if err := f.ioLimiter.TakeWithContext(f.ctx, 1); err != nil {
			return true, err
		}
		defer f.ioLimiter.Give(1)
	}

	startTime := time.Now()

	// Check if the ignore patterns changed.
	oldHash := f.ignores.Hash()
	defer func() {
		if f.ignores.Hash() != oldHash {
			f.ignoresUpdated()
		}
	}()
	err = f.getHealthErrorAndLoadIgnores()
	if err != nil {
		l.Debugln("Skipping pull of", f.Description(), "due to folder error:", err)
		return false, err
	}
	f.setError(nil)

	success, err = f.puller.pull()

	if success && err == nil {
		return true, nil
	}

	// Pulling failed, try again later.
	delay := f.pullPause + time.Since(startTime)
	l.Infof("Folder %v isn't making sync progress - retrying in %v.", f.Description(), util.NiceDurationString(delay))
	f.pullFailTimer.Reset(delay)

	return false, err
}
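
// scanSubdirs scans the given subdirectories, or the entire folder when the
// list is empty, applying detected changes to the database in batches.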
func (f *folder) scanSubdirs(subDirs []string) error {
	l.Debugf("%v scanning", f)

	oldHash := f.ignores.Hash()
	err := f.getHealthErrorAndLoadIgnores()
	if err != nil {
		return err
	}
	f.setError(nil)

	// Check on the way out if the ignore patterns changed as part of scanning
	// this folder. If they did we should schedule a pull of the folder so that
	// we request things we might have suddenly become unignored and so on.
	defer func() {
		if f.ignores.Hash() != oldHash {
			l.Debugln("Folder", f.Description(), "ignore patterns change detected while scanning; triggering puller")
			f.ignoresUpdated()
			f.SchedulePull()
		}
	}()

	f.setState(FolderScanWaiting)
	defer f.setState(FolderIdle)

	if err := f.ioLimiter.TakeWithContext(f.ctx, 1); err != nil {
		return err
	}
	defer f.ioLimiter.Give(1)

	for i := range subDirs {
		sub := osutil.NativeFilename(subDirs[i])

		if sub == "" {
			// A blank subdirs means to scan the entire folder. We can trim
			// the subDirs list and go on our way.
			subDirs = nil
			break
		}

		subDirs[i] = sub
	}

	// Clean the list of subitems to ensure that we start at a known
	// directory, and don't scan subdirectories of things we've already
	// scanned.
	snap, err := f.dbSnapshot()
	if err != nil {
		return err
	}
	subDirs = unifySubs(subDirs, func(file string) bool {
		_, ok := snap.Get(protocol.LocalDeviceID, file)
		return ok
	})
	snap.Release()

	f.setState(FolderScanning)
	f.clearScanErrors(subDirs)

	batch := f.newScanBatch()

	// Schedule a pull after scanning, but only if we actually detected any
	// changes.
	changes := 0
	defer func() {
		l.Debugf("%v finished scanning, detected %v changes", f, changes)
		if changes > 0 {
			f.SchedulePull()
		}
	}()

	changesHere, err := f.scanSubdirsChangedAndNew(subDirs, batch)
	changes += changesHere
	if err != nil {
		return err
	}

	if err := batch.Flush(); err != nil {
		return err
	}

	if len(subDirs) == 0 {
		// If we have no specific subdirectories to traverse, set it to one
		// empty prefix so we traverse the entire folder contents once.
		subDirs = []string{""}
	}

	// Do a scan of the database for each prefix, to check for deleted and
	// ignored files.
	changesHere, err = f.scanSubdirsDeletedAndIgnored(subDirs, batch)
	changes += changesHere
	if err != nil {
		return err
	}

	if err := batch.Flush(); err != nil {
		return err
	}

	f.ScanCompleted()
return nil
}

const maxToRemove = 1000

type scanBatch struct {
	f           *folder
	updateBatch *db.FileInfoBatch
	toRemove    []string
}

func (f *folder) newScanBatch() *scanBatch {
	b := &scanBatch{
		f:        f,
		toRemove: make([]string, 0, maxToRemove),
	}
	b.updateBatch = db.NewFileInfoBatch(func(fs []protocol.FileInfo) error {
		if err := b.f.getHealthErrorWithoutIgnores(); err != nil {
			l.Debugf("Stopping scan of folder %s due to: %s", b.f.Description(), err)
			return err
		}
		b.f.updateLocalsFromScanning(fs)
		return nil
	})
	return b
}

func (b *scanBatch) Remove(item string) {
	b.toRemove = append(b.toRemove, item)
}

func (b *scanBatch) flushToRemove() {
	if len(b.toRemove) > 0 {
		b.f.fset.RemoveLocalItems(b.toRemove)
		b.toRemove = b.toRemove[:0]
	}
}

func (b *scanBatch) Flush() error {
	b.flushToRemove()
	return b.updateBatch.Flush()
}

func (b *scanBatch) FlushIfFull() error {
	if len(b.toRemove) >= maxToRemove {
		b.flushToRemove()
	}
	return b.updateBatch.FlushIfFull()
}

// Update adds the fileinfo to the batch for updating, and does a few checks.
// It returns false if the checks result in the file being neither updated nor removed.
func (b *scanBatch) Update(fi protocol.FileInfo, snap *db.Snapshot) bool {
	// Check for a "virtual" parent directory of encrypted files. We don't track
	// it, but check if anything still exists within and delete it otherwise.
	if b.f.Type == config.FolderTypeReceiveEncrypted && fi.IsDirectory() && protocol.IsEncryptedParent(fs.PathComponents(fi.Name)) {
		if names, err := b.f.mtimefs.DirNames(fi.Name); err == nil && len(names) == 0 {
			b.f.mtimefs.Remove(fi.Name)
		}
		return false
	}
	// Resolve receive-only items which are identical with the global state or
	// the global item is our own receive-only item.
	switch gf, ok := snap.GetGlobal(fi.Name); {
	case !ok:
	case gf.IsReceiveOnlyChanged():
		if fi.IsDeleted() {
			// Our item is deleted and the global item is our own receive only
			// file. No point in keeping track of that.
			b.Remove(fi.Name)
			return true
		}
	case gf.IsEquivalentOptional(fi, protocol.FileInfoComparison{
		ModTimeWindow:   b.f.modTimeWindow,
		IgnorePerms:     b.f.IgnorePerms,
		IgnoreBlocks:    true,
		IgnoreFlags:     protocol.FlagLocalReceiveOnly,
		IgnoreOwnership: !b.f.SyncOwnership,
	}):
		// What we have locally is equivalent to the global file.
		l.Debugf("%v scanning: Merging identical locally changed item with global", b.f, fi)
		fi = gf
	}
	b.updateBatch.Append(fi)
	return true
}
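
// scanSubdirsChangedAndNew walks the filesystem under the given subdirectories
// and adds new and changed items, including detected renames, to the batch.
// It returns the number of changes detected.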
func (f *folder) scanSubdirsChangedAndNew(subDirs []string, batch *scanBatch) (int, error) {
	changes := 0
	snap, err := f.dbSnapshot()
	if err != nil {
		return changes, err
	}
	defer snap.Release()

	// If we return early e.g. due to a folder health error, the scan needs
	// to be cancelled.
	scanCtx, scanCancel := context.WithCancel(f.ctx)
	defer scanCancel()

	scanConfig := scanner.Config{
		Folder:                f.ID,
		Subs:                  subDirs,
		Matcher:               f.ignores,
		TempLifetime:          time.Duration(f.model.cfg.Options().KeepTemporariesH) * time.Hour,
		CurrentFiler:          cFiler{snap},
		Filesystem:            f.mtimefs,
		IgnorePerms:           f.IgnorePerms,
		IgnoreOwnership:       !f.SyncOwnership,
		AutoNormalize:         f.AutoNormalize,
		Hashers:               f.model.numHashers(f.ID),
		ShortID:               f.shortID,
		ProgressTickIntervalS: f.ScanProgressIntervalS,
		LocalFlags:            f.localFlags,
		ModTimeWindow:         f.modTimeWindow,
		EventLogger:           f.evLogger,
	}
	var fchan chan scanner.ScanResult
	if f.Type == config.FolderTypeReceiveEncrypted {
		fchan = scanner.WalkWithoutHashing(scanCtx, scanConfig)
	} else {
		fchan = scanner.Walk(scanCtx, scanConfig)
	}

	alreadyUsedOrExisting := make(map[string]struct{})
	for res := range fchan {
		if res.Err != nil {
			f.newScanError(res.Path, res.Err)
			continue
		}

		if err := batch.FlushIfFull(); err != nil {
			// Prevent a race between the scan aborting due to context
			// cancellation and releasing the snapshot in defer here.
			scanCancel()
			for range fchan {
			}
			return changes, err
		}

		if batch.Update(res.File, snap) {
			changes++
		}

		switch f.Type {
		case config.FolderTypeReceiveOnly, config.FolderTypeReceiveEncrypted:
		default:
			if nf, ok := f.findRename(snap, res.File, alreadyUsedOrExisting); ok {
				if batch.Update(nf, snap) {
					changes++
				}
			}
		}
	}

	return changes, nil
}
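
// scanSubdirsDeletedAndIgnored walks the database under the given prefixes and
// updates the batch for items that have been deleted on disk or have become
// ignored. It returns the number of changes detected.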
func (f *folder) scanSubdirsDeletedAndIgnored(subDirs []string, batch *scanBatch) (int, error) {
	var toIgnore []db.FileInfoTruncated
	ignoredParent := ""
	changes := 0
	snap, err := f.dbSnapshot()
	if err != nil {
		return 0, err
	}
	defer snap.Release()

	for _, sub := range subDirs {
		var iterError error

		snap.WithPrefixedHaveTruncated(protocol.LocalDeviceID, sub, func(fi protocol.FileIntf) bool {
			select {
			case <-f.ctx.Done():
				return false
			default:
			}

			file := fi.(db.FileInfoTruncated)

			if err := batch.FlushIfFull(); err != nil {
				iterError = err
				return false
			}

			if ignoredParent != "" && !fs.IsParent(file.Name, ignoredParent) {
				for _, file := range toIgnore {
					l.Debugln("marking file as ignored", file)
					nf := file.ConvertToIgnoredFileInfo()
					if batch.Update(nf, snap) {
						changes++
					}
					if err := batch.FlushIfFull(); err != nil {
						iterError = err
						return false
					}
				}
				toIgnore = toIgnore[:0]
				ignoredParent = ""
			}

			switch ignored := f.ignores.Match(file.Name).IsIgnored(); {
			case file.IsIgnored() && ignored:
				return true
			case !file.IsIgnored() && ignored:
				// File was not ignored at last pass but has been ignored.
				if file.IsDirectory() {
					// Delay ignoring as a child might be unignored.
					toIgnore = append(toIgnore, file)
					if ignoredParent == "" {
						// If the parent wasn't ignored already, set
						// this path as the "highest" ignored parent
						ignoredParent = file.Name
					}
					return true
				}

				l.Debugln("marking file as ignored", file)
				nf := file.ConvertToIgnoredFileInfo()
				if batch.Update(nf, snap) {
					changes++
				}

			case file.IsIgnored() && !ignored:
				// Successfully scanned items are already un-ignored during
				// the scan, so check whether it is deleted.
				fallthrough
			case !file.IsIgnored() && !file.IsDeleted() && !file.IsUnsupported():
				// The file is not ignored, deleted or unsupported. Let's check if
				// it's still here. Simply stat:ing it won't do as there are
				// tons of corner cases (e.g. parent dir->symlink, missing
				// permissions)
				if !osutil.IsDeleted(f.mtimefs, file.Name) {
					if ignoredParent != "" {
						// Don't ignore parents of this not ignored item
						toIgnore = toIgnore[:0]
						ignoredParent = ""
					}
					return true
				}
				nf := file.ConvertToDeletedFileInfo(f.shortID)
				nf.LocalFlags = f.localFlags
				if file.ShouldConflict() {
					// We do not want to override the global version with
					// the deleted file. Setting to an empty version makes
					// sure the file gets in sync on the following pull.
					nf.Version = protocol.Vector{}
				}
				l.Debugln("marking file as deleted", nf)
				if batch.Update(nf, snap) {
					changes++
				}
			case file.IsDeleted() && file.IsReceiveOnlyChanged():
				switch f.Type {
				case config.FolderTypeReceiveOnly, config.FolderTypeReceiveEncrypted:
					switch gf, ok := snap.GetGlobal(file.Name); {
					case !ok:
					case gf.IsReceiveOnlyChanged():
						l.Debugln("removing deleted, receive-only item that is globally receive-only from db", file)
						batch.Remove(file.Name)
						changes++
					case gf.IsDeleted():
						// Our item is deleted and the global item is deleted too. We just
						// pretend it is a normal deleted file (nobody cares about that).
						l.Debugf("%v scanning: Marking globally deleted item as not locally changed: %v", f, file.Name)
						file.LocalFlags &^= protocol.FlagLocalReceiveOnly
						if batch.Update(file.ConvertDeletedToFileInfo(), snap) {
							changes++
						}
					}
				default:
					// No need to bump the version for a file that was and is
					// deleted and just the folder type/local flags changed.
					file.LocalFlags &^= protocol.FlagLocalReceiveOnly
					l.Debugln("removing receive-only flag on deleted item", file)
					if batch.Update(file.ConvertDeletedToFileInfo(), snap) {
						changes++
					}
				}
			}

			return true
		})

		select {
		case <-f.ctx.Done():
			return changes, f.ctx.Err()
		default:
		}

		if iterError == nil && len(toIgnore) > 0 {
			for _, file := range toIgnore {
				l.Debugln("marking file as ignored", file)
				nf := file.ConvertToIgnoredFileInfo()
				if batch.Update(nf, snap) {
					changes++
				}
				if iterError = batch.FlushIfFull(); iterError != nil {
					break
				}
			}
			toIgnore = toIgnore[:0]
		}

		if iterError != nil {
			return changes, iterError
		}
	}

	return changes, nil
}
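
// findRename looks for an existing database entry with the same block hash and
// size as the scanned file whose path no longer exists on disk, and returns
// that entry marked as deleted, so the change is recorded as a rename.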
func (f *folder) findRename(snap *db.Snapshot, file protocol.FileInfo, alreadyUsedOrExisting map[string]struct{}) (protocol.FileInfo, bool) {
	if len(file.Blocks) == 0 || file.Size == 0 {
		return protocol.FileInfo{}, false
	}

	found := false
	nf := protocol.FileInfo{}

	snap.WithBlocksHash(file.BlocksHash, func(ifi protocol.FileIntf) bool {
		fi := ifi.(protocol.FileInfo)

		select {
		case <-f.ctx.Done():
			return false
		default:
		}

		if fi.Name == file.Name {
			alreadyUsedOrExisting[fi.Name] = struct{}{}
			return true
		}

		if _, ok := alreadyUsedOrExisting[fi.Name]; ok {
			return true
		}

		if fi.ShouldConflict() {
			return true
		}

		if f.ignores.Match(fi.Name).IsIgnored() {
			return true
		}

		// Only check the size.
		// No point checking block equality, as that uses BlocksHash comparison if that is set (which it will be).
		// No point checking BlocksHash comparison as WithBlocksHash already does that.
		if file.Size != fi.Size {
			return true
		}

		alreadyUsedOrExisting[fi.Name] = struct{}{}

		if !osutil.IsDeleted(f.mtimefs, fi.Name) {
			return true
		}

		nf = fi
		nf.SetDeleted(f.shortID)
		nf.LocalFlags = f.localFlags
		found = true
		return false
	})

	return nf, found
}

func (f *folder) scanTimerFired() error {
	err := f.scanSubdirs(nil)

	select {
	case <-f.initialScanFinished:
	default:
		status := "Completed"
		if err != nil {
			status = "Failed"
		}
		l.Infoln(status, "initial scan of", f.Type.String(), "folder", f.Description())
		close(f.initialScanFinished)
	}

	f.Reschedule()

	return err
}

func (f *folder) versionCleanupTimerFired() {
	f.setState(FolderCleanWaiting)
	defer f.setState(FolderIdle)

	if err := f.ioLimiter.TakeWithContext(f.ctx, 1); err != nil {
		return
	}
	defer f.ioLimiter.Give(1)

	f.setState(FolderCleaning)

	if err := f.versioner.Clean(f.ctx); err != nil {
		l.Infof("Failed to clean versions in %s: %v", f.Description(), err)
	}

	f.versionCleanupTimer.Reset(f.versionCleanupInterval)
}

func (f *folder) WatchError() error {
	f.watchMut.Lock()
	defer f.watchMut.Unlock()
	return f.watchErr
}

// stopWatch immediately aborts watching and may be called asynchronously
func (f *folder) stopWatch() {
	f.watchMut.Lock()
	f.watchCancel()
	f.watchMut.Unlock()
	f.setWatchError(nil, 0)
}

// scheduleWatchRestart makes sure watching is restarted from the main for loop
// in a folder's Serve and thus may be called asynchronously (e.g. when ignores change).
func (f *folder) scheduleWatchRestart() {
	select {
	case f.restartWatchChan <- struct{}{}:
	default:
// We might be busy doing a pull and thus not reading from this
// channel. The channel is 1-buffered, so one notification will be
// queued to ensure we recheck after the pull.
}
}

// restartWatch should only ever be called synchronously. If you want to use
// this asynchronously, you should probably use scheduleWatchRestart instead.
func (f *folder) restartWatch() error {
	f.stopWatch()
	f.startWatch()
	return f.scanSubdirs(nil)
}

// startWatch should only ever be called synchronously. If you want to use
// this asynchronously, you should probably use scheduleWatchRestart instead.
func (f *folder) startWatch() {
	ctx, cancel := context.WithCancel(f.ctx)
	f.watchMut.Lock()
	f.watchChan = make(chan []string)
	f.watchCancel = cancel
	f.watchMut.Unlock()
	go f.monitorWatch(ctx)
}

// monitorWatch starts the filesystem watching and retries every minute on failure.
// It should not be used except in startWatch.
func (f *folder) monitorWatch(ctx context.Context) {
	failTimer := time.NewTimer(0)
	aggrCtx, aggrCancel := context.WithCancel(ctx)
	var err error
	var eventChan <-chan fs.Event
	var errChan <-chan error
	warnedOutside := false
	var lastWatch time.Time
	pause := time.Minute

	// Subscribe to folder summaries only on kqueue systems, to warn about potential high resource usage
	var summarySub events.Subscription
	var summaryChan <-chan events.Event
	if fs.WatchKqueue && !f.warnedKqueue {
		summarySub = f.evLogger.Subscribe(events.FolderSummary)
		summaryChan = summarySub.C()
	}
	defer func() {
		aggrCancel() // aggrCancel might be re-assigned -> call within closure
		if summaryChan != nil {
			summarySub.Unsubscribe()
		}
	}()

	for {
		select {
		case <-failTimer.C:
			eventChan, errChan, err = f.mtimefs.Watch(".", f.ignores, ctx, f.IgnorePerms)
			// We do this once per minute initially, increasing to
			// at most one hour in case of repeated failures.
			f.scanOnWatchErr()
			f.setWatchError(err, pause)
			if err != nil {
				failTimer.Reset(pause)
				if pause < 60*time.Minute {
					pause *= 2
				}
				continue
			}
			lastWatch = time.Now()
			watchaggregator.Aggregate(aggrCtx, eventChan, f.watchChan, f.FolderConfiguration, f.model.cfg, f.evLogger)
			l.Debugln("Started filesystem watcher for folder", f.Description())

		case err = <-errChan:
			var next time.Duration
			if dur := time.Since(lastWatch); dur > pause {
				pause = time.Minute
				next = 0
			} else {
				next = pause - dur
				if pause < 60*time.Minute {
					pause *= 2
				}
			}
			failTimer.Reset(next)
			f.setWatchError(err, next)
			// This error was previously a panic and should never occur, so generate
			// a warning, but don't do it repetitively.
			var errOutside *fs.ErrWatchEventOutsideRoot
			if errors.As(err, &errOutside) {
				if !warnedOutside {
					l.Warnln(err)
					warnedOutside = true
				}
				f.evLogger.Log(events.Failure, "watching for changes encountered an event outside of the filesystem root")
			}
			aggrCancel()
			errChan = nil
			aggrCtx, aggrCancel = context.WithCancel(ctx)

		case ev := <-summaryChan:
			if data, ok := ev.Data.(FolderSummaryEventData); !ok {
				f.evLogger.Log(events.Failure, "Unexpected type of folder-summary event in folder.monitorWatch")
			} else if data.Summary.LocalTotalItems-data.Summary.LocalDeleted > kqueueItemCountThreshold {
				f.warnedKqueue = true
				summarySub.Unsubscribe()
				summaryChan = nil
				l.Warnf("Filesystem watching (kqueue) is enabled on %v with a lot of files/directories, and that requires a lot of resources and might slow down your system significantly", f.Description())
			}

		case <-ctx.Done():
return
}
}
}

// setWatchError sets the current error state of the watch and should be called
// regardless of whether err is nil or not.
func (f *folder) setWatchError(err error, nextTryIn time.Duration) {
	f.watchMut.Lock()
	prevErr := f.watchErr
	f.watchErr = err
	f.watchMut.Unlock()

	if err != prevErr {
		data := map[string]interface{}{
			"folder": f.ID,
		}
		if prevErr != nil {
			data["from"] = prevErr.Error()
		}
		if err != nil {
			data["to"] = err.Error()
		}
		f.evLogger.Log(events.FolderWatchStateChanged, data)
	}

	if err == nil {
		return
	}

	msg := fmt.Sprintf("Error while trying to start filesystem watcher for folder %s, trying again in %v: %v", f.Description(), nextTryIn, err)
	if prevErr != err {
		l.Infof(msg)
		return
	}
	l.Debugf(msg)
}

// scanOnWatchErr schedules a full scan immediately if an error occurred while watching.
func (f *folder) scanOnWatchErr() {
	f.watchMut.Lock()
	err := f.watchErr
	f.watchMut.Unlock()
	if err != nil {
		f.DelayScan(0)
}
}
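
// setError updates the folder's error state, logging the transition and
// stopping or restarting the filesystem watcher as appropriate. It does
// nothing once the folder context is cancelled.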
func (f *folder) setError(err error) {
	select {
	case <-f.ctx.Done():
		return
	default:
	}

	_, _, oldErr := f.getState()
	if (err != nil && oldErr != nil && oldErr.Error() == err.Error()) || (err == nil && oldErr == nil) {
		return
	}

	if err != nil {
		if oldErr == nil {
			l.Warnf("Error on folder %s: %v", f.Description(), err)
		} else {
			l.Infof("Error on folder %s changed: %q -> %q", f.Description(), oldErr, err)
		}
	} else {
		l.Infoln("Cleared error on folder", f.Description())
		f.SchedulePull()
	}

	if f.FSWatcherEnabled {
		if err != nil {
			f.stopWatch()
		} else {
			f.scheduleWatchRestart()
		}
	}

	f.stateTracker.setError(err)
}

func (f *folder) pullBasePause() time.Duration {
	if f.PullerPauseS == 0 {
		return defaultPullerPause
	}
	return time.Duration(f.PullerPauseS) * time.Second
}

func (f *folder) String() string {
	return fmt.Sprintf("%s/%s@%p", f.Type, f.folderID, f)
}

func (f *folder) newScanError(path string, err error) {
	f.errorsMut.Lock()
	l.Infof("Scanner (folder %s, item %q): %v", f.Description(), path, err)
	f.scanErrors = append(f.scanErrors, FileError{
		Err:  err.Error(),
		Path: path,
	})
	f.errorsMut.Unlock()
}

func (f *folder) clearScanErrors(subDirs []string) {
	f.errorsMut.Lock()
	defer f.errorsMut.Unlock()
	if len(subDirs) == 0 {
		f.scanErrors = nil
		return
	}
	filtered := f.scanErrors[:0]
outer:
	for _, fe := range f.scanErrors {
		for _, sub := range subDirs {
			if fe.Path == sub || fs.IsParent(fe.Path, sub) {
				continue outer
			}
		}
		filtered = append(filtered, fe)
	}
	f.scanErrors = filtered
}

func (f *folder) Errors() []FileError {
	f.errorsMut.Lock()
	defer f.errorsMut.Unlock()
	scanLen := len(f.scanErrors)
	errors := make([]FileError, scanLen+len(f.pullErrors))
	copy(errors[:scanLen], f.scanErrors)
	copy(errors[scanLen:], f.pullErrors)
	sort.Sort(fileErrorList(errors))
	return errors
}

// ScheduleForceRescan marks the file such that it gets rehashed on next scan, and schedules a scan.
func (f *folder) ScheduleForceRescan(path string) {
	f.forcedRescanPathsMut.Lock()
	f.forcedRescanPaths[path] = struct{}{}
	f.forcedRescanPathsMut.Unlock()

	select {
	case f.forcedRescanRequested <- struct{}{}:
	default:
	}
}

func (f *folder) updateLocalsFromScanning(fs []protocol.FileInfo) {
	f.updateLocals(fs)
	f.emitDiskChangeEvents(fs, events.LocalChangeDetected)
}

func (f *folder) updateLocalsFromPulling(fs []protocol.FileInfo) {
	f.updateLocals(fs)
	f.emitDiskChangeEvents(fs, events.RemoteChangeDetected)
}

func (f *folder) updateLocals(fs []protocol.FileInfo) {
	f.fset.Update(protocol.LocalDeviceID, fs)

	filenames := make([]string, len(fs))
	f.forcedRescanPathsMut.Lock()
	for i, file := range fs {
		filenames[i] = file.Name
		// No need to rescan a file that was changed since anyway.
		delete(f.forcedRescanPaths, file.Name)
	}
	f.forcedRescanPathsMut.Unlock()

	seq := f.fset.Sequence(protocol.LocalDeviceID)
	f.evLogger.Log(events.LocalIndexUpdated, map[string]interface{}{
		"folder":    f.ID,
		"items":     len(fs),
		"filenames": filenames,
		"sequence":  seq,
		"version":   seq, // legacy for sequence
	})
}

func (f *folder) emitDiskChangeEvents(fs []protocol.FileInfo, typeOfEvent events.EventType) {
	for _, file := range fs {
		if file.IsInvalid() {
			continue
		}
		objType := "file"
		action := "modified"

		if file.IsDeleted() {
			action = "deleted"
		}

		if file.IsSymlink() {
			objType = "symlink"
		} else if file.IsDirectory() {
			objType = "dir"
		}

		// Two different events can be fired here based on what EventType is passed into function
		f.evLogger.Log(typeOfEvent, map[string]string{
			"folder":     f.ID,
			"folderID":   f.ID, // incorrect, deprecated, kept for historical compliance
			"label":      f.Label,
			"action":     action,
			"type":       objType,
			"path":       filepath.FromSlash(file.Name),
			"modifiedBy": file.ModifiedBy.String(),
		})
	}
}
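
// handleForcedRescans marks the paths queued by ScheduleForceRescan as needing
// a rehash in the database and then scans them.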
func (f *folder) handleForcedRescans() error {
	f.forcedRescanPathsMut.Lock()
	paths := make([]string, 0, len(f.forcedRescanPaths))
	for path := range f.forcedRescanPaths {
		paths = append(paths, path)
	}
	f.forcedRescanPaths = make(map[string]struct{})
	f.forcedRescanPathsMut.Unlock()
	if len(paths) == 0 {
		return nil
	}

	batch := db.NewFileInfoBatch(func(fs []protocol.FileInfo) error {
		f.fset.Update(protocol.LocalDeviceID, fs)
		return nil
	})

	snap, err := f.dbSnapshot()
	if err != nil {
		return err
	}
	defer snap.Release()

	for _, path := range paths {
		if err := batch.FlushIfFull(); err != nil {
			return err
		}

		fi, ok := snap.Get(protocol.LocalDeviceID, path)
		if !ok {
			continue
		}
		fi.SetMustRescan()
		batch.Append(fi)
	}

	if err = batch.Flush(); err != nil {
		return err
	}

	return f.scanSubdirs(paths)
}

// dbSnapshot gets a snapshot from the fileset, and wraps any error
// in a svcutil.FatalErr.
func (f *folder) dbSnapshot() (*db.Snapshot, error) {
	snap, err := f.fset.Snapshot()
	if err != nil {
		return nil, svcutil.AsFatalErr(err, svcutil.ExitError)
	}
	return snap, nil
}
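
// unifySubs canonicalizes and sorts the given subdirectories, drops entries
// that are covered by another entry, and walks each remaining entry up to the
// closest ancestor whose parent exists according to the exists function.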
// The exists function is expected to return true for all known paths
// (excluding "" and ".")
func unifySubs(dirs []string, exists func(dir string) bool) []string {
	if len(dirs) == 0 {
		return nil
	}
	sort.Strings(dirs)
	if dirs[0] == "" || dirs[0] == "." || dirs[0] == string(fs.PathSeparator) {
		return nil
	}
	prev := "./" // Anything that can't be parent of a clean path
	for i := 0; i < len(dirs); {
		dir, err := fs.Canonicalize(dirs[i])
		if err != nil {
			l.Debugf("Skipping %v for scan: %s", dirs[i], err)
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		if dir == prev || fs.IsParent(dir, prev) {
			dirs = append(dirs[:i], dirs[i+1:]...)
			continue
		}
		parent := filepath.Dir(dir)
		for parent != "." && parent != string(fs.PathSeparator) && !exists(parent) {
			dir = parent
			parent = filepath.Dir(dir)
		}
		dirs[i] = dir
		prev = dir
		i++
	}
	return dirs
}

type cFiler struct {
	*db.Snapshot
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) (protocol.FileInfo, bool) {
	return cf.Get(protocol.LocalDeviceID, file)
}