--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- default-rsync.lua
--
-- Syncs with rsync ("classic" Lsyncd)
-- A (Layer 1) configuration.
--
-- Note:
--    this is in fact just a configuration using the Layer 1 interface
--    like any other; it merely gets compiled into the binary by default.
--    You can use a modified copy simply by pasting everything into a
--    config file of your own and giving it a different name.
--
-- License: GPLv2 (see COPYING) or any later version
-- Authors: Axel Kittenberger <axkibe@gmail.com>
--
--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


if not default then error( 'default not loaded' ) end
if default.rsync then error( 'default-rsync already loaded' ) end


local rsync = { }

default.rsync = rsync
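
-- A minimal usage sketch for a user's own lsyncd config file; the
-- source/target paths and the extra rsync flags here are hypothetical:
--
--     sync {
--         default.rsync,
--         source = '/home/user/src',
--         target = 'backuphost:/mnt/dst',
--         rsync  = { archive = true, compress = true }
--     }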

-- uses default collect

--
-- The checkgauge ensures there are no typos in the configuration keys.
--
rsync.checkgauge = {

    -- unsets the default user action handlers
    onCreate = false,
    onModify = false,
    onDelete = false,
    onStartup = false,
    onMove = false,

    delete = true,
    exclude = true,
    excludeFrom = true,
    filter = true,
    filterFrom = true,
    target = true,

    -- If batchSizeLimit is set, only files smaller than this limit are
    -- grouped into one rsync transfer; every file larger than the limit
    -- spawns its own transfer process. Large files thus no longer block
    -- small transfers as long as maxProcesses on the sync is larger than 1.
    --
    -- A transfer configuration based on a pool of ssh connections,
    -- forwarding only the rsync daemon port, looks like this:
    --
    --     sync {
    --         default.rsync,
    --         tunnel = tunnel {
    --             command = {"ssh", "-N", "-L", "localhost:${localport}:localhost:873", "user@targetmachine"},
    --             mode = "pool",
    --             parallel = 2,
    --         },
    --         source = "/tmp/src",
    --         target = "rsync://localhost:${localport}/test",
    --         delay = 5,
    --         batchSizeLimit = 1024 * 1024 * 30,
    --         maxProcesses = 4,
    --         rsync = {
    --             verbose = true,
    --             inplace = true,
    --         }
    --     }
    --
    -- If the remote ssh configuration only allows port forwarding and the
    -- rsync daemon is configured accordingly, data can be transferred
    -- without granting shell access.
    batchSizeLimit = true,

    rsync = {
        acls = true,
        append = true,
        append_verify = true,
        archive = true,
        backup = true,
        backup_dir = true,
        binary = true,
        bwlimit = true,
        checksum = true,
        chown = true,
        chmod = true,
        compress = true,
        copy_dirlinks = true,
        copy_links = true,
        copy_unsafe_links = true,
        cvs_exclude = true,
        delete_excluded = true,
        dry_run = true,
        executability = true,
        existing = true,
        group = true,
        groupmap = true,
        hard_links = true,
        ignore_times = true,
        inplace = true,
        ipv4 = true,
        ipv6 = true,
        keep_dirlinks = true,
        links = true,
        one_file_system = true,
        omit_dir_times = true,
        omit_link_times = true,
        owner = true,
        password_file = true,
        perms = true,
        protect_args = true,
        prune_empty_dirs = true,
        quiet = true,
        rsh = true,
        rsync_path = true,
        sparse = true,
        suffix = true,
        temp_dir = true,
        timeout = true,
        times = true,
        update = true,
        usermap = true,
        verbose = true,
        whole_file = true,
        xattrs = true,
        _extra = true,
    },
}


--
-- internal function to actually do the transfer
--
local run_action = function
(
    inlet,
    elist
)
    local config = inlet.getConfig( )

    -- When a tunnel is used, the target may contain placeholders such as
    -- ${localport}; they are filled in from the substitution data here.
    local substitudes = inlet.getSubstitutionData(elist, {})
    local target = substitudeCommands(config.target, substitudes)

    --
    -- Escapes characters that rsync would otherwise treat as
    -- filter-rule wildcards, so paths are matched literally.
    --
    local function sub
    (
        p -- pattern
    )
        if not p then return end

        return p:
            gsub( '%?', '\\?' ):
            gsub( '%*', '\\*' ):
            gsub( '%[', '\\[' ):
            gsub( '%]', '\\]' )
    end
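
    -- A worked example (hypothetical path): sub( 'docs/a?*[1].txt' )
    -- returns 'docs/a\?\*\[1\].txt', so rsync treats the wildcard
    -- characters as literal parts of the filename.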

    --
    -- Gets the list of paths for the event list.
    --
    -- Delete events get turned into multi-match patterns ('***').
    --
    local paths = elist.getPaths(
        function
        (
            etype, -- event type
            path1, -- path
            path2  -- second path (for move events)
        )
            if string.byte( path1, -1 ) == 47 and etype == 'Delete'
            then
                return sub( path1 )..'***', sub( path2 )
            else
                return sub( path1 ), sub( path2 )
            end
        end
    )

    -- stores all filters by integer index
    local filterI = { }

    -- stores all filters with path index
    local filterP = { }

    -- adds one path to the filter
    local function addToFilter
    (
        path
    )
        if filterP[ path ] then return end

        filterP[ path ] = true

        table.insert( filterI, path )
    end

    -- adds each path to the filter.
    --
    -- rsync needs an entry for every step of the path,
    -- so for example the file d1/d2/d3/f1 needs the filters:
    -- 'd1/', 'd1/d2/', 'd1/d2/d3/' and 'd1/d2/d3/f1'
    for _, path in ipairs( paths )
    do
        if path and path ~= ''
        then
            addToFilter( path )

            local pp = string.match( path, '^(.*/)[^/]+/?' )

            while pp
            do
                addToFilter( pp )

                pp = string.match( pp, '^(.*/)[^/]+/?' )
            end
        end
    end
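
    -- For example (a sketch): after processing the single path
    -- 'd1/d2/f1', filterI holds { 'd1/d2/f1', 'd1/d2/', 'd1/' }.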

    log(
        'Normal',
        'Calling rsync with filter-list of new/modified files/dirs\n',
        table.concat( filterI, '\n' )
    )

    local delete = nil

    if config.delete == true or config.delete == 'running'
    then
        delete = { '--delete', '--ignore-errors' }
    end

    spawn(
        elist,
        config.rsync.binary,
        '<', table.concat( filterI, '\000' ),
        config.rsync._computed,
        '-r',
        delete,
        '--force',
        '--from0',
        '--include-from=-',
        '--exclude=*',
        config.source,
        target
    )
end
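
-- The spawned command line therefore looks roughly like (a sketch; the
-- --delete flags appear only when delete is enabled):
--
--     rsync <computed flags> -r --delete --ignore-errors --force --from0 \
--         --include-from=- --exclude=* SOURCE TARGET
--
-- with the NUL-separated filter list fed to rsync on stdin.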


--
-- Returns true for events that are neither Init nor Blanket.
--
local eventNotInitBlank =
    function
    (
        event
    )
        return event.etype ~= 'Init' and event.etype ~= 'Blanket'
    end


--
-- Decides how an event is batched: returns false if the event must not
-- go into a batch, true if it always belongs in the batch (moves and
-- deletes), or the file size in bytes so the caller can compare it
-- against batchSizeLimit.
--
local getBatchSize =
    function
    (
        event
    )
        -- print("getBatchSize", event, event.status, event.etype, event.pathname)
        if event.status == 'active' then
            return false
        end

        if event.etype == 'Init' or event.etype == 'Blanket' or event.etype == 'Full' then
            return false
        end

        -- moves and deletes always go into the batch
        if event.etype == 'Move' or event.etype == 'Delete' then
            return true
        end

        return lsyncd.get_file_size(event.sourcePath)
    end
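
-- For example (a sketch): a queued 'Modify' on a 5 MiB file yields
-- 5242880, a 'Move' yields true, and an event that is already being
-- transferred ('active') yields false.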


--
-- Spawns rsync for a list of events.
--
-- Exclusions are already handled by not having
-- events for them.
--
rsync.action = function
(
    inlet
)
    local sizeLimit = inlet.getConfig().batchSizeLimit

    if sizeLimit == nil then
        -- gets all events ready for syncing
        return run_action(inlet, inlet.getEvents(eventNotInitBlank))
    else
        -- batches all files under the size limit plus all deletes and moves
        local eventInBatch = function(event)
            if event.etype == "Full" then
                return false
            end
            local size = getBatchSize(event)
            if type(size) == "boolean" then
                return size
            elseif size == nil then
                return true
            end
            if size <= sizeLimit then
                return true
            end
            return false
        end

        -- indicator that one oversized event has already been returned
        local single_returned = false

        -- grabs one event at a time for a separate transfer
        local eventNoBatch = function(event)
            if event.etype == "Full" then
                return false
            end
            local size = getBatchSize(event)
            if type(size) ~= "number" then
                return false
            end
            if single_returned then
                return 'break'
            end
            if size > sizeLimit then
                single_returned = true
                return true
            end
            return false
        end

        -- spawns one transfer for all batched events
        local extralist = inlet.getEvents(eventInBatch)
        if extralist.size() > 0 then
            run_action(inlet, extralist)
        end

        -- spawns a separate transfer for each event above the size limit,
        -- as long as the process limits allow it
        while true do
            local cnt, maxcnt = lsyncd.get_process_info()
            if inlet.getSync().processes:size( ) >= inlet.getConfig().maxProcesses then
                log('Normal',
                    'Maximum processes for sync reached. Delaying large transfer for sync: '..inlet.getConfig().name)
                break
            elseif maxcnt and cnt >= maxcnt then
                log('Normal',
                    'Maximum process count reached. Delaying large transfer for sync: '..inlet.getConfig().name)
                break
            end
            local extralist = inlet.getEvents(eventNoBatch)

            -- no more events above the size limit
            if extralist.size() == 0 then break end
            run_action(inlet, extralist)
            -- allows the next oversized event to be grabbed
            single_returned = false
        end
    end
end


----
---- NOTE: This optimized version can be used once
---- https://bugzilla.samba.org/show_bug.cgi?id=12569
---- is fixed.
----
---- Spawns rsync for a list of events
----
---- Exclusions are already handled by not having
---- events for them.
----
--rsync.action = function
--(
--    inlet
--)
--    local config = inlet.getConfig( )
--
--    -- gets all events ready for syncing
--    local elist = inlet.getEvents( eventNotInitBlank )
--
--    -- gets the list of paths for the event list
--    -- deletes create multi match patterns
--    local paths = elist.getPaths( )
--
--    -- removes trailing slashes from dirs.
--    for k, v in ipairs( paths )
--    do
--        if string.byte( v, -1 ) == 47
--        then
--            paths[ k ] = string.sub( v, 1, -2 )
--        end
--    end
--
--    log(
--        'Normal',
--        'Calling rsync with filter-list of new/modified files/dirs\n',
--        table.concat( paths, '\n' )
--    )
--
--    local delete = nil
--
--    if config.delete == true
--    or config.delete == 'running'
--    then
--        delete = { '--delete-missing-args', '--ignore-errors' }
--    end
--
--    spawn(
--        elist,
--        config.rsync.binary,
--        '<', table.concat( paths, '\000' ),
--        config.rsync._computed,
--        delete,
--        '--force',
--        '--from0',
--        '--files-from=-',
--        config.source,
--        config.target
--    )
--end


--
-- Spawns the recursive startup sync.
--
rsync.init = function
(
    event
)
    return rsync.full(event)
end


--
-- Triggers a full sync event.
--
rsync.full = function
(
    event
)
    local config = event.config

    local inlet = event.inlet

    local excludes = inlet.getExcludes( )

    local filters = inlet.hasFilters( ) and inlet.getFilters( )

    local delete = {}

    local target = config.target

    if not target
    then
        if not config.host
        then
            error('Internal fail: neither target nor host is configured')
        end

        target = config.host .. ':' .. config.targetdir
    end

    -- fills in tunnel placeholders such as ${localport} in the target
    local substitudes = inlet.getSubstitutionData(event, {})
    target = substitudeCommands(target, substitudes)

    if config.delete == true
    or config.delete == 'startup'
    then
        delete = { '--delete', '--ignore-errors' }
    end

    if config.rsync.delete_excluded == true
    then
        table.insert( delete, '--delete-excluded' )
    end

    if not filters and #excludes == 0
    then
        -- starts rsync without any filters or excludes
        log(
            'Normal',
            'recursive full rsync: ',
            config.source,
            ' -> ',
            target
        )

        spawn(
            event,
            config.rsync.binary,
            delete,
            config.rsync._computed,
            '-r',
            config.source,
            target
        )

    elseif not filters
    then
        -- starts rsync providing an exclusion list
        -- on stdin
        local exS = table.concat( excludes, '\n' )

        log(
            'Normal',
            'recursive full rsync: ',
            config.source,
            ' -> ',
            target,
            ' excluding\n',
            exS
        )

        spawn(
            event,
            config.rsync.binary,
            '<', exS,
            '--exclude-from=-',
            delete,
            config.rsync._computed,
            '-r',
            config.source,
            target
        )

    else
        -- starts rsync providing a filter list
        -- on stdin
        local fS = table.concat( filters, '\n' )

        log(
            'Normal',
            'recursive full rsync: ',
            config.source,
            ' -> ',
            target,
            ' filtering\n',
            fS
        )

        spawn(
            event,
            config.rsync.binary,
            '<', fS,
            '--filter=. -',
            delete,
            config.rsync._computed,
            '-r',
            config.source,
            target
        )
    end
end
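
-- On startup the spawned command line thus looks roughly like
-- (a sketch for the exclusion-list case):
--
--     rsync --exclude-from=- --delete --ignore-errors <computed flags> -r \
--         SOURCE TARGET
--
-- with the newline-separated exclusion list fed to rsync on stdin.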


--
-- Prepares and checks a sync's configuration on startup.
--
rsync.prepare = function
(
    config, -- the configuration
    level,  -- additional error level for inherited use ( by rsyncssh )
    skipTarget -- used by rsyncssh, do not check for target
)
    -- first let default.prepare test the checkgauge
    default.prepare( config, level + 6 )

    if not skipTarget and not config.target
    then
        error(
            'default.rsync needs "target" configured',
            level
        )
    end

    -- checks if the _computed argument exists already
    if config.rsync._computed
    then
        error(
            'please do not use the internal rsync._computed parameter',
            level
        )
    end

    -- computes the rsync arguments into one list
    local crsync = config.rsync

    -- everything implied by archive = true
    local archiveFlags = {
        recursive = true,
        links = true,
        perms = true,
        times = true,
        group = true,
        owner = true,
        devices = true,
        specials = true,
        hard_links = false,
        acls = false,
        xattrs = false,
    }

    -- if archive is given the implications are filled in
    if crsync.archive
    then
        for k, v in pairs( archiveFlags )
        do
            if crsync[ k ] == nil
            then
                crsync[ k ] = v
            end
        end
    end

    crsync._computed = { true }

    --- @type any
    local computed = crsync._computed

    local computedN = 2

    local shortFlags = {
        acls = 'A',
        backup = 'b',
        checksum = 'c',
        compress = 'z',
        copy_dirlinks = 'k',
        copy_links = 'L',
        cvs_exclude = 'C',
        dry_run = 'n',
        executability = 'E',
        group = 'g',
        hard_links = 'H',
        ignore_times = 'I',
        ipv4 = '4',
        ipv6 = '6',
        keep_dirlinks = 'K',
        links = 'l',
        one_file_system = 'x',
        omit_dir_times = 'O',
        omit_link_times = 'J',
        owner = 'o',
        perms = 'p',
        protect_args = 's',
        prune_empty_dirs = 'm',
        quiet = 'q',
        sparse = 'S',
        times = 't',
        update = 'u',
        verbose = 'v',
        whole_file = 'W',
        xattrs = 'X',
    }

    local shorts = { '-' }
    local shortsN = 2

    if crsync._extra
    then
        for k, v in ipairs( crsync._extra )
        do
            computed[ computedN ] = v
            computedN = computedN + 1
        end
    end

    for k, flag in pairs( shortFlags )
    do
        if crsync[ k ]
        then
            shorts[ shortsN ] = flag
            shortsN = shortsN + 1
        end
    end

    if crsync.devices and crsync.specials
    then
        shorts[ shortsN ] = 'D'
        shortsN = shortsN + 1
    else
        if crsync.devices
        then
            computed[ computedN ] = '--devices'
            computedN = computedN + 1
        end

        if crsync.specials
        then
            computed[ computedN ] = '--specials'
            computedN = computedN + 1
        end
    end

    if crsync.append
    then
        computed[ computedN ] = '--append'
        computedN = computedN + 1
    end

    if crsync.append_verify
    then
        computed[ computedN ] = '--append-verify'
        computedN = computedN + 1
    end

    if crsync.backup_dir
    then
        computed[ computedN ] = '--backup-dir=' .. crsync.backup_dir
        computedN = computedN + 1
    end

    if crsync.bwlimit
    then
        computed[ computedN ] = '--bwlimit=' .. crsync.bwlimit
        computedN = computedN + 1
    end

    if crsync.chmod
    then
        computed[ computedN ] = '--chmod=' .. crsync.chmod
        computedN = computedN + 1
    end

    if crsync.chown
    then
        computed[ computedN ] = '--chown=' .. crsync.chown
        computedN = computedN + 1
    end

    if crsync.copy_unsafe_links
    then
        computed[ computedN ] = '--copy-unsafe-links'
        computedN = computedN + 1
    end

    if crsync.groupmap
    then
        computed[ computedN ] = '--groupmap=' .. crsync.groupmap
        computedN = computedN + 1
    end

    if crsync.existing
    then
        computed[ computedN ] = '--existing'
        computedN = computedN + 1
    end

    if crsync.inplace
    then
        computed[ computedN ] = '--inplace'
        computedN = computedN + 1
    end

    if crsync.password_file
    then
        computed[ computedN ] = '--password-file=' .. crsync.password_file
        computedN = computedN + 1
    end

    if crsync.rsh
    then
        computed[ computedN ] = '--rsh=' .. crsync.rsh
        computedN = computedN + 1
    end

    if crsync.rsync_path
    then
        computed[ computedN ] = '--rsync-path=' .. crsync.rsync_path
        computedN = computedN + 1
    end

    if crsync.suffix
    then
        computed[ computedN ] = '--suffix=' .. crsync.suffix
        computedN = computedN + 1
    end

    if crsync.temp_dir
    then
        computed[ computedN ] = '--temp-dir=' .. crsync.temp_dir
        computedN = computedN + 1
    end

    if crsync.timeout
    then
        computed[ computedN ] = '--timeout=' .. crsync.timeout
        computedN = computedN + 1
    end

    if crsync.usermap
    then
        computed[ computedN ] = '--usermap=' .. crsync.usermap
        computedN = computedN + 1
    end

    if shortsN ~= 2
    then
        computed[ 1 ] = table.concat( shorts, '' )
    else
        computed[ 1 ] = { }
    end
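
    -- For example (a sketch; the order of the short flags depends on
    -- pairs() iteration): with rsync = { archive = true, compress = true,
    -- _extra = { '--partial' } } plus the defaults below, computed ends up
    -- roughly as { '-lptgoszD', '--partial' }; it is later passed to spawn
    -- together with an explicit '-r'.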

    -- appends a '/' to the target if it is not present
    -- and does not end with ':' (home dir).
    if not skipTarget
    and string.sub( config.target, -1 ) ~= '/'
    and string.sub( config.target, -1 ) ~= ':'
    then
        config.target = config.target..'/'
    end
end


--
-- By default do deletes.
--
rsync.delete = true

--
-- Rsyncd exitcodes
--
rsync.exitcodes = default.rsyncExitCodes

--
-- Calls rsync with these default options.
--
rsync.rsync =
{
    -- the rsync binary to be called
    binary = 'rsync',
    links = true,
    times = true,
    protect_args = true
}
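
-- A user config can override any of these, for example (hypothetical
-- path): rsync = { binary = '/usr/local/bin/rsync', archive = true }
-- inside the sync{ default.rsync, ... } call.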

--
-- Default delay
--
rsync.delay = 15