Merge remote-tracking branch 'upstream/master'

This commit is contained in:
Birger Schmidt 2012-10-24 17:43:32 +11:00
commit 848f6e1f06
17 changed files with 5880 additions and 3248 deletions

View File

@ -1,3 +1,16 @@
23-10-2012: 2.1.0
fix: fail startup if settings.insist is false and one of the target hosts fails
enhancement: rsyncOpts has been replaced by rsync = {...} parameter lists
enhancement: default.rsyncssh has now a ssh = {...} parameter similar to default.rsync to
add option to ssh calls. Ditto for xargs = {...}
enhancement: the default.* implementations have a checkgauge erroring on any unknown
parameters to the sync{} call
enhancement: the delete parameter now takes: true, false, 'running' and 'startup'
improvement: Dennis Schridde provided various improvements for Lsyncd's autoconf building
change: Lsyncd is now Lua 5.2 compatible
change: Lsyncd now exits with exitcode 143 on TERM signal
change: settings is now used as a call like settings{...} instead of settings = {...}
04-04-2012: 2.0.7
fix: closed a memory leak due to incorrectly configured weak tables
fix: default.direct, do not use on OSX unrecognized option -t on modify

View File

@ -1,4 +1,5 @@
AUTOMAKE_OPTIONS = foreign
ACLOCAL_AMFLAGS = -I m4
CFLAGS += -Wall $(LUA_CFLAGS)
bin_PROGRAMS = lsyncd
lsyncd_SOURCES = lsyncd.h lsyncd.c lsyncd.lua default-rsync.lua
@ -37,7 +38,7 @@ EXTRA_DIST = doc/lsyncd.1.txt inotify.c fsevents.c bin2carray.lua \
doc/lsyncd.1: doc/lsyncd.1.txt
$(A2X) --format=manpage $<
CLEANFILES = runner.out defaults.out runner.c defaults.c doc/lsyncd.1
CLEANFILES = runner.out defaults.out runner.c defaults.c
# compiles the runner and the defaults into the binary
lsyncd_LDADD += runner.o defaults.o

View File

@ -1,11 +1,15 @@
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.
#AC_PREREQ(2.60)
AC_INIT(lsyncd, 2.0.7, axkibe@gmail.com)
AC_INIT(lsyncd, 2.1.0, axkibe@gmail.com)
AM_INIT_AUTOMAKE([foreign])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_SRCDIR([lsyncd.c])
AC_CONFIG_HEADER([config.h])
AM_INIT_AUTOMAKE(AC_PACKAGE_NAME, AC_PACKAGE_VERSION)
###
# Checks for programs.
AC_PROG_CC
AC_PROG_INSTALL
@ -17,25 +21,83 @@ if test x${A2X} = xno ; then
fi
###
# Checks for lua
PKG_CHECK_MODULES([LUA], [lua5.1 >= 5.1.3],,[
PKG_CHECK_MODULES([LUA], [lua51 >= 5.1.3],,[
PKG_CHECK_MODULES([LUA], [lua-5.1 >= 5.1.3],,[
PKG_CHECK_MODULES([LUA], [lua >= 5.1.3])
])
])
])
# Checks for Lua
AC_PATH_PROGS([LUA], [lua5.1 lua51 lua], [no])
if test x${LUA} = xno ; then
AC_MSG_ERROR([Program 'lua' is required])
# Try versioned Lua 5.2 first
PKG_CHECK_MODULES([LUA52], [lua5.2],,[
PKG_CHECK_MODULES([LUA52], [lua52],,[
PKG_CHECK_MODULES([LUA52], [lua-5.2],,[:])
])
])
AC_PATH_PROGS([LUA52], [lua5.2 lua52], [no])
AC_PATH_PROGS([LUAC52], [luac5.2 luac52], [no])
if test -z "${LUA52_PKG_ERRORS}" -a "${LUA52}" != no -a "${LUAC52}" != no ; then
LUA_VERSION="5.2"
LUA_CFLAGS="${LUA52_CFLAGS}"
LUA_LIBS="${LUA52_LIBS}"
LUA="${LUA52}"
LUAC="${LUAC52}"
else
# Fall back to versioned Lua 5.1
PKG_CHECK_MODULES([LUA51], [lua5.1 >= 5.1.3],,[
PKG_CHECK_MODULES([LUA51], [lua51 >= 5.1.3],,[
PKG_CHECK_MODULES([LUA51], [lua-5.1 >= 5.1.3],,[:])
])
])
AC_PATH_PROGS([LUA51], [lua5.1 lua51], [no])
AC_PATH_PROGS([LUAC51], [luac5.1 luac51], [no])
if test -z "${LUA51_PKG_ERRORS}" -a "${LUA51}" != no -a "${LUAC51}" != no ; then
LUA_VERSION="5.1"
LUA_CFLAGS="${LUA51_CFLAGS}"
LUA_LIBS="${LUA51_LIBS}"
LUA="${LUA51}"
LUAC="${LUAC51}"
else
# Try any Lua now
PKG_CHECK_MODULES([LUA], [lua >= 5.1.3],,[:])
AC_PATH_PROG([LUA], [lua], [no])
AC_PATH_PROG([LUAC], [luac], [no])
if test -z "${LUA_PKG_ERRORS}" -a "${LUA}" != no -a "${LUAC}" != no ; then
LUA_VERSION="(unknown version)"
else
AC_MSG_ERROR([Need a Lua toolchain with matching versions ('lua' library and 'lua' and 'luac' programs)])
fi
fi
fi
AC_PATH_PROGS([LUAC], [luac5.1 luac51 luac], [no])
if test x${LUAC} = xno ; then
AC_MSG_ERROR([Program 'luac' is required])
_LIBS="${LIBS}"
_CFLAGS="${CFLAGS}"
_CPPFLAGS="${CPPFLAGS}"
LIBS="${LIBS} ${LUA_LIBS}"
CFLAGS="${CFLAGS} ${LUA_CFLAGS}"
CPPFLAGS="${CPPFLAGS} ${LUA_CFLAGS}"
AC_MSG_CHECKING([whether Lua library was compiled with compat support])
AC_LINK_IFELSE(
[AC_LANG_PROGRAM([
#define LUA_COMPAT_ALL
#include <lauxlib.h>
],[luaL_register(0,0,0);])],
[lua_compat_support=yes],
[lua_compat_support=no]
)
AC_MSG_RESULT([${lua_compat_support}])
if test "x${lua_compat_support}" = xno ; then
AC_MSG_ERROR([Lua library needs to be compiled with compat support])
fi
LIBS="${_LIBS}"
CFLAGS="${_CFLAGS}"
CPPFLAGS="${_CPPFLAGS}"
unset _LIBS _CFLAGS _CPPFLAGS
AX_SUBST_L([LUA_CFLAGS], [LUA_LIBS], [LUA], [LUAC])
###
# Checks for header files.
AC_CHECK_HEADERS([sys/inotify.h])
@ -71,4 +133,7 @@ AM_CONDITIONAL([FSEVENTS],
AC_CONFIG_FILES([Makefile])
AC_OUTPUT
AC_MSG_NOTICE([
Summary:
Using Lua ${LUA_VERSION}
])

View File

@ -9,6 +9,7 @@
-- Note:
-- this is in fact just a configuration using Layer 1 configuration
-- like any other. It only gets compiled into the binary by default.
--
-- You can simply use a modified one, by copying everything into a
-- config file of yours and name it differently.
--
@ -17,158 +18,190 @@
--
--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if not default then error('default not loaded'); end
if not default.rsync then error('default-direct (currently) needs default.rsync loaded'); end
if default.direct then error('default-direct already loaded'); end
if not default then
error('default not loaded')
end
default.direct = {
-----
-- Spawns rsync for a list of events
if not default.rsync then
error('default-direct (currently) needs default.rsync loaded')
end
if default.direct then
error('default-direct already loaded')
end
local direct = { }
default.direct = direct
--
-- known configuration parameters
--
direct.checkgauge = {
--
action = function(inlet)
-- gets all events ready for syncing
local event, event2 = inlet.getEvent()
local config = inlet.getConfig()
-- inherits rsync config params
--
default.rsync.checkgauge,
if event.etype == 'Create' then
if event.isdir then
spawn(
event,
'/bin/mkdir',
event.targetPath
)
else
-- 'cp -t', not supported on OSX
spawn(
event,
'/bin/cp',
event.sourcePath,
event.targetPathdir
)
end
elseif event.etype == 'Modify' then
if event.isdir then
error("Do not know how to handle 'Modify' on dirs")
end
spawn(event,
rsyncExitCodes = true,
onMove = true,
}
--
-- Spawns rsync for a list of events
--
direct.action = function(inlet)
-- gets all events ready for syncing
local event, event2 = inlet.getEvent()
local config = inlet.getConfig()
if event.etype == 'Create' then
if event.isdir then
spawn(
event,
'/bin/mkdir',
event.targetPath
)
else
-- 'cp -t', not supported on OSX
spawn(
event,
'/bin/cp',
event.sourcePath,
event.targetPathdir
)
elseif event.etype == 'Delete' then
if not config.delete then
inlet.discardEvent(event)
end
end
elseif event.etype == 'Modify' then
if event.isdir then
error("Do not know how to handle 'Modify' on dirs")
end
spawn(event,
'/bin/cp',
event.sourcePath,
event.targetPathdir
)
elseif event.etype == 'Delete' then
local tp = event.targetPath
-- extra security check
if tp == '' or tp == '/' or not tp then
error('Refusing to erase your harddisk!')
end
spawn(event, '/bin/rm', '-rf', tp)
elseif event.etype == 'Move' then
local tp = event.targetPath
-- extra security check
if tp == '' or tp == '/' or not tp then
error('Refusing to erase your harddisk!')
end
local command = '/bin/mv $1 $2 || /bin/rm -rf $1'
if not config.delete then command = '/bin/mv $1 $2'; end
spawnShell(
event,
command,
event.targetPath,
event2.targetPath)
else
log('Warn', 'ignored an event of type "',event.etype, '"')
if
config.delete ~= true and
config.delete ~= 'running'
then
inlet.discardEvent(event)
return
end
end,
-----
-- Called when collecting a finished child process
--
collect = function(agent, exitcode)
local config = agent.config
local tp = event.targetPath
if not agent.isList and agent.etype == 'Init' then
local rc = config.rsyncExitCodes[exitcode]
if rc == 'ok' then
log('Normal', 'Startup of "',agent.source,'" finished: ', exitcode)
elseif rc == 'again' then
if settings.insist then
log('Normal', 'Retrying startup of "',agent.source,'": ', exitcode)
else
log('Error', 'Temporary or permanent failure on startup of "',
agent.source, '". Terminating since "insist" is not set.');
terminate(-1) -- ERRNO
end
elseif rc == 'die' then
log('Error', 'Failure on startup of "',agent.source,'": ', exitcode)
-- extra security check
if tp == '' or tp == '/' or not tp then
error('Refusing to erase your harddisk!')
end
spawn(event, '/bin/rm', '-rf', tp)
elseif event.etype == 'Move' then
local tp = event.targetPath
-- extra security check
if tp == '' or tp == '/' or not tp then
error('Refusing to erase your harddisk!')
end
local command = '/bin/mv $1 $2 || /bin/rm -rf $1'
if
config.delete ~= true and
config.delete ~= 'running'
then
command = '/bin/mv $1 $2'
end
spawnShell(
event,
command,
event.targetPath,
event2.targetPath
)
else
log('Warn', 'ignored an event of type "',event.etype, '"')
inlet.discardEvent(event)
end
end
--
-- Called when collecting a finished child process
--
direct.collect = function(agent, exitcode)
local config = agent.config
if not agent.isList and agent.etype == 'Init' then
local rc = config.rsyncExitCodes[exitcode]
if rc == 'ok' then
log('Normal', 'Startup of "',agent.source,'" finished: ', exitcode)
elseif rc == 'again' then
if settings.insist then
log('Normal', 'Retrying startup of "',agent.source,'": ', exitcode)
else
log('Error', 'Unknown exitcode on startup of "', agent.source,': "',exitcode)
rc = 'die'
log('Error', 'Temporary or permanent failure on startup of "',
agent.source, '". Terminating since "insist" is not set.');
terminate(-1) -- ERRNO
end
return rc
elseif rc == 'die' then
log('Error', 'Failure on startup of "',agent.source,'": ', exitcode)
else
log('Error', 'Unknown exitcode on startup of "', agent.source,': "',exitcode)
rc = 'die'
end
return rc
end
-- everything else is just as it is,
-- there is no network to retry something.
return
end,
-- everything else is just as it is,
-- there is no network to retry something.
return
end
-----
-- Spawns the recursive startup sync
-- (currently) identical to default rsync.
--
init = default.rsync.init,
--
-- Spawns the recursive startup sync
-- (currently) identical to default rsync.
--
direct.init = default.rsync.init
-----
-- Checks the configuration.
--
prepare = function(config)
if not config.target then
error('default.direct needs "target".', 4)
end
--
-- Checks the configuration.
--
direct.prepare = function( config, level )
if config.rsyncOps then
error('did you mean rsyncOpts with "t"?', 4)
end
end,
default.rsync.prepare( config, level + 1 )
-----
-- Default delay is very short.
--
delay = 1,
end
------
-- Let the core not split move events.
--
onMove = true,
--
-- Default delay is very short.
--
direct.delay = 1
-----
-- The rsync binary called.
--
rsyncBinary = '/usr/bin/rsync',
--
-- Let the core not split move events.
--
direct.onMove = true
-----
-- For startup sync
--
rsyncOpts = '-lts',
--
-- Rsync configuration for startup.
--
direct.rsync = default.rsync.rsync
direct.rsyncExitCodes = default.rsyncExitCodes
-----
-- By default do deletes.
--
delete = true,
--
-- By default do deletes.
--
direct.delete = true
-----
-- rsync exit codes
--
rsyncExitCodes = default.rsyncExitCodes,
--
-- On many systems multiple disk operations just rather slow down
-- than speed up.
-----
-- On many systems multiple disk operations just rather slow down
-- than speed up.
maxProcesses = 1,
}
direct.maxProcesses = 1

View File

@ -15,265 +15,503 @@
--
--~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if not default then
error('default not loaded')
error( 'default not loaded' )
end
if default.rsync then
error('default-rsync already loaded')
error( 'default-rsync already loaded' )
end
default.rsync = {
-----
-- Spawns rsync for a list of events
local rsync = { }
default.rsync = rsync
-- uses default collect
--
-- used to ensure there aren't typos in the keys
--
rsync.checkgauge = {
-- unsets default user action handlers
onCreate = false,
onModify = false,
onDelete = false,
onStartup = false,
onMove = false,
delete = true,
exclude = true,
target = true,
rsync = {
-- rsync binary
binary = true,
-- rsync shortflags
verbose = true,
quiet = true,
checksum = true,
update = true,
links = true,
copy_links = true,
hard_links = true,
perms = true,
executability = true,
acls = true,
xattrs = true,
owner = true,
group = true,
times = true,
sparse = true,
dry_run = true,
whole_file = true,
one_file_system = true,
prune_empty_dirs = true,
ignore_times = true,
compress = true,
cvs_exclude = true,
protect_args = true,
ipv4 = true,
ipv6 = true,
-- further rsync options
rsh = true,
rsync_path = true,
temp_dir = true,
},
}
--
-- Spawns rsync for a list of events
--
-- Exclusions are already handled by not having
-- events for them.
--
rsync.action = function( inlet )
--
action = function(inlet)
-- gets all events ready for syncing
local elist = inlet.getEvents(
function(event)
return event.etype ~= 'Init' and event.etype ~= 'Blanket'
-- gets all events ready for syncing
--
local elist = inlet.getEvents(
function(event)
return event.etype ~= 'Init' and event.etype ~= 'Blanket'
end
)
--
-- Replaces what rsync would consider filter rules by literals
--
local function sub( p )
if not p then
return
end
return p:
gsub( '%?', '\\?' ):
gsub( '%*', '\\*' ):
gsub( '%[', '\\[' ):
gsub( '%]', '\\]' )
end
--
-- Gets the list of paths for the event list
--
-- Deletes create multi match patterns
--
local paths = elist.getPaths(
function( etype, path1, path2 )
if string.byte( path1, -1 ) == 47 and etype == 'Delete' then
return sub( path1 )..'***', sub( path2 )
else
return sub( path1 ), sub( path2 )
end
end
)
--
-- stores all filters by integer index
--
local filterI = { }
--
-- Stores all filters with path index
--
local filterP = { }
--
-- Adds one path to the filter
--
local function addToFilter( path )
if filterP[ path ] then
return
end
filterP[ path ] = true
table.insert( filterI, path )
end
--
-- Adds a path to the filter.
--
-- Rsync needs to have entries for all steps in the path,
-- so the file for example d1/d2/d3/f1 needs following filters:
-- 'd1/', 'd1/d2/', 'd1/d2/d3/' and 'd1/d2/d3/f1'
--
for _, path in ipairs( paths ) do
if path and path ~= '' then
addToFilter(path)
local pp = string.match( path, '^(.*/)[^/]+/?' )
while pp do
addToFilter(pp)
pp = string.match( pp, '^(.*/)[^/]+/?' )
end
end
end
local filterS = table.concat( filterI, '\n' )
local filter0 = table.concat( filterI, '\000' )
log(
'Normal',
'Calling rsync with filter-list of new/modified files/dirs\n',
filterS
)
local config = inlet.getConfig( )
local delete = nil
if config.delete == true or config.delete == 'running' then
delete = { '--delete', '--ignore-errors' }
end
spawn(
elist,
config.rsync.binary,
'<', filter0,
config.rsync._computed,
'-r',
delete,
'--force',
'--from0',
'--include-from=-',
'--exclude=*',
config.source,
config.target
)
end
--
-- Spawns the recursive startup sync
--
rsync.init = function(event)
local config = event.config
local inlet = event.inlet
local excludes = inlet.getExcludes( )
local delete = nil
local target = config.target
if not target then
if not config.host then
error('Internal fail, Neither target nor host is configured')
end
target = config.host .. ':' .. config.targetdir
end
if config.delete == true or config.delete == 'startup' then
delete = { '--delete', '--ignore-errors' }
end
if #excludes == 0 then
-- start rsync without any excludes
log(
'Normal',
'recursive startup rsync: ',
config.source,
' -> ',
target
)
-----
-- replaces filter rule by literals
--
local function sub(p)
if not p then
return
end
return p:gsub('%?', '\\?'):
gsub('%*', '\\*'):
gsub('%[', '\\['):
gsub('%]', '\\]')
end
local paths = elist.getPaths(
function(etype, path1, path2)
if string.byte(path1, -1) == 47 and etype == 'Delete' then
return sub(path1)..'***', sub(path2)
else
return sub(path1), sub(path2)
end
end)
-- stores all filters with integer index
-- local filterI = inlet.getExcludes()
local filterI = {}
-- stores all filters with path index
local filterP = {}
-- adds one entry into the filter
-- @param path ... path to add
-- @param leaf ... true if this the original path
-- false if its a parent
local function addToFilter(path)
if filterP[path] then
return
end
filterP[path]=true
table.insert(filterI, path)
end
-- adds a path to the filter, for rsync this needs
-- to have entries for all steps in the path, so the file
-- d1/d2/d3/f1 needs filters
-- 'd1/', 'd1/d2/', 'd1/d2/d3/' and 'd1/d2/d3/f1'
for _, path in ipairs(paths) do
if path and path ~= '' then
addToFilter(path)
local pp = string.match(path, '^(.*/)[^/]+/?')
while pp do
addToFilter(pp)
pp = string.match(pp, '^(.*/)[^/]+/?')
end
end
end
local filterS = table.concat(filterI, '\n')
local filter0 = table.concat(filterI, '\000')
log('Normal', 'Calling rsync with filter-list of new/modified files/dirs\n', filterS)
local config = inlet.getConfig()
local delete = nil
if config.delete then
delete = { '--delete', '--ignore-errors' }
end
spawn(elist, config.rsync.binary,
'<', filter0,
spawn(
event,
config.rsync.binary,
delete,
config.rsync._computed,
'-r',
delete,
'--force',
'--from0',
'--include-from=-',
'--exclude=*',
config.source,
config.target)
end,
target
)
else
-- start rsync providing an exclude list
-- on stdin
local exS = table.concat( excludes, '\n' )
log(
'Normal',
'recursive startup rsync: ',
config.source,
' -> ',
target,
' excluding\n',
exS
)
spawn(
event,
config.rsync.binary,
'<', exS,
'--exclude-from=-',
delete,
config.rsync._computed,
'-r',
config.source,
target
)
end
end
--
-- Prepares and checks a syncs configuration on startup.
--
rsync.prepare = function(
config, -- the configuration
level, -- additional error level for inherited use ( by rsyncssh )
skipTarget -- used by rsyncssh, do not check for target
)
-----
-- Spawns the recursive startup sync
--
init = function(event)
local config = event.config
local inlet = event.inlet
local excludes = inlet.getExcludes()
local delete = nil
if config.delete then
delete = { '--delete', '--ignore-errors' }
end
if #excludes == 0 then
log('Normal', 'recursive startup rsync: ', config.source, ' -> ', config.target)
spawn(event, config.rsync.binary,
delete,
config.rsync._computed,
'-r',
config.source,
config.target)
else
local exS = table.concat(excludes, '\n')
log('Normal', 'recursive startup rsync: ',config.source,
' -> ',config.target,' excluding\n',exS)
spawn(event, config.rsync.binary,
'<', exS,
'--exclude-from=-',
delete,
config.rsync._computed,
'-r',
config.source,
config.target)
end
end,
-----
-- Checks the configuration.
-- First let default.prepare test the checkgauge
--
prepare = function(config)
if not config.target then
error('default.rsync needs "target" configured', 4)
end
default.prepare( config, level + 6 )
if config.rsyncOps then
error('"rsyncOps" is outdated please use the new rsync = { ... } syntax.', 4)
end
if not skipTarget and not config.target then
error(
'default.rsync needs "target" configured',
level
)
end
if config.rsyncOpts and config.rsync._extra then
error(
'"rsyncOpts" is outdated in favor of the new rsync = { ... } syntax\n"' +
'for which you provided the _extra attribute as well.\n"' +
'Please remove rsyncOpts from your config.',
4
)
end
if config.rsyncOps then
error(
'"rsyncOps" is outdated please use the new rsync = { ... } syntax.',
level
)
end
if config.rsyncOpts then
log(
'Warn',
'"rsyncOpts" is outdated. Please use the new rsync = { ... } syntax."',
event.etype, '"'
)
if config.rsyncOpts and config.rsync._extra then
error(
'"rsyncOpts" is outdated in favor of the new rsync = { ... } syntax\n"' +
'for which you provided the _extra attribute as well.\n"' +
'Please remove rsyncOpts from your config.',
level
)
end
config.rsync._extra = config.rsyncOpts
if config.rsyncOpts then
log(
'Warn',
'"rsyncOpts" is outdated. Please use the new rsync = { ... } syntax."'
)
config.rsyncOpts = nil
end
config.rsync._extra = config.rsyncOpts
config.rsyncOpts = nil
end
if config.rsyncBinary and config.rsync.binary then
error(
'"rsyncBinary is outdated in favor of the new rsync = { ... } syntax\n"'+
'for which you provided the binary attribute as well.\n"' +
"Please remove rsyncBinary from your config.'",
4
)
end
if config.rsyncBinary and config.rsync.binary then
error(
'"rsyncBinary is outdated in favor of the new rsync = { ... } syntax\n"'+
'for which you provided the binary attribute as well.\n"' +
"Please remove rsyncBinary from your config.'",
level
)
end
if config.rsyncBinary then
log(
'Warn',
'"rsyncBinary" is outdated. Please use the new rsync = { ... } syntax."',
event.etype, '"'
)
if config.rsyncBinary then
log(
'Warn',
'"rsyncBinary" is outdated. Please use the new rsync = { ... } syntax."'
)
config.rsync.binary = config.rsyncBinary
config.rsync.binary = config.rsyncBinary
config.rsyncOpts = nil
end
config.rsyncOpts = nil
end
-- checks if the _computed argument exists already
if config.rsync._computed then
error(
'please do not use the internal rsync._computed parameter',
level
)
end
-- checks if the _computed argument does not exist already
if config.rsync._computed then
error(
'please do not use the internal rsync._computed parameter',
4
)
end
--
-- computes the rsync arguments into one list
--
local crsync = config.rsync;
-- computes the rsync arguments into one list
local rsync = config.rsync;
rsync._computed = { true }
local computed = rsync._computed
local shorts = { '-' }
--
-- everything implied by archive = true
--
local archiveFlags = {
recursive = true,
links = true,
perms = true,
times = true,
group = true,
owner = true,
devices = true,
specials = true,
hard_links = false,
acls = false,
xattrs = false,
}
if config.rsync._extra then
for k, v in ipairs( config.rsync._extra ) do
computed[ k + 1 ] = v
--
-- if archive given the implications are filled in
--
if crsync.archive then
for k, v in pairs( archiveFlags ) do
if crsync[ k ] == nil then
crsync[ k ] = v
end
end
end
if rsync.links then
shorts[ #shorts + 1 ] = 'l'
crsync._computed = { true }
local computed = crsync._computed
local computedN = 1
local shortFlags = {
verbose = 'v',
quiet = 'q',
checksum = 'c',
update = 'u',
links = 'l',
copy_links = 'L',
hard_links = 'H',
perms = 'p',
executability = 'E',
acls = 'A',
xattrs = 'X',
owner = 'o',
group = 'g',
times = 't',
sparse = 'S',
dry_run = 'n',
whole_file = 'W',
one_file_system = 'x',
prune_empty_dirs = 'm',
ignore_times = 'I',
compress = 'z',
cvs_exclude = 'C',
protect_args = 's',
ipv4 = '4',
ipv6 = '6'
}
local shorts = { '-' }
local shortsN = 2
if crsync._extra then
for k, v in ipairs( crsync._extra ) do
computed[ computedN ] = v
computedN = computedN + 1
end
end
for k, flag in pairs( shortFlags ) do
if crsync[ k ] then
shorts[ shortsN ] = flag
shortsN = shortsN + 1
end
end
if crsync.devices and crsync.specials then
shorts[ shortsN ] = 'D'
shortsN = shortsN + 1
else
if crsync.devices then
computed[ computedN ] = '--devices'
computedN = computedN + 1
end
if rsync.times then
shorts[ #shorts + 1 ] = 't'
if crsync.specials then
computed[ computedN ] = '--specials'
computedN = computedN + 1
end
end
if rsync.protectArgs then
shorts[ #shorts + 1 ] = 's'
end
if crsync.rsh then
computed[ computedN ] = '--rsh=' + crsync.rsh
computedN = computedN + 1
end
if #shorts ~= 1 then
computed[ 1 ] = table.concat( shorts, '' )
else
computed[ 1 ] = { }
end
if crsync.rsync_path then
computed[ computedN ] = '--rsync-path=' + crsync.rsync_path
computedN = computedN + 1
end
-- appends a / to target if not present
if string.sub(config.target, -1) ~= '/' then
config.target = config.target..'/'
end
end,
if crsync.temp_dir then
computed[ computedN ] = '--temp-dir=' + crsync.temp_dir
computedN = computedN + 1
end
-----
-- rsync uses default collect
----
if shortsN ~= 2 then
computed[ 1 ] = table.concat( shorts, '' )
else
computed[ 1 ] = { }
end
-----
-- By default do deletes.
--
delete = true,
-- appends a / to target if not present
if not skipTarget and string.sub(config.target, -1) ~= '/' then
config.target = config.target..'/'
end
-----
-- Calls rsync with this default short opts.
--
rsync = {
-- The rsync binary to be called.
binary = '/usr/bin/rsync',
links = true,
times = true,
protectArgs = true
},
end
-----
-- Exit codes for rsync.
--
exitcodes = default.rsyncExitCodes,
-----
-- Default delay
--
delay = 15,
--
-- By default do deletes.
--
rsync.delete = true
--
-- Rsyncd exitcodes
--
rsync.exitcodes = default.rsyncExitCodes
--
-- Calls rsync with this default options
--
rsync.rsync = {
-- The rsync binary to be called.
binary = '/usr/bin/rsync',
links = true,
times = true,
protect_args = true
}
--
-- Default delay
--
rsync.delay = 15

View File

@ -16,344 +16,376 @@
--
if not default then
error('default not loaded');
error( 'default not loaded' );
end
if not default.rsync then
error( 'default.rsync not loaded' );
end
if default.rsyncssh then
error('default-rsyncssh already loaded');
error( 'default-rsyncssh already loaded' );
end
default.rsyncssh = {
--
-- rsyncssh extends default.rsync
--
local rsyncssh = { default.rsync }
default.rsyncssh = rsyncssh
--
-- Spawns rsync for a list of events
--
action = function(inlet)
--
-- used to ensure there aren't typos in the keys
--
rsyncssh.checkgauge = {
local event, event2 = inlet.getEvent()
local config = inlet.getConfig()
-- unsets the inherited value of from default.rsync
target = false,
onMove = true,
-- makes move local on host
-- if fails deletes the source...
if event.etype == 'Move' then
log('Normal', 'Moving ',event.path,' -> ',event2.path)
spawn(event, '/usr/bin/ssh',
config.host,
'mv',
'\"' .. config.targetdir .. event.path .. '\"',
'\"' .. config.targetdir .. event2.path .. '\"',
'||', 'rm', '-rf',
'\"' .. config.targetdir .. event.path .. '\"')
-- rsyncssh users host and targetdir
host = true,
targetdir = true,
sshExitCodes = true,
rsyncExitCodes = true,
-- ssh settings
ssh = {
binary = true,
port = true,
_extra = true
},
-- xargs settings
xargs = {
binary = true,
delimiter = true,
_extra = true
}
}
--
-- Spawns rsync for a list of events
--
rsyncssh.action = function( inlet )
local event, event2 = inlet.getEvent()
local config = inlet.getConfig()
-- makes move local on target host
-- if the move fails, it deletes the source
if event.etype == 'Move' then
log('Normal', 'Moving ',event.path,' -> ',event2.path)
spawn(
event,
config.ssh.binary,
config.ssh._computed,
config.host,
'mv',
'\"' .. config.targetdir .. event.path .. '\"',
'\"' .. config.targetdir .. event2.path .. '\"',
'||', 'rm', '-rf',
'\"' .. config.targetdir .. event.path .. '\"')
return
end
-- uses ssh to delete files on remote host
-- instead of constructing rsync filters
if event.etype == 'Delete' then
if
config.delete ~= true and
config.delete ~= 'running'
then
inlet.discardEvent(event)
return
end
-- uses ssh to delete files on remote host
-- instead of constructing rsync filters
if event.etype == 'Delete' then
if not config.delete then
inlet.discardEvent(event)
return
end
local elist = inlet.getEvents(
function(e)
return e.etype == 'Delete'
end
)
local paths = elist.getPaths(
function(etype, path1, path2)
if path2 then
return config.targetdir..path1, config.targetdir..path2
else
return config.targetdir..path1
end
end
)
for _, v in pairs(paths) do
if string.match(v, '^%s*/+%s*$') then
log('Error', 'refusing to `rm -rf /` the target!')
terminate(-1) -- ERRNO
end
end
log('Normal', 'Deleting list\n', table.concat(paths, '\n'))
local params = {}
if config.port then
params[#params + 1] = 'p'
params[#params + 1] = config.port
end
spawn(
elist,
config.ssh.binary,
'<', table.concat(paths, config.xargs.delimiter),
params,
config.ssh._extra,
config.host,
config.xargs.binary,
config.xargs._extra
)
return
end
-- for everything else spawn a rsync
-- gets all other deletes ready to be
-- executed
local elist = inlet.getEvents(
function(e)
-- TODO use a table
return e.etype ~= 'Move' and
e.etype ~= 'Delete' and
e.etype ~= 'Init' and
e.etype ~= 'Blanket'
function( e )
return e.etype == 'Delete'
end
)
local paths = elist.getPaths()
-- returns the paths of the delete list
local paths = elist.getPaths(
function( etype, path1, path2 )
if path2 then
return config.targetdir..path1, config.targetdir..path2
else
return config.targetdir..path1
end
end
)
-- removes trailing slashes from dirs.
for k, v in ipairs(paths) do
if string.byte(v, -1) == 47 then
paths[k] = string.sub(v, 1, -2)
-- ensures none of the paths is '/'
for _, v in pairs( paths ) do
if string.match(v, '^%s*/+%s*$') then
log('Error', 'refusing to `rm -rf /` the target!')
terminate(-1) -- ERRNO
end
end
local sPaths = table.concat(paths, '\n')
local zPaths = table.concat(paths, '\000')
log('Normal', 'Rsyncing list\n', sPaths)
log(
'Normal',
'Deleting list\n',
table.concat( paths, '\n' )
)
local params = { }
spawn(
elist,
config.rsyncBinary,
'<', zPaths,
config.rsyncOpts,
'--from0',
'--files-from=-',
config.source,
config.host .. ':' .. config.targetdir
config.ssh.binary,
'<', table.concat(paths, config.xargs.delimiter),
params,
config.ssh._computed,
config.host,
config.xargs.binary,
config.xargs._extra
)
end,
-----
-- Called when collecting a finished child process
return
end
--
collect = function(agent, exitcode)
-- for everything else a rsync is spawned
--
local elist = inlet.getEvents(
function(e)
-- TODO use a table
return e.etype ~= 'Move' and
e.etype ~= 'Delete' and
e.etype ~= 'Init' and
e.etype ~= 'Blanket'
end
)
local config = agent.config
if not agent.isList and agent.etype == 'Init' then
local rc = config.rsyncExitCodes[exitcode]
if rc == 'ok' then
log('Normal', 'Startup of "',agent.source,'" finished: ', exitcode)
elseif rc == 'again' then
if settings.insist then
log('Normal', 'Retrying startup of "',agent.source,'": ', exitcode)
else
log('Error', 'Temporary or permanent failure on startup of "',
agent.source, '". Terminating since "insist" is not set.');
terminate(-1) -- ERRNO
end
elseif rc == 'die' then
log('Error', 'Failure on startup of "',agent.source,'": ', exitcode)
else
log('Error', 'Unknown exitcode on startup of "', agent.source,': "',exitcode)
rc = 'die'
end
return rc
local paths = elist.getPaths( )
--
-- removes trailing slashes from dirs.
--
for k, v in ipairs( paths ) do
if string.byte(v, -1) == 47 then
paths[k] = string.sub(v, 1, -2)
end
if agent.isList then
end
local rc = config.rsyncExitCodes[exitcode]
local sPaths = table.concat(paths, '\n')
local zPaths = table.concat(paths, '\000')
if rc == 'ok' then
log('Normal', 'Finished (list): ',exitcode)
elseif rc == 'again' then
log('Normal', 'Retrying (list): ',exitcode)
elseif rc == 'die' then
log('Error', 'Failure (list): ', exitcode)
log('Normal', 'Rsyncing list\n', sPaths)
spawn(
elist,
config.rsync.binary,
'<', zPaths,
config.rsync._computed,
'--from0',
'--files-from=-',
config.source,
config.host .. ':' .. config.targetdir
)
end
-----
-- Called when collecting a finished child process
--
rsyncssh.collect = function( agent, exitcode )
local config = agent.config
if not agent.isList and agent.etype == 'Init' then
local rc = config.rsyncExitCodes[exitcode]
if rc == 'ok' then
log('Normal', 'Startup of "',agent.source,'" finished: ', exitcode)
elseif rc == 'again' then
if uSettings.insist then
log('Normal', 'Retrying startup of "',agent.source,'": ', exitcode)
else
log('Error', 'Unknown exitcode (list): ',exitcode)
rc = 'die'
log('Error', 'Temporary or permanent failure on startup of "',
agent.source, '". Terminating since "insist" is not set.');
terminate(-1) -- ERRNO
end
return rc
elseif rc == 'die' then
log('Error', 'Failure on startup of "',agent.source,'": ', exitcode)
else
local rc = config.sshExitCodes[exitcode]
if rc == 'ok' then
log('Normal', 'Finished ',agent.etype,' ',agent.sourcePath,': ',exitcode)
elseif rc == 'again' then
log('Normal', 'Retrying ',agent.etype,' ',agent.sourcePath,': ',exitcode)
elseif rc == 'die' then
log('Normal', 'Failure ',agent.etype,' ',agent.sourcePath,': ',exitcode)
else
log('Error', 'Unknown exitcode ',agent.etype,' ',agent.sourcePath,': ',exitcode)
rc = 'die'
end
return rc
log('Error', 'Unknown exitcode on startup of "', agent.source,': "',exitcode)
rc = 'die'
end
end,
return rc
--
-- spawns the recursive startup sync
--
init = function(event)
end
local config = event.config
local inlet = event.inlet
local excludes = inlet.getExcludes()
local target = config.host .. ':' .. config.targetdir
local delete = nil
if config.delete then
delete = { '--delete', '--ignore-errors' };
end
if #excludes == 0 then
log('Normal', 'Recursive startup rsync: ',config.source,' -> ',target)
spawn(
event, config.rsyncBinary,
delete,
'-r',
config.rsyncOpts,
config.source,
target
)
if agent.isList then
local rc = config.rsyncExitCodes[exitcode]
if rc == 'ok' then
log('Normal', 'Finished (list): ',exitcode)
elseif rc == 'again' then
log('Normal', 'Retrying (list): ',exitcode)
elseif rc == 'die' then
log('Error', 'Failure (list): ', exitcode)
else
local exS = table.concat(excludes, '\n')
log('Normal', 'Recursive startup rsync: ',config.source,
' -> ',target,' with excludes.')
spawn(
event, config.rsyncBinary,
'<', exS,
'--exclude-from=-',
delete,
'-r',
config.rsyncOpts,
config.source,
target
)
log('Error', 'Unknown exitcode (list): ',exitcode)
rc = 'die'
end
end,
return rc
else
local rc = config.sshExitCodes[exitcode]
--
-- checks the configuration.
--
prepare = function(config)
if not config.host then
error('default.rsyncssh needs "host" configured', 4)
if rc == 'ok' then
log('Normal', 'Finished ',agent.etype,' ',agent.sourcePath,': ',exitcode)
elseif rc == 'again' then
log('Normal', 'Retrying ',agent.etype,' ',agent.sourcePath,': ',exitcode)
elseif rc == 'die' then
log('Normal', 'Failure ',agent.etype,' ',agent.sourcePath,': ',exitcode)
else
log('Error', 'Unknown exitcode ',agent.etype,' ',agent.sourcePath,': ',exitcode)
rc = 'die'
end
if not config.targetdir then
error('default.rsyncssh needs "targetdir" configured', 4)
return rc
end
end
--
-- checks the configuration.
--
rsyncssh.prepare = function( config, level )
default.rsync.prepare( config, level + 1, true )
if not config.host then
error(
'default.rsyncssh needs "host" configured',
level
)
end
if not config.targetdir then
error(
'default.rsyncssh needs "targetdir" configured',
level
)
end
--
-- computes the ssh options
--
if config.ssh._computed then
error(
'please do not use the internal rsync._computed parameter',
level
)
end
local cssh = config.rsync;
cssh._computed = { }
local computed = cssh._computed
local computedN = 1
if cssh._extra then
for k, v in ipairs( cssh._extra ) do
computed[ computedN ] = v
computedN = computedN + 1
end
end
if config.rsyncOps then
error('did you mean rsyncOpts with "t"?', 4)
end
if cssh.port then
computed[ computedN ] = '-p'
computed[ computedN + 1 ] = cssh.port
computedN = computedN + 2
end
-- appends a slash to the targetdir if missing
if string.sub(config.targetdir, -1) ~= '/' then
config.targetdir = config.targetdir .. '/'
end
end,
-- appends a slash to the targetdir if missing
if string.sub(config.targetdir, -1) ~= '/' then
config.targetdir = config.targetdir .. '/'
end
--
-- the rsync binary called
--
rsyncBinary = '/usr/bin/rsync',
end
--
-- calls rsync with this default short opts
--
rsyncOpts = '-lts',
--
-- allow processes
--
rsyncssh.maxProcesses = 1
--
-- allow processes
--
maxProcesses = 1,
--
-- The core should not split move events
--
rsyncssh.onMove = true
--
-- The core should not split move events
--
onMove = true,
--
-- default delay
--
delay = 15,
--
-- default delay
--
rsyncssh.delay = 15
--
-- by default do deletes
--
delete = true,
--
-- no default exit codes
--
rsyncssh.exitcodes = false
--
-- rsync exit codes
--
rsyncssh.rsyncExitCodes = default.rsyncExitCodes
--
-- ssh exit codes
--
rsyncssh.sshExitCodes = default.sshExitCodes
--
-- xargs calls configuration
--
-- xargs is used to delete multiple remote files; when ssh access is
-- available this is simpler than building filters for rsync.
--
rsyncssh.xargs = {
--
-- rsync exit codes
--
rsyncExitCodes = default.rsyncExitCodes,
-- the binary called (on target host)
binary = '/usr/bin/xargs',
--
-- ssh exit codes
--
sshExitCodes = default.sshExitCodes,
-- delimiter, uses null by default; you might want to override this for older
-- versions, for example with '\n'
delimiter = '\000',
--
-- xargs calls configuration
--
-- xargs is used to delete multiple remote files; when ssh access is
-- available this is simpler than building filters for rsync.
--
xargs = {
--
-- the binary called (on target host)
binary = '/usr/bin/xargs',
--
-- delimiter, uses null by default; you might want to override this for older
-- versions, for example with '\n'
delimiter = '\000',
--
-- extra parameters
_extra = { '-0', 'rm -rf' }
},
--
-- ssh calls configuration
--
-- ssh is used to move and delete files on the target host
--
ssh = {
--
-- the binary called
binary = '/usr/bin/ssh',
--
-- if set connect to this port
port = nil,
--
-- extra parameters
_extra = { }
}
-- extra parameters
_extra = { '-0', 'rm -rf' }
}
--
-- ssh calls configuration
--
-- ssh is used to move and delete files on the target host
--
rsyncssh.ssh = {
--
-- the binary called
--
binary = '/usr/bin/ssh',
--
-- if set connect to this port
--
port = nil,
--
-- extra parameters
--
_extra = { }
}

View File

@ -8,192 +8,349 @@
-- Authors: Axel Kittenberger <axkibe@gmail.com>
--============================================================================
if default then error('default already loaded'); end
if default then
error( 'default already loaded' )
end
default = {
-----
-- Default action calls user scripts on**** functions.
--
action = function(inlet)
-- in case of moves getEvent returns the origin and dest of the move
local event, event2 = inlet.getEvent()
local config = inlet.getConfig()
local func = config['on'.. event.etype]
if func then
func(event, event2)
end
-- if the function didn't change the wait status it's not interested
-- in this event -> drop it.
if event.status == 'wait' then
inlet.discardEvent(event)
end
end,
default = { }
-----
-- Default collector.
--
-- Called when collecting a finished child process
--
collect = function(agent, exitcode)
local config = agent.config
local rc
if config.exitcodes then
rc = config.exitcodes[exitcode]
elseif exitcode == 0 then
rc = 'ok'
--
-- Only these items are inherited from the default
-- table
--
default._merge = {
action = true,
checkgauge = true,
collect = true,
delay = true,
init = true,
maxDelays = true,
maxProcesses = true,
prepare = true,
}
--
-- used to ensure there aren't typos in the keys
--
default.checkgauge = {
action = true,
checkgauge = true,
collect = true,
delay = true,
exitcodes = true,
init = true,
maxDelays = true,
maxProcesses = true,
onCreate = true,
onModify = true,
onDelete = true,
onStartup = true,
onMove = true,
prepare = true,
source = true,
target = true,
}
--
-- On default action the user's on*** scripts are called.
--
default.action = function( inlet )
-- in case of moves getEvent returns the origin and dest of the move
local event, event2 = inlet.getEvent( )
local config = inlet.getConfig( )
local func = config[ 'on'.. event.etype ]
if type( func ) == 'function' then
func( event, event2 )
end
-- if the function didn't change the wait status it's not interested
-- in this event -> drop it.
if event.status == 'wait' then
inlet.discardEvent( event )
end
end
--
-- Default collector.
--
-- Called when collecting a finished child process
--
default.collect = function( agent, exitcode )
local config = agent.config
local rc
if config.exitcodes then
rc = config.exitcodes[exitcode]
elseif exitcode == 0 then
rc = 'ok'
else
rc = 'die'
end
-- TODO synchronize with similar code before
if not agent.isList and agent.etype == 'Init' then
if rc == 'ok' then
log('Normal', 'Startup of "',agent.source,'" finished.')
return 'ok'
elseif rc == 'again' then
if settings.insist then
log(
'Normal',
'Retrying startup of "',
agent.source,
'": ',
exitcode
)
return 'again'
else
log(
'Error',
'Temporary or permanent failure on startup of "',
agent.source,
'". Terminating since "insist" is not set.'
)
terminate( -1 )
end
elseif rc == 'die' then
log(
'Error',
'Failure on startup of "',
agent.source,
'".'
)
terminate( -1 )
else
log(
'Error',
'Unknown exitcode "',
exitcode,
'" on startup of "',
agent.source,
'".'
)
return 'die'
end
end
if agent.isList then
if rc == 'ok' then
log(
'Normal',
'Finished a list after exitcode: ',
exitcode
)
elseif rc == 'again' then
log(
'Normal',
'Retrying a list after exitcode = ',
exitcode
)
elseif rc == 'die' then
log(
'Error',
'Failure with a list width exitcode = ',
exitcode
)
else
log(
'Error',
'Unknown exitcode "',exitcode,'" with a list'
)
rc = 'die'
end
-- TODO synchronize with similar code before
if not agent.isList and agent.etype == 'Init' then
if rc == 'ok' then
log('Normal', 'Startup of "',agent.source,'" finished.')
return 'ok'
elseif rc == 'again' then
if settings.insist then
log(
'Normal',
'Retrying startup of "',
agent.source,
'": ',
exitcode
)
return 'again'
else
log(
'Error',
'Temporary or permanent failure on startup of "',
agent.source,
'". Terminating since "insist" is not set.'
)
terminate(-1) -- ERRNO
end
elseif rc == 'die' then
log(
'Error',
'Failure on startup of "',
agent.source,
'".'
)
terminate(-1) -- ERRNO
else
log(
'Error',
'Unknown exitcode "',
exitcode,
'" on startup of "',
agent.source,
'".'
)
return 'die'
end
end
if agent.isList then
if rc == 'ok' then
log('Normal', 'Finished a list = ',exitcode)
elseif rc == 'again' then
log('Normal', 'Retrying a list on exitcode = ',exitcode)
elseif rc == 'die' then
log('Error', 'Failure with a list on exitcode = ',exitcode)
else
log('Error', 'Unknown exitcode "',exitcode,'" with a list')
rc = 'die'
end
else
if rc == 'ok' then
log('Normal', 'Retrying ',agent.etype,' on ',agent.sourcePath,' = ',exitcode)
elseif rc == 'again' then
log('Normal', 'Finished ',agent.etype,' on ',agent.sourcePath,' = ',exitcode)
elseif rc == 'die' then
log('Error', 'Failure with ',agent.etype,' on ',agent.sourcePath,' = ',exitcode)
else
if rc == 'ok' then
log('Normal', 'Retrying ',agent.etype,' on ',agent.sourcePath,' = ',exitcode)
elseif rc == 'again' then
log('Normal', 'Finished ',agent.etype,' on ',agent.sourcePath,' = ',exitcode)
elseif rc == 'die' then
log('Error', 'Failure with ',agent.etype,' on ',agent.sourcePath,' = ',exitcode)
else
log('Normal', 'Unknown exitcode "',exitcode,'" with ', agent.etype,
' on ',agent.sourcePath,' = ',exitcode)
rc = 'die'
end
log('Normal', 'Unknown exitcode "',exitcode,'" with ', agent.etype,
' on ',agent.sourcePath,' = ',exitcode)
rc = 'die'
end
end
return rc
end,
return rc
end
--
-- Called on the Init event sent
-- on (re)initialization of Lsyncd for every sync
--
default.init = function(event)
local config = event.config
local inlet = event.inlet
-- user functions
-- calls a startup if given by user script.
if type(config.onStartup) == 'function' then
local startup = config.onStartup(event)
-- TODO honor some return codes of startup like "warmstart".
end
if event.status == 'wait' then
-- user script did not spawn anything
-- thus the blanket event is deleted again.
inlet.discardEvent(event)
end
end
--
-- The collapsor tries not to have more than these delays.
-- So its delay stack does not grow too large,
-- since calculation for stacking events is n*log(n) (or so)
--
default.maxDelays = 1000
--
-- The maximum number of processes Lsyncd will
-- simultaneously spawn for this sync.
--
default.maxProcesses = 1
--
-- Exitcodes of rsync and what to do.
-- TODO move to rsync
--
default.rsyncExitCodes = {
-----
-- called on (re)initialization of Lsyncd.
--
init = function(event)
local config = event.config
local inlet = event.inlet
-- user functions
-- calls a startup if given by user script.
if type(config.onStartup) == 'function' then
local startup = config.onStartup(event)
-- TODO honor some return codes of startup like "warmstart".
end
if event.status == 'wait' then
-- user script did not spawn anything
-- thus the blanket event is deleted again.
inlet.discardEvent(event)
end
end,
-----
-- The maximum number of processes Lsyncd will spawn simultaneously for
-- one sync.
-- if another config provides the same table
-- this will not be inherited (merged) into that one
--
maxProcesses = 1,
-----
-- Try not to have more than these delays.
-- not too large, since total calculation for stacking
-- events is n*log(n) or so..
-- if it does not, integer keys are to be copied
-- verbatim
--
maxDelays = 1000,
_merge = false,
_verbatim = true,
-----
-- a default configuration using /bin/cp|rm|mv.
--
direct = default_direct,
[ 0 ] = 'ok',
[ 1 ] = 'die',
[ 2 ] = 'die',
[ 3 ] = 'again',
[ 4 ] = 'die',
[ 5 ] = 'again',
[ 6 ] = 'again',
[ 10 ] = 'again',
[ 11 ] = 'again',
[ 12 ] = 'again',
[ 14 ] = 'again',
[ 20 ] = 'again',
[ 21 ] = 'again',
[ 22 ] = 'again',
------
-- Exitcodes of rsync and what to do.
--
rsyncExitCodes = {
[ 0] = 'ok',
[ 1] = 'die',
[ 2] = 'die',
[ 3] = 'again',
[ 4] = 'die',
[ 5] = 'again',
[ 6] = 'again',
[ 10] = 'again',
[ 11] = 'again',
[ 12] = 'again',
[ 14] = 'again',
[ 20] = 'again',
[ 21] = 'again',
[ 22] = 'again',
[ 23] = 'ok', -- partial transfers are ok, since Lsyncd has registered the event that
[ 24] = 'ok', -- caused the transfer to be partial and will recall rsync.
[ 25] = 'die',
[ 30] = 'again',
[ 35] = 'again',
[255] = 'again',
},
-- partial transfers are ok, since Lsyncd has registered the event that
-- caused the transfer to be partial and will recall rsync.
[ 23 ] = 'ok',
[ 24 ] = 'ok',
-----
-- Exitcodes of ssh and what to do.
--
sshExitCodes = {
[0] = 'ok',
[255] = 'again',
},
[ 25 ] = 'die',
[ 30 ] = 'again',
[ 35 ] = 'again',
-----
-- Minimum seconds between two writes of a status file.
--
statusInterval = 10,
[ 255 ] = 'again',
}
--
-- Exitcodes of ssh and what to do.
--
default.sshExitCodes = {
--
-- if another config provides the same table
-- this will not be inherited (merged) into that one
--
-- if it does not, integer keys are to be copied
-- verbatim
--
_merge = false,
_verbatim = true,
[ 0 ] = 'ok',
[ 255 ] = 'again',
}
--
-- Minimum seconds between two writes of a status file.
--
default.statusInterval = 10
--
-- checks all keys to be in the checkgauge
--
local function check(
config,
gauge,
subtable,
level
)
for k, v in pairs( config ) do
if not gauge[k] then
error(
'Parameter "'
.. subtable
.. k
.. '" unknown.'
.. ' (if this is not a typo add it to checkgauge)',
level
);
end
if type( gauge [ k ] ) == 'table' then
if type( v ) ~= 'table' then
error(
'Parameter "'
.. subtable
.. k
.. '" must be a table.',
level
)
end
check(
config[ k ],
gauge[ k ],
subtable .. k .. '.',
level + 1
)
end
end
end
default.prepare = function( config, level )
local gauge = config.checkgauge
if not gauge then
return
end
check( config, gauge, '', level + 1 )
end

565
inotify.c
View File

@ -1,14 +1,14 @@
/**
* inotify.c from Lsyncd - Live (Mirror) Syncing Demon
*
* License: GPLv2 (see COPYING) or any later version
*
* Authors: Axel Kittenberger <axkibe@gmail.com>
*
* -----------------------------------------------------------------------
*
* Event interface for Lsyncd to Linux´ inotify.
*/
/*
| inotify.c from Lsyncd - Live (Mirror) Syncing Demon
|
| License: GPLv2 (see COPYING) or any later version
|
| Authors: Axel Kittenberger <axkibe@gmail.com>
|
| -----------------------------------------------------------------------
|
| Event interface for Lsyncd to Linux´ inotify.
*/
#include "lsyncd.h"
@ -39,83 +39,130 @@
#include <lualib.h>
#include <lauxlib.h>
/*-----------------------------------------------------------------------------
* Event types.
*/
/*
| Event types.
*/
static const char * ATTRIB = "Attrib";
static const char * MODIFY = "Modify";
static const char * CREATE = "Create";
static const char * DELETE = "Delete";
static const char * MOVE = "Move";
/**
/*
* The inotify file descriptor.
*/
static int inotify_fd = -1;
/**
* Standard inotify events to listen to.
*/
/*
| Standard inotify events to listen to.
*/
static const uint32_t standard_event_mask =
IN_ATTRIB | IN_CLOSE_WRITE | IN_CREATE |
IN_DELETE | IN_DELETE_SELF | IN_MOVED_FROM |
IN_MOVED_TO | IN_DONT_FOLLOW | IN_ONLYDIR;
IN_ATTRIB |
IN_CLOSE_WRITE |
IN_CREATE |
IN_DELETE |
IN_DELETE_SELF |
IN_MOVED_FROM |
IN_MOVED_TO |
IN_DONT_FOLLOW |
IN_ONLYDIR;
/**
* Adds an inotify watch
*
* @param dir (Lua stack) path to directory
* @param inotifyMode (Lua stack) path to directory
* @return (Lua stack) numeric watch descriptor
*/
/*
| Adds an inotify watch
|
| param dir (Lua stack) path to directory
| param inotifyMode (Lua stack) which inotify event to react upon
| "CloseWrite", "CloseWrite or Modify"
|
| returns (Lua stack) numeric watch descriptor
*/
static int
l_addwatch(lua_State *L)
l_addwatch( lua_State *L )
{
const char *path = luaL_checkstring(L, 1);
const char *imode = luaL_checkstring(L, 2);
const char *path = luaL_checkstring( L, 1 );
const char *imode = luaL_checkstring( L, 2 );
uint32_t mask = standard_event_mask;
if (*imode) {
if (!strcmp(imode, "Modify")) {
// act on modify instead of closeWrite
mask |= IN_MODIFY;
// checks the desired inotify reaction mode
if (*imode)
{
if ( !strcmp( imode, "Modify" ) )
{
// acts on modify instead of closeWrite
mask |= IN_MODIFY;
mask &= ~IN_CLOSE_WRITE;
} else if (!strcmp(imode, "CloseWrite")) {
// default
} else if (!strcmp(imode, "CloseWrite or Modify")) {
}
else if ( !strcmp( imode, "CloseWrite" ) )
{
// thats default
}
else if ( !strcmp( imode, "CloseWrite or Modify" ) )
{
// acts on modify and closeWrite
mask |= IN_MODIFY;
} else if (!strcmp(imode, "CloseWrite after Modify")) {
}
else if ( ! strcmp( imode, "CloseWrite after Modify") )
{
// might be done in future
printlogf(L, "Error", "'CloseWrite after Modify' not implemented.");
exit(-1); // ERRNO
} else {
printlogf(L, "Error", "'%s' not a valid inotfiyMode.", imode);
exit(-1); // ERRNO
printlogf(
L, "Error",
"'CloseWrite after Modify' not implemented."
);
exit(-1);
}
else
{
printlogf(
L, "Error",
"'%s' not a valid inotfiyMode.",
imode
);
exit(-1);
}
}
// kernel call to create the inotify watch
int wd = inotify_add_watch( inotify_fd, path, mask );
int wd = inotify_add_watch(inotify_fd, path, mask);
if (wd < 0) {
if (errno == ENOSPC) {
printlogf(L, "Error", "Terminating since out of inotify watches.");
printlogf(L, "Error", "Consider increasing /proc/sys/fs/inotify/max_user_watches");
if (wd < 0)
{
if (errno == ENOSPC)
{
printlogf(
L, "Error",
"%s\n%s",
"Terminating since out of inotify watches.",
"Consider increasing /proc/sys/fs/inotify/max_user_watches"
);
exit(-1); // ERRNO.
}
printlogf(L, "Inotify", "addwatch(%s)->%d; err=%d:%s", path, wd, errno, strerror(errno));
} else {
printlogf(L, "Inotify", "addwatch(%s)->%d", path, wd);
printlogf(
L, "Inotify",
"addwatch( %s )-> % d; err= %d : %s",
path, wd, errno, strerror( errno )
);
}
lua_pushinteger(L, wd);
else
{
printlogf(L, "Inotify", "addwatch( %s )-> %d ", path, wd );
}
lua_pushinteger( L, wd );
return 1;
}
/**
* Removes an inotify watch
/*
* Removes an inotify watch.
*
* @param dir (Lua stack) numeric watch descriptor
* @return nil
* param dir (Lua stack) numeric watch descriptor
*
* return nil
*/
static int
l_rmwatch(lua_State *L)
@ -126,147 +173,214 @@ l_rmwatch(lua_State *L)
return 0;
}
/**
* Cores inotify functions.
*/
/*
| Lsyncd's core's inotify functions.
*/
static const luaL_Reg linotfylib[] = {
{"addwatch", l_addwatch },
{"rmwatch", l_rmwatch },
{NULL, NULL}
{ "addwatch", l_addwatch },
{ "rmwatch", l_rmwatch },
{ NULL, NULL}
};
/**
* Buffer for MOVE_FROM events.
* Lsyncd buffers MOVE_FROM events to check if
* they are followed by MOVE_TO events with identical cookie
* then they are condensed into one move event to be sent to the
* runner
*/
/*
| Buffer for MOVE_FROM events.
| Lsyncd buffers MOVE_FROM events to check if
| they are followed by MOVE_TO events with identical cookie
| then they are condensed into one move event to be sent to the
| runner
*/
static struct inotify_event * move_event_buf = NULL;
/**
* Memory allocated for move_event_buf
*/
/*
| Memory allocated for move_event_buf
*/
static size_t move_event_buf_size = 0;
/**
* true if the buffer is used.
*/
/*
| True if the buffer is used.
*/
static bool move_event = false;
/**
* Handles an inotify event.
*/
/*
| Handles an inotify event.
*/
static void
handle_event(lua_State *L,
struct inotify_event *event)
handle_event(
lua_State *L,
struct inotify_event *event
)
{
const char *event_type = NULL;
// used to execute two events in case of unmatched MOVE_FROM buffer
struct inotify_event *after_buf = NULL;
if (event && (IN_Q_OVERFLOW & event->mask)) {
/* and overflow happened, tells the runner */
load_runner_func(L, "overflow");
if (lua_pcall(L, 0, 0, -2)) {
exit(-1); // ERRNO
if( event && ( IN_Q_OVERFLOW & event->mask ) )
{
// and overflow happened, tells the runner
load_runner_func( L, "overflow" );
if( lua_pcall( L, 0, 0, -2 ) )
{
exit( -1 );
}
lua_pop(L, 1);
lua_pop( L, 1 );
hup = 1;
return;
}
// cancel on ignored or resetting
if (event && (IN_IGNORED & event->mask)) {
if( event && ( IN_IGNORED & event->mask ) )
{
return;
}
if (event && event->len == 0) {
if( event && event->len == 0 )
{
// sometimes inotify sends such strange events,
// (e.g. when touching a dir)
return;
}
if (event == NULL) {
if( event == NULL )
{
// a buffered MOVE_FROM is not followed by anything,
// thus it is unary
event = move_event_buf;
event_type = "Delete";
move_event = false;
} else if (move_event &&
( !(IN_MOVED_TO & event->mask) ||
event->cookie != move_event_buf->cookie) ) {
}
else if(
move_event &&
(
!( IN_MOVED_TO & event->mask ) ||
event->cookie != move_event_buf->cookie
)
)
{
// there is a MOVE_FROM event in the buffer and this is not the match
// continue in this function iteration to handle the buffer instead */
logstring("Inotify", "icore, changing unary MOVE_FROM into DELETE")
logstring(
"Inotify",
"icore, changing unary MOVE_FROM into DELETE"
)
after_buf = event;
event = move_event_buf;
event_type = "Delete";
move_event = false;
} else if ( move_event &&
(IN_MOVED_TO & event->mask) &&
event->cookie == move_event_buf->cookie ) {
}
else if(
move_event &&
(
IN_MOVED_TO & event->mask ) &&
event->cookie == move_event_buf->cookie
)
{
// this is indeed a matched move */
event_type = "Move";
move_event = false;
} else if (IN_MOVED_FROM & event->mask) {
}
else if( IN_MOVED_FROM & event->mask )
{
// just the MOVE_FROM, buffers this event, and wait if next event is
// a matching MOVED_TO or this was a unary move out of the watched
// tree.
size_t el = sizeof(struct inotify_event) + event->len;
if (move_event_buf_size < el) {
size_t el = sizeof( struct inotify_event ) + event->len;
if( move_event_buf_size < el )
{
move_event_buf_size = el;
move_event_buf = s_realloc(move_event_buf, el);
move_event_buf = s_realloc( move_event_buf, el );
}
memcpy(move_event_buf, event, el);
memcpy( move_event_buf, event, el );
move_event = true;
return;
} else if (IN_MOVED_TO & event->mask) {
}
else if( IN_MOVED_TO & event->mask )
{
// must be an unary move-to
event_type = CREATE;
} else if (IN_ATTRIB & event->mask) {
}
else if( IN_ATTRIB & event->mask )
{
// just attrib change
event_type = ATTRIB;
} else if ((IN_CLOSE_WRITE | IN_MODIFY) & event->mask) {
}
else if( ( IN_CLOSE_WRITE | IN_MODIFY) & event->mask )
{
// modify, or closed after written something
// the event type received depends settings.inotifyMode
event_type = MODIFY;
} else if (IN_CREATE & event->mask) {
}
else if( IN_CREATE & event->mask )
{
// a new file
event_type = CREATE;
} else if (IN_DELETE & event->mask) {
}
else if( IN_DELETE & event->mask )
{
// rm'ed
event_type = DELETE;
} else {
logstring("Inotify", "icore, skipped some inotify event.");
}
else
{
logstring(
"Inotify",
"skipped some inotify event."
);
return;
}
// and hands over to runner
load_runner_func(L, "inotifyEvent");
if (!event_type) {
logstring("Error", "Internal: unknown event in handle_event()");
exit(-1); // ERRNO
// hands the event over to the runner
load_runner_func( L, "inotifyEvent" );
if( !event_type )
{
logstring(
"Error",
"internal failure: unknown event in handle_event()"
);
exit( -1 );
}
lua_pushstring(L, event_type);
if (event_type != MOVE) {
lua_pushnumber(L, event->wd);
} else {
lua_pushnumber(L, move_event_buf->wd);
lua_pushstring( L, event_type );
if( event_type != MOVE )
{
lua_pushnumber( L, event->wd );
}
lua_pushboolean(L, (event->mask & IN_ISDIR) != 0);
l_now(L);
if (event_type == MOVE) {
lua_pushstring(L, move_event_buf->name);
lua_pushnumber(L, event->wd);
lua_pushstring(L, event->name);
} else {
lua_pushstring(L, event->name);
lua_pushnil(L);
lua_pushnil(L);
else
{
lua_pushnumber( L, move_event_buf->wd );
}
if (lua_pcall(L, 7, 0, -9)) {
exit(-1); // ERRNO
lua_pushboolean( L, ( event->mask & IN_ISDIR ) != 0 );
l_now( L );
if( event_type == MOVE )
{
lua_pushstring( L, move_event_buf->name );
lua_pushnumber( L, event->wd );
lua_pushstring( L, event->name );
}
lua_pop(L, 1);
else
{
lua_pushstring( L, event->name );
lua_pushnil( L );
lua_pushnil( L );
}
if( lua_pcall( L, 7, 0, -9 ) )
{
exit( -1 );
}
lua_pop( L, 1 );
// if there is a buffered event, executes it
if (after_buf) {
@ -275,31 +389,44 @@ handle_event(lua_State *L,
}
}
/**
* buffer to read inotify events into
*/
/*
| buffer to read inotify events into
*/
static size_t readbuf_size = 2048;
static char * readbuf = NULL;
/**
* Called by function pointer from when the inotify file descriptor
* became ready. Reads it contents and forward all received events
* to the runner.
*/
/*
| Called when the inotify file descriptor became ready.
| Reads it contents and forwards all received events
| to the runner.
*/
static void
inotify_ready(lua_State *L, struct observance *obs)
inotify_ready(
lua_State *L,
struct observance *obs
)
{
if (obs->fd != inotify_fd) {
logstring("Error", "Internal, inotify_fd != ob->fd");
exit(-1); // ERRNO
// sanity check
if( obs->fd != inotify_fd )
{
logstring(
"Error",
"internal failure, inotify_fd != ob->fd"
);
exit( -1 );
}
while(true) {
while( true )
{
ptrdiff_t len;
int err;
do {
len = read (inotify_fd, readbuf, readbuf_size);
len = read( inotify_fd, readbuf, readbuf_size );
err = errno;
if (len < 0 && err == EINVAL) {
if( len < 0 && err == EINVAL )
{
// kernel > 2.6.21 indicates in that way that
// the buffer was too small to fit a filename.
// double its size and try again. When using a lower
@ -309,90 +436,138 @@ inotify_ready(lua_State *L, struct observance *obs)
readbuf_size *= 2;
readbuf = s_realloc(readbuf, readbuf_size);
}
} while(len < 0 && err == EINVAL);
if (len == 0) {
// nothing more inotify
} while( len < 0 && err == EINVAL );
if( len == 0 )
{
// no more inotify events
break;
}
if (len < 0) {
if (len < 0)
{
if (err == EAGAIN) {
// nothing more inotify
break;
} else {
printlogf(L, "Error", "Read fail on inotify");
exit(-1); // ERRNO
}
else
{
printlogf(
L, "Error",
"Read fail on inotify"
);
exit( -1 );
}
}
{
int i = 0;
while (i < len && !hup && !term) {
while( i < len && !hup && !term )
{
struct inotify_event *event =
(struct inotify_event *) &readbuf[i];
handle_event(L, event);
i += sizeof(struct inotify_event) + event->len;
( struct inotify_event * )
(readbuf + i);
handle_event( L, event );
i += sizeof( struct inotify_event ) + event->len;
}
}
if (!move_event) {
if( !move_event )
{
// give it a pause if not endangering splitting a move
break;
}
}
// checks if there is an unary MOVE_FROM left in the buffer
if (move_event) {
logstring("Inotify", "icore, handling unary move from.");
handle_event(L, NULL);
if( move_event )
{
logstring(
"Inotify",
"handling unary move from."
);
handle_event( L, NULL );
}
}
/**
* registers inotify functions.
*/
/*
| Registers the inotify functions.
*/
extern void
register_inotify(lua_State *L)
register_inotify( lua_State *L )
{
luaL_register(L, LSYNCD_INOTIFYLIBNAME, linotfylib);
luaL_register( L, LSYNCD_INOTIFYLIBNAME, linotfylib );
}
/**
* closes inotify
*/
/*
| Cleans up the inotify handling.
*/
static void
inotify_tidy(struct observance *obs)
inotify_tidy( struct observance *obs )
{
if (obs->fd != inotify_fd) {
logstring("Error", "Internal, inotify_fd != ob->fd");
exit(-1); // ERRNO
if( obs->fd != inotify_fd )
{
logstring(
"Error",
"internal failure: inotify_fd != ob->fd"
);
exit( -1 );
}
close(inotify_fd);
free(readbuf);
close( inotify_fd );
free( readbuf );
readbuf = NULL;
}
/**
* opens and initalizes inotify.
*/
/*
| Initalizes inotify handling
*/
extern void
open_inotify(lua_State *L)
open_inotify( lua_State *L )
{
if (readbuf) {
logstring("Error",
"internal fail, inotify readbuf!=NULL in open_inotify()")
exit(-1); // ERRNO
if( readbuf )
{
logstring(
"Error",
"internal failure, inotify readbuf != NULL in open_inotify()"
)
exit(-1);
}
readbuf = s_malloc(readbuf_size);
inotify_fd = inotify_init();
if (inotify_fd < 0) {
printlogf(L, "Error",
"Cannot access inotify monitor! (%d:%s)",
errno, strerror(errno));
exit(-1); // ERRNO
readbuf = s_malloc( readbuf_size );
inotify_fd = inotify_init( );
if( inotify_fd < 0 )
{
printlogf(
L,
"Error",
"Cannot access inotify monitor! ( %d : %s )",
errno, strerror(errno)
);
exit( -1 );
}
printlogf(L, "Inotify", "inotify fd = %d", inotify_fd);
close_exec_fd(inotify_fd);
non_block_fd(inotify_fd);
observe_fd(inotify_fd, inotify_ready, NULL, inotify_tidy, NULL);
printlogf(
L, "Inotify",
"inotify fd = %d",
inotify_fd
);
close_exec_fd( inotify_fd );
non_block_fd( inotify_fd );
observe_fd(
inotify_fd,
inotify_ready,
NULL,
inotify_tidy,
NULL
);
}

2999
lsyncd.c

File diff suppressed because it is too large Load Diff

3246
lsyncd.lua

File diff suppressed because it is too large Load Diff

15
m4/ax_subst_l.m4 Normal file
View File

@ -0,0 +1,15 @@
# ax_subst_l.m4 - Substitute every var in the given comma separated list -*-Autoconf-*-
#
# Copyright (C) 2012 Dennis Schridde
#
# This file is free software; the authors give
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
# serial 1
# Substitute every var in the given comma separated list
AC_DEFUN([AX_SUBST_L],[
m4_foreach([__var__], [$@], [AC_SUBST(__var__)])
])

View File

@ -14,7 +14,8 @@ local tdir, srcdir, trgdir = mktemps()
-- makes some startup data
churn(srcdir, 10)
local logs = {'-log', 'Exec', '-log', 'Delay' }
local logs = { }
--local logs = {'-log', 'Exec', '-log', 'Delay' }
local pid = spawn(
'./lsyncd',
'-nodaemon',

View File

@ -1,42 +1,50 @@
#!/usr/bin/lua
-- a heavy duty test.
-- makes thousands of random changes to the source tree
require("posix")
dofile("tests/testlib.lua")
require( "posix" )
dofile( "tests/testlib.lua" )
cwriteln("****************************************************************")
cwriteln(" Testing default.rsync with random data activity")
cwriteln("****************************************************************")
cwriteln( "****************************************************************" )
cwriteln( " Testing default.rsync with random data activity" )
cwriteln( "****************************************************************" )
local tdir, srcdir, trgdir = mktemps()
local tdir, srcdir, trgdir = mktemps( )
-- makes some startup data
churn(srcdir, 100)
churn( srcdir, 100 )
local logs = {}
-- logs = {"-log", "Delay", "-log", "Fsevents" }
local pid = spawn("./lsyncd", "-nodaemon", "-delay", "5",
"-rsync", srcdir, trgdir, unpack(logs))
local logs = { }
-- logs = { "-log", "Delay", "-log", "Fsevents" }
local pid = spawn(
"./lsyncd",
"-nodaemon",
"-delay", "5",
"-rsync", srcdir, trgdir,
unpack( logs )
)
cwriteln("waiting for Lsyncd to startup")
posix.sleep(1)
cwriteln( "waiting for Lsyncd to startup" )
churn(srcdir, 500)
posix.sleep( 1 )
cwriteln("waiting for Lsyncd to finish its jobs.")
posix.sleep(10)
churn( srcdir, 500 )
cwriteln( "waiting for Lsyncd to finish its jobs." )
posix.sleep( 10 )
cwriteln( "killing the Lsyncd daemon" )
cwriteln("killing the Lsyncd daemon")
posix.kill(pid)
local _, exitmsg, lexitcode = posix.wait(lpid)
local _, exitmsg, lexitcode = posix.wait( lpid )
cwriteln("Exitcode of Lsyncd = ", exitmsg, " ", lexitcode)
exitcode = os.execute("diff -r "..srcdir.." "..trgdir)
cwriteln("Exitcode of diff = '", exitcode, "'")
exitcode = os.execute( "diff -r "..srcdir.." "..trgdir )
cwriteln( "Exitcode of diff = '", exitcode, "'" )
if exitcode ~= 0 then
os.exit(1)
else
os.exit(0)
end

View File

@ -37,6 +37,7 @@ cwriteln("Exitcode of Lsyncd = ", exitmsg, " ", lexitcode)
exitcode = os.execute("diff -r "..srcdir.." "..trgdir)
cwriteln("Exitcode of diff = '", exitcode, "'")
if exitcode ~= 0 then
os.exit(1)
else

View File

@ -73,17 +73,19 @@ end
cwriteln("testing startup excludes");
writefiles();
cwriteln("starting Lsyncd");
local pid = spawn("./lsyncd", cfgfile);
local pid = spawn("./lsyncd", cfgfile, '-log', 'all');
cwriteln("waiting for Lsyncd to start");
posix.sleep(3)
cwriteln("testing excludes after startup");
testfiles();
cwriteln("ok, removing sources");
if srcdir:sub(1,4) ~= "/tmp" then
-- just to make sure before rm -rf
cwriteln("exist before drama, srcdir is '", srcdir, "'");
os.exit(1);
end
os.execute("rm -rf "..srcdir.."/*");
cwriteln("waiting for Lsyncd to remove destination");
posix.sleep(5);
@ -92,20 +94,22 @@ if os.execute("diff -urN "..srcdir.." "..trgdir) ~= 0 then
os.exit(1);
end
cwriteln("writing files after startup");
writefiles();
cwriteln("waiting for Lsyncd to transmit changes");
posix.sleep(5);
testfiles();
cwriteln( "writing files after startup" );
writefiles( );
cwriteln( "waiting for Lsyncd to transmit changes" );
posix.sleep( 5 );
testfiles( );
cwriteln("killing started Lsyncd");
posix.kill(pid);
local _, exitmsg, lexitcode = posix.wait(lpid);
cwriteln("Exitcode of Lsyncd = ", exitmsg, " ", lexitcode);
posix.sleep(1);
if lexitcode == 0 then
cwriteln("OK");
cwriteln( "killing started Lsyncd" );
posix.kill( pid );
local _, exitmsg, lexitcode = posix.wait( lpid );
cwriteln( "Exitcode of Lsyncd = ", exitmsg, " ", lexitcode );
if lexitcode == 143 then
cwriteln( "OK" );
os.exit( 0 );
else
os.exit( 1 );
end
os.exit(lexitcode);
-- TODO remove temp

View File

@ -108,9 +108,12 @@ posix.kill(pid);
local _, exitmsg, lexitcode = posix.wait(lpid);
cwriteln('Exitcode of Lsyncd = ', exitmsg, ' ', lexitcode);
posix.sleep(1);
if lexitcode == 0 then
cwriteln('OK');
if lexitcode == 143 then
cwriteln( 'OK' );
os.exit( 0 );
else
os.exit( 1 );
end
os.exit(lexitcode);
-- TODO remove temp

View File

@ -47,30 +47,33 @@ sync {ccircuit, source ="]]..srcdir..[[", target = "]]..trgdir..[["}
-- test if the filename exists, fails if this is different to expect
local function testfile(filename)
local function testfile(filename)
local stat, err = posix.stat(filename)
if not stat then
cwriteln("failure: ",filename," missing");
os.exit(1);
cwriteln("failure: ",filename," missing")
os.exit(1)
end
end
cwriteln("starting Lsyncd");
local pid = spawn("./lsyncd", cfgfile, unpack(logs));
cwriteln("waiting for Lsyncd to do a few cycles");
cwriteln("starting Lsyncd")
local pid = spawn("./lsyncd", cfgfile, unpack(logs))
cwriteln("waiting for Lsyncd to do a few cycles")
posix.sleep(30)
cwriteln("look if every circle got a chance to run");
cwriteln("look if every circle got a chance to run")
testfile(srcdir.."a")
testfile(srcdir.."b")
testfile(srcdir.."c")
cwriteln("killing started Lsyncd");
posix.kill(pid);
local _, exitmsg, lexitcode = posix.wait(lpid);
cwriteln("Exitcode of Lsyncd = ", exitmsg, " ", lexitcode);
cwriteln("killing started Lsyncd")
posix.kill(pid)
local _, exitmsg, lexitcode = posix.wait(lpid)
cwriteln("Exitcode of Lsyncd = ", exitmsg, " ", lexitcode)
posix.sleep(1);
if lexitcode == 0 then
cwriteln("OK");
if lexitcode == 143 then
cwriteln("OK")
os.exit( 0 )
else
os.exit( 1 )
end
os.exit(lexitcode);
-- TODO remove temp