Add a global option --retry-lock
Fixes restic#719

If the option is passed, restic will wait the specified duration of time and retry locking the repo every 10 seconds (or more often if the total timeout is relatively small).

- Play nice with json output
- Reduce wait time in lock tests
- Rework timeout last attempt
- Reduce test wait time to 0.1s
- Use exponential back off for the retry lock
- Don't pass gopts to lockRepo functions
- Use global variable for retry sleep setup
- Exit retry lock on cancel
- Better wording for flag help
- Reorder debug statement
- Refactor tests
- Lower max sleep time to 1m
- Test that we cancel/timeout in time
- Use non blocking sleep function
- Refactor into minDuration func

Co-authored-by: Julian Brost <julian@0x4a42.net>
parent 71c9516b26
commit ea59896bd6
changelog/unreleased/issue-719 (new file, +8 lines)
@@ -0,0 +1,8 @@
+Enhancement: Add --retry-lock option
+
+This option allows to specify a duration for which restic will wait if there
+already exists a conflicting lock within the repository.
+
+https://github.com/restic/restic/issues/719
+https://github.com/restic/restic/pull/2214
+https://github.com/restic/restic/pull/4107
@@ -506,7 +506,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
 if !gopts.JSON {
 progressPrinter.V("lock repository")
 }
-lock, ctx, err := lockRepo(ctx, repo)
+lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -45,7 +45,7 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -211,7 +211,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
 if !gopts.NoLock {
 Verbosef("create exclusive lock for repository\n")
 var lock *restic.Lock
-lock, ctx, err = lockRepoExclusive(ctx, repo)
+lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -74,14 +74,14 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []

 if !gopts.NoLock {
 var srcLock *restic.Lock
-srcLock, ctx, err = lockRepo(ctx, srcRepo)
+srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(srcLock)
 if err != nil {
 return err
 }
 }

-dstLock, ctx, err := lockRepo(ctx, dstRepo)
+dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(dstLock)
 if err != nil {
 return err
@@ -156,7 +156,7 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -462,7 +462,7 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, args []string) er

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -334,7 +334,7 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -132,7 +132,7 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -575,7 +575,7 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -116,7 +116,7 @@ func runForget(ctx context.Context, opts ForgetOptions, gopts GlobalOptions, arg

 if !opts.DryRun || !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepoExclusive(ctx, repo)
+lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -212,7 +212,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {

 switch args[0] {
 case "list":
-lock, ctx, err := lockRepo(ctx, repo)
+lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -220,7 +220,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {

 return listKeys(ctx, repo, gopts)
 case "add":
-lock, ctx, err := lockRepo(ctx, repo)
+lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -228,7 +228,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {

 return addKey(ctx, repo, gopts)
 case "remove":
-lock, ctx, err := lockRepoExclusive(ctx, repo)
+lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -241,7 +241,7 @@ func runKey(ctx context.Context, gopts GlobalOptions, args []string) error {

 return deleteKey(ctx, repo, id)
 case "passwd":
-lock, ctx, err := lockRepoExclusive(ctx, repo)
+lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -31,19 +31,19 @@ func init() {
 cmdRoot.AddCommand(cmdList)
 }

-func runList(ctx context.Context, cmd *cobra.Command, opts GlobalOptions, args []string) error {
+func runList(ctx context.Context, cmd *cobra.Command, gopts GlobalOptions, args []string) error {
 if len(args) != 1 {
 return errors.Fatal("type not specified, usage: " + cmd.Use)
 }

-repo, err := OpenRepository(ctx, opts)
+repo, err := OpenRepository(ctx, gopts)
 if err != nil {
 return err
 }

-if !opts.NoLock && args[0] != "locks" {
+if !gopts.NoLock && args[0] != "locks" {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -122,7 +122,7 @@ func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, a
 return err
 }

-lock, ctx, err := lockRepoExclusive(ctx, repo)
+lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -123,7 +123,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -167,7 +167,7 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error
 opts.unsafeRecovery = true
 }

-lock, ctx, err := lockRepoExclusive(ctx, repo)
+lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -49,7 +49,7 @@ func runRebuildIndex(ctx context.Context, opts RebuildIndexOptions, gopts Global
 return err
 }

-lock, ctx, err := lockRepoExclusive(ctx, repo)
+lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -46,7 +46,7 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
 return err
 }

-lock, ctx, err := lockRepo(ctx, repo)
+lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -154,7 +154,7 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -164,9 +164,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
 var err error
 if opts.Forget {
 Verbosef("create exclusive lock for repository\n")
-lock, ctx, err = lockRepoExclusive(ctx, repo)
+lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 } else {
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 }
 defer unlockRepo(lock)
 if err != nil {
@@ -65,7 +65,7 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -83,7 +83,7 @@ func runStats(ctx context.Context, gopts GlobalOptions, args []string) error {

 if !gopts.NoLock {
 var lock *restic.Lock
-lock, ctx, err = lockRepo(ctx, repo)
+lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -111,7 +111,7 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
 if !gopts.NoLock {
 Verbosef("create exclusive lock for repository\n")
 var lock *restic.Lock
-lock, ctx, err = lockRepoExclusive(ctx, repo)
+lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
 defer unlockRepo(lock)
 if err != nil {
 return err
@@ -59,6 +59,7 @@ type GlobalOptions struct {
 Quiet bool
 Verbose int
 NoLock bool
+RetryLock time.Duration
 JSON bool
 CacheDir string
 NoCache bool
@@ -115,6 +116,7 @@ func init() {
 // use empty paremeter name as `-v, --verbose n` instead of the correct `--verbose=n` is confusing
 f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify multiple times or a level using --verbose=n``, max level/times is 2)")
 f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repository, this allows some operations on read-only repositories")
+f.DurationVar(&globalOptions.RetryLock, "retry-lock", 0, "retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)")
 f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
 f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache `directory`. (default: use system default cache directory)")
 f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache")
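The new flag is registered with pflag's DurationVar, which accepts the same duration syntax as Go's time.ParseDuration. A small illustrative check (not part of the commit) showing which values the flag would accept:

package main

import (
	"fmt"
	"time"
)

func main() {
	// --retry-lock takes Go duration syntax, e.g. "5m" or "2h";
	// the default of 0 means no retries.
	for _, s := range []string{"5m", "2h", "0"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Println("invalid duration:", s, err)
			continue
		}
		fmt.Printf("--retry-lock %s parses to %v\n", s, d)
	}
}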
@@ -21,17 +21,29 @@ var globalLocks struct {
 sync.Once
 }

-func lockRepo(ctx context.Context, repo restic.Repository) (*restic.Lock, context.Context, error) {
-return lockRepository(ctx, repo, false)
+func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
+return lockRepository(ctx, repo, false, retryLock, json)
 }

-func lockRepoExclusive(ctx context.Context, repo restic.Repository) (*restic.Lock, context.Context, error) {
-return lockRepository(ctx, repo, true)
+func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
+return lockRepository(ctx, repo, true, retryLock, json)
 }

+var (
+retrySleepStart = 5 * time.Second
+retrySleepMax = 60 * time.Second
+)
+
+func minDuration(a, b time.Duration) time.Duration {
+if a <= b {
+return a
+}
+return b
+}
+
 // lockRepository wraps the ctx such that it is cancelled when the repository is unlocked
 // cancelling the original context also stops the lock refresh
-func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool) (*restic.Lock, context.Context, error) {
+func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
 // make sure that a repository is unlocked properly and after cancel() was
 // called by the cleanup handler in global.go
 globalLocks.Do(func() {
@@ -43,7 +55,44 @@ func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool)
 lockFn = restic.NewExclusiveLock
 }

-lock, err := lockFn(ctx, repo)
+var lock *restic.Lock
+var err error
+
+retrySleep := minDuration(retrySleepStart, retryLock)
+retryMessagePrinted := false
+retryTimeout := time.After(retryLock)
+
+retryLoop:
+for {
+lock, err = lockFn(ctx, repo)
+if err != nil && restic.IsAlreadyLocked(err) {
+
+if !retryMessagePrinted {
+if !json {
+Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock)
+}
+retryMessagePrinted = true
+}
+
+debug.Log("repo already locked, retrying in %v", retrySleep)
+retrySleepCh := time.After(retrySleep)
+
+select {
+case <-ctx.Done():
+return nil, ctx, ctx.Err()
+case <-retryTimeout:
+debug.Log("repo already locked, timeout expired")
+// Last lock attempt
+lock, err = lockFn(ctx, repo)
+break retryLoop
+case <-retrySleepCh:
+retrySleep = minDuration(retrySleep*2, retrySleepMax)
+}
+} else {
+// anything else, either a successful lock or another error
+break retryLoop
+}
+}
 if restic.IsInvalidLock(err) {
 return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err)
 }
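For illustration only (not part of the commit): a minimal standalone sketch of the sleep schedule the retry loop above produces, using the same retrySleepStart/retrySleepMax values. It only prints how the per-attempt sleep grows and is capped; the real loop additionally performs the lock attempts and reacts to context cancellation:

package main

import (
	"fmt"
	"time"
)

// minDuration mirrors the helper introduced in this commit.
func minDuration(a, b time.Duration) time.Duration {
	if a <= b {
		return a
	}
	return b
}

func main() {
	const (
		retrySleepStart = 5 * time.Second
		retrySleepMax   = 60 * time.Second
	)
	retryLock := 10 * time.Minute // example value a user might pass via --retry-lock

	// The sleep starts at min(5s, retryLock), doubles after each failed
	// attempt and is capped at 60s; the total wait is bounded by retryLock.
	sleep := minDuration(retrySleepStart, retryLock)
	var waited time.Duration
	for waited < retryLock {
		fmt.Printf("waited %v, next sleep %v\n", waited, sleep)
		waited += sleep
		sleep = minDuration(sleep*2, retrySleepMax)
	}
}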
@@ -3,11 +3,13 @@ package main
 import (
 "context"
 "fmt"
+"strings"
 "testing"
 "time"

 "github.com/restic/restic/internal/repository"
 "github.com/restic/restic/internal/restic"
+"github.com/restic/restic/internal/test"
 rtest "github.com/restic/restic/internal/test"
 )

@@ -23,8 +25,8 @@ func openTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository,
 return repo, cleanup, env
 }

-func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository) (*restic.Lock, context.Context) {
-lock, wrappedCtx, err := lockRepo(ctx, repo)
+func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) {
+lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON)
 rtest.OK(t, err)
 rtest.OK(t, wrappedCtx.Err())
 if lock.Stale() {
@@ -34,10 +36,10 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository)
 }

 func TestLock(t *testing.T) {
-repo, cleanup, _ := openTestRepo(t, nil)
+repo, cleanup, env := openTestRepo(t, nil)
 defer cleanup()

-lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
 unlockRepo(lock)
 if wrappedCtx.Err() == nil {
 t.Fatal("unlock did not cancel context")
@@ -45,12 +47,12 @@ func TestLock(t *testing.T) {
 }

 func TestLockCancel(t *testing.T) {
-repo, cleanup, _ := openTestRepo(t, nil)
+repo, cleanup, env := openTestRepo(t, nil)
 defer cleanup()

 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
-lock, wrappedCtx := checkedLockRepo(ctx, t, repo)
+lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env)
 cancel()
 if wrappedCtx.Err() == nil {
 t.Fatal("canceled parent context did not cancel context")
@@ -61,10 +63,10 @@ func TestLockCancel(t *testing.T) {
 }

 func TestLockUnlockAll(t *testing.T) {
-repo, cleanup, _ := openTestRepo(t, nil)
+repo, cleanup, env := openTestRepo(t, nil)
 defer cleanup()

-lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)
 _, err := unlockAll(0)
 rtest.OK(t, err)
 if wrappedCtx.Err() == nil {
@@ -81,10 +83,10 @@ func TestLockConflict(t *testing.T) {
 repo2, err := OpenRepository(context.TODO(), env.gopts)
 rtest.OK(t, err)

-lock, _, err := lockRepoExclusive(context.Background(), repo)
+lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON)
 rtest.OK(t, err)
 defer unlockRepo(lock)
-_, _, err = lockRepo(context.Background(), repo2)
+_, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON)
 if err == nil {
 t.Fatal("second lock should have failed")
 }
@@ -104,7 +106,7 @@ func (b *writeOnceBackend) Save(ctx context.Context, h restic.Handle, rd restic.
 }

 func TestLockFailedRefresh(t *testing.T) {
-repo, cleanup, _ := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+repo, cleanup, env := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
 return &writeOnceBackend{Backend: r}, nil
 })
 defer cleanup()
@@ -117,7 +119,7 @@ func TestLockFailedRefresh(t *testing.T) {
 refreshInterval, refreshabilityTimeout = ri, rt
 }()

-lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)

 select {
 case <-wrappedCtx.Done():
@@ -140,7 +142,7 @@ func (b *loggingBackend) Save(ctx context.Context, h restic.Handle, rd restic.Re
 }

 func TestLockSuccessfulRefresh(t *testing.T) {
-repo, cleanup, _ := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
+repo, cleanup, env := openTestRepo(t, func(r restic.Backend) (restic.Backend, error) {
 return &loggingBackend{
 Backend: r,
 t: t,
@@ -157,7 +159,7 @@ func TestLockSuccessfulRefresh(t *testing.T) {
 refreshInterval, refreshabilityTimeout = ri, rt
 }()

-lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo)
+lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env)

 select {
 case <-wrappedCtx.Done():
@@ -168,3 +170,74 @@ func TestLockSuccessfulRefresh(t *testing.T) {
 // unlockRepo should not crash
 unlockRepo(lock)
 }
+
+func TestLockWaitTimeout(t *testing.T) {
+repo, cleanup, env := openTestRepo(t, nil)
+defer cleanup()
+
+elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+test.OK(t, err)
+
+retryLock := 100 * time.Millisecond
+
+start := time.Now()
+lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
+duration := time.Since(start)
+
+test.Assert(t, err != nil,
+"create normal lock with exclusively locked repo didn't return an error")
+test.Assert(t, strings.Contains(err.Error(), "repository is already locked exclusively"),
+"create normal lock with exclusively locked repo didn't return the correct error")
+test.Assert(t, retryLock <= duration && duration < retryLock+5*time.Millisecond,
+"create normal lock with exclusively locked repo didn't wait for the specified timeout")
+
+test.OK(t, lock.Unlock())
+test.OK(t, elock.Unlock())
+}
+func TestLockWaitCancel(t *testing.T) {
+repo, cleanup, env := openTestRepo(t, nil)
+defer cleanup()
+
+elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+test.OK(t, err)
+
+retryLock := 100 * time.Millisecond
+cancelAfter := 40 * time.Millisecond
+
+ctx, cancel := context.WithCancel(context.TODO())
+time.AfterFunc(cancelAfter, cancel)
+
+start := time.Now()
+lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON)
+duration := time.Since(start)
+
+test.Assert(t, err != nil,
+"create normal lock with exclusively locked repo didn't return an error")
+test.Assert(t, strings.Contains(err.Error(), "context canceled"),
+"create normal lock with exclusively locked repo didn't return the correct error")
+test.Assert(t, cancelAfter <= duration && duration < cancelAfter+5*time.Millisecond,
+"create normal lock with exclusively locked repo didn't return in time")
+
+test.OK(t, lock.Unlock())
+test.OK(t, elock.Unlock())
+}
+
+func TestLockWaitSuccess(t *testing.T) {
+repo, cleanup, env := openTestRepo(t, nil)
+defer cleanup()
+
+elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON)
+test.OK(t, err)
+
+retryLock := 100 * time.Millisecond
+unlockAfter := 40 * time.Millisecond
+
+time.AfterFunc(unlockAfter, func() {
+test.OK(t, elock.Unlock())
+})
+
+lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON)
+test.OK(t, err)
+
+test.OK(t, lock.Unlock())
+}
@@ -603,7 +603,10 @@ that the process is dead and considers the lock to be stale.
 When a new lock is to be created and no other conflicting locks are
 detected, restic creates a new lock, waits, and checks if other locks
 appeared in the repository. Depending on the type of the other locks and
-the lock to be created, restic either continues or fails.
+the lock to be created, restic either continues or fails. If the
+``--retry-lock`` option is specified, restic will retry
+creating the lock periodically until it succeeds or the specified
+timeout expires.

 Read and Write Ordering
 =======================
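For example (an illustrative invocation, not part of the documentation text): running ``restic prune --retry-lock 5m`` keeps retrying to acquire the exclusive lock for up to five minutes before giving up, while the default of 0 keeps the previous fail-fast behaviour.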
@@ -66,6 +66,7 @@ Usage help is available:
 -q, --quiet              do not output comprehensive progress report
 -r, --repo repository    repository to backup to or restore from (default: $RESTIC_REPOSITORY)
 --repository-file file   file to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
+--retry-lock duration    retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
 --tls-client-cert file   path to a file containing PEM encoded TLS client certificate and private key
 -v, --verbose            be verbose (specify multiple times or a level using --verbose=n, max level/times is 2)

@@ -141,6 +142,7 @@ command:
 -q, --quiet              do not output comprehensive progress report
 -r, --repo repository    repository to backup to or restore from (default: $RESTIC_REPOSITORY)
 --repository-file file   file to read the repository location from (default: $RESTIC_REPOSITORY_FILE)
+--retry-lock duration    retry to lock the repository if it is already locked, takes a value like 5m or 2h (default: no retries)
 --tls-client-cert file   path to a file containing PEM encoded TLS client certificate and private key
 -v, --verbose            be verbose (specify multiple times or a level using --verbose=n, max level/times is 2)
